text
stringlengths
0
1.05M
meta
dict
"""An abstract for Widget class"""
import abc


class Widget(metaclass=abc.ABCMeta):
    """Widget abstract.

    Base class for drawable LCD widgets.  Subclasses must implement
    :meth:`draw_widget`, :meth:`draw_values` and :meth:`change_values`.

    Note: the original placed the "Widget abstract" docstring *after* the
    assignment inside ``__init__`` where it was a no-op string; it belongs
    on the class.
    """

    def __init__(self):
        # index of the rotary-encoder slot this widget listens on
        self.encoder_idx = 0

    @abc.abstractmethod
    def draw_widget(self, lcd, coords):
        """Draw a tile at ``coords`` on ``lcd``."""
        pass

    @abc.abstractmethod
    def draw_values(self, lcd, coords, force=False):
        """Draw changed values.  With ``force`` must redraw all values."""
        pass

    @abc.abstractmethod
    def change_values(self, values):
        """Change a value; ``values`` is a dict ``[name] = value``."""
        pass

    def draw_number(self, lcd, pos_x, pos_y, font, new, old,
                    spacing=30, force=False):
        """Draw a number digit-by-digit, repainting only changed digits.

        :param lcd: display driver exposing ``transparency_color`` and
            ``draw_image(x, y, image)``
        :param pos_x: x coordinate of the first digit
        :param pos_y: y coordinate of the digit row
        :param font: glyph provider exposing ``get_transparency()`` and
            ``get(digit)``
        :param new: new value as an indexable sequence of digit characters
        :param old: previous value (same layout) or ``None`` to draw all
        :param spacing: horizontal distance between consecutive digits
        :param force: when True repaint every digit regardless of ``old``
        """
        lcd.transparency_color = font.get_transparency()
        for idx, digit in enumerate(new):
            # repaint a digit only when it differs from the previous frame
            if force or old is None or digit != old[idx]:
                lcd.draw_image(
                    pos_x + (idx * spacing), pos_y,
                    font.get(int(digit))
                )


class Clickable(metaclass=abc.ABCMeta):
    """Interface for clickable widget"""
    @abc.abstractmethod
    def action(self, name, index, pos_x, pos_y):
        """action for touch"""
        return
{ "repo_name": "bkosciow/doton", "path": "view/widget.py", "copies": "1", "size": "1209", "license": "mit", "hash": 4243051097942344000, "line_mean": 27.1162790698, "line_max": 86, "alpha_frac": 0.5574855252, "autogenerated": false, "ratio": 3.887459807073955, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4944945332273955, "avg_score": null, "num_lines": null }
"""An abstract interface for a terminal that allows for text output
and simple input.

The default implementation assumes a simple text terminal. Extensions
may provide implementations to be used in more advanced settings,
like graphical user interfaces.
"""


class Terminal:
    """A simple text-based terminal."""

    class Bformat:  # pylint: disable=too-few-public-methods
        """Escape codes for color output."""
        HEADER = '\033[95m'
        BLUE = '\033[94m'
        GREEN = '\033[92m'
        YELLOW = '\033[93m'
        RED = '\033[91m'
        BOLD = '\033[1m'
        UNDERLINE = '\033[4m'
        ENDC = '\033[0m'

    class Bstatus:  # pylint: disable=too-few-public-methods
        """Escape codes for status output (OK/WARNING/FAIL)."""
        HEADER = '\033[95m'
        OK = '\033[92m'       # GREEN
        OK2 = '\033[94m'      # BLUE
        WARNING = '\033[93m'  # YELLOW
        FAIL = '\033[91m'     # RED

    class Markup:  # pylint: disable=too-few-public-methods
        """Symbols for semantic markup."""
        EMPHASIZE = '\033[94m'  # BLUE

    def form(self, text: str, color: str = '') -> str:
        """Format a text in a given color (always terminated by ENDC)."""
        return color + text + self.Bformat.ENDC

    def status(self, text: str, status: str = None) -> str:
        """Format a text with a given status code.

        A falsy ``status`` leaves the text uncolored.  (The original
        ``status and getattr(...)`` expression forwarded ``None`` to
        :meth:`form`, crashing on ``None + text``.)
        """
        color = getattr(self.Bstatus, status.upper()) if status else ''
        return self.form(text, color)

    def markup(self, text: str, markup: str = None) -> str:
        """Format a text with a given Markup code.

        A falsy ``markup`` leaves the text uncolored (same fix as
        :meth:`status`).
        """
        color = getattr(self.Markup, markup.upper()) if markup else ''
        return self.form(text, color)

    def output(self, message: str) -> None:
        """Write ``message`` to standard output."""
        print(message)


DEFAULT_TERMINAL = Terminal()
{ "repo_name": "Petr-By/qtpyvis", "path": "dltb/util/terminal.py", "copies": "1", "size": "1845", "license": "mit", "hash": 3935772968536676000, "line_mean": 28.2857142857, "line_max": 72, "alpha_frac": 0.5506775068, "autogenerated": false, "ratio": 3.7885010266940453, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.48391785334940457, "avg_score": null, "num_lines": null }
"""
An abstract precompiler task for the `ligament` task automator
"""
import os
import glob

from ligament.helpers import (
    remove_dups, zip_with_output, mkdir_recursive, partition,
    capture_exception, pdebug)
from ligament.buildtarget import BuildTarget
from ligament.exceptions import TaskExecutionException


class Precompiler(BuildTarget):
    """ A reusable template for a precompiler task

    classes that extend Precompiler must do the following at minimum:

        declare external_template_string to a template string with a single
            %s, where the value of the compiled filename will be placed

        declare embed_template_string to a template string with a single %s,
            where the compiled file's text will be placed

        declare out_path_of

        declare compile_file
    """

    external_template_string = None
    embed_template_string = None

    def __init__(self,
                 minify=True,
                 embed=True,
                 concat=True,
                 source_dir=None,
                 target_dir=None,
                 build_targets=None,
                 relative_directory="./",
                 external_template_string=None,
                 embed_template_string=None,
                 **kwargs):
        """Configure the precompiler.

        :param minify: whether subclasses should minify their output
        :param embed: when True, compiled text is embedded via
            ``embed_template_string`` instead of written to disk
        :param concat: when embedding, join all outputs into one blob
        :param source_dir: root of the input files
        :param target_dir: root for compiled output files
        :param build_targets: glob patterns, relative to ``source_dir``
        :param relative_directory: prefix used in external references
        """
        BuildTarget.__init__(self, **kwargs)

        self.relative_directory = relative_directory
        self.input_directory = os.path.abspath(source_dir)
        self.output_directory = os.path.abspath(target_dir)
        self.compiler_name = "???"

        pdebug(self.input_directory)
        pdebug(self.output_directory)

        # None-sentinel avoids the shared mutable-default-argument pitfall
        build_targets = [] if build_targets is None else build_targets
        self.build_targets = [
            os.path.abspath(os.path.join(self.input_directory, target))
            for target in build_targets]

        self.file_watch_targets = self.build_targets

        if embed_template_string:
            self.embed_template_string = embed_template_string
        if external_template_string:
            self.external_template_string = external_template_string

        self.minify = minify
        self.embed = embed
        self.concat = concat

    def out_path_of(self, in_path):
        """given the input path of a file, return the ouput path"""
        raise Exception("Precompiler out_path_of not implemented!")

    def compile_file(self, path):
        """given the path of a file, compile it and return the result"""
        raise Exception("Precompiler compile_file not implemented!")

    @capture_exception
    @zip_with_output(skip_args=[0])
    def compile_and_process(self, in_path):
        """compile a file, save it to the ouput file if the inline flag true

        Because of the ``zip_with_output`` decorator the caller receives an
        ``(in_path, compiled_text)`` pair (see ``dict(values)`` in
        :meth:`build`), or an Exception via ``capture_exception``.
        """
        out_path = self.path_mapping[in_path]
        if not self.embed:
            pdebug("[%s::%s] %s -> %s" % (
                self.compiler_name,
                self.name,
                os.path.relpath(in_path),
                os.path.relpath(out_path)),
                groups=["build_task"],
                autobreak=True)
        else:
            pdebug("[%s::%s] %s -> <cache>" % (
                self.compiler_name,
                self.name,
                os.path.relpath(in_path)),
                groups=["build_task"],
                autobreak=True)

        compiled_string = self.compile_file(in_path)

        if not self.embed:
            if compiled_string != "":
                with open(out_path, "w") as f:
                    f.write(compiled_string)

        return compiled_string

    def collect_output(self):
        """ helper function to gather the results of `compile_and_process`
            on all target files
        """
        if self.embed:
            if self.concat:
                concat_scripts = [self.compiled_scripts[path]
                                  for path in self.build_order]
                return [self.embed_template_string % '\n'.join(concat_scripts)]
            else:
                return [self.embed_template_string % self.compiled_scripts[path]
                        for path in self.build_order]
        else:
            return [self.external_template_string % os.path.join(
                        self.relative_directory,
                        os.path.relpath(
                            self.out_path_of(path),
                            self.output_directory))
                    for path in self.build_order
                    if self.compiled_scripts[path] != ""]

    def build(self):
        """build the scripts and return a string"""
        if not self.embed:
            mkdir_recursive(self.output_directory)

        # get list of script files in build order
        # (comprehension replaces the original `reduce(...)`, which was a
        # NameError on Python 3 since `reduce` was never imported)
        self.build_order = remove_dups(
            [path
             for pattern in self.build_targets
             for path in glob.glob(pattern)])

        self.build_order_output = [self.out_path_of(t)
                                   for t in self.build_order]

        self.path_mapping = dict(zip(
            self.build_order,
            self.build_order_output))

        self.compiled_scripts = {}

        exceptions, values = partition(
            lambda x: isinstance(x, Exception),
            [self.compile_and_process(target)
             for target in self.build_order])

        self.compiled_scripts.update(dict(values))

        saneExceptions, insaneExceptions = partition(
            lambda x: isinstance(x, TaskExecutionException),
            exceptions)

        # re-raise unexpected exception types verbatim
        if len(insaneExceptions) != 0:
            raise insaneExceptions[0]

        if len(exceptions) != 0:
            raise TaskExecutionException(
                "Precompiler Errors (%s):" % type(self).__name__,
                "\n".join([
                    x.header + "\n  " + x.message.replace("\n", "\n  ")
                    for x in exceptions]))

        return self.collect_output()

    def update_build(self, updated_files):
        """ updates a build based on updated files """
        for changed in updated_files:
            result = self.compile_and_process(changed)
            if isinstance(result, Exception):
                raise result
            # compile_and_process returns an (input_path, compiled_text)
            # pair; the original stored the raw pair under the filename,
            # corrupting compiled_scripts.
            in_path, compiled = result
            self.compiled_scripts[in_path] = compiled
        return self.collect_output()
{ "repo_name": "Adjective-Object/ligament", "path": "ligament_precompiler_template/__init__.py", "copies": "1", "size": "6275", "license": "apache-2.0", "hash": 4861530348135379000, "line_mean": 32.0263157895, "line_max": 79, "alpha_frac": 0.5370517928, "autogenerated": false, "ratio": 4.501434720229555, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5538486513029555, "avg_score": null, "num_lines": null }
""" An abstract Table >>> accounts = TableSymbol('accounts', '{name: string, amount: int}') >>> deadbeats = accounts['name'][accounts['amount'] < 0] """ from __future__ import absolute_import, division, print_function from datashape import dshape, var, DataShape, Record, isdimension import datashape import operator from toolz import concat, partial, first, pipe, compose import toolz from toolz.curried import filter from . import scalar from .core import Expr, Scalar from .scalar import ScalarSymbol from .scalar import * from ..utils import unique from ..compatibility import _strtypes, builtins class TableExpr(Expr): """ Super class for all Table Expressions """ @property def dshape(self): return datashape.var * self.schema @property def columns(self): if isinstance(self.schema[0], Record): return self.schema[0].names @property def dtype(self): ds = self.schema[-1] if isinstance(ds, Record): if len(ds.fields.values()) > 1: raise TypeError("`.dtype` not defined for multicolumn object. " "Use `.schema` instead") else: return dshape(first(ds.fields.values())) else: return dshape(ds) def __getitem__(self, key): if isinstance(key, _strtypes): if key not in self.columns: raise ValueError("Mismatched Column: %s" % str(key)) return Column(self, key) if isinstance(key, list) and all(isinstance(k, _strtypes) for k in key): key = tuple(key) if not all(col in self.columns for col in key): raise ValueError("Mismatched Columns: %s" % str(key)) return Projection(self, tuple(key)) if isinstance(key, TableExpr): return Selection(self, key) raise TypeError("Did not understand input: %s[%s]" % (self, key)) def sort(self, key=None, ascending=True): """ Sort table Parameters ---------- key: string, list of strings, TableExpr Defines by what you want to sort. 
Either: A single column string, ``t.sort('amount')`` A list of column strings, ``t.sort(['name', 'amount'])`` A Table Expression, ``t.sort(-t['amount'])`` ascending: bool Determines order of the sort """ if key is None: key = self.columns[0] return Sort(self, key, ascending) def head(self, n=10): return Head(self, n) def relabel(self, labels): return ReLabel(self, labels) def map(self, func, schema=None): return Map(self, func, schema) def count(self): return count(self) def distinct(self): return Distinct(self) def nunique(self): return nunique(self) def ancestors(self): return (self,) @property def iscolumn(self): if len(self.columns) > 1: return False raise NotImplementedError("%s.iscolumn not implemented" % str(type(self).__name__)) class TableSymbol(TableExpr): """ A Symbol for Tabular data This is a leaf in the expression tree >>> accounts = TableSymbol('accounts', ... '{name: string, amount: int, id: int}') >>> accounts['amount'] + 1 accounts['amount'] + 1 We define a TableSymbol with a name like ``accounts`` and the datashape of a single row, called a schema. """ __slots__ = 'name', 'schema', 'iscolumn' def __init__(self, name, schema, iscolumn=False): self.name = name self.schema = dshape(schema) self.iscolumn = iscolumn def __str__(self): return self.name def ancestors(self): return (self,) def resources(self): return dict() class RowWise(TableExpr): def ancestors(self): return (self,) + self.parent.ancestors() class Projection(RowWise): """ Select columns from table SELECT a, b, c FROM table >>> accounts = TableSymbol('accounts', ... 
'{name: string, amount: int, id: int}') >>> accounts[['name', 'amount']].schema dshape("{ name : string, amount : int32 }") """ __slots__ = 'parent', '_columns' def __init__(self, table, columns): self.parent = table self._columns = tuple(columns) @property def columns(self): return self._columns @property def schema(self): d = self.parent.schema[0].fields return DataShape(Record([(col, d[col]) for col in self.columns])) def __str__(self): return '%s[[%s]]' % (self.parent, ', '.join(["'%s'" % col for col in self.columns])) @property def iscolumn(self): return False class ColumnSyntaxMixin(object): def __eq__(self, other): return columnwise(Eq, self, other) def __ne__(self, other): return columnwise(NE, self, other) def __lt__(self, other): return columnwise(LT, self, other) def __le__(self, other): return columnwise(LE, self, other) def __gt__(self, other): return columnwise(GT, self, other) def __ge__(self, other): return columnwise(GE, self, other) def __add__(self, other): return columnwise(Add, self, other) def __radd__(self, other): return columnwise(Add, other, self) def __mul__(self, other): return columnwise(Mul, self, other) def __rmul__(self, other): return columnwise(Mul, other, self) def __div__(self, other): return columnwise(Div, self, other) __truediv__ = __div__ def __rdiv__(self, other): return columnwise(Div, other, self) def __sub_(self, other): return columnwise(Sub, self, other) def __rsub__(self, other): return columnwise(Sub, other, self) def __pow__(self, other): return columnwise(Pow, self, other) def __rpow__(self, other): return columnwise(Pow, other, self) def __mod__(self, other): return columnwise(Mod, self, other) def __rmod__(self, other): return columnwise(Mod, other, self) def __or__(self, other): return columnwise(Or, self, other) def __ror__(self, other): return columnwise(Or, other, self) def __and__(self, other): return columnwise(And, self, other) def __rand__(self, other): return columnwise(And, other, self) def 
__neg__(self): return columnwise(Neg, self) def label(self, label): return Label(self, label) def sum(self): return sum(self) def min(self): return min(self) def max(self): return max(self) def any(self): return any(self) def all(self): return all(self) def mean(self): return mean(self) def var(self): return var(self) def std(self): return std(self) iscolumn = True class Column(ColumnSyntaxMixin, Projection): """ A single column from a table SELECT a FROM table >>> accounts = TableSymbol('accounts', ... '{name: string, amount: int, id: int}') >>> accounts['name'].schema dshape("{ name : string }") """ __slots__ = 'parent', 'column' __hash__ = Expr.__hash__ iscolumn = True def __init__(self, table, column): self.parent = table self.column = column @property def columns(self): return (self.column,) def __str__(self): return "%s['%s']" % (self.parent, self.columns[0]) @property def scalar_symbol(self): return ScalarSymbol(self.column, dtype=self.dtype) class Selection(TableExpr): """ Filter rows of table based on predicate >>> accounts = TableSymbol('accounts', ... '{name: string, amount: int, id: int}') >>> deadbeats = accounts[accounts['amount'] < 0] """ __slots__ = 'parent', 'predicate' def __init__(self, table, predicate): if predicate.dtype != dshape('bool'): raise TypeError("Must select over a boolean predicate. Got:\n" "%s[%s]" % (table, predicate)) self.parent = table self.predicate = predicate # A Relational def __str__(self): return "%s[%s]" % (self.parent, self.predicate) @property def schema(self): return self.parent.schema @property def iscolumn(self): return self.parent.iscolumn def columnwise(op, *column_inputs): """ Merge columns with scalar operation Parameters ---------- op - Scalar Operation like Add, Mul, Sin, Exp column_inputs - either Column, ColumnWise or constant (like 1, 1.0, '1') >>> accounts = TableSymbol('accounts', ... 
'{name: string, amount: int, id: int}') >>> columnwise(Add, accounts['amount'], 100) accounts['amount'] + 100 Fuses operations down into ScalarExpr level >>> columnwise(Mul, 2, (accounts['amount'] + 100)) 2 * (accounts['amount'] + 100) """ expr_inputs = [] parents = set() for col in column_inputs: if isinstance(col, ColumnWise): expr_inputs.append(col.expr) parents.add(col.parent) elif isinstance(col, Column): # TODO: specify dtype expr_inputs.append(col.scalar_symbol) parents.add(col.parent) else: # maybe something like 5 or 'Alice' expr_inputs.append(col) if not len(parents) == 1: raise ValueError("All inputs must be from same Table.\n" "Saw the following tables: %s" % ', '.join(map(str, parents))) expr = op(*expr_inputs) return ColumnWise(first(parents), expr) class ColumnWise(RowWise, ColumnSyntaxMixin): """ Apply Scalar Expression onto columns of data Parameters ---------- parent - TableExpr expr - ScalarExpr The names of the varibles within the scalar expr must match the columns of the parent. Use ``Column.scalar_variable`` to generate the appropriate ScalarSymbol >>> accounts = TableSymbol('accounts', ... 
'{name: string, amount: int, id: int}') >>> expr = Add(accounts['amount'].scalar_symbol, 100) >>> ColumnWise(accounts, expr) accounts['amount'] + 100 """ __slots__ = 'parent', 'expr' def __init__(self, parent, expr): self.parent = parent self.expr = expr __hash__ = Expr.__hash__ iscolumn = True @property def schema(self): return self.expr.dshape def __str__(self): columns = self.active_columns() newcol = lambda c: "%s['%s']" % (self.parent, c) return eval_str(self.expr.subs(dict(zip(columns, map(newcol, columns))))) def active_columns(self): return sorted(unique(x.name for x in self.traverse() if isinstance(x, ScalarSymbol))) class Join(TableExpr): """ Join two tables on common columns Parameters ---------- lhs : TableExpr rhs : TableExpr on_left : string on_right : string >>> names = TableSymbol('names', '{name: string, id: int}') >>> amounts = TableSymbol('amounts', '{amount: int, id: int}') Join tables based on shared column name >>> joined = Join(names, amounts, 'id') Join based on different column names >>> amounts = TableSymbol('amounts', '{amount: int, acctNumber: int}') >>> joined = Join(names, amounts, 'id', 'acctNumber') """ __slots__ = 'lhs', 'rhs', 'on_left', 'on_right' iscolumn = False def __init__(self, lhs, rhs, on_left, on_right=None): self.lhs = lhs self.rhs = rhs if not on_right: on_right = on_left self.on_left = on_left self.on_right = on_right if lhs.schema[0][on_left] != rhs.schema[0][on_right]: raise TypeError("Schema's of joining columns do not match") @property def schema(self): rec1 = self.lhs.schema[0] rec2 = self.rhs.schema[0] rec = rec1.parameters[0] + tuple((k, v) for k, v in rec2.parameters[0] if k != self.on_right) return dshape(Record(rec)) sqrt = partial(columnwise, scalar.sqrt) sin = partial(columnwise, scalar.sin) cos = partial(columnwise, scalar.cos) tan = partial(columnwise, scalar.tan) sinh = partial(columnwise, scalar.sinh) cosh = partial(columnwise, scalar.cosh) tanh = partial(columnwise, scalar.tanh) acos = 
partial(columnwise, scalar.acos) acosh = partial(columnwise, scalar.acosh) asin = partial(columnwise, scalar.asin) asinh = partial(columnwise, scalar.asinh) atan = partial(columnwise, scalar.atan) atanh = partial(columnwise, scalar.atanh) exp = partial(columnwise, scalar.exp) log = partial(columnwise, scalar.log) expm1 = partial(columnwise, scalar.expm1) log10 = partial(columnwise, scalar.log10) log1p = partial(columnwise, scalar.log1p) radians = partial(columnwise, scalar.radians) degrees = partial(columnwise, scalar.degrees) ceil = partial(columnwise, scalar.ceil) floor = partial(columnwise, scalar.floor) trunc = partial(columnwise, scalar.trunc) isnan = partial(columnwise, scalar.isnan) class Reduction(Scalar): """ A column-wise reduction >>> t = TableSymbol('t', '{name: string, amount: int, id: int}') >>> e = t['amount'].sum() >>> data = [['Alice', 100, 1], ... ['Bob', 200, 2], ... ['Alice', 50, 3]] >>> from blaze.compute.python import compute >>> compute(e, data) 350 """ __slots__ = 'parent', def __init__(self, table): self.parent = table @property def dshape(self): return self.parent.dshape.subarray(1) @property def symbol(self): return type(self).__name__ class any(Reduction): pass class all(Reduction): pass class sum(Reduction): pass class max(Reduction): pass class min(Reduction): pass class mean(Reduction): pass class var(Reduction): pass class std(Reduction): pass class count(Reduction): pass class nunique(Reduction): pass class By(TableExpr): """ Split-Apply-Combine Operator >>> t = TableSymbol('t', '{name: string, amount: int, id: int}') >>> e = By(t, t['name'], t['amount'].sum()) >>> data = [['Alice', 100, 1], ... ['Bob', 200, 2], ... 
['Alice', 50, 3]] >>> from blaze.compute.python import compute >>> compute(e, data) #doctest: +SKIP {'Alice': 150, 'Bob': 200} """ __slots__ = 'parent', 'grouper', 'apply' iscolumn = False def __init__(self, parent, grouper, apply): self.parent = parent s = TableSymbol('', parent.schema, parent.iscolumn) self.grouper = grouper.subs({parent: s}) self.apply = apply.subs({parent: s}) if isdimension(self.apply.dshape[0]): raise TypeError("Expected Reduction") @property def schema(self): group = self.grouper.schema[0].parameters[0] if isinstance(self.apply.dshape[0], Record): apply = self.apply.dshape[0].parameters[0] else: apply = (('0', self.apply.dshape),) params = unique(group + apply, key=lambda x: x[0]) return dshape(Record(list(params))) class Sort(TableExpr): """ Table in sorted order >>> accounts = TableSymbol('accounts', '{name: string, amount: int}') >>> accounts.sort('amount', ascending=False).schema dshape("{ name : string, amount : int32 }") Some backends support sorting by arbitrary rowwise tables, e.g. >>> accounts.sort(-accounts['amount']) # doctest: +SKIP """ __slots__ = 'parent', 'column', 'ascending' def __init__(self, parent, column, ascending=True): self.parent = parent self.column = column self.ascending = ascending @property def schema(self): return self.parent.schema @property def iscolumn(self): return self.parent.iscolumn class Distinct(TableExpr): """ Distinct elements filter >>> t = TableSymbol('t', '{name: string, amount: int, id: int}') >>> e = Distinct(t) >>> data = [('Alice', 100, 1), ... ('Bob', 200, 2), ... 
('Alice', 100, 1)] >>> from blaze.compute.python import compute >>> sorted(compute(e, data)) [('Alice', 100, 1), ('Bob', 200, 2)] """ __slots__ = 'parent', def __init__(self, table): self.parent = table @property def schema(self): return self.parent.schema @property def iscolumn(self): return self.parent.iscolumn class Head(TableExpr): """ First ``n`` elements of table >>> accounts = TableSymbol('accounts', '{name: string, amount: int}') >>> accounts.head(5).dshape dshape("5 * { name : string, amount : int32 }") """ __slots__ = 'parent', 'n' def __init__(self, parent, n=10): self.parent = parent self.n = n @property def schema(self): return self.parent.schema @property def dshape(self): return self.n * self.schema @property def iscolumn(self): return self.parent.iscolumn class Label(RowWise, ColumnSyntaxMixin): """ A Labeled column >>> accounts = TableSymbol('accounts', '{name: string, amount: int}') >>> (accounts['amount'] * 100).schema dshape("float64") >>> (accounts['amount'] * 100).label('new_amount').schema #doctest: +SKIP dshape("{ new_amount : float64 }") """ __slots__ = 'parent', 'label' def __init__(self, parent, label): self.parent = parent self.label = label @property def schema(self): if isinstance(self.parent.schema[0], Record): dtype = self.parent.schema[0].fields.values()[0] else: dtype = self.parent.schema[0] return DataShape(Record([[self.label, dtype]])) class ReLabel(RowWise): """ Table with same content but new labels >>> accounts = TableSymbol('accounts', '{name: string, amount: int}') >>> accounts.schema dshape("{ name : string, amount : int32 }") >>> accounts.relabel({'amount': 'balance'}).schema dshape("{ name : string, balance : int32 }") """ __slots__ = 'parent', 'labels' def __init__(self, parent, labels): self.parent = parent if isinstance(labels, dict): # Turn dict into tuples labels = tuple(sorted(labels.items())) self.labels = labels @property def schema(self): subs = dict(self.labels) d = self.parent.schema[0].fields return 
DataShape(Record([[subs.get(name, name), dtype] for name, dtype in self.parent.schema[0].parameters[0]])) @property def iscolumn(self): return self.parent.iscolumn class Map(RowWise): """ Map an arbitrary Python function across rows in a Table >>> from datetime import datetime >>> t = TableSymbol('t', '{price: real, time: int64}') # times as integers >>> datetimes = t['time'].map(datetime.utcfromtimestamp) Optionally provide extra schema information >>> datetimes = t['time'].map(datetime.utcfromtimestamp, ... schema='{time: datetime}') See Also: Apply """ __slots__ = 'parent', 'func', '_schema' def __init__(self, parent, func, schema=None): self.parent = parent self.func = func self._schema = schema @property def schema(self): if self._schema: return dshape(self._schema) else: raise NotImplementedError() class Apply(TableExpr): """ Apply an arbitrary Python function onto a Table >>> t = TableSymbol('t', '{name: string, amount: int}') >>> h = Apply(hash, t) # Hash value of resultant table Optionally provide extra datashape information >>> h = Apply(hash, t, dshape='real') Apply brings a function within the expression tree. 
The following transformation is often valid Before ``compute(Apply(f, expr), ...)`` After ``f(compute(expr, ...)`` See Also: Map """ __slots__ = 'parent', 'func', '_dshape' def __init__(self, func, parent, dshape=None): self.parent = parent self.func = func self._dshape = dshape @property def schema(self): if isdimension(self.dshape[0]): return self.dshape.subshape[0] else: return TypeError("Non-tabular datashape, %s" % self.dshape) @property def dshape(self): if self._dshape: return dshape(self._dshape) else: return NotImplementedError("Datashape of arbitrary Apply not defined") def common_ancestor(*tables): """ Common ancestor between subtables >>> t = TableSymbol('t', '{x: int, y: int}') >>> common_ancestor(t['x'], t['y']) t """ sets = [set(t.ancestors()) for t in tables] return builtins.max(set.intersection(*sets), key=compose(len, str)) def merge(*tables): # Get common ancestor parent = common_ancestor(*tables) if not parent: raise ValueError("No common ancestor found for input tables") shim = TableSymbol('_ancestor', parent.schema, parent.iscolumn) tables = tuple(t.subs({parent: shim}) for t in tables) return Merge(parent, tables) class Merge(RowWise): """ Merge many Tables together Must all descend from same table via RowWise operations >>> accounts = TableSymbol('accounts', '{name: string, amount: int}') >>> newamount = (accounts['amount'] * 1.5).label('new_amount') >>> merge(accounts, newamount).columns ['name', 'amount', 'new_amount'] """ __slots__ = 'parent', 'children' iscolumn = False def __init__(self, parent, children): # TODO: Assert all parents descend from the same parent via RowWise # operations self.parent = parent self.children = children @property def schema(self): for c in self.children: if not isinstance(c.schema[0], Record): raise TypeError("All schemas must have Record shape. Got %s" % c.schema[0]) return dshape(Record(list(concat(c.schema[0].parameters[0] for c in self.children))))
{ "repo_name": "aterrel/blaze", "path": "blaze/expr/table.py", "copies": "1", "size": "22627", "license": "bsd-3-clause", "hash": -5774427155019287000, "line_mean": 25.9369047619, "line_max": 82, "alpha_frac": 0.5781588368, "autogenerated": false, "ratio": 3.8958333333333335, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9969981445298518, "avg_score": 0.0008021449669630183, "num_lines": 840 }
""" An abstract Table

>>> accounts = TableSymbol('accounts', '{name: string, amount: int}')
>>> deadbeats = accounts.name[accounts.amount < 0]
"""
from __future__ import absolute_import, division, print_function

import datashape
from datashape.predicates import isscalar, iscollection, isrecord

from .expressions import Symbol
from ..compatibility import _strtypes

__all__ = ['TableSymbol']


def TableSymbol(name, dshape):
    """ A Symbol for Tabular data

    This is a leaf in the expression tree

    Examples
    --------

    >>> accounts = TableSymbol('accounts',
    ...                        '{name: string, amount: int, id: int}')
    >>> accounts.amount + 1
    accounts.amount + 1

    We define a TableSymbol with a name like ``accounts`` and the datashape
    of a single row, called a schema.
    """
    # normalize a string spec into a real datashape object
    ds = datashape.dshape(dshape) if isinstance(dshape, _strtypes) else dshape
    # a bare row schema is promoted to a variable-length collection
    if not iscollection(ds):
        ds = datashape.var * ds
    return Symbol(name, ds)


def columns(expr):
    """Field names of a tabular expression."""
    return expr.fields


from .expressions import dshape_method_list, method_properties

# expose ``.columns`` on any 1-d expression whose measure is a record
dshape_method_list.extend([
    (lambda ds: len(ds.shape) == 1 and isrecord(ds.measure), {columns})
    ])

method_properties.add(columns)
{ "repo_name": "cowlicks/blaze", "path": "blaze/expr/table.py", "copies": "15", "size": "1260", "license": "bsd-3-clause", "hash": 6575153809470775000, "line_mean": 24.7142857143, "line_max": 78, "alpha_frac": 0.6658730159, "autogenerated": false, "ratio": 3.7611940298507465, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00032393909944930353, "num_lines": 49 }
"""An account classed based views.

.. module:: account.views
    :platform: Linux, Unix
    :synopsis: Classed based views for accounts handling operations

.. moduleauthor:: Nickolas Fox <tarvitz@blacklibrary.ru>
"""
# -*- coding: utf-8 -*-
import six

from apps.accounts.decorators import prevent_bruteforce
from django.shortcuts import get_object_or_404
from django.http import Http404
from django.core.urlresolvers import reverse, reverse_lazy
from django.conf import settings
from django.core.mail import send_mail
from django.utils.translation import ugettext_lazy as _

from django.views import generic
from django.shortcuts import redirect
from django.utils.decorators import method_decorator

from apps.accounts.models import UserSID, User
from apps.core.views import LoginRequiredMixin
from apps.accounts.forms import (
    LoginForm, PasswordRestoreInitiateForm, PasswordChangeForm,
    PasswordRestoreForm
)
from django.contrib import auth


class IndexView(generic.TemplateView):
    """Index view"""
    template_name = 'index.html'


class LoginView(generic.FormView):
    """
    LoginView serves for account login into current web application
    """
    template_name = 'accounts/login.html'
    form_class = LoginForm
    success_url = reverse_lazy('core:index')

    def form_valid(self, form):
        # the form has already authenticated the user (cleaned_data['user'])
        form_valid = super(LoginView, self).form_valid(form)
        auth.login(self.request, form.cleaned_data['user'])
        return form_valid


class LogoutView(generic.TemplateView):
    """
    LogoutView serves for log out registered and logged in user for
    current web app
    """
    template_name = 'index.html'

    def get(self, request, *args, **kwargs):
        auth.logout(request)
        return self.render_to_response({})


class PasswordRestoreInitiateView(generic.FormView):
    """
    Password Restore Initiate View

    starts password restore process (initiates)
    """
    template_name = 'accounts/password_restore_initiate.html'
    form_class = PasswordRestoreInitiateForm
    success_url = reverse_lazy('accounts:password-restore-initiated')

    def form_valid(self, form):
        """Create a fresh SID per matched user and mail a restore link."""
        users = form.cleaned_data['users']
        # NOTE: the original fetched UserSID.objects.filter(user__in=users,
        # expired=True) and immediately overwrote the result with [], so the
        # else-branch (reusing an existing SID) was unreachable dead code.
        # A new SID is therefore always created — preserved here, minus the
        # dead query.
        sids = [UserSID.objects.create(user) for user in users]

        for sid in sids:
            msg = settings.PASSWORD_RESTORE_REQUEST_MESSAGE % {
                'link': settings.SITE_URL + reverse(
                    'accounts:password-restore', args=(sid.sid, )),
                'url': settings.CONTACT_URL
            }
            send_mail(
                subject=six.text_type(_('Your password requested to change')),
                message=six.text_type(msg),
                from_email=settings.EMAIL_FROM,
                recipient_list=[sid.user.email]
            )
        return redirect(self.get_success_url())


class PasswordRestoreView(generic.FormView):
    """
    Password Restore View

    restores forgotten password
    """
    form_class = PasswordRestoreForm
    template_name = 'accounts/password_restore.html'
    success_url = reverse_lazy('accounts:password-restored')

    @method_decorator(prevent_bruteforce)
    def dispatch(self, request, *args, **kwargs):
        return super(PasswordRestoreView, self).dispatch(request, *args,
                                                         **kwargs)

    def get_user_sid_instance(self):
        """Fetch and cache the ``UserSID`` for the url ``sid`` kwarg.

        The original used ``get_object_or_404`` and *then* checked
        ``if not self.user_sid`` to bump the brute-force counter — a dead
        branch, because ``get_object_or_404`` raises ``Http404`` before
        ever returning a falsy value.  Handling ``DoesNotExist`` directly
        restores the intended accounting used by ``prevent_bruteforce``.
        """
        if not hasattr(self, 'user_sid'):
            try:
                self.user_sid = UserSID.objects.get(
                    sid=self.kwargs.get('sid', 0), expired=False)
            except UserSID.DoesNotExist:
                self.request.session['brute_force_iter'] \
                    = self.request.session.get('brute_force_iter', 0) + 1
                self.request.session.save()
                raise Http404("not found")
        return self.user_sid

    def get_form_kwargs(self):
        kwargs = super(PasswordRestoreView, self).get_form_kwargs()
        kwargs.update({
            'instance': self.get_user_sid_instance(),
            'request': self.request
        })
        return kwargs

    def form_valid(self, form):
        form.save()
        return redirect(self.get_success_url())


class PasswordChangeView(LoginRequiredMixin, generic.FormView):
    """
    Password Change View

    starts password change process (if user remembers his current password
    and he/she is logged in)
    """
    form_class = PasswordChangeForm
    model = User
    success_url = reverse_lazy('accounts:password-changed')
    template_name = 'accounts/password_change.html'

    def get_form_kwargs(self):
        kwargs = super(PasswordChangeView, self).get_form_kwargs()
        kwargs.update({
            'instance': self.request.user
        })
        return kwargs

    def form_valid(self, form):
        form.save()
        return redirect(self.get_success_url())


class ProfileView(LoginRequiredMixin, generic.TemplateView):
    """
    Profile View

    show user profile
    """
    template_name = 'accounts/profile.html'
{ "repo_name": "tarvitz/djtp", "path": "apps/accounts/views.py", "copies": "1", "size": "5328", "license": "bsd-3-clause", "hash": -5376240365863480000, "line_mean": 29.4457142857, "line_max": 78, "alpha_frac": 0.637012012, "autogenerated": false, "ratio": 4.098461538461539, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5235473550461538, "avg_score": null, "num_lines": null }
from __future__ import division import numpy as np import subprocess import json class FFmpeg: def __init__(self, filename): self.filename = filename self.proc = None self.__set_video_info() def get_video_info(self): return (self.h, self.w, self.fps) def __set_video_info(self): cmd = 'ffmpeg/ffprobe -v quiet -print_format json -show_streams ..\%s.avi' % self.filename proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) info = json.loads(proc.stdout.read()) self.h = info['streams'][0]['height'] self.w = info['streams'][0]['width'] fps = info['streams'][0]['r_frame_rate'].split('/') self.fps = int(fps[0]) / int(fps[1]) self.nframes = int(info['streams'][0]['nb_frames']) def get_frame_by_number(self, f): cmd = 'ffmpeg/ffmpeg -i ../%s.avi -v quiet -f rawvideo -pix_fmt rgb24 -ss %f -vframes 1 -' % (self.filename, f / self.fps) proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) im = np.fromstring(proc.stdout.read(), dtype='uint8') im.shape = (self.h, self.w, 3) return im def get_first_frame(self): if self.proc: self.proc.kill() cmd = 'ffmpeg/ffmpeg -i ../%s.avi -v quiet -f rawvideo -pix_fmt rgb24 -' % self.filename self.proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) return self.get_next_frame() def get_next_frame(self): if self.proc: im = np.fromstring(self.proc.stdout.read(self.h * self.w * 3), dtype='uint8') if im.size == 0: self.proc = None return None return im.reshape(self.h, self.w, 3) else: return self.get_first_frame() def close(self): if self.proc: self.proc.kill() self.proc = None
{ "repo_name": "samehkhamis/pyffmpeg", "path": "ffmpeg.py", "copies": "1", "size": "2129", "license": "bsd-2-clause", "hash": -7411813572966561000, "line_mean": 35.350877193, "line_max": 130, "alpha_frac": 0.5617660874, "autogenerated": false, "ratio": 3.5016447368421053, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.45634108242421056, "avg_score": null, "num_lines": null }
"""An achievement which can be reached in a pinball machine.""" from mpf.core.device_monitor import DeviceMonitor from mpf.core.events import event_handler from mpf.core.mode import Mode from mpf.core.mode_device import ModeDevice from mpf.core.player import Player @DeviceMonitor("state", "selected") class Achievement(ModeDevice): """An achievement in a pinball machine. It is tracked per player and can automatically restore state on the next ball. """ config_section = 'achievements' collection = 'achievements' class_label = 'achievement' allow_empty_configs = True def __init__(self, machine, name): """Initialise achievement.""" super().__init__(machine, name) self._player = None self._mode = None self._show = None @property def state(self): """Return current state.""" try: return self._player.achievements[self.name][0] except (AttributeError, KeyError): return None @state.setter def state(self, value): """Set current state.""" try: self._player.achievements[self.name][0] = value except (AttributeError, KeyError): self._player.achievements[self.name] = [value, False] @property def can_be_selected_for_start(self): """Return if this achievement can be selected and started.""" state = self.state return state == 'enabled' or (state == 'stopped' and self.config['restart_after_stop_possible']) @property def selected(self): """Return current selection state.""" try: return self._player.achievements[self.name][1] except (AttributeError, KeyError): return False @selected.setter def selected(self, value): """Set current selected.""" try: self._player.achievements[self.name][1] = value except (AttributeError, KeyError): self._player.achievements[self.name] = [None, value] def validate_and_parse_config(self, config: dict, is_mode_config: bool, debug_prefix: str = None) -> dict: """Validate and parse config.""" config = super().validate_and_parse_config(config, is_mode_config, debug_prefix) states = ['disabled', 'enabled', 'started', 'stopped', 'selected', 'completed'] for state 
in states: if not config['events_when_{}'.format(state)]: config['events_when_{}'.format(state)] = [ "achievement_{}_state_{}".format(self.name, state)] return config def enable(self): """Enable the achievement. It can only start if it was enabled before. """ super().enable() if self.state in ("disabled", "started"): self.state = "enabled" self._run_state() @event_handler(5) def event_start(self, **kwargs): """Event handler for start event.""" del kwargs self.start() def start(self): """Start achievement.""" if self.state == "enabled" or ( self.config['restart_after_stop_possible'] and self.state == "stopped"): self.state = "started" self.selected = False self._run_state() @event_handler(4) def event_complete(self, **kwargs): """Event handler for complete event.""" del kwargs self.complete() def complete(self): """Complete achievement.""" if self.state == "started": self.state = "completed" self.selected = False self._run_state() @event_handler(2) def event_stop(self, **kwargs): """Event handler for stop event.""" del kwargs self.stop() def stop(self): """Stop achievement.""" if self.state == "started": self.state = "stopped" self.selected = False self._run_state() @event_handler(0) def event_disable(self, **kwargs): """Event handler for disable event.""" del kwargs self.disable() def disable(self): """Disable achievement.""" if self.state == "enabled" or ( self.config['restart_after_stop_possible'] and self.state == "stopped"): self.state = "disabled" self.selected = False self._run_state() @event_handler(1) def event_reset(self, **kwargs): """Event handler for reset event.""" del kwargs self.reset() def reset(self): """Reset the achievement to its initial state.""" # if there is no player active if not self._player: return self.selected = False if self.config['start_enabled'] is True: self.state = "enabled" elif self.config['start_enabled'] is False: self.state = "disabled" elif self.config['enable_events']: self.state = "disabled" else: self.state = "enabled" 
self._run_state() @event_handler(8) def event_unselect(self, **kwargs): """Event handler for unselect event.""" del kwargs self.unselect() def unselect(self): """Remove highlight (unselect) this achievement.""" if not self._player: return self.debug_log("Unselecting achievement") if self.selected: self.selected = False self._run_state() @event_handler(9) def event_select(self, **kwargs): """Event handler for select event.""" del kwargs self.select() def select(self): """Highlight (select) this achievement.""" if not self._player: return self.debug_log("Selecting achievement") if (self.state == 'enabled' or (self.config['restart_after_stop_possible'] and self.state == "stopped")) and not self.selected: self.selected = True self._run_state() def _run_state(self, restore=False): """Run shows and post events for current step.""" self.machine.events.post("achievement_{}_changed_state".format(self.name), restore=restore, state=self.state, selected=self.selected) '''event: achievement_(name)_changed_state desc: Achievement (name) changed state. Valid states are: disabled, enabled, started, completed, stopped This is only posted once per state. Its also posted on restart on the next ball to restore state. args: restore: true if this is reposted to restore state state: Current state selected: Whatever this achievement is selected currently ''' for event in self.config['events_when_{}'.format(self.state)]: self.machine.events.post(event, restore=restore, state=self.state, selected=self.selected) '''event: achievement_(name)_state_(state) desc: Achievement (name) changed to state (state). Valid states are: disabled, enabled, started, completed, stopped This is only posted once per state. Its also posted on restart on the next ball to restore state and when selection changes. 
args: restore: true if this is reposted to restore state state: Current state selected: Whatever this achievement is selected currently ''' if self.selected: for event in self.config['events_when_selected']: self.machine.events.post(event, restore=restore, state=self.state, selected=self.selected) # same as above if self._show: self.debug_log('Stopping show: %s', self._show) self._show.stop() self._show = None if self.selected and self.config['show_when_selected']: show = self.config['show_when_selected'] else: show = self.config['show_when_' + self.state] if show: self.debug_log('Playing show: %s. Priority: %s. Loops: -1. ' 'Show tokens: %s', show.name, self._mode.priority, self.config['show_tokens']) self._show = show.play( priority=self._mode.priority, loops=-1, sync_ms=self.config['sync_ms'], show_tokens=self.config['show_tokens']) def device_loaded_in_mode(self, mode: Mode, player: Player): """Load device on mode start and restore state. Args: ---- mode: mode which was contains the device player: player which is currently active """ self._player = player self._mode = mode if not self._player.achievements: self._player.achievements = dict() # type: ignore if self.name not in self._player.achievements: self.reset() else: self._restore_state() # state might have changed self.notify_virtual_change("selected", None, self.state) # type: ignore def _restore_state(self): if self.state == "started" and not ( self.config['restart_on_next_ball_when_started']): self.state = "stopped" elif self.state == "enabled" and not ( self.config['enable_on_next_ball_when_enabled']): self.state = "disabled" else: # state might still have changed because of player change self.notify_virtual_change("state", None, self.state) self._run_state(restore=True) def device_removed_from_mode(self, mode: Mode): """Mode ended. 
Args: ---- mode: mode which stopped """ del mode self._player = None self._mode = None if self._show: self._show.stop() self._show = None def add_control_events_in_mode(self, mode: Mode) -> None: """Override the default mode device behavior. Achievements use sophisticated logic to handle their mode-starting states during device_loaded_in_mode(). Therefore no default enabling is required. """
{ "repo_name": "missionpinball/mpf", "path": "mpf/devices/achievement.py", "copies": "1", "size": "10344", "license": "mit", "hash": -3471828787687972400, "line_mean": 31.2242990654, "line_max": 112, "alpha_frac": 0.5697022428, "autogenerated": false, "ratio": 4.313594662218516, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5383296905018515, "avg_score": null, "num_lines": null }
#!/anaconda2/bin/python2 '''Testing script.''' import os import os.path as op import sys import matplotlib as mpl import matplotlib.pyplot as plt from cycler import cycler import numpy as np import datetime as dtime import subprocess as sp import warnings import collections from exactpack.solvers.riemann import Sod exactpath = op.abspath(op.dirname(__file__)) sourcepath = op.dirname(exactpath) rsltpath = op.join(sourcepath,'Results') binpath = op.join(sourcepath,'bin') #Binary directory gitpath = op.dirname(sourcepath) #Top level of git repo plotpath = op.join(op.join(gitpath, "ResultPlots"), "ExactTesting") #Folder for plots modpath = op.join(gitpath, "pyAnalysisTools") os.chdir(sourcepath) sys.path.append(modpath) import main_help as mh import result_help as rh warnings.filterwarnings("ignore") mpl.rcParams['lines.linewidth'] = 3 mpl.rcParams['lines.markersize'] = 8 mpl.rcParams["grid.alpha"] = 0.5 mpl.rcParams["axes.grid"] = True precision = "Double" binary = precision + "Out" alpha = 8.418e-5 dto = 1e-5 divs = 1024 tpbs = 64 fqCoeff = 0.3 ksexactpath = op.join(exactpath, "KS" + precision + '_Official.txt') probs = [ ["Heat", 50.0], ["KS", 20.0], ["Euler", 0.22] ] def make_KSExact(): p = "KS" binName = p + binary executable = op.join(binpath, binName) dt = 1e-7 tf = probs[1][1] mh.runCUDA(executable, divs, tpbs, dt, tf, tf*fqCoeff, 1, ksexactpath) def Fo(dx,dt): alpha = 8.418e-5 return alpha*dt/dx**2 def heat_exact(t, divs, thing): dx = 0.001 L = divs*dx heats = lambda n, x: 1.0/n**2 * np.exp(-alpha*t*(n*np.pi/L)**2) * np.cos(n*x*np.pi/L) xm = np.linspace(0, L, divs) c0 = 50.0*L/3.0 cout = 400.0*L/(np.pi**2) Tf1 = np.empty(int(divs)) for i,xr in enumerate(xm): c = 2 ser = heats(c, xr) h = np.copy(ser) for k in range(5000): c += 2 ser = heats(c, xr) h += ser Tf1[i] = (c0 - cout * h) return Tf1 def euler_exact(t, divs, thing): warnings.filterwarnings("ignore") thing = thing.lower() dx = 1.0/float(divs-2) d = 0.5*dx r = np.arange(d, 1.0-d, dx) if thing == 
'velocity': thing = "velocity_x" solver = Sod() soln = solver(r, t) return getattr(soln, thing) def ks_exact(t, divs, thing): return rh.Solved(ksexactpath).stripInitial()[thing][t] def rmse(exact, sim): return np.sqrt(np.mean((np.array(exact)-np.array(sim))**2)) #Test that classic and swept give the same results def consistency(problem, tf, dt=dto, div=4096, tpb=128): binName = problem + binary executable = op.join(binpath, binName) vfile = op.join(sourcepath, 'temp.dat') collect = [] pord = [1, 2, 0] for s in pord: mh.runCUDA(executable, div, tpb, dt, tf, tf*2.0, s, vfile) antworten = rh.Solved(vfile) collect.append((antworten.varNames, antworten.tFinal, antworten.vals)) print "Last tf = this tf? ", tf == antworten.tFinal[-1] tf = antworten.tFinal[-1] print "{!s} and tf = {:.10f}".format(problem, tf) return collect def rmse(exact,sim): return np.sqrt(np.mean((np.array(exact)-np.array(sim))**2)) #Swap out the second and last levels def switchDict(dct): dSw = dict() dSa = dict() for pkey in dct.keys(): dSw[pkey] = dict() dSa[pkey] = dict() for dtkey in dct[pkey].keys(): for vn in dct[pkey][dtkey].keys(): if vn not in dSw[pkey].keys(): dSw[pkey][vn] = collections.defaultdict(dict) dSa[pkey][vn] = collections.defaultdict(dict) for tf in dct[pkey][dtkey][vn].keys(): dSw[pkey][vn][tf][dtkey] = dct[pkey][dtkey][vn][tf] dSa[pkey][vn][dtkey][tf] = dct[pkey][dtkey][vn][tf] return dSw, dSa def plotit(dct, basename, shower, dtbool): #Figure, Subplot, Line, Xaxis: Yaxis lbls = ["dt (s)", "tFinal (s)"] axlabel = lbls if dtbool else lbls[::-1] ylbl = "Error" for k1 in dct.keys(): fig = plt.figure(figsize=(10,8)) probpath = op.join(plotpath, k1) pltname = k1 + basename pltpath = op.join(probpath, pltname) rw = 1 fig.suptitle(k1 + ' | {} spatial pts'.format(divs), fontsize="large", fontweight='bold') if len(dct[k1].keys()) > 2: rw = 2 for i, k2 in enumerate(dct[k1].keys()): ax = fig.add_subplot(rw, rw, i+1) ax.set_title(str(k2)) ax.set_ylabel(ylbl, fontsize="small") 
ax.set_xlabel(axlabel[0], fontsize="small") ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) for k3 in sorted(dct[k1][k2].keys()): x = [] y = [] for k4 in sorted(dct[k1][k2][k3].keys()): x.append(k4) y.append(dct[k1][k2][k3][k4]) if dtbool: ax.semilogx(x, y, label=str(k3)) else: ax.plot(x, y, label=str(k3)) hand, lbl = ax.get_legend_handles_labels() fig.legend(hand, lbl, loc='upper right', fontsize="large", title=axlabel[1]) fig.tight_layout(pad=0.2, w_pad=0.75, h_pad=1.5) fig.subplots_adjust(bottom=0.08, right=0.85, top=0.9, wspace=0.15, hspace=0.25) fig.savefig(pltpath, dpi=1000, bbox_inches="tight") if shower: plt.show() return None def rmcrud(ext): files = os.listdir(exactpath) for file in files: if file.endswith(ext): os.remove(file) return if __name__ == "__main__": sp.call("make", cwd=sourcepath) #make_KSExact() #Problem and finish time. dt is set by end of swept run. tol = 1e-5 upshot = ["Passed", "Failed"] algs = ["Swept", "Alternative", "Classic"] outfile = op.join(plotpath, 'consistency.out') of = open(outfile, 'w') of.write(str(dtime.datetime.now()) + '\n') for ty in probs: rslt = consistency(*ty) classic = rslt[-1] tf = classic[1][-1] col = dict() for r, a in zip(rslt, algs): col[a] = dict() for i, y in enumerate(r[1]): if y == tf: col[a][r[0][i]] = r[2][i] for a in algs[:-1]: for k in col[a].keys(): diff = rmse(col[a][k], col[algs[-1]][k]) u = upshot[diff>tol] outstr = '{!s} -- {!s} -- {!s}: {!s} ({:.3e})\n'.format(ty, a, k, u, diff) of.write(outstr) print outstr if u == upshot[1]: plt.plot(np.abs(np.array(col[a][k]) - np.array(col[algs[-1]][k]))) plt.show() of.close() #Now exact testing deltat = [5.0e-7, 1.0e-6, 5.0e-6, 1.0e-5, 5.0e-5, 1.0e-4] exacts = {'Heat': heat_exact, 'KS': ks_exact, 'Euler': euler_exact} rlt = collections.defaultdict(dict) rltCompare = collections.defaultdict(dict) for prob in probs: binName = prob[0] + binary executable = op.join(binpath, binName) vfile = op.join(exactpath, 'temp.dat') for dt in deltat: 
mh.runCUDA(executable, divs, tpbs, dt, prob[1], prob[1]*fqCoeff, 0, vfile) rlt[prob[0]][dt] = rh.Solved(vfile).stripInitial() ths = rlt[prob[0]][dt] for tk in ths.keys(): for tks in ths[tk].keys(): print tk, tks #t, dx, divs, varnames (Temperature) Could be arrays? What do rlt[prob[0]]['Exact'] = collections.defaultdict(dict) rd = rlt[prob[0]][deltat[-1]] for vn in rd.keys(): for tf in rd[vn].keys(): rlt[prob[0]]['Exact'][vn][tf] = exacts[prob[0]](tf, divs, vn) for pkey in sorted(rlt.keys()): tDict = rlt[pkey] for dtkey in tDict.keys(): rltCompare[pkey][dtkey] = collections.defaultdict(dict) if isinstance(dtkey, str): continue for vn in tDict[dtkey].keys(): for tf in tDict[dtkey][vn].keys(): print pkey, dtkey, vn, tf rltCompare[pkey][dtkey][vn][tf] = rmse(tDict[dtkey][vn][tf], tDict['Exact'][vn][tf]) rsltbydt, rsltbytf = switchDict(rltCompare) lbls = ["dt (s)", "tFinal (s)"] plotit(rsltbydt, "_ByDeltat.pdf", True, True) plotit(rsltbytf, "_ByFinalTime.pdf", True, False) rmcrud('.dat')
{ "repo_name": "OSUmageed/1DSweptCUDA", "path": "SweptSource/Testing/testProcedure.py", "copies": "2", "size": "8634", "license": "mit", "hash": 940887861803962600, "line_mean": 29.1888111888, "line_max": 104, "alpha_frac": 0.5449386148, "autogenerated": false, "ratio": 2.997916666666667, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9484037914762863, "avg_score": 0.011763473340760803, "num_lines": 286 }
# ~/anaconda2/bin/python # coding:utf-8 from gensim import corpora, models, matutils from sklearn.cluster import KMeans if __name__ == '__main__': word_list = [] for i in range(8000): with open('../../8000_words/' + str(i) + '.txt') as ifs: lines = ifs.readlines() lines = map(lambda line: line.strip(), lines) word_list.append(lines) # 列表,其中每个元素也是一个列表,即每行文字分词后形成的词语列表 # word_list = data_preprocessing_util.return_all_key_words() print len(word_list) # 生成文档的词典,每个词与一个整型索引值对应 word_dict = corpora.Dictionary(word_list) # 词频统计,转化成空间向量格式 corpus_list = [word_dict.doc2bow(text) for text in word_list] print len(corpus_list) # Tfidf模型 计算对应空间向量 # tfidf = models.TfidfModel(corpus_list) # corpus_tfidf = tfidf[corpus_list] # print len(corpus_tfidf) # LDA, 降低维度 lda = models.ldamodel.LdaModel(corpus=corpus_list, id2word=word_dict, num_topics=50, alpha='auto') corpus_lda = lda[corpus_list] print 'finish lda' # K-means, 聚类 kmean = KMeans(n_clusters=8, n_jobs=-1) lda_matrix = matutils.corpus2csc(corpus_lda).transpose() kmean.fit(lda_matrix) res = kmean.predict(lda_matrix) print res with open('output_tmp_res.txt', 'w') as ofs: print len(res) ofs.write('[' + ','.join(map(str, res)) + ']')
{ "repo_name": "AlgorithmLover/OJCodes", "path": "qlcoder/data_mining/topic_model/scripts/python/topic_model.py", "copies": "1", "size": "1515", "license": "mit", "hash": -4800162080615834000, "line_mean": 29.7045454545, "line_max": 102, "alpha_frac": 0.6262028127, "autogenerated": false, "ratio": 2.4743589743589745, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8599458521392602, "avg_score": 0.00022065313327449248, "num_lines": 44 }
# ~/anaconda2/bin/python # coding:utf-8 import operator import re # with open('output_all_words_sorted.txt') as ifs: # lines = ifs.readlines() # data_list = map(lambda ele: ele.rstrip().split(' '), lines) # data_list = map(lambda ele: [ele[0].split(':')[1], ele[1].split(':')[1].split(',')], data_list) # print len(data_list) # frequency_dict = dict() # for words in data_list: # for word in words[1]: # if word not in frequency_dict: # frequency_dict[word] = 0 # frequency_dict[word] += 1 # # print len(frequency_dict) # # sorted_x = sorted(frequency_dict.items(), key=operator.itemgetter(1))[::-1] # with open('frequency_top_words_1000.txt', 'w') as ofs: # for i in range(1000): # print sorted_x[i][0], sorted_x[i][1] # ofs.write(' '.join([sorted_x[i][0], str(sorted_x[i][1])]) + '\n') word_list = [] for i in range(8000): with open('../8000_words/' + str(i) + '.txt') as ifs: lines = ifs.readlines() lines = list(set(map(lambda line: line.strip(), lines))) word_list.append(lines) print len(word_list) frequency_dict = dict() for words in word_list: for word in words: if word not in frequency_dict: frequency_dict[word] = 0 frequency_dict[word] += 1 print len(frequency_dict) sorted_x = sorted(frequency_dict.items(), key=operator.itemgetter(1))[::-1] with open('frequency_top_words.txt', 'w') as ofs: for i in range(2000): # print sorted_x[i][0], sorted_x[i][1] ofs.write(' '.join([sorted_x[i][0], str(sorted_x[i][1])]) + '\n')
{ "repo_name": "AlgorithmLover/OJCodes", "path": "qlcoder/data_mining/topic_model/scripts/python/human_tag.py", "copies": "1", "size": "1633", "license": "mit", "hash": -2383531042676773000, "line_mean": 34.5, "line_max": 101, "alpha_frac": 0.5750153092, "autogenerated": false, "ratio": 2.990842490842491, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.40658578000424905, "avg_score": null, "num_lines": null }
# ~/anaconda2/bin/python # coding:utf-8 import operator competition_key_word = ['足球', '篮球', '羽毛球', '客场', '主场', '队员', '联赛', '比赛', '棋手', '棋赛'] competition_file_name = 'competition.txt' finance_key_word = ['上涨', '大盘', '股市', '指数', '投资'] finance_file_name = 'finance.txt' def extract_key_words(file_name, key_words): count = 0 for i in range(8000): with open('../8000_words/' + str(i) + '.txt') as ifs: lines = ifs.readlines() lines = set(map(lambda line: line.strip(), lines)) one_line = ','.join(lines) for key_word in key_words: if key_word in one_line: count += 1 with open(file_name, 'a') as ofs: tmp_lst = [str(i), one_line] ofs.write(' '.join(tmp_lst) + '\n') break print count def cal_frequency(word_list): frequency_dict = dict() for words in word_list: for word in words: if word not in frequency_dict: frequency_dict[word] = 0 frequency_dict[word] += 1 print len(frequency_dict) sorted_x = sorted(frequency_dict.items(), key=operator.itemgetter(1))[::-1] return sorted_x def extract_statistics(filename): with open(filename) as ifs: lines = ifs.readlines() lines = map(lambda line: line.strip().split()[1].split(','), lines) word_list = [] for line in lines: word_list.append(line) sorted_pair = cal_frequency(word_list) print len(sorted_pair) with open('frequency_top_word_' + filename + '.txt', 'a') as ofs: for i in range(200): ofs.write(' '.join([sorted_pair[i][0], str(sorted_pair[i][1])]) + '\n') extract_key_words(finance_file_name, finance_key_word)
{ "repo_name": "AlgorithmLover/OJCodes", "path": "qlcoder/data_mining/topic_model/scripts/python/statistics.py", "copies": "1", "size": "1900", "license": "mit", "hash": 3211918996458567000, "line_mean": 30.1525423729, "line_max": 87, "alpha_frac": 0.5375408052, "autogenerated": false, "ratio": 3.10472972972973, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9140310505925833, "avg_score": 0.00039200580077947974, "num_lines": 59 }
# ~/anaconda2/bin/python # coding:utf-8 import os import re import jieba.analyse stop_words_set = {} stop_file_name = 'cn_stop_words.txt' res_output_file_name = 'output_whole_corpus.txt' res_output_all_words_file_name = 'output_all_words.txt' bad_info_file_name = 'output_bad_info.txt' def init_stop_words_set(file_name): global stop_words_set with open(file_name, 'r') as file: stop_words_set = {' '} | set([line.strip() for line in file]) def generate_whole_corpus_file(): with open(res_output_all_words_file_name, 'w') as ofs_all: with open(res_output_file_name, 'w') as ofs: walk = os.walk('./8000') line_list = {} all_words_list = [] for root, dir, files in walk: for file_name in files: my_path = root + '/' + file_name if '.txt' in file_name: line_list[file_name.rstrip('.txt')] = extract_key_words(my_path) all_words_list.append(extract_all_words(my_path)) ofs.write(str(line_list)) ofs_all.write(str(all_words_list)) def is_filtered_pattern(your_str): your_str = str(your_str) if re.match('[0-9]+', your_str): return True if re.match('[a-zA-Z]+', your_str): return True if your_str in stop_words_set: return True if re.match('.*[\r|\n]].*', your_str): return True if re.match('.* .*', your_str): return True def extract_key_words(input_file_name): with open(input_file_name) as ifs: lines = ifs.readlines() line = '\n'.join(lines) key_words = jieba.analyse.extract_tags(line, topK=20) # print key_words key_words = filter(lambda ele: not is_filtered_pattern(ele), key_words) return key_words def extract_all_words(input_file_name): with open(input_file_name) as ifs: lines = ifs.readlines() ret_all_words = [] for line in lines: all_words = jieba.analyse.extract_tags(line, topK=15) all_words = filter(lambda ele: not is_filtered_pattern(ele), all_words) ret_all_words.extend(all_words) return ret_all_words def read_file_get_dict(): with open(res_output_file_name) as ifs: eval_str = ifs.readline() my_dict = eval(eval_str) return my_dict def find_bad_things(): my_dict = 
read_file_get_dict() bad_list = [] with open('output_readable.txt', 'w') as readable_ofs: with open(bad_info_file_name, 'w') as ofs: for i in range(8000): if len(my_dict[str(i)]) < 5: tmp_str = 'question:' + str(i) + ' data:' + ','.join(my_dict[str(i)]) + ' data length:' + str( len(my_dict[str(i)])) + '\n' bad_list.append(str(i)) ofs.write(tmp_str) else: readable_ofs.write('question:' + str(i) + ' data:' + ','.join(my_dict[str(i)]) + '\n') return set(bad_list) def return_all_key_words(): my_dict = read_file_get_dict() words_list = [] for ele in my_dict: words_list.append(my_dict[ele]) return words_list def return_useful_words_list(): my_dict = read_file_get_dict() bad_set = find_bad_things() words_list = [] for ele in my_dict: if ele not in bad_set: words_list.append(my_dict[ele]) return words_list def return_all_words(): with open(res_output_all_words_file_name) as ifs: eval_str = ifs.readline() arr = eval(eval_str) return arr if __name__ == '__main__': init_stop_words_set(stop_file_name) generate_whole_corpus_file() filtered_word_list = return_useful_words_list() print len(filtered_word_list)
{ "repo_name": "AlgorithmLover/OJCodes", "path": "qlcoder/data_mining/topic_model/scripts/python/data_preprocessing_util.py", "copies": "1", "size": "3820", "license": "mit", "hash": 3799680655337158000, "line_mean": 29.3174603175, "line_max": 114, "alpha_frac": 0.5620418848, "autogenerated": false, "ratio": 3.1993299832495814, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.42613718680495816, "avg_score": null, "num_lines": null }
# ~/anaconda2/bin/python # coding:utf-8 import re import jieba.analyse import os stop_words_set = set() stop_file_name = '../../cn_stop_words.txt' noise_words_first = ['可能与主题无关的词', 'badcase', '噪音词', ] noise_words_second = ['IT', '健康', '体育', '旅游', '教育', '文化', '军事', '财经'] map_res = map(lambda first: [first + second for second in noise_words_second], noise_words_first) noise_words = reduce(lambda left, right: left + right, map_res) blank_symbols = [r'\xe3\x80\x80', r'&nbsp;', r'&nbsp', r'&gt;', r'&gt', r'\x00', r'\s'] all_eng_count = 0 def init_stop_words_set(file_name): global stop_words_set with open(file_name, 'r') as ifs: lines = ifs.readlines() stop_words_set = set(map(lambda line: line.strip(), lines)) stop_words_set = map(lambda word: unicode(word, 'utf-8'), stop_words_set) def remove_regex(regex_str_list, lines): ret_lines = [] for line in lines: ret_lines.extend(re.sub('|'.join(regex_str_list), '', line).split()) return ret_lines def extract_eng_words(lines): eng_words = [] for line in lines: if re.match('.*[a-zA-Z]+.*', line): for ele in re.findall('[a-zA-Z]+', line): eng_words.append(ele) return eng_words def get_lines_single_file(file_name): with open(file_name) as ifs: lines = ifs.readlines() lines = remove_regex(blank_symbols, lines) lines = remove_regex(noise_words, lines) eng_words = extract_eng_words(lines) all_words_count = len(''.join(lines)) eng_words_count = len(''.join(eng_words)) global all_eng_count if float(eng_words_count) / all_words_count > 0.3: print 'cat' + ' ' + file_name all_eng_count += 1 return lines def remove_duplicate(lines): my_set = set() ret_lines = [] for line in lines: if line not in my_set: my_set.add(line) ret_lines.append(line) return ret_lines def get_words_single_file(filename): lines = remove_duplicate(get_lines_single_file(filename)) map_result = map(lambda line: jieba.posseg.cut(line), lines) words = reduce(lambda left, right: list(left) + list(right), map_result) word_vec = [] for word, tag in words: if word not in 
stop_words_set and re.match(r'n.*|v.*|a.*', tag) and not re.match(r'nr|ns|nz', tag): word_vec.append(word) # print word, tag_weight return word_vec def get_sorted_lines(): for i in range(8000): lines = remove_duplicate(get_lines_single_file('../../8000/' + str(i) + '.txt')) with open('../../8000_copy/' + str(i) + '.txt', 'w') as ofs: ofs.write('\n'.join(lines)) def get_words(): for i in range(8000): words = get_words_single_file('../../8000/' + str(i) + '.txt') write_words = '\n'.join(words) write_words = write_words.encode('utf-8') os.system('mkdir -p ../../8000_words/') with open('../../8000_words/' + str(i) + '.txt', 'w') as ofs: ofs.write(write_words) init_stop_words_set(stop_file_name) get_sorted_lines() get_words()
{ "repo_name": "AlgorithmLover/OJCodes", "path": "qlcoder/data_mining/topic_model/scripts/python/jieba_wrapper.py", "copies": "1", "size": "3191", "license": "mit", "hash": 420706411698867200, "line_mean": 30.0792079208, "line_max": 107, "alpha_frac": 0.5839439312, "autogenerated": false, "ratio": 2.955743879472693, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9037605487290925, "avg_score": 0.0004164646763537471, "num_lines": 101 }
# ~/anaconda2/bin/python # coding:utf-8 import re def check_error(): with open('/home/cheyulin/GitRepos/OJCodes/qlcoder/database/query/tuple_query/build/tmp.txt') as ifs: lines = ifs.readlines() to_used = lines[15: 15 + 13262] to_used = map(lambda ele: ele.lstrip().rstrip(), to_used) my_set = set() my_list = [-1] for ele in to_used: # print ele if not ('拉链' in ele or '其他' in ele): print 'bad' break tmp = ele.split(', ') sale_num = int(tmp[2]) created_at = long(tmp[7]) dsr = float(tmp[8]) id = long(tmp[0][1:]) my_set.add(id) my_list.append(id) print long(tmp[0][1:]) if not (8468 <= sale_num <= 20576 and 1400703171 <= created_at <= 1442106921 and 3.7 <= dsr <= 4.6): print 'bad' break print '\n' print 'len', len(to_used), len(my_set) print 'first:', to_used[0] print 'query string:', lines[14] for i in range(1, len(to_used)): if my_list[i] == my_list[i - 1]: print my_list[i], i if __name__ == '__main__': tagStr = "印花 新款 秋冬 纯色 字母 套装 套头 宽松 连帽 拼接 显瘦 拉链 条纹 " \ "加厚 修身 收腰 情侣 大码 撞色 加绒 清新 休闲 文艺 甜美 运动 学院 韩系 " \ "街头 通勤 OL 欧美 卡通 森系 复古 轻熟 民族 田园 简约 可爱 个性 聚酯纤维 涂层布 " \ "涤纶 毛呢布 锦纶 其他 迷彩布 聚酯 羊皮立领 双层领 堆堆领 连帽 翻领 其他 可脱卸帽 " \ "V领 圆领 口袋 拼接 品牌LOGO 拉链 系带 绣花 带毛领 字母 绑带 纽扣" tag = tagStr.split(" ") tag_set = set() for ele in tag: tag_set.add(ele) for ele in tag_set: for ele2 in tag_set: if ele != ele2 and ele in ele2: print ele, ele2 print len(tag_set)
{ "repo_name": "AlgorithmLover/OJCodes", "path": "qlcoder/database/query/check_error.py", "copies": "1", "size": "2080", "license": "mit", "hash": 1657614368806546000, "line_mean": 31.4727272727, "line_max": 112, "alpha_frac": 0.4848824188, "autogenerated": false, "ratio": 2.1287246722288438, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.3113607091028844, "avg_score": null, "num_lines": null }
# ~/anaconda2/bin/python # coding:utf-8 # with open('tmp.txt') as line_ifs: # ref_words = line_ifs.readlines() import operator my_dict = { 0: '教育', 1: 'IT', # .. 2: '文化', 3: '旅游', 4: '体育', 5: '健康', 6: '财经', 7: '军事' } qlcoder_dict = { 'IT': 1, '健康': 2, '体育': 3, '旅游': 4, '教育': 5, '文化': 6, '军事': 7, '财经': 8 } def get_frequency_dict(words): frequency_dict = dict() for word in words: if word not in frequency_dict: frequency_dict[word] = 0 frequency_dict[word] += 1 sorted_x = sorted(frequency_dict.items(), key=operator.itemgetter(1))[::-1] return map(lambda ele: ele[0], sorted_x) def get_word_list(): word_list = [] for i in range(8000): with open('../../8000_words/' + str(i) + '.txt') as ifs: lines = ifs.readlines() lines = map(lambda line: line.strip(), lines) word_list.append(get_frequency_dict(lines)) return word_list with open('output_tmp_res.txt') as my_ifs: eval_str = my_ifs.readline() mark_lst = eval(eval_str) label = [] category_words = [[] for i in range(8)] word_list = get_word_list() for i in range(8000): label.append(qlcoder_dict[my_dict[mark_lst[i]]]) category_words[mark_lst[i]].extend(word_list) # if mark_lst[i] == 7: # with open('../../8000_words/' + str(i) + '.txt') as ifs: # print str(i) + ','.join(map(lambda line: line.strip(), ifs.readlines())) # label = map(str, label) label = map(str, label) print len(label) print '[' + ','.join(label) + ']' print len(category_words) idx=0 for category in category_words: print len(category), my_dict[idx] idx+=1 # category_words = map(lambda category: get_frequency_dict(category), category_words)
{ "repo_name": "AlgorithmLover/OJCodes", "path": "qlcoder/data_mining/topic_model/scripts/python/lookup.py", "copies": "1", "size": "1946", "license": "mit", "hash": -1484252413989892400, "line_mean": 22.9240506329, "line_max": 90, "alpha_frac": 0.5354497354, "autogenerated": false, "ratio": 2.8549848942598186, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.38904346296598186, "avg_score": null, "num_lines": null }
# ~/anaconda2/bin/python # coding:utf-8 tag_word_dict = dict() with open('frequency_top_1000.txt') as ifs: lines = ifs.readlines() for line in lines: info = line.rstrip().split(' ') if len(info) > 2: word = info[0] tag = info[2] if tag not in tag_word_dict: tag_word_dict[tag] = [] tag_word_dict[tag].append(word) print len(tag_word_dict) for tag in tag_word_dict: print tag, len(tag_word_dict[tag]) # print tag_word_dict[tag_weight] def get_tag_dict(word_list, tag_word_dict): tuple_dict = dict() for word in word_list: for tag in tag_word_dict: if word in tag_word_dict[tag]: if tag not in tuple_dict: tuple_dict[tag] = 0 tuple_dict[tag] += 1 return tuple_dict def pretty_dict(my_tag_dict): str_list = [] for tag in my_tag_dict: str_list.append(tag + '@' + str(my_tag_dict[tag])) return ','.join(str_list) with open('output_readable.txt') as ifs: lines = ifs.readlines() data_list = map(lambda ele: ele.rstrip().split(' '), lines) data_list = map(lambda ele: [ele[0].split(':')[1], ele[1].split(':')[1].split(',')], data_list) data_list = map(lambda ele: [ele[0], ele[1], get_tag_dict(ele[1], tag_word_dict)], data_list) okay_tag = 0 with open('output_readable_taged.txt', 'w') as ofs: for data in data_list: my_lst = ['question:' + str(data[0]), 'data:' + ','.join(data[1]), 'tags:' + pretty_dict(data[2])] if len(data[2]) >= 1: okay_tag += 1 else: print ' '.join(my_lst) ofs.write(' '.join(my_lst) + '\n') print okay_tag
{ "repo_name": "AlgorithmLover/OJCodes", "path": "qlcoder/data_mining/topic_model/scripts/python/tag_preprocessing.py", "copies": "1", "size": "1768", "license": "mit", "hash": 4635893761364863000, "line_mean": 30.0175438596, "line_max": 110, "alpha_frac": 0.5299773756, "autogenerated": false, "ratio": 3.1684587813620073, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9195873605151336, "avg_score": 0.0005125103621344223, "num_lines": 57 }
#!/anaconda2/bin/python
# Interactive console Scrabble scorekeeper (Python 2: uses raw_input).
# Players type each turn's words with inline markup -- (2)/(3) after a letter
# for double/triple letter, (0) for a blank tile, [2]/[3] after a word for
# double/triple word -- and the script tallies scores and saves the full game
# log as JSON.
import re
import json
from datetime import datetime

print('-------------------------------------------------------------------------------------')
print('-------------------------------------------------------------------------------------')
print('                                 Scrabble Scorecard                                  ')
print('-------------------------------------------------------------------------------------')
print('-------------------------------------------------------------------------------------')
print('Enter all words made in a turn separated with space. For e.g.: TIGER TWO')
print('Enter double/triple letter words within () after the letter. For e.g: TIG(2)ER TW(3)O')
print('Enter double/triple word within [] after the word. For e.g.: TIG(2)ER[2] TW(3)O[3]')
print('Enter blank letter with (0) after the letter. For e.g.: T(0)IGER T(0)WO')
print('-------------------------------------------------------------------------------------')
print('-------------------------------------------------------------------------------------')

# Dic of Scrabble points (face value of each tile letter)
LETTERPOINTDICT = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,
                   'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1,
                   'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1,
                   'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10}
# Set of valid characters (letters plus the markup characters below)
VALIDCHARS = set('abcdefghijklmnopqrstuvwxyz023()[] ')
# List of markup characters (never scored as letters)
MARKUPCHARS = ['(', '0', '2', '3', ')', '[', ']']

# Get the number of players
while True:
    rawinput = raw_input('Enter number of players playing the game (1-4): ').strip()
    if set(rawinput) <= set('1234') and len(rawinput) > 0:
        break
numberofplayers = int(rawinput)
print('-------------------------------------------------------------------------------------')

# Get the names of the players
index = 0
players = []
while index < numberofplayers:
    playername = ''
    while len(playername) == 0:
        playername = str(raw_input('Enter player name: '))
    players.append(playername)
    index = index + 1
# Cumulative score per player, indexed like `players`.
runningpoints = [0] * numberofplayers
print('-------------------------------------------------------------------------------------')

# This list will save all the moves of the game
game = []

# Start the game
while True:
    playerindex = 0
    while playerindex < numberofplayers:
        # Strictly allow only valid characters that we can process.
        # We will keep asking for the input till we get a valid one
        while True:
            rawinput = str(raw_input('Enter words by player ' + players[playerindex] + ': ')).strip().lower()
            if set(rawinput) <= VALIDCHARS and len(rawinput) > 0:
                break
        words = rawinput.split()

        # Now we need to parse the words based on the following rules:
        # Double/Triple letter is denoted with (2) or (3) just after the letter
        # Blank tile will be denoted with (0) just after the letter
        # Double/Triple word is denoted with [2] or [3] just after the word
        turnpoints = 0
        turnwords = []
        for word in words:
            # Positions of '(' in each "(n)" group; the boosted letter sits
            # one character before the '('.
            bonusletters = list([m.start() for m in re.finditer('\([023]\)', word)])
            bonusletterindex = [index - 1 for index in bonusletters]

            # Calculate points
            wordpoints = 0
            letters = list(word)
            for index, letter in enumerate(letters):
                letterbonus = 1
                if index in bonusletterindex:
                    # letters[index+2] is the digit inside "(n)"; "(0)" marks
                    # a blank tile, so the letter scores 0.
                    letterbonus = int(letters[index+2])
                if letter not in MARKUPCHARS:
                    wordpoints = wordpoints + LETTERPOINTDICT[letter] * letterbonus

            # Check if there is word bonus
            if letters[-1] == ']' and letters[-3] == '[':
                wordpoints = wordpoints * int(letters[-2])

            plainword = ''.join( c for c in word if c not in MARKUPCHARS)
            print(plainword.upper() + ' - ' + str(wordpoints))

            # Save the words
            turnword = {}
            turnword['PLAINWORD'] = plainword.upper()
            turnword['WORDPOINTS'] = wordpoints
            turnwords.append(turnword)
            turnpoints = turnpoints + wordpoints

        # Add bonus points if all tiles used
        bonuspoints = 50 if str(raw_input('Add 50 bonus points? (y/n): ')) == 'y' else 0
        turnpoints = turnpoints + bonuspoints
        print('Points earned in this turn: ' + str(turnpoints))

        # Soring the final points
        runningpoints[playerindex] = runningpoints[playerindex] + turnpoints

        # Save the turn data in a dictionary
        turn = {}
        turn['PLAYER'] = players[playerindex]
        turn['TURNWORDS'] = turnwords
        turn['BONUSPOINTS'] = bonuspoints
        turn['TURNPOINTS'] = turnpoints
        turn['RUNNINGPOINTS'] = runningpoints[playerindex]

        # Append turn to game
        game.append(turn)

        # Next player
        playerindex = playerindex + 1

    print('-------------------------------------------------------------------------------------')
    print('-------------------------------------------------------------------------------------')
    # Show current points status
    for index, player in enumerate(players):
        print(player + ': ' + str(runningpoints[index]))
    print('-------------------------------------------------------------------------------------')
    print('-------------------------------------------------------------------------------------')
    continuegame = str(raw_input('Continue? (y/n): '))
    if continuegame == 'n':
        break
print('-------------------------------------------------------------------------------------')

# Save the game to a file named after the current timestamp.
filename = datetime.now().strftime('%d-%b-%Y-%H-%M') + '.txt'
with open(filename, 'w') as gamefile:
    gamefile.write(json.dumps(game))
{ "repo_name": "chiragr/utils", "path": "scrabble/scrabblescore.py", "copies": "1", "size": "5967", "license": "mit", "hash": 7037802657401360000, "line_mean": 39.0469798658, "line_max": 228, "alpha_frac": 0.465057818, "autogenerated": false, "ratio": 4.323913043478261, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5288970861478262, "avg_score": null, "num_lines": null }
#!~/anaconda2/bin/python import sys if __name__=='__main__': if len(sys.argv) < 3: print("USAGE: python" + sys.argv[0] + " keyword_dir keyword_list_file") exit(1) keyword_dir = sys.argv[1] keyword_list_file = sys.argv[2] keyword_list = open(keyword_list_file).readlines() for keyword_id in keyword_list: phn_list = open(keyword_dir + keyword_id.strip() + ".PHN").readlines() start_time, end_time, phone_id = phn_list[0].strip().split() fid = open(keyword_dir + keyword_id.strip() + ".bnd", "w") bnd_times=[] bnd_times.append(0) for i in range(len(phn_list)): new_start_time, new_end_time, new_phone_id = phn_list[i].strip().split() next_bnd = int(((int(new_end_time)-int(start_time))-400) / 160 + 1) if next_bnd <= 0: print("warnning:bnd less than 0\n") continue bnd_times.append(next_bnd) fid.writelines(str(len(bnd_times))) for i in range(len(bnd_times)): fid.writelines( " " + str(bnd_times[i])) fid.writelines("\n") fid.close()
{ "repo_name": "jingyonghou/TIMIT_STD", "path": "script/phn2bnd.py", "copies": "1", "size": "1147", "license": "mit", "hash": 8274221762197997000, "line_mean": 36, "line_max": 84, "alpha_frac": 0.5483870968, "autogenerated": false, "ratio": 3.1, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.41483870968, "avg_score": null, "num_lines": null }
#!~/anaconda2/bin/python -u
import numpy as np  # --- math processing


class IntegralImage:
    """Integral image (summed-area table) of a sample image, plus boosting
    metadata.

    Attributes:
        label:  class label of the sample (as passed by the caller).
        weight: boosting weight of the sample; 0 until setWeight is called.
        InIm:   the integral-image array, same shape as the input image.
    """

    def __init__(self, imData, label):
        """Store the label and immediately build the integral image."""
        self.label = label
        self.weight = 0
        self.computeII(imData)

    def computeII(self, imData):
        """Build the integral image as a cumulative sum over every axis of
        the input array and store it in self.InIm."""
        acc = imData.cumsum(axis=0)
        for axis in range(1, imData.ndim):
            acc = acc.cumsum(axis=axis)
        self.InIm = acc

    def getSum(self, TL, BR):
        """Return the integral-image corner sum for the rectangle (TL, BR).

        TL and BR are given in (x, y) order and converted to (row, col)
        before indexing.  A degenerate rectangle (TL == BR) returns the
        single integral-image entry at that point.
        """
        top_left = (TL[1], TL[0])
        bottom_right = (BR[1], BR[0])
        if top_left == bottom_right:
            return self.InIm[top_left]
        corner_sum = self.InIm[bottom_right] + self.InIm[top_left]
        cross_sum = (self.InIm[top_left[0], bottom_right[1]]
                     + self.InIm[bottom_right[0], top_left[1]])
        return corner_sum - cross_sum

    def setLabel(self, label):
        """Assign the class label (e.g. 1 = face, 0 = non-face)."""
        self.label = label

    def setWeight(self, weight):
        """Assign the (un-normalised) boosting weight for this sample."""
        self.weight = weight

    def getII(self):
        """Return the integral-image array."""
        return self.InIm


def computeII(imData):
    """Integral images for a stack of 2-D images.

    The first axis of ``imData`` indexes images; each one is reduced with a
    row-wise then column-wise cumulative sum.  Returns a float array of the
    same shape.
    """
    outData = np.zeros(imData.shape)
    for idx in range(imData.shape[0]):
        outData[idx, :, :] = imData[idx, :, :].cumsum(axis=0).cumsum(axis=1)
    return outData


def getSum(II, TL, BR):
    """Integral-image corner sum over the rectangle (TL, BR).

    Module-level twin of IntegralImage.getSum operating on a precomputed
    integral image ``II``; TL/BR are in (x, y) order.
    """
    top_left = (TL[1], TL[0])
    bottom_right = (BR[1], BR[0])
    if top_left == bottom_right:
        return II[top_left]
    return ((II[bottom_right] + II[top_left])
            - (II[top_left[0], bottom_right[1]] + II[bottom_right[0], top_left[1]]))
{ "repo_name": "lquirosd/TFM", "path": "ILA/code/IntegralImage.py", "copies": "1", "size": "5710", "license": "apache-2.0", "hash": 481117663569981100, "line_mean": 32.7869822485, "line_max": 104, "alpha_frac": 0.3290718039, "autogenerated": false, "ratio": 4.746467165419784, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5575538969319783, "avg_score": null, "num_lines": null }
#!~/anaconda2/envs/ipython3/bin/python
# Utilities for fitting switching linear dynamical systems to Connected Worlds
# water-level data: a Kalman filter/smoother, an EM loop for the transition
# matrix, breakpoint (regime-switch) learning, Stan-based sampling, and
# plotting/video helpers.
import numpy as np
import scipy as sp
import scipy.stats as stats
import pandas as pd
import copy
import cv2
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import pickle

def N_A(**kwargs):
    """Extract the state dimension N and observation matrix A from kwargs
    (defaults: N=5, A=identity(N))."""
    N = kwargs['N'] if 'N' in kwargs else 5
    A = kwargs['A'] if 'A' in kwargs else np.eye(N)
    return N, A

def KalmanFilterUpdate(y_t, x_tmin1_tmin1, P_tmin1_tmin1, Phi, Q, R, **kwargs):
    """One forward Kalman-filter step: predict from the previous posterior,
    then correct with observation y_t.

    Returns a dict with the posterior mean/covariance (x_t_t, P_t_t), the
    one-step-ahead prediction (x_tmin1_t, P_tmin_t), the innovation and its
    covariance (and inverse), and the Kalman gain K_t.  `weight` scales the
    innovation (used for weighted/down-weighted observations).
    """
    weight = kwargs['weight'] if 'weight' in kwargs else 1
    N, A = N_A(**kwargs)
    # ii - we need the forward Kalman Filter
    x_t_tmin1 = Phi.dot(x_tmin1_tmin1)
    P_t_tmin1 = Phi.dot(P_tmin1_tmin1).dot(Phi.T)
    # Process noise is only added to the top-left N x N state block.
    P_t_tmin1[0:N, 0:N] += Q
    sigma_t = A.dot(P_t_tmin1).dot(A.T) + R
    sigma_t_inv = np.linalg.pinv(sigma_t)
    K_t = P_t_tmin1.dot(A.T).dot(sigma_t_inv)
    epsilon_t = (y_t - (A.dot(x_t_tmin1))) * weight
    x_t_t = x_t_tmin1 + K_t.dot(epsilon_t)
    P_t_t = (np.eye(N) - K_t.dot(A)).dot(P_t_tmin1)
    return {
        "x_t_t" : x_t_t,
        "x_tmin1_t" : x_t_tmin1,
        "P_t_t" : P_t_t,
        "P_tmin_t" : P_t_tmin1,
        "Sig_t_inv" : sigma_t_inv,
        "Sig_t" : sigma_t,
        "Innovation_t" : epsilon_t,
        "K_t": K_t
    }

def ForwardKalmanFilter(ys, mu0, Sig0, Phi, Q, R, **kwargs):
    """Run the forward Kalman filter over all observations ``ys``.

    kwargs must contain 'N' (state dim); optional 'weights' gives one
    innovation weight per observation.  Returns filtered states/covariances
    (Xs, Ps, index 0 = prior), one-step predictions, the final gain Kn
    (needed by the lag-one covariance smoother), an incomplete-data
    log-likelihood accumulator, and the innovations.
    """
    x_tmin1_tmin1 = mu0
    P_tmin1_tmin1 = Sig0
    Xs = np.zeros((len(ys) + 1, len(x_tmin1_tmin1)))
    Ps = np.zeros((len(ys) + 1, P_tmin1_tmin1.shape[0], P_tmin1_tmin1.shape[1]))
    X_smin1_s = np.zeros((len(ys) + 1, len(x_tmin1_tmin1)))
    P_smin1_s = np.zeros((len(ys) + 1, P_tmin1_tmin1.shape[0], P_tmin1_tmin1.shape[1]))
    Xs[0] = x_tmin1_tmin1
    Ps[0] = P_tmin1_tmin1
    X_smin1_s[0] = x_tmin1_tmin1
    P_smin1_s[0] = P_tmin1_tmin1
    incomplete_likelihood = 0
    innovations = np.zeros_like(ys)
    for i, yt in enumerate(ys):
        if 'weights' in kwargs:
            param_update = KalmanFilterUpdate(
                y_t = yt,
                x_tmin1_tmin1 = Xs[i],
                P_tmin1_tmin1 = Ps[i],
                Phi = Phi,
                Q = Q,
                R = R,
                N = kwargs['N'],
                weight = kwargs['weights'][i]
            )
        else:
            param_update = KalmanFilterUpdate(
                y_t = yt,
                x_tmin1_tmin1 = Xs[i],
                P_tmin1_tmin1 = Ps[i],
                Phi = Phi,
                Q = Q,
                R = R,
                N = kwargs['N']
            )
        x_t_t = param_update['x_t_t']
        P_t_t = param_update['P_t_t']
        x_tmin1_t = param_update['x_tmin1_t']
        P_tmin1_t = param_update['P_tmin_t']
        Xs[i+1] = x_t_t
        Ps[i+1] = P_t_t
        X_smin1_s[i+1] = x_tmin1_t
        P_smin1_s[i+1] = P_tmin1_t
        this_likelihood = 0.5 * np.log(np.linalg.det(param_update['Sig_t']))
        innov_err = param_update['Innovation_t'].T.dot(param_update['Sig_t_inv']).dot(param_update['Innovation_t'])
        # NOTE(review): the textbook incomplete-data likelihood adds
        # 0.5 * innov_err itself, not 0.5 * log(innov_err) -- confirm this
        # deviation is intentional before comparing likelihood values.
        this_likelihood += 0.5 * np.log(innov_err)
        incomplete_likelihood += this_likelihood
        innovations[i] = param_update['Innovation_t']
    return {
        "Xs": Xs,
        "Ps": Ps,
        "X_smin1_s":X_smin1_s,
        "P_smin1_s":P_smin1_s,
        "Kn": param_update['K_t'],
        "incomplete_likelihood": incomplete_likelihood,
        'innovations': innovations
    }

def SmootherUpdate(x_n_t, P_n_t, t, Phi, fwd_params, **kwargs):
    """One backward RTS-smoother step: combine the smoothed estimate at t
    with the filtered quantities at t-1 to produce the smoothed estimate at
    t-1 and the smoother gain J_tmin1."""
    x_tmin1_tmin1 = fwd_params['Xs'][t-1]
    x_tmin1_t = fwd_params['X_smin1_s'][t]
    P_tmin1_tmin1 = fwd_params['Ps'][t-1]
    P_tmin1_t = fwd_params['P_smin1_s'][t]
    J_tmin1 = P_tmin1_tmin1.dot(Phi.T).dot(np.linalg.pinv(P_tmin1_t))
    # NOTE(review): `weight` is computed here but never used -- looks dead.
    weight = 1 if 'weights' not in kwargs else kwargs['weights'][t]
    x_n_tmin1 = x_tmin1_tmin1 + J_tmin1.dot(x_n_t - x_tmin1_t)
    P_n_tmin1 = P_tmin1_tmin1 + J_tmin1.dot( P_n_t - P_tmin1_t).dot(J_tmin1.T)
    return {
        "x_n_tmin1": x_n_tmin1,
        "P_n_tmin1": P_n_tmin1,
        "J_tmin1" : J_tmin1
    }

def KalmanSmoother(fwd_params, Phi, **kwargs):
    """Run the RTS (fixed-interval) smoother backwards over the output of
    ForwardKalmanFilter.  Returns smoothed means Xn, covariances Pn and
    smoother gains Jn."""
    n = len(fwd_params["Xs"])
    x_n_n = fwd_params["Xs"][-1]
    P_n_n = fwd_params["Ps"][-1]
    Xn = np.zeros((n, len(x_n_n)))
    Pn = np.zeros((n, P_n_n.shape[0], P_n_n.shape[1]))
    Jn = np.zeros((n, P_n_n.shape[0], P_n_n.shape[1]))
    Xn[-1] = x_n_n
    Pn[-1] = P_n_n
    for t in range(n-1, -1, -1):
        smt_update = SmootherUpdate(x_n_n, P_n_n, t, Phi, fwd_params, **kwargs)
        x_n_n = smt_update["x_n_tmin1"]
        P_n_n = smt_update["P_n_tmin1"]
        Xn[t] = x_n_n
        Pn[t] = P_n_n
        Jn[t] = smt_update["J_tmin1"]
    return {
        "Xn": Xn,
        "Pn": Pn,
        "Jn": Jn
    }

def OneLagUpdate(P_n_t_tmin1, t, Ps, Jn, Phi):
    """One step of the lag-one covariance smoother recursion."""
    P_n_tmin1_tmin2 = Ps[t-1].dot(Jn[t-2].T) + Jn[t-1].dot(P_n_t_tmin1 - Phi.dot(Ps[t-1])).dot(Jn[t-2].T)
    return P_n_tmin1_tmin2

def OneLagCovarianceSmoother(fwd_params, bkwd_params, Phi, **kwargs):
    """Compute the smoothed lag-one covariances P_{t,t-1|n} needed by the
    EM M-step, initialised from the final Kalman gain."""
    N, A = N_A(**kwargs)
    n = len(fwd_params["Xs"])
    K = len(fwd_params["Xs"][0])
    Kn = fwd_params['Kn']
    P_n_n_nmin1 = (np.eye(K) - Kn.dot(A)).dot(Phi).dot(bkwd_params["Pn"][-2])
    P_one_lag = np.zeros((n, P_n_n_nmin1.shape[0], P_n_n_nmin1.shape[1]))
    P_one_lag[-1] = P_n_n_nmin1
    Ps = fwd_params['Ps']
    Jn = bkwd_params['Jn']
    for t in range(n-1, 1, -1):
        P_n_tmin1_tmin2 = OneLagUpdate(P_n_n_nmin1, t, Ps, Jn, Phi)
        P_one_lag[t] = P_n_tmin1_tmin2
    return P_one_lag

def MaximisationUpdate(ys, fwd_params, bkwd_params, Phi, **kwargs):
    """EM M-step: re-estimate Phi, Q, R and the initial state from the
    smoothed sufficient statistics.

    The sums S_11, S_10, S_00 accumulate smoothed second moments; a small
    ridge term reg_ regularises the inversion.  Q is forced diagonal by
    zeroing its off-diagonal entries.
    """
    N, A = N_A(**kwargs)
    Pn = bkwd_params['Pn']
    Xn = bkwd_params['Xn']
    Pn_one_lag = OneLagCovarianceSmoother(fwd_params, bkwd_params, Phi, **kwargs)
    n = len(fwd_params['Xs'])
    # The first couple of smoothed estimates are skipped (burn-in).
    start = 2
    end = n
    S_1_0 = np.zeros_like(fwd_params['Ps'][0])
    S_1_1 = np.zeros_like(fwd_params['Ps'][0])
    S_0_0 = np.zeros_like(fwd_params['Ps'][0])
    for i, (x_n_t, x_n_tmin1, P_n_t, P_n_tmin1, P_n_t_tmin1) in enumerate(zip(Xn[start+1:end], Xn[start:end-1], Pn[start+1:end], Pn[start:end-1], Pn_one_lag[start:end-1])):
        x_n_t = x_n_t.reshape(-1,1)
        x_n_tmin1 = x_n_tmin1.reshape(-1,1)
        S_1_1 += x_n_t.dot(x_n_t.T) + P_n_t
        S_1_0 += x_n_t.dot(x_n_tmin1.T) + P_n_t_tmin1
        S_0_0 += x_n_tmin1.dot(x_n_tmin1.T) + P_n_tmin1
    # Ridge regularisation strength for the pseudo-inverse below.
    reg_ = 5e-2
    S_0_0_inv = np.linalg.pinv(S_0_0 + reg_*np.eye(N))
    Phi_j = (S_1_0+reg_*np.eye(N)).dot(S_0_0_inv)
    Q_j = 1/(n) * (S_1_1 - S_1_0.dot(S_0_0_inv).dot(S_1_0.T))
    # Keep only the diagonal of Q (independent process noise per state).
    for i, row in enumerate(Q_j):
        for j,_ in enumerate(row):
            if i != j:
                Q_j[i,j] = 0
    R_j = np.zeros_like(A.dot(A.T))
    for y, X, P in zip(ys[1:], Xn[1:], Pn[1:]):
        R_j += (y - A.dot(X)).dot((y - A.dot(X)).T) + A.dot(P).dot(A.T)
    R_j /= (n)
    return {
        "mu0": Xn[1],
        "muEnd": Xn[-1],
        "Sig0": Pn[1],
        "Phi": Phi_j,
        "Q": Q_j,
        "R": R_j
    }

def EM_step(ys, params, **kwargs):
    """One EM iteration: E-step (filter + smoother) then M-step.
    Returns (updated parameter dict, forward-filter output)."""
    mu0 = params['mu0']
    Sig0 = params['Sig0']
    Phi = params['Phi']
    Q = params['Q']
    R = params['R']
    # Expectation Step (using Kalman Filter and Smoother)
    fwd_params = ForwardKalmanFilter(ys, mu0, Sig0, Phi, Q, R, **kwargs)
    # NOTE(review): kwargs (e.g. 'weights') are not forwarded to the
    # smoother here -- confirm that is intentional.
    bkwd_params = KalmanSmoother(fwd_params, Phi)
    # Maximisation Step (using update equation that was derived in [1])
    update_params = MaximisationUpdate(ys, fwd_params, bkwd_params, Phi, **kwargs)
    return update_params, fwd_params

def ExpectationMaximisation(ys, starting_params, **kwargs):
    """Iterate EM_step until the incomplete likelihood changes by < 1e-2 or
    100 iterations elapse.  Mutates and returns `starting_params` (R is
    deliberately left at its starting value) plus the likelihood trace."""
    num_iter = 100
    params = starting_params
    lklihoods = np.zeros(num_iter)
    for k in range(num_iter):
        params_new, fwd_params = EM_step(ys, params, **kwargs)
        Phi = params_new['Phi']
        params['Phi'] = Phi
        params['mu0'] = params_new['mu0']
        params['Sig0'] = params_new['Sig0']
        params['muEnd'] = params_new['muEnd']
        params['Q'] = params_new['Q']
        params['innovations'] = fwd_params['innovations']
        # params['R'] = params_new['R']
        lklihoods[k] = fwd_params['incomplete_likelihood']
        if k == 0:
            continue
        if np.linalg.norm(lklihoods[k] - lklihoods[k-1]) < 1e-2:
            break
    return params, lklihoods

def learn_breakpoint(ys, num_breaks, starting_params, percentage=98, sensitivity=2):
    """Jointly learn per-segment dynamics and regime-switch points.

    Alternates between (a) fitting one EM model per segment and (b) moving
    each interior breakpoint to the argmax of a per-time log-likelihood
    ratio between the two adjacent models, under a Beta prior on the
    breakpoint position.  Segments shorter than 30 steps are collapsed, and
    numerical failures (e.g. SVD non-convergence) restart with one fewer
    segment.
    NOTE(review): `percentage` and `sensitivity` are only referenced by
    commented-out code; the broad `except:` below also swallows any error.
    """
    T = len(ys)
    N = len(ys[0])
    break_points = []
    # initialise the cut points
    breaks = np.concatenate([[0], np.arange(T//num_breaks, T, T//num_breaks)[:num_breaks-1], [T]])
    break_points.append(breaks)
    # iterate to find the new breaks
    penalty = 0
    for k in range(20):
        print('starting', breaks)
        result_params, fwd_filts = [], []
        try:
            new_breaks = []
            # (a) fit one EM model per current segment.
            for j in range(1, len(breaks)):
                p_, _ = ExpectationMaximisation(ys[breaks[j-1]:breaks[j]], copy.deepcopy(starting_params), N=starting_params['N'])
                result_params.append(p_)
                # fwd_filt = ForwardKalmanFilter(
                #     ys,
                #     p_['mu0'],
                #     p_['Sig0'],
                #     p_['Phi'],
                #     p_['Q'],
                #     p_['R'],
                #     N=starting_params['N']
                # )
                #
                # # alpha = 3
                # # beta = (3*2/len(breaks-1))*j
                # # log_prior = np.log(1/T)
                # prob = stats.multivariate_normal(mean=np.zeros(starting_params['N']), cov=1e-4*np.eye(starting_params['N'])).logpdf(fwd_filt['innovations'])
                # errs.append(prob.reshape(-1,1)
            # (b) re-locate each interior breakpoint.
            for j in range(1, len(breaks)-1):
                lp_l = np.zeros(T-1)
                lp_r = np.zeros(T-1)
                R = np.eye(N) * 1e-3
                phi1 = result_params[j-1]['Phi']
                phi2 = result_params[j]['Phi']
                ts = np.arange(0,1,1/T)
                alpha = j*1/(len(breaks)-1) * num_breaks + 1
                # alpha = breaks[j]/T * num_breaks + 1
                beta = num_breaks + 2 - alpha
                # the 4 here means we have a prior on the number of chains that we expect
                tot = alpha + beta
                alpha = alpha/tot * (2+num_breaks)
                beta = beta/tot * (2+num_breaks)
                lp_l = np.zeros(T-1)
                lp_r = np.zeros(T-1)
                # Cumulative log-likelihood of the data under the left and
                # right segment models respectively.
                for t in range(1, T-1):
                    lp_l[t] = lp_l[t-1] + stats.multivariate_normal(np.dot(phi1, ys[t-1]), R).logpdf(ys[t])
                    lp_r[t] = lp_r[t-1] + stats.multivariate_normal(np.dot(phi2, ys[t-1]), R).logpdf(ys[t])
                lp = stats.beta(alpha, beta).logpdf(ts[1:-1]) + np.ones(T-2)*lp_r[-1] + lp_l[:-1] - lp_r[:-1]
                # lp = -np.log(T) + np.ones(T-2)*lp_r[-1] + lp_l[:-1] - lp_r[:-1]
                # lp = np.ones(T-2)*np.log(1/T) + np.ones(T-2)*lp_r[-1] + lp_l[:-1] - lp_r[:-1]
                new_break = np.argmax(lp) + 1
                new_breaks.append(new_break)
            if num_breaks > 1:
                ## TODO: Seriously you need to make a better boundary decision than this.
                breaks = [0, T] + new_breaks
                # for i in range(1, num_breaks):
                #     penalty += sensitivity
                #     if penalty > 8:
                #         penalty = 8
                #     # how heavily are we penalising outliers?
                #     max_ = np.sort(np.where(np.argmax(errs, axis=1) == i-1))[0][-(penalty+1)]
                #     min_ = np.sort(np.where(np.argmax(errs, axis=1) == i))[0][penalty]
                #     # print('updating:', min_, max_)
                #     breaks.append((max_ + min_)//2)
                breaks = np.array(np.sort(breaks))
                print('updated', breaks)
            else:
                print('only two breaks')
                breaks = np.concatenate([[0],[T]])
            # Collapse any segment shorter than 30 time steps.
            if len(np.where(np.diff(breaks) < 30)[0]) > 0:
                to_rem = np.where(np.diff(breaks) < 30)[0][0] + 1
                breaks = list(breaks)
                del breaks[to_rem]
                print("**chain collapse", to_rem)
                num_breaks -= 1
                # breaks = np.array(breaks)
                # num_breaks -= 1
                # breaks = np.concatenate([[0], np.arange(T//num_breaks, T, T//num_breaks)[:num_breaks-1], [T]])
                break_points.append(breaks)
                continue
            # Converged when the breakpoints move by at most 4 in total.
            if len(breaks) == len(break_points[-1]):
                print('checking convergence', k)
                if np.sum(np.abs((breaks - break_points[-1]))) <= 4:
                    print('Converged')
                    break
            break_points.append(breaks)
        except:
            # Numerical failure (e.g. SVD non-convergence): retry with one
            # fewer segment and uniformly re-initialised breakpoints.
            num_breaks -= 1
            breaks = np.concatenate([[0], np.arange(T//num_breaks, T, T//num_breaks)[:num_breaks-1], [T]])
            break_points.append(breaks)
            print("**SVD Converge Caught", num_breaks)
            continue
    return {
        'result_params': result_params,
        'fwd_filts': fwd_filts,
        'breaks': breaks
    }

def update_img_time(n, cap, im, ann):
    """matplotlib FuncAnimation callback: read the next video frame, update
    the mm:ss annotation, and show the cropped sub-region."""
    ret,frame = cap.read()
    min_ = n//60
    sec_ = n - min_*60
    ann.set_text("frame: %i:%02d min" % (min_, sec_))
    if ret==True:
        im.set_data(frame[864:1080,0:384])
    return im

def get_mini_view_video(fname_mov, fname_csv):
    """Write a '<name>_cleaned.mov' video showing only the cropped
    864:1080 x 0:384 region of each frame, with a time annotation; the CSV
    row count determines the number of frames."""
    cap = cv2.VideoCapture(fname_mov)
    count = 0
    dpi = 100
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_aspect('equal')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ret,frame = cap.read()
    im = ax.imshow(frame[864:1080,0:384])
    fig.set_size_inches([5,5])
    ann = ax.annotate("frame: %i:%02d min" % (0, 0), xy=[0,0], xytext=[0,0])
    ani = animation.FuncAnimation(fig, update_img_time, len(pd.read_csv(fname_csv)), fargs=(cap, im, ann), interval=500)
    writer = animation.writers['ffmpeg'](fps=10)
    ani.save(fname_mov.replace('.mov', '_cleaned.mov'), writer=writer)
    return True

def plot_data_and_boundaries(df, em_results, ax, **kwargs):
    """Plot the observed water levels (solid) against the per-segment model
    trajectories (dashed), with vertical lines and mm:ss labels at each
    learned breakpoint.  Returns the axes."""
    ys = df.values
    times = em_results['breaks'][1:]
    times[-1] = len(ys)
    results = em_results['result_params']
    N, A = N_A(**kwargs)
    if 'water' in kwargs:
        water = kwargs['water']
    else:
        water = [np.zeros(N)]
        water[0][0] = 1
    timesteps = np.arange(1,len(ys))
    Phi = results[0]['Phi']
    j = 1
    # Roll the state forward, switching Phi at each breakpoint.
    for i in timesteps:
        if j <= len(times):
            if i > times[j-1]:
                Phi = results[j]['Phi']
                j += 1
        water.append(A.dot(Phi.dot(water[i-1])))
    water = np.array(water)
    c = ['b', 'g', 'r', 'purple', 'gold', 'pink']
    labels=['waterfall', 'desert', 'plains', 'jungle', 'wetlands', 'reservoir']
    for i in range(len(water[0])):
        ax.plot(water[:,i], c=c[i], ls='--')
    for j in times[:-1]:
        ax.axvline(j, c='black')
        min_ = j//60
        sec_ = (j - min_*60)
        ax.annotate("%i:%02d min" % (min_, sec_),
                xy=(j, 1), xytext=(j-20, 1.1),
                arrowprops=dict(facecolor='black', shrink=0.05),
                )
    df.plot(ax=ax, color=c)
    ax.set_xticklabels([round(i/60,1) for i in ax.get_xticks()])
    ax.set_ylabel('Normalised Water Level')
    ax.set_xlabel('Time (min)')
    return ax

def stan_sampled(df, M=4, sw=[]):
    """Stan-based alternative to learn_breakpoint: sample per-segment Phi
    with switches fixed, re-sample each switch position with Phi fixed,
    then re-fit Phi at the new switches.

    Loads pre-compiled Stan models from pickle files on disk.
    NOTE(review): mutable default argument `sw=[]` -- safe only because sw
    is reassigned, never mutated; consider sw=None.  `stan_results` below
    is assigned but unused.
    """
    T = df.shape[0]
    N = df.shape[1]
    # initialise the switches uniformly
    if len(sw) <= 0:
        sw = np.array([i for i in range(0,T,T//(M+1))][:-1] + [T])
    with open('./../stan/known_switches_unknown_params.pkl', 'rb') as f:
        ssm = pickle.load(f)
    data = {
        'y': df.values,
        'N': N,
        'T': T,
        'M': len(sw)-1,
        'S': sw[1:]
    }
    fit = ssm.sampling(data=data, iter=2000, chains=4)
    la = fit.extract(permuted=True)
    phis = la.get('phi').mean(axis=0)
    fits = []
    with open('./../stan/unknown_switches_known_params.pkl', 'rb') as f:
        ssm = pickle.load(f)
    for i in range(len(sw)-2):
        data = {'y': df.values,
            'N': N,
            'T': T,
            'M': 2,
            'phi': phis[i:i+2,:],
            'sw': sw[i+1],
            'p': 15
        }
        fits.append(ssm.sampling(data=data, iter=2000, chains=4, seed=12))
    new_splits = set()
    for fit in fits:
        la = fit.extract(permuted=True)
        means = np.mean(la.get('lp'), axis=0)
        new_max = np.argmax(means)
        print(new_max)
        # Snap switches near the edges to the boundary, and drop switches
        # within 15 steps of an already-accepted one.
        if new_max < 20:
            new_max = 0
        elif new_max > T-20:
            new_max = T
        if not (np.abs(np.array(list(new_splits)) - new_max) < 15).any():
            new_splits.add(new_max)
    new_splits.add(0)
    new_splits.add(T)
    new_splits = np.sort(list(new_splits))
    sw = new_splits
    with open('./../stan/known_switches_unknown_params.pkl', 'rb') as f:
        ssm = pickle.load(f)
    data = {
        'y': df.values,
        'N': N,
        'T': T,
        'M': len(sw)-1,
        'S': sw[1:]
    }
    fit = ssm.sampling(data=data, iter=2000, chains=4)
    la = fit.extract(permuted=True)
    stan_results = {}
    phis = la.get('phi').mean(axis=0)
    return {
        'breaks': np.sort(sw),
        'result_params': [{'Phi':p} for p in la.get('phi').mean(axis=0)],
        'fit': fit
    }
{ "repo_name": "NickHoernle/Essil", "path": "baseline_model/cw_utils.py", "copies": "1", "size": "17794", "license": "mit", "hash": 1067683530098730500, "line_mean": 28.2664473684, "line_max": 158, "alpha_frac": 0.4887602563, "autogenerated": false, "ratio": 2.7643312101910826, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8727964436309538, "avg_score": 0.005025406036308921, "num_lines": 608 }
#!~/anaconda/bin/ python
# -*- coding: utf-8 -*-
# Flask app serving a census "preferred ZIP code" visualisation: ranks ZIP
# codes by distance between the user's slider preferences and precomputed
# per-ZIP indices, and renders Vincent/Vega choropleth maps.
################################################################################
# Imports
################################################################################
import data
import json
import csv
import vincent
from vincent import AxisProperties, PropertySet, ValueRef
from flask import Flask, render_template, request

app = Flask(__name__)

################################################################################
# Globals
################################################################################
Final_File = "Data/final_files/Final.csv"
Map_Init = "static/USA_Init_Map.json"
Map_States = "static/us_states.topo.json"
Map_StateMap_Path = "static/states_topo_json/"
Map_Zips = "static/zips_us_topo.json"
State_LandArea = r"Data/raw_files/state_landarea_rank.csv"

#Read final DataFrame
df_final = data.df_final

#Read State Land Area
# land_area maps state name -> land-area rank (used to pick a map scale).
with open(State_LandArea) as f:
    f.readline() # ignore first line (header)
    land_area = dict(csv.reader(f, delimiter=','))

################################################################################
# Routes
################################################################################
@app.route("/")
def index():
    """Serve the main page."""
    return render_template('index.html')


@app.route("/data/map")
def data_map():
    """Build the choropleth map and top-5 ZIP table for the user's sliders.

    Query params: income/housing/diversity/urbanization (0-100 slider
    values), state (2-letter code or 'ZZ' for the whole USA) and statename.
    Returns JSON: {0: Vega spec, 1: top-5 ZIP rows}.
    NOTE(review): writes df_final['fit'] on the shared module-level frame,
    so concurrent requests would race -- confirm single-threaded deployment.
    """
    return_dict = {}
    income = request.args.get("income")
    housing = request.args.get("housing")
    diversity = request.args.get("diversity")
    urbanization = request.args.get("urbanization")
    state = request.args.get("state")
    statename = request.args.get("statename").replace(" ","_")
    global df_final
    # Normalise slider values to [0, 1] and score every ZIP by L1 distance
    # between the preferences and its precomputed indices (lower = better).
    fit = []
    fit.append(float(income)/100)
    fit.append(float(housing)/100)
    fit.append(float(diversity)/100)
    fit.append(float(urbanization)/100)
    df_final['fit'] = df_final.apply(lambda x: abs(fit[0]-x['diversity_index'])+abs(fit[1]-x['housing_index'])+abs(fit[2]-x['income_index'])+abs(fit[3]-x['urban_index']),axis=1)
    if state == 'ZZ':
        # Whole-USA view: national states + ZIP topology.
        zip_topo = r'/data/zips'
        state_topo = r'/data/states'
        geo_data = [{'name': 'states', 'url': state_topo, 'feature': 'us_states.geo'},
                    {'name': 'zip_codes', 'url': zip_topo, 'feature': 'zip_codes_for_the_usa'}]
        vis = vincent.Map(data=df_final, geo_data=geo_data, scale=800, projection='albersUsa',
                  data_bind='fit', data_key='zip5',brew='YlOrRd',
                  map_key={'zip_codes': 'properties.zip'})
        del vis.marks[0].properties.update
        vis.marks[1].properties.enter.stroke_opacity = ValueRef(value=0.05)
        vis.marks[0].properties.enter.stroke.value = '#C0C0C0'
        vis.legend(title='Preferred ZipCode')
        return_dict[0] = json.loads(vis.to_json())
        # Top-5 best-fitting ZIPs for the results table.
        ziplist = json.loads(df_final[['ZCTA5','ZIPName','fit']].sort(['fit']).reset_index().head(5).to_json())
        table_data = []
        for i in range (5):
            dict_row = {}
            dict_row['index'] = i
            dict_row['ZCTA5'] = ziplist['ZCTA5'][str(i)]
            dict_row['ZIPName'] = ziplist['ZIPName'][str(i)]
            table_data.append(dict_row)
        return_dict[1] = table_data
        #with open ('data.json','w') as outfile:
        #    json.dump(lst,outfile)
        return json.dumps(return_dict)
    else:
        # Single-state view: pick a projection scale from the state's
        # land-area rank (bigger state -> smaller scale factor).
        zip_topo = r'/data/state_map?state='+statename
        feature_name = statename+r'.geo'
        global land_area
        rank = int(land_area[statename])
        if rank > 0 and rank <=1:
            scale = 700
        elif rank >1 and rank <=3:
            scale = 2500
        elif rank >2 and rank <=19:
            scale = 3000
        elif rank >19 and rank <=26:
            scale = 4000
        elif rank >26 and rank <=39:
            scale = 4500
        elif rank >39 and rank <=40:
            scale = 5000
        elif rank >40 and rank <=48:
            scale = 6000
        else:
            scale = 23000
        geo_data = [{'name': 'state', 'url': zip_topo, 'feature': feature_name},
                    {'name': 'zip_codes', 'url': zip_topo, 'feature': feature_name}]
        vis = vincent.Map(data=df_final[df_final['State']==state],geo_data=geo_data, scale=scale,
                  projection='equirectangular', data_bind='fit', data_key='zip5',brew='YlOrRd',
                  map_key={'zip_codes': 'id'})
        del vis.marks[0].properties.update
        #vis.marks[0].properties.enter.stroke.value = '#C0C0C0'
        vis.marks[1].properties.enter.stroke_opacity = ValueRef(value=0.5)
        #vis.legend(title='Preferred ZipCode')
        return_dict[0] = json.loads(vis.to_json())
        # Top-5 best-fitting ZIPs within the selected state.
        ziplist = json.loads(df_final[['ZCTA5','ZIPName','fit']][df_final['State']==state].sort(['fit']).reset_index().head(5).to_json())
        table_data = []
        for i in range (5):
            dict_row = {}
            dict_row['index'] = i
            dict_row['ZCTA5'] = ziplist['ZCTA5'][str(i)]
            dict_row['ZIPName'] = ziplist['ZIPName'][str(i)]
            table_data.append(dict_row)
        return_dict[1] = table_data
        return json.dumps(return_dict)


@app.route("/data/init")
def data_init():
    """Serve the initial USA map JSON."""
    json_data=open(Map_Init).read()
    data = json.loads(json_data)
    return json.dumps(data)


@app.route("/data/states")
def data_states():
    """Serve the US states TopoJSON."""
    json_data=open(Map_States).read()
    data = json.loads(json_data)
    return json.dumps(data)


@app.route("/data/zips")
def data_zips():
    """Serve the national ZIP-code TopoJSON."""
    json_data=open(Map_Zips).read()
    data = json.loads(json_data)
    return json.dumps(data)


@app.route("/data/state_map")
def data_state_map():
    """Serve the TopoJSON for one state, selected via ?state=<name>."""
    state = request.args.get("state")
    map_name = Map_StateMap_Path + state + ".topo.json"
    json_data=open(map_name).read()
    data = json.loads(json_data)
    return json.dumps(data)

################################################################################
# Main Execution
################################################################################
if __name__ == "__main__":
    app.run(debug=True)
{ "repo_name": "DistrictDataLabs/03-censusables", "path": "source/app.py", "copies": "1", "size": "6266", "license": "apache-2.0", "hash": -5292001156197864000, "line_mean": 32.688172043, "line_max": 177, "alpha_frac": 0.5062240664, "autogenerated": false, "ratio": 3.607369027058146, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9558572609028605, "avg_score": 0.011004096885908154, "num_lines": 186 }
#!//anaconda/bin/python
from astropy.io import fits
import numpy as np
import scipy as sp
import itertools
import argparse
import os
import subprocess
import warnings
from pprint import pprint

__author__ = "Abigail Stevens, A.L.Stevens at uva.nl"

"""
Helper methods for working with (RXTE) FITS event files and light curves.

This has not been rigorously tested. There is no 'main' to this program, only
helper methods to import and be called.

To assign the returned value to a variable in a bash script (in the directory
containing tools.py, or with that directory added to PYTHONPATH), where 'fun'
is the function name and 'vars' are the variables to be passed in:
var=$(python -c "from tools import fun; print fun('$vars')")

Alternatively:
python -c "from tools import fun; fun(vars)" >> a_file

Abigail Stevens, A.L.Stevens at uva.nl, 2013-2015
"""


################################################################################
def get_key_val(fits_file, ext, keyword):
    """
    Get the value of a keyword from a FITS header.

    The keyword does not seem to be case-sensitive.

    Parameters
    ----------
    fits_file : str
        The full path of the FITS file.
    ext : int
        The FITS extension (0-2) in which to search for the given keyword.
    keyword : str
        The keyword for which you want the associated value.

    Returns
    -------
    any type
        Value of the given keyword.
    """
    ext = np.int8(ext)
    assert (ext >= 0 and ext <= 2)
    keyword = str(keyword)

    try:
        hdulist = fits.open(fits_file)
    except IOError:
        print("\tERROR: File does not exist: %s" % fits_file)
        exit()

    key_value = hdulist[ext].header[keyword]
    hdulist.close()

    return key_value


################################################################################
def get_fits_tab_val(fits_file, ext, row, col):
    """
    Get one value from a FITS data table.

    Parameters
    ----------
    fits_file : str
        The full path of the FITS file.
    ext : int
        The FITS extension from which to get the data.
    row : int
        The row of the data table.
    col : int
        The column of the data table.

    Returns
    -------
    any type
        Value at data[row][col].
    """
    ext = np.int8(ext)
    row = np.int(row)
    col = np.int(col)

    try:
        hdulist = fits.open(fits_file)
    except IOError:
        print("\tERROR: File does not exist: %s" % fits_file)
        exit()

    tab_value = hdulist[ext].data[row][col]
    hdulist.close()

    return tab_value


################################################################################
def check_mode(file_list, datamode_key):
    """
    Check that the data mode of each file in file_list matches datamode_key.

    Designed for RXTE FITS files.

    Parameters
    ----------
    file_list : str
        Name of a text file listing the files whose data mode will be checked,
        one file per line.
    datamode_key : str
        The name of the data mode, as a FITS header keyword value.

    Returns
    -------
    list of str
        The files from file_list that are in datamode_key.

    Raises
    ------
    Exception if the file list doesn't exist, or if the list is empty.
    """
    if not os.path.isfile(file_list):
        raise Exception("ERROR: File list does not exist.")

    input_files = [line.strip() for line in open(file_list)]
    ## Bug fix: this previously tested the undefined name 'files', which
    ## raised a NameError instead of the intended empty-list Exception.
    if not input_files:
        raise Exception("ERROR: No files in the list %s" % file_list)

    good_files = []
    for in_file in input_files:
        if datamode_key in get_key_val(in_file, 1, 'DATAMODE'):
            good_files.append(in_file)
            print(in_file)

    return good_files


################################################################################
def compute_obs_time(file_list):
    """
    Compute the total observation time of a list of observation FITS files.

    Sums the ONTIME header keyword of each file; assumed to be in seconds
    (double-check the units of times in the FITS header).

    Parameters
    ----------
    file_list : str
        Name of a text file listing the FITS files of the observations,
        one per line.

    Returns
    -------
    float
        The total observation time, in seconds.

    Raises
    ------
    Exception if the file list doesn't exist, or if the list is empty.
    """
    if not os.path.isfile(file_list):
        raise Exception("ERROR: File list does not exist.")

    input_files = [line.strip() for line in open(file_list)]
    ## Bug fix: this previously tested the undefined name 'files' (NameError).
    if not input_files:
        raise Exception("ERROR: No files in the list %s" % file_list)

    total_time = 0
    for in_file in input_files:
        total_time += float(get_key_val(in_file, 1, 'ONTIME'))

    return total_time  ## in seconds


################################################################################
def read_obs_time(in_file):
    """
    Read the total observation time from the header of a text file.

    Looks for a commented header line containing "xposure" (matches both
    "Exposure" and "exposure") and reads the second-to-last whitespace-
    separated token on that line as the exposure time.

    Parameters
    ----------
    in_file : str
        Name of (ASCII/txt/dat) input file with exposure time in the header.

    Returns
    -------
    float
        The exposure time of the observation, or 0.0 if not found in the
        commented header.

    Raises
    ------
    Exception if the input file doesn't exist.
    """
    if not os.path.isfile(in_file):
        raise Exception("ERROR: File does not exist for tools.read_obs_time.")

    with open(in_file, 'r') as f:
        for line in f:
            if line[0].strip() == "#":
                if "xposure" in line.strip():
                    tokens = line.strip().split()
                    return float(tokens[-2])
            else:
                ## First non-comment line: the header is over, give up.
                return 0.0


################################################################################
def power_of_two(num):
    """
    Check if a positive integer is a power of 2 (1 <= num < 2147483648).

    Parameters
    ----------
    num : int
        The number in question.

    Returns
    -------
    bool
        True if 'num' is a power of two, False otherwise.
    """
    n = int(num)
    x = 2
    assert n > 0, "ERROR: Number must be positive."

    if n == 1:
        return True
    else:
        while x < n and x < 2147483648:
            x *= 2
        return n == x


################################################################################
def type_power_of_two(num):
    """
    Check if an input is a power of 2 (1 <= num < 2147483648), as an
    argparse type.

    Parameters
    ----------
    num : int
        The number in question.

    Returns
    -------
    int
        n, if it's a power of two.

    Raises
    ------
    argparse.ArgumentTypeError if n isn't a power of two.
    """
    n = int(num)
    x = 2
    assert n > 0

    if n == 1:
        return n
    else:
        while x <= n and x < 2147483648:
            if n == x:
                return n
            x *= 2

    message = "%d is not a power of two." % n
    raise argparse.ArgumentTypeError(message)


################################################################################
def type_positive_float(num):
    """
    Check if an input is a positive float, as an argparse type.

    Parameters
    ----------
    num : int, long, float, or double
        The number in question.

    Returns
    -------
    float
        n, if it's a positive float.

    Raises
    ------
    argparse.ArgumentTypeError if num isn't a real number or a positive float.
    """
    try:
        n = float(num)
    except (ValueError, TypeError):
        ## Bug fix: was 'except ValueError or TypeError', which only caught
        ## ValueError; the message also referenced 'n', which is undefined
        ## when the conversion fails.
        message = "%s is not a real number." % str(num)
        raise argparse.ArgumentTypeError(message)

    if n >= 0:
        return n
    else:
        message = "%f is not a positive float." % n
        raise argparse.ArgumentTypeError(message)


################################################################################
def type_positive_int(num):
    """
    Check if an input is a positive integer, as an argparse type.

    Parameters
    ----------
    num : int, long, float, or double
        The number in question.

    Returns
    -------
    int
        n, if it's a positive integer.

    Raises
    ------
    argparse.ArgumentTypeError if num isn't a real number or a positive
    integer.
    """
    try:
        n = int(num)
    except (ValueError, TypeError):
        ## Bug fix: was 'except ValueError or TypeError' (only caught
        ## ValueError).
        message = "Input is not a positive integer."
        raise argparse.ArgumentTypeError(message)

    if n >= 0:
        return n
    else:
        message = "%d is not a positive integer." % n
        raise argparse.ArgumentTypeError(message)


################################################################################
def pairwise(iterable):
    """
    s -> (s0,s1), (s1,s2), (s2, s3), ...

    From https://docs.python.org/2/library/itertools.html#recipes
    Used when reading lines in a file so I can peek at the next line.

    Parameters
    ----------
    iterable
        An iterable, like a list or an open file.

    Returns
    -------
    iterable of tuples
        The next two items in the iterable, as in the example above.
    """
    a, b = itertools.tee(iterable)
    next(b, None)
    ## 'zip' instead of the Python-2-only 'itertools.izip'; works on both.
    return zip(a, b)


################################################################################
def replace_key_val(fits_file, ext, keyword, value):
    """
    Replace the value of a keyword in a FITS header with a given value.

    Parameters
    ----------
    fits_file : str
        The full path of the FITS file.
    ext : int
        The FITS extension (0-2) in which to replace the keyword value.
    keyword : str
        The header keyword of the value you want to replace.
    value : any type
        The new value for the header keyword.

    Returns
    -------
    nothing
    """
    ext = np.int8(ext)
    assert (ext >= 0 and ext <= 2)
    keyword = str(keyword)

    try:
        hdu = fits.open(fits_file, mode='update')
    except IOError:
        print("\tERROR: File does not exist: %s" % fits_file)
        exit()

    hdu[ext].header[keyword] = value
    hdu.flush()
    hdu.close()


################################################################################
def time_ordered_list(file_list):
    """
    Print the names of a list of FITS files sorted by their start times.

    Reads the TSTART keyword of each file, sorts the file names by it, and
    prints the sorted names, one per line.

    Parameters
    ----------
    file_list : str
        Name of a text file listing FITS files, one per line.

    Returns
    -------
    nothing

    Raises
    ------
    Exception if the file list doesn't exist, or if the list is empty.
    """
    if not os.path.isfile(file_list):
        raise Exception("ERROR: File list does not exist.")

    files = [line.strip() for line in open(file_list)]
    if not files:  ## If it's an empty list
        raise Exception("ERROR: No files in the list %s" % file_list)

    times = [float(get_key_val(fits_file, 1, 'TSTART')) for fits_file in files]
    sorted_files = [x for y, x in sorted(zip(times, files))]

    for filename in sorted_files:
        print(filename)


################################################################################
def obs_epoch_rxte(fits_file):
    """
    Determine the epoch of an RXTE observation. Future update: use MJD.

    WARNING:
    1. This has not been rigorously tested (barely tested, really) and is
       not guaranteed.
    2. It can only understand and parse 'DATE-OBS' keyword values with
       length 8 ('DD/MM/YY') or length 19 ('YYYY-MM-DDThh:mm:ss').
    3. The listed 'stop time' of an epoch is interpreted as the start time
       of the next epoch.

    Parameters
    ----------
    fits_file : str
        Name of an RXTE observation FITS file (full path).

    Returns
    -------
    int
        The RXTE observation epoch of the FITS file.

    Raises
    ------
    Exception if the 'DATE-OBS' keyword value isn't 8 or 19 characters long,
    or if the date fields couldn't be read.

    UserWarning if it goes out to minute precision in determining the
    observation epoch, since it may not be correct.
    """
    obs_time = get_key_val(fits_file, 0, 'DATE-OBS')

    year = -1
    month = -1
    day = -1
    hour = -1
    minute = -1

    if len(obs_time) == 19:
        ## 'YYYY-MM-DDThh:mm:ss'
        year = int(obs_time[0:4])
        month = int(obs_time[5:7])
        day = int(obs_time[8:10])
        hour = int(obs_time[11:13])
        minute = int(obs_time[14:16])
    elif len(obs_time) == 8:
        ## 'DD/MM/YY' -- two-digit year; '9x' means 199x, otherwise 20xx.
        day = int(obs_time[0:2])
        month = int(obs_time[3:5])
        year = obs_time[6:8]
        if year[0] == '9':
            year = int("19" + year)
        else:
            year = int("20" + year)
        hour = 0
        minute = 0
    else:
        raise Exception("ERROR: Format of date is not understood.")

    ## Making sure the date is actually when RXTE was operational
    assert (year >= 1995 and year <= 2012)

    ## Bug fix: these used 'is -1' (identity, not equality) on ints.
    if (year == -1) or (month == -1) or (day == -1) or (hour == -1) or \
            (minute == -1):
        raise Exception("ERROR: Month, day, year, hour, or minute not "
                        "properly assigned.")

    ## Determining in which epoch the date falls.
    ## Epoch boundaries: 1996-03-03 18:33, 1996-04-15 23:05,
    ## 1999-03-22 17:37, 2000-05-13 00:00.
    ## Bug fix: the warnings below called 'warning.warn' (typo, NameError).
    if year == 1995:
        return 1
    elif year == 1996:
        if month < 3:
            return 1
        elif month == 3:
            if day < 3:
                return 1
            elif day > 3:
                return 2
            else:
                if hour < 18:
                    return 1
                elif hour > 18:
                    return 2
                else:
                    warnings.warn("Using minute precision to determine obs "
                                  "epoch. May not be correct.")
                    if minute < 33:
                        return 1
                    else:
                        return 2
        elif month == 4:
            if day < 15:
                return 2
            elif day > 15:
                return 3
            else:
                if hour < 23:
                    return 2
                elif hour > 23:
                    return 3
                else:
                    warnings.warn("Using minute precision to determine obs "
                                  "epoch. May not be correct.")
                    if minute < 5:
                        return 2
                    else:
                        return 3
        else:
            return 3
    elif year == 1997 or year == 1998:
        return 3
    elif year == 1999:
        if month < 3:
            return 3
        elif month == 3:
            if day < 22:
                return 3
            elif day > 22:
                return 4
            else:
                if hour < 17:
                    return 3
                elif hour > 17:
                    return 4
                else:
                    warnings.warn("Using minute precision to determine obs "
                                  "epoch. May not be correct.")
                    if minute < 37:
                        return 3
                    else:
                        return 4
        else:
            return 4
    elif year == 2000:
        if month < 5:
            return 4
        elif month == 5:
            if day < 13:
                return 4
            else:
                return 5  ## since it changes at 00:00
        else:
            return 5
    else:
        return 5


################################################################################
def make_2Dlightcurve(time, energy, n_bins, detchans, seg_start, seg_end):
    """
    Populate a segment of a 2-D light curve with photons from the event list.

    Parameters
    ----------
    time : np.array of floats
        1-D array of times at which a photon is detected (assumes these times
        are the front of the timebin?).
    energy : np.array of ints
        1-D array of the energy channel in which the photon is detected.
    n_bins : int
        Number of bins per segment of light curve.
    detchans : int
        Number of detector energy channels.
    seg_start : float
        Start time of the segment, in the same units as the time array.
    seg_end : float
        End time of the segment, in the same units as the time array.

    Returns
    -------
    lightcurve_2d : np.array of ints
        2-D array of the populated light curve, time along one axis and
        energy channel along the other, in units of count rate.
        Indexed as lightcurve[time_bin][energy_channel].
    """
    ## Ranges need to be amount+1 here, because of how 'histogram2d' bins
    ## the values
    t_bin_seq = np.linspace(seg_start, seg_end, num=n_bins + 1)  ## time bin
                                                                 ## edges
    dt = t_bin_seq[1] - t_bin_seq[0]
    e_bin_seq = np.arange(detchans + 1)

    lightcurve_2d, t_bin_edges, e_bin_edges = np.histogram2d(time, energy,
        bins=[t_bin_seq, e_bin_seq])

    lightcurve_2d /= dt  ## Need /dt to have units of count rate
    lightcurve_2d = lightcurve_2d.astype(int)  ## 1/dt is an int, so we can
                                               ## make 'lightcurve_2d' ints

    return lightcurve_2d


################################################################################
def make_1Dlightcurve(time, n_bins, seg_start, seg_end):
    """
    Populate a segment of a 1-D light curve with photons from the event list.

    Parameters
    ----------
    time : np.array of floats
        1-D array of times at which a photon is detected (assumes these times
        are the front of the timebin?).
    n_bins : int
        Number of bins per segment of light curve.
    seg_start : float
        Start time of the segment, in the same units as the time array.
    seg_end : float
        End time of the segment, in the same units as the time array.

    Returns
    -------
    lightcurve_1d : np.array of ints
        1-D array of the populated "bolometric" light curve (i.e. ignoring
        energy bins), in units of count rate.
    """
    ## Ranges need to be amount+1 here, because of how 'histogram' bins
    ## the values. Defining time bin edges:
    t_bin_seq = np.linspace(seg_start, seg_end, num=n_bins + 1)

    ## Debug output comparing bin edges with event times.
    print("%.15f %.15f %.15f" % (t_bin_seq[0], t_bin_seq[1], t_bin_seq[2]))
    print("%.15f %.15f %.15f" % (time[0], time[1], time[2]))
    print(t_bin_seq[0] - time[0])
    print(t_bin_seq[1] - time[1])
    print(t_bin_seq[2] - time[2])

    dt = t_bin_seq[1] - t_bin_seq[0]

    lightcurve_1d, t_bin_edges = np.histogram(time, bins=t_bin_seq)

    lightcurve_1d /= dt  ## Need /dt to have units of count rate
    lightcurve_1d = lightcurve_1d.astype(int)  ## 1/dt is an int, so we can
                                               ## make 'lightcurve_1d' ints

    return lightcurve_1d


################################################################################
def make_pulsation(n_bins, dt, freq, amp, mean, phase):
    """
    Make a simulated time series with a coherent pulsation.

    The sine wave is evaluated on a finer grid ('binning' sub-bins per bin)
    and then averaged down to n_bins, so each bin holds the bin-averaged
    amplitude rather than a point sample.

    Parameters
    ----------
    n_bins : int
        Number of time bins per segment of light curve.
    dt : float
        Desired timestep between bins (or per time bin), in seconds.
    freq : float
        Desired frequency of the pulsation, in Hz.
    amp : float
        Desired amplitude of pulsation.
    mean : float
        Desired mean value of pulsation.
    phase : float
        Desired phase offset of pulsation from a sine wave, in radians.

    Returns
    -------
    np.array of floats
        One segment of the pulsation light curve, with an amplitude per bin.
    """
    binning = 10
    period = 1.0 / freq  ## in seconds
    bins_per_period = period / dt
    tiny_bins = np.arange(0, n_bins, 1.0 / binning)

    smooth_sine = amp * np.sin(2.0 * np.pi * tiny_bins / bins_per_period +
        phase) + mean

    time_series = np.mean(np.array_split(smooth_sine, n_bins), axis=1)

    return time_series


################################################################################
def make_col_list(fits_file, ext, with_words, without_words):
    """
    Make a list of column names with specific words or phrases and without
    specific words or phrases.

    NOTE: not implemented yet; currently a no-op stub.
    """
    pass


################################################################################
def no_duplicates(txt_file):
    """
    Remove duplicate lines from a text file, in place.

    Reads in lines from a text file, removes duplicates (by using 'set'), and
    writes the unique lines back to the same text file, overwriting the
    previous contents. Note that 'set' does not preserve line order.

    Parameters
    ----------
    txt_file : str
        Name of the text file containing the list to check for duplicates;
        one entry per line.

    Returns
    -------
    nothing

    Raises
    ------
    Exception if the text file doesn't exist, or if it is empty.
    """
    if not os.path.isfile(txt_file):
        raise Exception("ERROR: Duplicates file does not exist.")

    items = [line.strip() for line in open(txt_file)]
    if not items:  ## If it's an empty list
        raise Exception("ERROR: No items in the duplicate list.")

    no_duplicate_items = list(set(items))

    with open(txt_file, 'w') as out:
        for thing in no_duplicate_items:
            out.write(thing + "\n")


################################################################################
def remove_obsIDs(totallist_file, removelist_file):
    """
    Remove listed obsIDs from a 'total list' file, in place.

    Makes a backup copy of the original list file (suffix '_all.lst'),
    removes the obsIDs listed in removelist_file, and writes the remainder
    back to the original 'total list' file (overwriting it).

    Or use the following bash commands with awk:
    for element in $( cat "$file1" ); do
        awk "!/$element/" $file2 > dump.txt && mv dump.txt $file2
    done

    Parameters
    ----------
    totallist_file : str
        Name of file containing a list of all the obsIDs, one per line.
    removelist_file : str
        Name of file containing a list of the obsIDs to remove from the
        list, one per line.

    Returns
    -------
    nothing

    Raises
    ------
    Exception if either list file doesn't exist, or if either list is empty.
    ValueError if an obsID in the remove list is not in the total list.
    """
    if not os.path.isfile(totallist_file):
        raise Exception("ERROR: Total obsID list does not exist.")
    if not os.path.isfile(removelist_file):
        raise Exception("ERROR: List of obsIDs to remove does not exist.")

    ## Keep a backup of the complete list before overwriting it.
    cp_totallist = os.path.splitext(totallist_file)[0] + "_all.lst"
    subprocess.call(["cp", totallist_file, cp_totallist])

    good_obsIDs = [line.strip() for line in open(totallist_file)]
    bad_obsIDs = [line.strip() for line in open(removelist_file)]

    if not good_obsIDs:  ## If it's an empty list
        raise Exception("ERROR: No files in the eventlist list.")
    if not bad_obsIDs:  ## If it's an empty list
        ## Bug fix: message previously referred to the wrong list.
        raise Exception("ERROR: No obsIDs in the remove list.")

    for item in bad_obsIDs:
        good_obsIDs.remove(item)

    with open(totallist_file, 'w') as out:
        for thing in good_obsIDs:
            out.write(thing + "\n")


################################################################################
def get_num_of_params(mod_val_string, n_spectra):
    """
    Compute the number of model parameters per spectrum from an ampersand-
    separated XSPEC-style model value string.

    Parameters
    ----------
    mod_val_string : str
        Ampersand-separated string of model parameter values.
    n_spectra : int
        Number of spectra the parameters are divided among.

    Returns
    -------
    int
        Number of parameters per spectrum.
    """
    n_ampersands = mod_val_string.count("&")
    ## Bug fix: use integer (floor) division so the result stays an int
    ## under Python 3 ('/' there returns a float).
    n_params = n_ampersands // int(n_spectra)

    return n_params


################################################################################
if __name__ == '__main__':
    print("\n\t\t tools.py")
    print("There is no 'main' to this program, only helper methods to import "
          "and be called.\n")
################################################################################
{ "repo_name": "abigailStev/whizzy_scripts", "path": "tools.py", "copies": "1", "size": "22179", "license": "mit", "hash": 8492282498997742000, "line_mean": 23.4273127753, "line_max": 87, "alpha_frac": 0.5989900356, "autogenerated": false, "ratio": 3.3172300329045767, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.44162200685045766, "avg_score": null, "num_lines": null }
import argparse
from astropy.io import fits

__author__ = "Abigail Stevens"
__author_email__ = "A.L.Stevens at uva.nl"
__year__ = "2014-2015"

"""
fits_info.py

Prints HDU information, header keywords, column names, and some data for a
FITS file. Easily modifiable to print whatever you want!
"""

################################################################################
if __name__ == "__main__":

    ###########################
    ## Parsing input arguments
    ###########################

    parser = argparse.ArgumentParser(usage="python fits_info.py fits_file",
        description="Prints information about the specified fits file. "
                    "Recommended for tables, not images.")
    parser.add_argument('fits_file', help="The full path of the FITS file.")
    args = parser.parse_args()

    ###############################################################
    ## Opening the fits file using the Astropy library 'fits.open'
    ###############################################################

    try:
        file_hdu = fits.open(args.fits_file)
    except IOError:
        print("\tERROR: File does not exist: %s" % args.fits_file)
        exit()

    #########################################################
    ## Printing out the basic info on structure of FITS file
    #########################################################

    ## Bug fix: 'print "\n", file_hdu.info()' printed a stray 'None', since
    ## info() writes its own summary to stdout and returns None.
    print("")
    file_hdu.info()

    ########################################################
    ## Printing header keywords and column names
    ########################################################

    ## Bug fix: 'print hdu.header.keys' (no parentheses) printed the bound
    ## method object, not the keyword names.
    print(list(file_hdu[0].header.keys()))
    print("")
    print(list(file_hdu[1].header.keys()))

    print("\nColumns of data table in ext 1: %s" % file_hdu[1].columns.names)
    print("")

    ##########################################
    ## Other things you may want to print out
    ##########################################

    ## e.g. file_hdu[1].data[0], file_hdu[1].data.field(0), or individual
    ## column names matching a pattern.
    print(file_hdu[1].data)

    #########################
    ## Closing the fits file
    #########################

    file_hdu.close()

## End of program 'fits_info.py'
################################################################################
{ "repo_name": "abigailStev/whizzy_scripts", "path": "fits_info.py", "copies": "1", "size": "2463", "license": "mit", "hash": 5055021476344963000, "line_mean": 28.6746987952, "line_max": 80, "alpha_frac": 0.47909054, "autogenerated": false, "ratio": 3.731818181818182, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.47109087218181817, "avg_score": null, "num_lines": null }
#!//anaconda/bin/python
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq  ## Levenberg-Marquardt Algorithm
from astropy.io import fits
import argparse
import subprocess
import os.path

__author__ = "Abigail Stevens <A.L.Stevens at uva.nl>"

"""
Fits a QPO power spectrum (f*P(f)) with a power law and either a Gaussian or
a Lorentzian. Not intended to be robust, just to give an idea of how to
quantify the QPO in the power spectrum.

2015
"""

################################################################################
##
## DEFINING SPECTRAL SHAPE FUNCTIONS
## The power spectrum shape functions are *f because we're fitting f*P(f)
##
## Parameter vector layout (shared by all shapes):
##   p[0] = centroid/mean frequency, p[1] = FWHM/std dev, p[2] = QPO scale,
##   p[3] = power law index, p[4] = power law scale
##
################################################################################

def powerlaw(f, p):
    """Power law times f; p[3] = index, p[4] = scale factor."""
    ## np.where guards against 0**negative at f == 0
    pl = np.where(f != 0, f ** (p[3]), 0.0) * p[4] * f
    return pl


def lorentzian(f, p):
    """Lorentzian times f; p[0] = centroid freq, p[1] = FWHM, p[2] = scale."""
    numerator = p[1] / (np.pi * 2.0)
    denominator = (f - p[0]) ** 2 + (1.0 / 2.0 * p[1]) ** 2
    L = (numerator / denominator) * p[2] * f
    return L


def gaussian(f, p):
    """Gaussian times f; p[0] = mean, p[1] = standard deviation, p[2] = scale."""
    exp_numerator = -(f - p[0]) ** 2
    exp_denominator = 2 * p[1] ** 2
    G = p[2] * np.exp(exp_numerator / exp_denominator) * f
    return G


def pl_residuals(p, npn, f):
    """Residuals from fitting a power law (this will be the background that
    gets subtracted)."""
    err = npn - powerlaw(f, p)
    return err


def L_residuals(p, npn, f):
    """Residuals from fitting a Lorentzian (assumes background is already
    subtracted from npn, i.e., it's passed npn_bg_corr)."""
    err = npn - lorentzian(f, p)
    return err


def G_residuals(p, npn, f):
    """Residuals from fitting a Gaussian (assumes background is already
    subtracted from npn, i.e., it's passed npn_bg_corr)."""
    err = npn - gaussian(f, p)
    return err


def L_pl_residuals(p, npn, f):
    """Residuals from co-fitting a Lorentzian and a power law (assumes
    background is not previously subtracted)."""
    err = npn - lorentzian(f, p) - powerlaw(f, p)
    return err


def G_pl_residuals(p, npn, f):
    """Residuals from co-fitting a Gaussian and a power law (assumes
    background is not previously subtracted)."""
    err = npn - gaussian(f, p) - powerlaw(f, p)
    return err


################################################################################
def make_plots(freq, npn_bg_corr, best_qpo, best_pl, best_resid, npn_err):
    """
    Plot the power spectrum with the best-fit QPO model on top, and the fit
    residuals below, and save the figure to 'PSD_fit.png'.

    Parameters
    ----------
    freq : np.array of floats
        Frequencies of the power spectrum, in Hz.
    npn_bg_corr : np.array of floats
        Background-corrected f*P(f) values.
    best_qpo : np.array of floats
        Best-fit QPO model evaluated at freq.
    best_pl : np.array of floats
        Best-fit power law evaluated at freq (currently not drawn).
    best_resid : np.array of floats
        Fit residuals at freq.
    npn_err : np.array of floats
        Errors on f*P(f).
    """
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10, 10))

    ax1.plot(freq, npn_bg_corr, 'wo')
    ax1.plot(freq, best_qpo, 'r--', lw=2)
    ax1.set_xscale('log')
    ax1.set_yscale('log')
    ax1.set_xlim(np.min(freq), 100)
    ax1.set_ylim(1e-5, 1e-1)
    ax1.set_ylabel(r'Power $\times$ frequency', fontsize=18)

    ax2.errorbar(freq, best_resid, yerr=npn_err, linestyle=' ', ecolor='g',
        elinewidth=2, markersize=2, marker='.')
    ax2.hlines(0, np.min(freq), np.max(freq), linestyle='dashed', lw=2)
    ax2.set_xscale('log')
    ax2.set_xlim(np.min(freq), 100)
    ax2.set_ylim(-0.003, 0.0025)
    ax2.set_xlabel('Frequency (Hz)', fontsize=18)
    ax2.set_ylabel(r'f $\cdot$ P(f) Residuals', fontsize=18)

    fig.subplots_adjust(hspace=0)
    plt.savefig('PSD_fit.png')


################################################################################
def get_fit(qpo_mod, freq, npn, npn_err):
    """
    Fit a QPO model plus a power law to f*P(f) with least squares.

    Background subtraction is currently disabled: the 'background' part of
    the spectrum (below 4 Hz and above 7 Hz) is only used to print its mean
    value. Alternative background treatments (mean subtraction, straight
    line, separate power law) were tried in earlier revisions.

    Parameters
    ----------
    qpo_mod : str
        'L' to fit a Lorentzian + power law, anything else for a
        Gaussian + power law.
    freq : np.array of floats
        Frequencies of the power spectrum, in Hz.
    npn : np.array of floats
        f*P(f) values of the power spectrum.
    npn_err : np.array of floats
        Errors on f*P(f) (currently unused by the fit).

    Returns
    -------
    np.array of floats
        Best-fit parameter vector [centroid, width, QPO scale, PL index,
        PL scale].
    """
    ##########################
    ## BACKGROUND SUBTRACTION
    ##########################

    ## Defining the 'background' part of the spectrum
    ind_bg_low = (freq > np.min(freq)) & (freq < 4.0)
    ind_bg_high = (freq > 7.0) & (freq < np.max(freq))

    freq_bg = np.concatenate((freq[ind_bg_low], freq[ind_bg_high]))
    npn_bg = np.concatenate((npn[ind_bg_low], npn[ind_bg_high]))

    print("Mean value of background %s" % np.mean(npn_bg))

    ## No background subtraction at present; the power law is co-fit with
    ## the QPO below instead.
    npn_bg_corr = npn

    ################
    ## FITTING DATA
    ################

    ## Initial fit parameter values:
    ## [centroid, width, QPO scale, PL index, PL scale]
    p = [5.4651295, 0.3752125, 0.01, -0.5, 0.01]

    ## Optimizing using least squares method
    if qpo_mod == "L":
        pbest = leastsq(L_pl_residuals, p, args=(npn_bg_corr, freq),
            full_output=1)
    else:
        pbest = leastsq(G_pl_residuals, p, args=(npn_bg_corr, freq),
            full_output=1)

    ## Get the best parameters from the fit
    best_fit = pbest[0]
    print(best_fit)

    return best_fit


################################################################################
def main(in_file, qpo_mod, prefix):
    """
    Load a power spectrum from a FITS file, fit it with a QPO + power law
    model, print and save the fit results, and plot the fit.

    Parameters
    ----------
    in_file : str
        Name of the power spectrum FITS file (usually the re-binned
        power spectrum).
    qpo_mod : str
        'L' for a Lorentzian QPO model, 'G' for a Gaussian.
    prefix : str
        Identifying prefix of the data, used to name the output table.
    """
    ################
    ## LOADING DATA
    ################

    try:
        file_hdu = fits.open(in_file)
    except IOError:
        print("\tERROR: File does not exist: %s" % in_file)
        exit()

    data = file_hdu[1].data
    file_hdu.close()

    freq = data.field('FREQUENCY')
    power = data.field('POWER')
    npn = freq * power
    npn_err = data.field('ERROR') * freq

    ## Dropping the zero-frequency bin
    freq = freq[1:]
    power = power[1:]
    npn = npn[1:]
    npn_err = npn_err[1:]

    #################################
    ## Get best fit of model to data
    #################################

    best_fit = get_fit(qpo_mod, freq, npn, npn_err)

    #########################
    ## Printing out best fit
    #########################

    if qpo_mod == "L":
        print("\nBest fit: Lorentzian + Power law")
        print("\tCentroid: %s" % best_fit[0])
        fwhm = best_fit[1]
        ## Fit Lorentzian to data
        best_qpo = lorentzian(freq, best_fit)
        best_resid = L_pl_residuals(best_fit, npn, freq)
    else:
        print("\nBest fit: Gaussian + Power law")
        print("\tMean: %s" % best_fit[0])
        print("\tStd dev: %s" % best_fit[1])
        ## Converting the Gaussian standard deviation to a FWHM
        fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) * best_fit[1]
        ## Fit Gaussian to data
        best_qpo = gaussian(freq, best_fit)
        best_resid = G_pl_residuals(best_fit, npn, freq)

    Q = best_fit[0] / fwhm
    print("\tFWHM: %s" % fwhm)
    print("\tQ value: %s" % Q)

    ## Fit power law to data
    best_pl = powerlaw(freq, best_fit)

    print("Mean of residuals: %s" % np.mean(best_resid))

    ## Appending the fit values to a table
    qpofit_file = os.path.dirname(in_file) + "/" + prefix + "_QPOfit.txt"
    print(qpofit_file)
    with open(qpofit_file, 'a') as out:
        out.write("%.4f \t %.4f \t %.4f \t %.5e\n" % (best_fit[0], fwhm, Q,
            np.mean(best_resid)))

    ###################################################
    ## Plot power spectrum with best fit and residuals
    ###################################################

    make_plots(freq, npn, best_qpo, best_pl, best_resid, npn_err)


################################################################################
if __name__ == '__main__':

    ## Parsing input arguments and calling main
    parser = argparse.ArgumentParser(usage='infile [--mod QPO_MOD] [--prefix '
        'PREFIX]', description="Fits a QPO power spectrum (f*P(f)) with a "
        "power law and either a Gaussian or a Lorentzian. Not intended to "
        "be robust, just to give an idea of how to quantify the QPO in the"
        " power spectrum.", epilog="For optional arguments, default values"
        " are given in brackets at end of description.")

    parser.add_argument('infile', help="Name of power spectrum to fit, in FITS"
        " format. Usually feed it the re-binned power spectrum.")

    parser.add_argument('--mod', dest='qpo_mod', choices=['G', 'g', 'L', 'l'],
        required=False, default='L', help="Function for QPO model: L for "
        "Lorentzian, G for Gaussian. [L]")

    parser.add_argument('--prefix', dest='prefix', required=False, default='--',
        help="The identifying prefix of the data (object nickname or "
        "proposal ID). [--]")

    args = parser.parse_args()

    qpo_mod = args.qpo_mod.upper()

    main(args.infile, qpo_mod, args.prefix)

################################################################################
{ "repo_name": "abigailStev/power_spectra", "path": "power_spectra/fit_qpo.py", "copies": "1", "size": "9649", "license": "mit", "hash": 2241896828218103800, "line_mean": 33.2163120567, "line_max": 93, "alpha_frac": 0.546688776, "autogenerated": false, "ratio": 3.110573823339781, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9120530568492424, "avg_score": 0.007346406169471324, "num_lines": 282 }
__author__ = "Kris Peng"

# An implementation of a single-layer perceptron.
# NOTE: despite the original "Python 2.7" comment, this module targets
# Python 3 (print() calls, functools.reduce, no tuple-unpacking lambdas).

from functools import reduce


class Perceptron(object):
    """A single-output perceptron trained with the perceptron learning rule."""

    def __init__(self, input_num, activator):
        """Initialize the perceptron.

        input_num -- number of input features (length of each input vector).
        activator -- activation function mapping the net input to a label.
        """
        self.activator = activator
        # One weight per input feature, all starting at zero.
        self.weights = [0.0 for _ in range(input_num)]
        self.bias = 0.0

    def __str__(self):
        # Human-readable dump of the learned parameters.
        return 'weights\t:%s\nbias\t:%f\n' % (self.weights, self.bias)

    def predict(self, input_vec):
        """Return activator(w . x + b) for one input vector."""
        weighted_sum = reduce(
            lambda a, b: a + b,
            map(lambda x_w: x_w[0] * x_w[1], zip(input_vec, self.weights)),
            0.0)
        return self.activator(weighted_sum + self.bias)

    def train(self, input_vecs, labels, iteration, rate):
        """Run `iteration` passes over the data at learning rate `rate`."""
        for _ in range(iteration):
            self._one_iteration(input_vecs, labels, rate)

    def _one_iteration(self, input_vecs, labels, rate):
        # One full pass over the training set, updating after every sample.
        for input_vec, label in zip(input_vecs, labels):
            output = self.predict(input_vec)
            self._update_weights(input_vec, output, label, rate)

    def _update_weights(self, input_vec, output, label, rate):
        # Perceptron rule: w += rate * (label - output) * x;
        #                  b += rate * (label - output).
        # BUG FIX: the original assigned a `map` object to self.weights.
        # In Python 3 that iterator is exhausted after a single traversal,
        # so every subsequent predict() silently saw an empty weight vector
        # and only the bias was ever learned. Materialize a list instead.
        delta = label - output
        self.weights = [w + rate * delta * x
                        for x, w in zip(input_vec, self.weights)]
        self.bias += rate * delta


def f(x):
    """Step activation: 1 for positive net input, else 0."""
    return 1 if x > 0 else 0


def get_training_dataset():
    """Return the truth table of logical AND as (inputs, labels)."""
    input_vecs = [[1, 1], [0, 0], [1, 0], [0, 1]]
    labels = [1, 0, 0, 0]
    return input_vecs, labels


def train_and_perceptron():
    """Train a 2-input perceptron on the AND function and return it."""
    p = Perceptron(2, f)
    input_vecs, labels = get_training_dataset()
    p.train(input_vecs, labels, 10, 0.01)
    return p


if __name__ == '__main__':
    and_preceptron = train_and_perceptron()
    print(and_preceptron)
    print('1 and 1 = %d' % and_preceptron.predict([1, 1]))
    print('0 and 0 = %d' % and_preceptron.predict([0, 0]))
    print('1 and 0 = %d' % and_preceptron.predict([1, 0]))
    print('0 and 1 = %d' % and_preceptron.predict([0, 1]))
{ "repo_name": "KrisCheng/ML-Learning", "path": "archive/Model/basic/perceptron.py", "copies": "1", "size": "2233", "license": "mit", "hash": -2316389161905663500, "line_mean": 30.9, "line_max": 70, "alpha_frac": 0.5588893865, "autogenerated": false, "ratio": 3.114365411436541, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4173254797936541, "avg_score": null, "num_lines": null }
#!~/anaconda/bin/ python ############################################################################### # # This uses PCA analysis tools to save the Final.csv file which # will be used to rank zip codes according to the 4 parameters (income, # housing, diversity, and population density # # ############################################################################### ################################################################################ # Imports ################################################################################ import pandas as pd import numpy as np from sklearn.decomposition import PCA as sklearnPCA from sklearn import preprocessing from sklearn.preprocessing import StandardScaler ################################################################################ # File Paths ################################################################################ #File locations acs_file = "../Data/raw_files/acs5yr.csv" zillow_HVI_file = "../Data/raw_files/Zip_Zhvi_AllHomes_HomeValueIndex.csv" zillow_RI_file = "../Data/raw_files/Zip_Zri_AllHomes_RentIndex.csv" urbanization_zip = "../Data/raw_files/zcta2010_txt.csv" ZCTA = "../Data/raw_files/ZCTA.csv" Final = "../Data/final_files/Final.csv" ################################################################################ # Function Definitions ################################################################################ def pca_analysis(indexname,dataframe): df = dataframe column_count = len(df.columns) X = df.ix[:,1:column_count].values zip = df.ix[:,0].values #Standardize Data X_std = StandardScaler().fit_transform(X) #Generate PCA Components sklearn_pca = sklearnPCA(n_components=1) Y_sklearn = sklearn_pca.fit_transform(X_std) explained_ratio = sklearn_pca.explained_variance_ratio_ covariance_array = sklearn_pca.get_covariance() df_final = pd.DataFrame({'zip5':zip,indexname:Y_sklearn[:,0]}) #Normalize Data on a 0 to 1 scale #zip5_final = df_final['zip5'].values #minmax_scale = 
preprocessing.MinMaxScaler().fit(df_final[[indexname]]) #minmax = minmax_scale.transform(df_final[[indexname]]) #df_minmax = pd.DataFrame({'zip5':zip5_final,indexname:minmax[:,0]}) return df_final def normalize_dataframe(dataframe): zip5_final = dataframe['zip5'].values minmax_scale = preprocessing.MinMaxScaler().fit(dataframe[['income_index', 'housing_index','urban_index','diversity_index']]) df_minmax = minmax_scale.transform(dataframe[['income_index', 'housing_index','urban_index','diversity_index']]) df_minmax_final = pd.DataFrame({'zip5':zip5_final,'income_index':df_minmax[:,0],'housing_index':df_minmax[:,1],'urban_index':df_minmax[:,2],'diversity_index':df_minmax[:,3]}) return df_minmax_final ################################################################################ # Main Execution ################################################################################ def main(): #ACS DATA (Diversity, Income, and Population Density) acs = pd.read_csv(acs_file) #Generate Diversity Index from race fields diversity = acs[['zip5','pop','race_white','race_black','race_asian','race_indian','race_other','hisp']].copy(deep=True) diversity['white_hisp'] = ((diversity['pop']*diversity['race_white'])*diversity['hisp'])/diversity['pop'] diversity['white_nonhisp'] = ((diversity['pop']*diversity['race_white'])*(1-diversity['hisp']))/diversity['pop'] diversity['div_index'] = 1- (diversity['race_black']**2 + diversity['white_hisp']**2 + diversity['white_nonhisp']**2 + diversity['race_asian']**2 + diversity['race_indian']**2) diversity_index = diversity[['zip5','div_index']].dropna(axis=0,how='any',subset=['zip5','div_index']) #Generate Income Index income_index = acs[['zip5','inc_median','poverty','snap','gini_index']].dropna(axis=0,how='all') #Population Density urban = pd.read_csv(urbanization_zip) urban.rename(columns={'Zip5':'zip5'},inplace=True) urban['zip5'] = urban.apply(lambda x: int(x['zip5']),axis=1) urban['pop'] = urban.apply(lambda x: int(x['POPULATION']),axis=1) 
urban['urban_index'] = urban['pop']/urban['LANDSQMT'] #print urban[urban.isnull().any(axis=1)] #urban_index = urban[['zip5','urban_index']][urban['pop']>0] urban_index = urban[['zip5','urban_index']].dropna(axis=0,how='any',subset=['zip5','urban_index']) #Zillow Data (Housing Cost) zillow_HVI = pd.read_csv(zillow_HVI_file) zillow_RI = pd.read_csv(zillow_RI_file) zillow_HVI = zillow_HVI[['RegionName','2014-01','2014-07','2015-01','2015-07']] zillow_HVI.rename(columns={'RegionName':'zip5'},inplace=True) zillow_RI = zillow_RI[['RegionName','2014-01','2014-07','2015-01','2015-07']].copy(False) zillow_RI.rename(columns={'RegionName':'zip5'},inplace=True) housing_index = pd.merge (zillow_HVI, zillow_RI,how='inner', on='zip5').dropna(axis=0,how='all') housing_index.loc[housing_index['2014-07_x'].isnull(),'2014-07_x'] = housing_index['2014-01_x'] #Return PCA Dataframes df_inc = pca_analysis('income_index',income_index) df_hou = pca_analysis('housing_index',housing_index) #Reverse Housing Index so higher cost = higher index df_hou['housing_index']= df_hou.apply(lambda x: 1-x['housing_index'],axis=1) df_div = pca_analysis('diversity_index',diversity_index) df_urb = pca_analysis('urban_index',urban_index) #Combine DataFrames from each separate index df = pd.merge (df_inc,df_hou,on='zip5') df = pd.merge (df,df_urb,on='zip5') df = pd.merge (df,df_div,on='zip5') #Normalize DataFrame #This is done after the merge so we are only normalizing on zip codes that exist in all 4 data frames. df_norm = normalize_dataframe (df) #Add Zip Code Descriptions ZipCode = pd.read_csv(ZCTA) df_all_final = pd.merge (df_norm,ZipCode[['zcta5','ZIPName','State']],left_on='zip5',right_on='zcta5',copy=False) del df_all_final['zcta5'] df_all_final = pd.merge(df_all_final,urban[['zip5','ZCTA5']],copy=False) #Write DataFrame to File df_all_final.to_csv(Final) if __name__ == '__main__': main()
{ "repo_name": "DistrictDataLabs/03-censusables", "path": "source/Model/Model.py", "copies": "1", "size": "6255", "license": "apache-2.0", "hash": -475700859443467200, "line_mean": 41.8424657534, "line_max": 180, "alpha_frac": 0.5828936851, "autogenerated": false, "ratio": 3.3288983501862695, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.44117920352862694, "avg_score": null, "num_lines": null }
#!/anaconda/bin/python -O
'''
attenuation.py

Estimate the parameters of an exponentially attenuated oscillation,
    serial = C + A0 exp(-gamma t) cos(omega t),
by locating the wave peaks and fitting an exponential to their envelope.

Coded by jungerstein. Please contact the coder before using this code
in Your original research project.
'''
from fit_exp import fit_exponential
from numpy import array, zeros
from fit_plot import plot_original
# Temp fix.
import matplotlib.pyplot as plt


def running_avg(serial, half_width):
    '''Centred moving average of *serial* with window 2*half_width + 1,
    clipped at both ends of the series.
    '''
    length = len(serial)
    smoothed = zeros(length)
    for centre in range(length):
        lo = max(0, centre - half_width)
        hi = min(length - 1, centre + half_width)
        smoothed[centre] = sum(serial[lo:hi + 1]) / (hi - lo + 1)
    return smoothed


def find_peaks(serial, half_width):
    '''Locate local maxima of the smoothed series.

    A point i is a peak when the smoothed value at i is at least as large
    as the smoothed values two points to either side.

    Returns two arrays: the peak subscripts and the raw serial values there.
    '''
    smooth = running_avg(serial, half_width)
    peak_subscripts = []
    peak_values = []
    for left in range(len(serial) - 4):
        centre = left + 2
        if smooth[left] <= smooth[centre] >= smooth[centre + 2]:
            peak_subscripts.append(centre)
            peak_values.append(serial[centre])
    return array(peak_subscripts), array(peak_values)


def evaluate_attenuation(serial, dt, scale_for_peak):
    '''Fit the exponential envelope of an attenuated oscillation.

    serial         -- numpy array time series.
    dt             -- time step between adjacent samples.
    scale_for_peak -- smoothing half-width (well above 2, below one
                      wavelength).

    Returns the fitted parameters A0, gamma, C (omega is ignored).
    '''
    subscripts, heights = find_peaks(serial, scale_for_peak)
    times = dt * subscripts
    plt.plot(times, heights, 'b+')
    # Rough exponential regression on the peak envelope.
    return fit_exponential(times, heights)
{ "repo_name": "jungerstein/plasmaPhyLib", "path": "anders/python/tools/attenuation/attenuation.py", "copies": "1", "size": "2192", "license": "apache-2.0", "hash": 4090323891174918700, "line_mean": 28.2266666667, "line_max": 68, "alpha_frac": 0.6145072993, "autogenerated": false, "ratio": 3.4465408805031448, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4561048179803145, "avg_score": null, "num_lines": null }
"""An action graph game with additive function nodes""" import itertools import numpy as np import scipy.stats as spt from gameanalysis import rsgame from gameanalysis import utils class _AgfnGame(rsgame._CompleteGame): # pylint: disable=too-many-instance-attributes,protected-access """Action graph with function nodes game Action node utilities have additive structure. Function nodes are contribution-independent. Graph is bipartite so that function nodes have in-edges only from action nodes and vise versa. Parameters ---------- role_names : (str,) The name of each role. strat_names : ((str,),) The name of each strategy per role. num_role_players : ndarray The number of players for each role. action_weights : ndarray Each entry specifies the incoming weight in the action graph for the action node (column). Must have shape (num_functions, num_strats). The action weights for a particular function can't be all zero, otherwise that function should not exist. function_inputs : ndarray Each entry specifies whether the action node (row) is an input to the function node (col). Must have shape (num_strats, num_functions). function_table : ndarray Value of arbitrary functions for a number of players activating the function. This can either have shape (num_functions, num_players + 1) or (num_functions,) + tuple(num_role_players + 1). The former treats different roles ass simply different strategy sets, the later treats each nodes inputs as distinct, and so each function maps from the number of inputs from each role. 
""" def __init__( # pylint: disable=too-many-arguments self, role_names, strat_names, num_role_players, action_weights, function_inputs, function_table, offsets): super().__init__(role_names, strat_names, num_role_players) self.num_functions, *_ = function_table.shape self.action_weights = action_weights self.action_weights.setflags(write=False) self.function_inputs = function_inputs self.function_inputs.setflags(write=False) self.function_table = function_table self.function_table.setflags(write=False) self.offsets = offsets self.offsets.setflags(write=False) # Pre-compute derivative info self._dinputs = np.zeros( (self.num_strats, self.num_functions, self.num_roles), bool) self._dinputs[np.arange(self.num_strats), :, self.role_indices] = ( self.function_inputs) self._dinputs.setflags(write=False) # Compute other bookmarking stuff self._basis = np.insert( np.cumprod(self.num_role_players[:0:-1] + 1)[::-1], self.num_roles - 1, 1) self._func_offset = (np.arange(self.num_functions) * np.prod(self.num_role_players + 1)) @utils.memoize def min_strat_payoffs(self): """Returns a lower bound on the payoffs.""" node_table = self.function_table.reshape((self.num_functions, -1)) minima = node_table.min(1, keepdims=True) maxima = node_table.max(1, keepdims=True) eff_min = np.where(self.action_weights > 0, minima, maxima) mins = np.einsum( 'ij,ij->j', eff_min, self.action_weights) + self.offsets mins.setflags(write=False) return mins.view() @utils.memoize def max_strat_payoffs(self): """Returns an upper bound on the payoffs.""" node_table = self.function_table.reshape((self.num_functions, -1)) minima = node_table.min(1, keepdims=True) maxima = node_table.max(1, keepdims=True) eff_max = np.where(self.action_weights > 0, maxima, minima) maxs = np.einsum( 'ij,ij->j', eff_max, self.action_weights) + self.offsets maxs.setflags(write=False) return maxs.view() def get_payoffs(self, profiles): """Returns an array of profile payoffs.""" profiles = np.asarray(profiles, int) 
function_inputs = np.add.reduceat( profiles[..., None, :] * self.function_inputs.T, self.role_starts, -1) inds = function_inputs.dot(self._basis) + self._func_offset function_outputs = self.function_table.ravel()[inds] payoffs = function_outputs.dot(self.action_weights) + self.offsets payoffs[profiles == 0] = 0 return payoffs # TODO override get_dev_payoffs to be more efficient, i.e. only compute the # dev payoff. def deviation_payoffs(self, mixture, *, jacobian=False, **_): # pylint: disable=too-many-locals """Get the deviation payoffs""" mixture = np.asarray(mixture, float) role_node_probs = np.minimum( np.add.reduceat(mixture[:, None] * self.function_inputs, self.role_starts), 1)[..., None] table_probs = np.ones( (self.num_roles, self.num_functions) + tuple(self.num_role_players + 1), float) for i, (num_play, probs) in enumerate(zip(self.num_role_players, role_node_probs)): role_probs = spt.binom.pmf( np.arange(num_play + 1), num_play, probs) dev_role_probs = spt.binom.pmf( np.arange(num_play + 1), num_play - 1, probs) new_shape = [self.num_functions] + [1] * self.num_roles new_shape[i + 1] = num_play + 1 role_probs.shape = new_shape dev_role_probs.shape = new_shape table_probs[:i] *= role_probs table_probs[i] *= dev_role_probs table_probs[i + 1:] *= role_probs dev_probs = table_probs.repeat(self.num_role_strats, 0) for role, (rinps, rdev_probs) in enumerate(zip( np.split(self.function_inputs, self.role_starts[1:], 0), np.split(dev_probs, self.role_starts[1:], 0))): rdev_probs[rinps] = np.roll(rdev_probs[rinps], 1, role + 1) dev_vals = np.reshape(dev_probs * self.function_table, (self.num_strats, self.num_functions, -1)) devs = (np.einsum('ijk,ji->i', dev_vals, self.action_weights) + self.offsets) if not jacobian: return devs deriv = np.empty((self.num_roles, self.num_roles, self.num_functions) + tuple(self.num_role_players + 1), float) for i, (num_play, probs, zprob) in enumerate(zip( self.num_role_players, role_node_probs, self.zero_prob)): # TODO This zprob 
threshold causes large errors in the jacobian # when we look at sparse mixtures. This should probably be # addressed, but it's unclear how without making this significantly # slower. configs = np.arange(num_play + 1) der = (configs / (probs + zprob) - configs[::-1] / (1 - probs + zprob)) dev_der = np.insert( configs[:-1] / (probs + zprob) - configs[-2::-1] / (1 - probs + zprob), num_play, 0, 1) new_shape = [self.num_functions] + [1] * self.num_roles new_shape[i + 1] = num_play + 1 der.shape = new_shape dev_der.shape = new_shape deriv[:i, i] = der deriv[i, i] = dev_der deriv[i + 1:, i] = der dev_deriv = np.rollaxis(deriv, 2, 1).repeat(self.num_role_strats, 0) for role, (rinps, rdev_deriv) in enumerate(zip( np.split(self.function_inputs, self.role_starts[1:], 0), np.split(dev_deriv, self.role_starts[1:], 0))): rdev_deriv[rinps] = np.roll(rdev_deriv[rinps], 1, role + 2) dev_values = dev_probs[:, :, None] * \ dev_deriv * self.function_table[:, None] dev_values.shape = (self.num_strats, self.num_functions, self.num_roles, -1) jac = np.einsum('iklm,jkl,ki->ij', dev_values, self._dinputs, self.action_weights) return devs, jac def _add_constant(self, constant): off = np.broadcast_to(constant, self.num_roles).repeat( self.num_role_strats) return _AgfnGame( self.role_names, self.strat_names, self.num_role_players, self.action_weights, self.function_inputs, self.function_table, self.offsets + off) def _multiply_constant(self, constant): mul = np.broadcast_to(constant, self.num_roles).repeat( self.num_role_strats) return _AgfnGame( self.role_names, self.strat_names, self.num_role_players, self.action_weights * mul, self.function_inputs, self.function_table, self.offsets * mul) def _add_game(self, othr): try: return _AgfnGame( self.role_names, self.strat_names, self.num_role_players, np.concatenate([self.action_weights, othr.action_weights]), np.concatenate([self.function_inputs, othr.function_inputs], 1), np.concatenate([self.function_table, othr.function_table]), self.offsets 
+ othr.offsets) except AttributeError: return NotImplemented def restrict(self, restriction): restriction = np.asarray(restriction, bool) base = rsgame.empty_copy(self).restrict(restriction) action_weights = self.action_weights[:, restriction] func_mask = np.any(~np.isclose(action_weights, 0), 1) return _AgfnGame( base.role_names, base.strat_names, base.num_role_players, action_weights[func_mask], self.function_inputs[:, func_mask][restriction], self.function_table[func_mask], self.offsets[restriction]) def to_json(self): res = super().to_json() res['function_inputs'] = [ self.restriction_to_json(finp) for finp in self.function_inputs.T] res['action_weights'] = [ self.payoff_to_json(ws) for ws in self.action_weights] # XXX This will fail if a role has the name 'value', do we care? res['function_tables'] = [ [dict(zip(self.role_names, (c.item() for c in counts)), value=val) for val, *counts in zip( tab.ravel(), *np.indices(tab.shape).reshape(self.num_roles, -1)) if val != 0] for tab in self.function_table] if not np.allclose(self.offsets, 0): res['offsets'] = self.payoff_to_json(self.offsets) res['type'] = 'aggfn.3' return res def __repr__(self): return '{old}, {nfuncs:d})'.format( old=super().__repr__()[:-1], nfuncs=self.num_functions) def __eq__(self, othr): return (super().__eq__(othr) and self.num_functions == othr.num_functions and np.allclose(self.offsets, othr.offsets) and utils.allclose_perm( np.concatenate( [self.action_weights, self.function_inputs.T, self.function_table.reshape(self.num_functions, -1)], 1), np.concatenate( [othr.action_weights, othr.function_inputs.T, othr.function_table.reshape(othr.num_functions, -1)], 1))) @utils.memoize def __hash__(self): return hash(( super().__hash__(), np.sort(utils.axis_to_elem(self.function_inputs.T)).tobytes())) def aggfn( # pylint: disable=too-many-arguments num_role_players, num_role_strats, action_weights, function_inputs, function_table, offsets=None): """Create an Aggfn with default names Parameters 
---------- num_role_players : ndarray The number of players per role. num_role_strats : ndarray The number of strategies per role. action_weights : ndarray, float The action weights. function_inputs : ndarray, bool The input mask for each function. function_table : ndarray, float The function value relative to number of incoming edges. offsets : ndarray, float, optional A constant offset for each strategies payoff. Constant functions are not allowed in the function table as they are clutter, instead, constant functions can be specified here. """ return aggfn_replace( rsgame.empty(num_role_players, num_role_strats), action_weights, function_inputs, function_table, offsets) def aggfn_names( # pylint: disable=too-many-arguments role_names, num_role_players, strat_names, action_weights, function_inputs, function_table, offsets=None): """Create an Aggfn with specified names Parameters ---------- role_names : [str] The name of each role. num_role_players : ndarray The number of players for each role. strat_names : [[str]] The name of each strategy for each role. action_weights : ndarray The mapping of each function to the strategy weight for a player. function_inpits : ndarray The mask indicating which strategies are inputs to which function. offsets : ndarray, float, optional A constant offset for each strategies payoff. Constant functions are not allowed in the function table as they are clutter, instead, constant functions can be specified here. """ return aggfn_replace( rsgame.empty_names(role_names, num_role_players, strat_names), action_weights, function_inputs, function_table, offsets) # TODO Make aggfn_copy method that will clone the aggfn game if it is one, # else, it will regress on profiles to compute one. def aggfn_replace(copy_game, action_weights, function_inputs, function_table, offsets=None): """Replace an existing game with an Aggfn Parameters ---------- copy_game : RsGame The game to take game structure from. 
action_weights : ndarray-like The weights of each function to player payoffs. function_inputs : ndarray-like The mask of each strategy to function. function_table : ndarray-like The lookup table of number of incoming edges to function value. offsets : ndarray, float, optional A constant offset for each strategies payoff. Constant functions are not allowed in the function table as they are clutter, instead, constant functions can be specified here. """ if offsets is None: offsets = np.zeros(copy_game.num_strats) action_weights = np.asarray(action_weights, float) function_inputs = np.asarray(function_inputs, bool) function_table = np.asarray(function_table, float) offsets = np.asarray(offsets, float) num_funcs, *one_plays = function_table.shape utils.check(num_funcs > 0, 'must have at least one function') utils.check( action_weights.shape == (num_funcs, copy_game.num_strats), 'action_weights must have shape (num_functions, num_strats) but got ' '{}', action_weights.shape) utils.check( function_inputs.shape == (copy_game.num_strats, num_funcs), 'function_inputs must have shape (num_strats, num_functions) but got ' '{}', function_inputs.shape) utils.check( not function_inputs.all(0).any(), "can't have a function with input from every strategy") utils.check( function_inputs.any(0).all(), 'every function must take input from at least one strategy') utils.check( one_plays == list(copy_game.num_role_players + 1), 'function_table must have shape ' '(num_functions, ... 
num_role_players + 1) but got {}', function_table.shape) utils.check( not np.isclose( function_table.reshape((num_funcs, -1))[:, 0, None], function_table.reshape((num_funcs, -1))).all(1).any(), "a function can't be constant (all identical values)") utils.check( not np.isclose(action_weights, 0).all(1).any(), "a function can't have actions weights of all zero") utils.check( offsets.shape == (copy_game.num_strats,), 'offsets must have shape (num_strats,) but got {}', offsets.shape) return _AgfnGame( copy_game.role_names, copy_game.strat_names, copy_game.num_role_players, action_weights, function_inputs, function_table, offsets) def aggfn_funcs( # pylint: disable=too-many-arguments num_role_players, num_role_strats, action_weights, function_inputs, functions, offsets=None): """Construct and Aggfn with functions This is generally less efficient than just constructing the function table using vectorized operations or an existing function table. Parameters ---------- num_role_players : ndarray The number of players per role. num_role_strats : ndarray The number of strategies per role. action_weights : ndarray, float The action weights. function_inputs : ndarray, bool The input mask for each function. functions : [(nr1, nr2, ...) -> float] List of functions that maps the player per role activations to a single value. The number of ordered arguments will be inferred from each function. 
""" utils.check(functions, 'must have at least one function') num_functions = len(functions) base = rsgame.empty(num_role_players, num_role_strats) function_table = np.empty( (num_functions,) + tuple(base.num_role_players + 1), float) for func, tab in zip(functions, function_table): for play in itertools.product(*map(range, base.num_role_players + 1)): tab[play] = func(*play) return aggfn_replace( base, action_weights, function_inputs, function_table, offsets) def aggfn_json(json): # pylint: disable=too-many-locals """Read an Aggfn from json Json versions of the game will generally have 'type': 'aggfn...' in them, but as long as the proper fields exist, this will succeed.""" base = rsgame.empty_json(json) _, version = json.get('type', '.3').split('.', 1) utils.check( version == '3', 'parsing versions below 3 is currently unsupported') num_functions = len(json['function_tables']) function_inputs = np.empty((base.num_strats, num_functions), bool) action_weights = np.empty((num_functions, base.num_strats)) function_table = np.empty( (num_functions,) + tuple(base.num_role_players + 1)) offsets = np.empty(base.num_strats) base.payoff_from_json(json.get('offsets', {}), offsets) for inps, jinps in zip(function_inputs.T, json['function_inputs']): base.restriction_from_json(jinps, inps, verify=False) for weights, jweights in zip(action_weights, json['action_weights']): base.payoff_from_json(jweights, weights) function_table.fill(0) for table, jtable in zip(function_table, json['function_tables']): for elem in jtable: copy = elem.copy() value = copy.pop('value') table[tuple(int(i) for i in base.role_from_json(copy))] = value return aggfn_replace( base, action_weights, function_inputs, function_table, offsets)
{ "repo_name": "egtaonline/GameAnalysis", "path": "gameanalysis/aggfn.py", "copies": "1", "size": "19626", "license": "apache-2.0", "hash": -3368134899194299000, "line_mean": 40.4926004228, "line_max": 102, "alpha_frac": 0.6085804545, "autogenerated": false, "ratio": 3.8550383028874484, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49636187573874485, "avg_score": null, "num_lines": null }
# Permission model. An action on an annotation is allowed when any of the
# following scenarios holds:
#
# 1) the action's permissions list contains the magic value 'group:__world__'
# 2) the requesting user is the annotation's owner (same id and consumer)
# 3) a user/consumer are present and the list contains
#    'group:__authenticated__'
# 4) the consumers match and the list contains 'group:__consumer__'
# 5) the consumers match and the user id is listed explicitly
# 6) the consumers match and the user is an admin

GROUP_WORLD = 'group:__world__'
GROUP_AUTHENTICATED = 'group:__authenticated__'
GROUP_CONSUMER = 'group:__consumer__'


def authorize(annotation, action, user=None):
    """Return True when *user* may perform *action* on *annotation*."""
    granted = annotation.get('permissions', {}).get(action, [])

    # Scenario 1: open to the world, no user required.
    if GROUP_WORLD in granted:
        return True
    if user is None:
        return False
    # Refuse ids that masquerade as magic group tokens.
    if user.id.startswith('group:'):
        return False

    owner_id, owner_key = _annotation_owner(annotation)

    # Scenario 2: the user owns the annotation.
    if user.id == owner_id and user.consumer.key == owner_key:
        return True
    # Scenario 3: any authenticated user.
    if GROUP_AUTHENTICATED in granted:
        return True
    # Scenarios 4-6 additionally require a consumer match.
    if user.consumer.key == owner_key:
        if GROUP_CONSUMER in granted:
            return True
        if user.id in granted:
            return True
        if user.is_admin:
            return True
    return False


def _annotation_owner(annotation):
    # Extract (user id, consumer key) from an annotation. The 'user' field
    # may be a plain id or a dict carrying the id under 'id'.
    owner = annotation.get('user')
    consumer = annotation.get('consumer')
    if not owner:
        return (owner, consumer)
    try:
        return (owner.get('id', None), consumer)
    except AttributeError:
        return (owner, consumer)


def permissions_filter(user=None):
    """Filter an ElasticSearch query by the permissions of the current user"""
    world = {'term': {'permissions.read': GROUP_WORLD}}

    # Anonymous requests only ever see world-readable annotations.
    if user is None:
        return world
    # Refuse ids that masquerade as magic group tokens.
    if user.id.startswith('group:'):
        return False

    key = user.consumer.key
    clauses = [
        world,
        # Scenario 2
        {'term': {'consumer': key}},
        # Scenario 3
        {'term': {'permissions.read': GROUP_AUTHENTICATED}},
        # Scenario 4
        {'and': [{'term': {'consumer': key}},
                 {'term': {'permissions.read': GROUP_CONSUMER}}]},
        # Scenario 5
        {'and': [{'term': {'consumer': key}},
                 {'term': {'permissions.read': user.id}}]},
    ]
    # Scenario 6
    if user.is_admin:
        clauses.append({'term': {'consumer': key}})
    return {'or': clauses}
{ "repo_name": "nobita-isc/annotator-store", "path": "annotator/authz.py", "copies": "1", "size": "3226", "license": "mit", "hash": -1511482796902639900, "line_mean": 28.0630630631, "line_max": 78, "alpha_frac": 0.6029138252, "autogenerated": false, "ratio": 4.120051085568327, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5222964910768327, "avg_score": null, "num_lines": null }
# An action map represents a group of tiles mapping from the 'dungeon.png' spritesheet.
# A randomly generated map is in the constructor.
# TODO: Create more interesting maps.
# TODO: Allow map expansion.
class ActionMap():
    """A 2-D grid of spritesheet tile indices, addressed as tiles[x][y]."""

    MAP_WIDTH_IN_TILES = 1000
    MAP_HEIGHT_IN_TILES = 1000

    # The following are indices of tiles in the spritesheet.
    T_EMPTY = 69
    T_LAND_BL = 0      # BL = bottom left.
    T_LAND_BR = 4      # BR = bottom right.
    T_LAND_TL = 84     # TL = top left.
    T_LAND_TR = 88     # TR = top right.
    T_LAND_B = 1
    T_LAND_B_DROP = 6  # Drop is a tile below a bottom one.
    T_LAND_T = 85
    T_LAND_L = 63
    T_LAND_R = 67
    T_LAND_MID = 44    # A generic land tile.

    def __init__(self):
        # BUG FIX: the grid used to be a class attribute grown with append()
        # inside __init__, so every new ActionMap instance shared and
        # *extended* the same list. Build a fresh per-instance grid instead.
        self.tiles = [[self.T_EMPTY] * self.MAP_HEIGHT_IN_TILES
                      for _ in range(self.MAP_WIDTH_IN_TILES)]
        self.create_island(10, 10, 30, 30)
        self.create_island(100, 100, 60, 60)

    def get_tile(self, x, y):
        """Return the tile index at (x, y), or T_EMPTY when out of bounds."""
        # BUG FIX: valid indices run 0 .. size-1, so the bound check must be
        # >= size (the old `> size` let x == 1000 fall through to an
        # IndexError).
        if x < 0 or y < 0 or x >= self.MAP_WIDTH_IN_TILES \
                or y >= self.MAP_HEIGHT_IN_TILES:
            return self.T_EMPTY
        return self.tiles[x][y]

    def width(self):
        """Map width in tiles."""
        return self.MAP_WIDTH_IN_TILES

    def height(self):
        """Map height in tiles."""
        return self.MAP_HEIGHT_IN_TILES

    # Create a grouping of interesting dungeon stuff at the specified
    # coordinates and size.
    def create_island(self, x, y, width, height):
        """Draw a rectangular island whose far corner is (x+width, y+height)."""
        # BUG FIX: the far edge indexes tiles[x + width] (and row
        # y + height), so x + width must be <= size - 1; the old `>`
        # comparison allowed an IndexError at exactly size. x, y must stay
        # >= 1 because the drop row writes at y - 1.
        if x <= 0 or y <= 0 or x + width >= self.MAP_WIDTH_IN_TILES \
                or y + height >= self.MAP_HEIGHT_IN_TILES:
            print("Out of bounds for creating island")
            return

        # Corners
        self.tiles[x][y] = self.T_LAND_BL
        self.tiles[x + width][y] = self.T_LAND_BR
        self.tiles[x][y + height] = self.T_LAND_TL
        self.tiles[x + width][y + height] = self.T_LAND_TR

        # Special bottom tiles below land
        self.tiles[x][y - 1] = self.T_LAND_B_DROP
        self.tiles[x + width][y - 1] = self.T_LAND_B_DROP

        # Top and bottom
        for i in range(1, width):
            self.tiles[x + i][y] = self.T_LAND_B
            self.tiles[x + i][y + height] = self.T_LAND_T
            self.tiles[x + i][y - 1] = self.T_LAND_B_DROP

        # Left and right
        for j in range(1, height):
            self.tiles[x][y + j] = self.T_LAND_L
            self.tiles[x + width][y + j] = self.T_LAND_R

        # All middle tiles
        for i in range(1, width):
            for j in range(1, height):
                self.tiles[x + i][y + j] = self.T_LAND_MID
{ "repo_name": "Corrob/Action-Game", "path": "action_map.py", "copies": "1", "size": "2751", "license": "mit", "hash": -1964015958565858000, "line_mean": 37.2083333333, "line_max": 91, "alpha_frac": 0.5561613959, "autogenerated": false, "ratio": 3.0566666666666666, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.41128280625666663, "avg_score": null, "num_lines": null }
""" An action that resets *all* perspectives. """ # Enthought library imports. from enthought.pyface.api import YES # Local imports. from workbench_action import WorkbenchAction # The message used when confirming the action. MESSAGE = 'Do you want to reset ALL perspectives to their defaults?' class ResetAllPerspectivesAction(WorkbenchAction): """ An action that resets *all* perspectives. """ #### 'Action' interface ################################################### # The action's unique identifier (may be None). id = 'enthought.pyface.workbench.action.reset_all_perspectives' # The action's name (displayed on menus/tool bar tools etc). name = 'Reset All Perspectives' ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Perform the action. """ window = self.window if window.confirm(MESSAGE) == YES: window.reset_all_perspectives() return #### EOF ######################################################################
{ "repo_name": "enthought/traitsgui", "path": "enthought/pyface/workbench/action/reset_all_perspectives_action.py", "copies": "1", "size": "1179", "license": "bsd-3-clause", "hash": -6225161870719519000, "line_mean": 28.475, "line_max": 79, "alpha_frac": 0.5131467345, "autogenerated": false, "ratio": 4.754032258064516, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5767178992564516, "avg_score": null, "num_lines": null }
""" An action that resets *all* perspectives. """ # Enthought library imports. from pyface.api import YES # Local imports. from workbench_action import WorkbenchAction # The message used when confirming the action. MESSAGE = 'Do you want to reset ALL perspectives to their defaults?' class ResetAllPerspectivesAction(WorkbenchAction): """ An action that resets *all* perspectives. """ #### 'Action' interface ################################################### # The action's unique identifier (may be None). id = 'pyface.workbench.action.reset_all_perspectives' # The action's name (displayed on menus/tool bar tools etc). name = 'Reset All Perspectives' ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Perform the action. """ window = self.window if window.confirm(MESSAGE) == YES: window.reset_all_perspectives() return #### EOF ######################################################################
{ "repo_name": "pankajp/pyface", "path": "pyface/workbench/action/reset_all_perspectives_action.py", "copies": "2", "size": "1159", "license": "bsd-3-clause", "hash": 441862976656404700, "line_mean": 27.975, "line_max": 79, "alpha_frac": 0.5064710958, "autogenerated": false, "ratio": 4.829166666666667, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6335637762466666, "avg_score": null, "num_lines": null }
""" An action that resets the active perspective. """ # Enthought library imports. from enthought.pyface.api import YES # Local imports. from workbench_action import WorkbenchAction # The message used when confirming the action. MESSAGE = 'Do you want to reset the current "%s" perspective to its defaults?' class ResetActivePerspectiveAction(WorkbenchAction): """ An action that resets the active perspective. """ #### 'Action' interface ################################################### # The action's unique identifier (may be None). id = 'enthought.pyface.workbench.action.reset_active_perspective' # The action's name (displayed on menus/tool bar tools etc). name = 'Reset Perspective' ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Perform the action. """ window = self.window if window.confirm(MESSAGE % window.active_perspective.name) == YES: window.reset_active_perspective() return #### EOF ######################################################################
{ "repo_name": "enthought/traitsgui", "path": "enthought/pyface/workbench/action/reset_active_perspective_action.py", "copies": "1", "size": "1231", "license": "bsd-3-clause", "hash": -745904422573880300, "line_mean": 29.775, "line_max": 79, "alpha_frac": 0.5272136474, "autogenerated": false, "ratio": 4.904382470119522, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.000625, "num_lines": 40 }
""" An action that resets the active perspective. """ # Enthought library imports. from pyface.api import YES # Local imports. from workbench_action import WorkbenchAction # The message used when confirming the action. MESSAGE = 'Do you want to reset the current "%s" perspective to its defaults?' class ResetActivePerspectiveAction(WorkbenchAction): """ An action that resets the active perspective. """ #### 'Action' interface ################################################### # The action's unique identifier (may be None). id = 'pyface.workbench.action.reset_active_perspective' # The action's name (displayed on menus/tool bar tools etc). name = 'Reset Perspective' ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Perform the action. """ window = self.window if window.confirm(MESSAGE % window.active_perspective.name) == YES: window.reset_active_perspective() return #### EOF ######################################################################
{ "repo_name": "brett-patterson/pyface", "path": "pyface/workbench/action/reset_active_perspective_action.py", "copies": "2", "size": "1211", "license": "bsd-3-clause", "hash": 4954393924570950000, "line_mean": 29.275, "line_max": 79, "alpha_frac": 0.5210569777, "autogenerated": false, "ratio": 4.983539094650205, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6504596072350205, "avg_score": null, "num_lines": null }
""" An action that resets the active perspective. """ # Enthought library imports. from pyface.api import YES # Local imports. from .workbench_action import WorkbenchAction # The message used when confirming the action. MESSAGE = 'Do you want to reset the current "%s" perspective to its defaults?' class ResetActivePerspectiveAction(WorkbenchAction): """ An action that resets the active perspective. """ #### 'Action' interface ################################################### # The action's unique identifier (may be None). id = 'pyface.workbench.action.reset_active_perspective' # The action's name (displayed on menus/tool bar tools etc). name = 'Reset Perspective' ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Perform the action. """ window = self.window if window.confirm(MESSAGE % window.active_perspective.name) == YES: window.reset_active_perspective() return #### EOF ######################################################################
{ "repo_name": "geggo/pyface", "path": "pyface/workbench/action/reset_active_perspective_action.py", "copies": "3", "size": "1212", "license": "bsd-3-clause", "hash": 3126381738285527000, "line_mean": 29.3, "line_max": 79, "alpha_frac": 0.5206270627, "autogenerated": false, "ratio": 4.967213114754099, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.000625, "num_lines": 40 }
""" An action that sets the active perspective. """ # Enthought library imports. from enthought.pyface.workbench.api import IPerspective from enthought.traits.api import Delegate, Instance, on_trait_change # Local imports. from workbench_action import WorkbenchAction class SetActivePerspectiveAction(WorkbenchAction): """ An action that sets the active perspective. """ #### 'Action' interface ################################################### # Is the action enabled? enabled = Delegate('perspective') # The action's unique identifier (may be None). id = Delegate('perspective') # The action's name (displayed on menus/tool bar tools etc). name = Delegate('perspective') # The action's style. style = 'radio' #### 'SetActivePerspectiveAction' interface ############################### # The perspective that we set the active perspective to. perspective = Instance(IPerspective) ########################################################################### # 'Action' interface. ########################################################################### def destroy(self): """ Destroy the action. """ self.window = None return def perform(self, event): """ Perform the action. """ self.window.active_perspective = self.perspective return ########################################################################### # Private interface. ########################################################################### @on_trait_change('perspective,window.active_perspective') def _refresh_checked(self): """ Refresh the checked state of the action. """ self.checked = self.perspective is not None \ and self.window is not None \ and self.window.active_perspective is not None \ and self.perspective.id is self.window.active_perspective.id return #### EOF ######################################################################
{ "repo_name": "enthought/traitsgui", "path": "enthought/pyface/workbench/action/set_active_perspective_action.py", "copies": "1", "size": "2040", "license": "bsd-3-clause", "hash": -5059343087948970000, "line_mean": 29.447761194, "line_max": 79, "alpha_frac": 0.5093137255, "autogenerated": false, "ratio": 5.125628140703518, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0013960247699197972, "num_lines": 67 }
""" An action that sets the active perspective. """ # Enthought library imports. from pyface.workbench.api import IPerspective from traits.api import Delegate, Instance, on_trait_change # Local imports. from .workbench_action import WorkbenchAction class SetActivePerspectiveAction(WorkbenchAction): """ An action that sets the active perspective. """ #### 'Action' interface ################################################### # Is the action enabled? enabled = Delegate('perspective') # The action's unique identifier (may be None). id = Delegate('perspective') # The action's name (displayed on menus/tool bar tools etc). name = Delegate('perspective') # The action's style. style = 'radio' #### 'SetActivePerspectiveAction' interface ############################### # The perspective that we set the active perspective to. perspective = Instance(IPerspective) ########################################################################### # 'Action' interface. ########################################################################### def destroy(self): """ Destroy the action. """ self.window = None return def perform(self, event): """ Perform the action. """ self.window.active_perspective = self.perspective return ########################################################################### # Private interface. ########################################################################### @on_trait_change('perspective,window.active_perspective') def _refresh_checked(self): """ Refresh the checked state of the action. """ self.checked = self.perspective is not None \ and self.window is not None \ and self.window.active_perspective is not None \ and self.perspective.id is self.window.active_perspective.id return #### EOF ######################################################################
{ "repo_name": "geggo/pyface", "path": "pyface/workbench/action/set_active_perspective_action.py", "copies": "3", "size": "2021", "license": "bsd-3-clause", "hash": -2923508972164040000, "line_mean": 29.1641791045, "line_max": 79, "alpha_frac": 0.5051954478, "autogenerated": false, "ratio": 5.168797953964194, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.7173993401764195, "avg_score": null, "num_lines": null }
""" An action that shows a dialog to allow the user to choose a view. """ # Local imports. from view_chooser import ViewChooser from workbench_action import WorkbenchAction class ShowViewAction(WorkbenchAction): """ An action that shows a dialog to allow the user to choose a view. """ #### 'Action' interface ################################################### # The action's unique identifier (may be None). id = 'enthought.pyface.workbench.action.show_view' # The action's name (displayed on menus/tool bar tools etc). name = 'Show View' ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Perform the action. """ chooser = ViewChooser(window=self.window) ui = chooser.edit_traits(parent=self.window.control, kind='livemodal') # If the user closes the dialog by using the window manager's close button # (e.g. the little [x] in the top corner), ui.result is True, but chooser.view # might be None, so we need an explicit check for that. if ui.result and chooser.view is not None: # This shows the view... chooser.view.show() # ... and this makes it active (brings it to the front, gives it # focus etc). chooser.view.activate() return #### EOF ######################################################################
{ "repo_name": "enthought/traitsgui", "path": "enthought/pyface/workbench/action/show_view_action.py", "copies": "1", "size": "1549", "license": "bsd-3-clause", "hash": 2612984805936846000, "line_mean": 34.2045454545, "line_max": 86, "alpha_frac": 0.5196901227, "autogenerated": false, "ratio": 4.766153846153846, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0011032374010147173, "num_lines": 44 }
""" An action that shows a dialog to allow the user to choose a view. """ # Local imports. from .view_chooser import ViewChooser from .workbench_action import WorkbenchAction class ShowViewAction(WorkbenchAction): """ An action that shows a dialog to allow the user to choose a view. """ #### 'Action' interface ################################################### # The action's unique identifier (may be None). id = 'pyface.workbench.action.show_view' # The action's name (displayed on menus/tool bar tools etc). name = 'Show View' ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Perform the action. """ chooser = ViewChooser(window=self.window) ui = chooser.edit_traits(parent=self.window.control, kind='livemodal') # If the user closes the dialog by using the window manager's close button # (e.g. the little [x] in the top corner), ui.result is True, but chooser.view # might be None, so we need an explicit check for that. if ui.result and chooser.view is not None: # This shows the view... chooser.view.show() # ... and this makes it active (brings it to the front, gives it # focus etc). chooser.view.activate() return #### EOF ######################################################################
{ "repo_name": "geggo/pyface", "path": "pyface/workbench/action/show_view_action.py", "copies": "3", "size": "1541", "license": "bsd-3-clause", "hash": 3706138466711397400, "line_mean": 34.0227272727, "line_max": 86, "alpha_frac": 0.5165476963, "autogenerated": false, "ratio": 4.7708978328173375, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0011032374010147173, "num_lines": 44 }
""" An action that toggles a view's visibility (ie. hides/shows it). """ # Enthought library imports. from enthought.pyface.workbench.api import IView from enthought.traits.api import Delegate, Instance # Local imports. from workbench_action import WorkbenchAction class ToggleViewVisibilityAction(WorkbenchAction): """ An action that toggles a view's visibility (ie. hides/shows it). """ #### 'Action' interface ################################################### # The action's unique identifier (may be None). id = Delegate('view', modify=True) # The action's name (displayed on menus/tool bar tools etc). name = Delegate('view', modify=True) # The action's style. style = 'toggle' #### 'ViewAction' interface ############################################### # The view that we toggle the visibility for. view = Instance(IView) ########################################################################### # 'Action' interface. ########################################################################### def destroy(self): """ Called when the action is no longer required. """ if self.view is not None: self._remove_view_listeners(self.view) return def perform(self, event): """ Perform the action. """ self._toggle_view_visibility(self.view) return ########################################################################### # Private interface. ########################################################################### #### Trait change handlers ################################################ def _view_changed(self, old, new): """ Static trait change handler. """ if old is not None: self._remove_view_listeners(old) if new is not None: self._add_view_listeners(new) self._refresh_checked() return #### Methods ############################################################## def _add_view_listeners(self, view): """ Add listeners for trait events on a view. 
""" view.on_trait_change(self._refresh_checked, 'visible') view.on_trait_change(self._refresh_checked, 'window') return def _remove_view_listeners(self, view): """ Add listeners for trait events on a view. """ view.on_trait_change(self._refresh_checked, 'visible', remove=True) view.on_trait_change(self._refresh_checked, 'window', remove=True) return def _refresh_checked(self): """ Refresh the checked state of the action. """ self.checked = self.view is not None \ and self.view.window is not None \ and self.view.visible return def _toggle_view_visibility(self, view): """ Toggle the visibility of a view. """ if view.visible: view.hide() else: view.show() return #### EOF ######################################################################
{ "repo_name": "enthought/traitsgui", "path": "enthought/pyface/workbench/action/toggle_view_visibility_action.py", "copies": "1", "size": "3044", "license": "bsd-3-clause", "hash": 1520662447386261800, "line_mean": 27.4485981308, "line_max": 79, "alpha_frac": 0.4878449409, "autogenerated": false, "ratio": 4.878205128205129, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5866050069105129, "avg_score": null, "num_lines": null }
""" An action that toggles a view's visibility (ie. hides/shows it). """ # Enthought library imports. from pyface.workbench.api import IView from traits.api import Delegate, Instance # Local imports. from .workbench_action import WorkbenchAction class ToggleViewVisibilityAction(WorkbenchAction): """ An action that toggles a view's visibility (ie. hides/shows it). """ #### 'Action' interface ################################################### # The action's unique identifier (may be None). id = Delegate('view', modify=True) # The action's name (displayed on menus/tool bar tools etc). name = Delegate('view', modify=True) # The action's style. style = 'toggle' #### 'ViewAction' interface ############################################### # The view that we toggle the visibility for. view = Instance(IView) ########################################################################### # 'Action' interface. ########################################################################### def destroy(self): """ Called when the action is no longer required. """ if self.view is not None: self._remove_view_listeners(self.view) return def perform(self, event): """ Perform the action. """ self._toggle_view_visibility(self.view) return ########################################################################### # Private interface. ########################################################################### #### Trait change handlers ################################################ def _view_changed(self, old, new): """ Static trait change handler. """ if old is not None: self._remove_view_listeners(old) if new is not None: self._add_view_listeners(new) self._refresh_checked() return #### Methods ############################################################## def _add_view_listeners(self, view): """ Add listeners for trait events on a view. 
""" view.on_trait_change(self._refresh_checked, 'visible') view.on_trait_change(self._refresh_checked, 'window') return def _remove_view_listeners(self, view): """ Add listeners for trait events on a view. """ view.on_trait_change(self._refresh_checked, 'visible', remove=True) view.on_trait_change(self._refresh_checked, 'window', remove=True) return def _refresh_checked(self): """ Refresh the checked state of the action. """ self.checked = self.view is not None \ and self.view.window is not None \ and self.view.visible return def _toggle_view_visibility(self, view): """ Toggle the visibility of a view. """ if view.visible: view.hide() else: view.show() return #### EOF ######################################################################
{ "repo_name": "geggo/pyface", "path": "pyface/workbench/action/toggle_view_visibility_action.py", "copies": "3", "size": "3025", "license": "bsd-3-clause", "hash": -7604641349772746000, "line_mean": 27.2710280374, "line_max": 79, "alpha_frac": 0.4849586777, "autogenerated": false, "ratio": 4.902755267423014, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6887713945123015, "avg_score": null, "num_lines": null }
"""An action to open various source files. """ # Author: Prabhu Ramachandran <prabhu_r@users.sf.net> # Copyright (c) 2005-2008, Enthought, Inc. # License: BSD Style. # Standard library imports. from os.path import isfile import new # Enthought library imports. from traits.api import Instance, Str from pyface.api import FileDialog, OK from pyface.action.api import Action # Local imports from mayavi.plugins.script import get_imayavi from mayavi.core.common import error from mayavi.core.metadata import Metadata from mayavi.core.registry import registry ###################################################################### # Utility functions ###################################################################### def get_scene(mayavi): """Given a mayavi script instance, get the current scene. If none is available create a new one. """ s = mayavi.engine.current_scene if s is None: mayavi.engine.new_scene() s = mayavi.engine.current_scene return s ###################################################################### # `OpenFile` class. ###################################################################### class OpenFile(Action): """ An action that opens a data file depending on the supported extensions. """ tooltip = "Open a supported data file" description = "Open any supported data file" path = Str("MenuBar/File/LoadDataMenu") ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. 
""" mv = get_imayavi(self.window) s = get_scene(mv) if s is None: return wildcard = 'All files (*.*)|*.*' for src in registry.sources: if len(src.extensions) > 0: if wildcard.endswith('|') or \ src.wildcard.startswith('|'): wildcard += src.wildcard else: wildcard += '|' + src.wildcard parent = self.window.control dialog = FileDialog(parent=parent, title='Open supported data file', action='open', wildcard=wildcard ) if dialog.open() == OK: if not isfile(dialog.path): error("File '%s' does not exist!"%dialog.path, parent) return # FIXME: Ask for user input if a filetype is unknown and # choose appropriate reader. src = mv.open(dialog.path) if src is not None: mv.engine.current_selection = src ###################################################################### # `SourceAction` class. ###################################################################### class SourceAction(Action): # The Metadata associated with this particular action. metadata = Instance(Metadata) def perform(self, event): mv = get_imayavi(self.window) s = get_scene(mv) if s is None: return callable = self.metadata.get_callable() obj = callable() mv.add_source(obj) mv.engine.current_selection = obj ###################################################################### # Creating the source actions automatically. for src in registry.sources: if len(src.extensions) == 0: d = {'tooltip': src.tooltip, 'description': src.desc, 'metadata': src} action = new.classobj(src.id, (SourceAction,), d) globals()[src.id] = action
{ "repo_name": "alexandreleroux/mayavi", "path": "mayavi/action/sources.py", "copies": "2", "size": "3708", "license": "bsd-3-clause", "hash": -2391001102031051300, "line_mean": 31.2434782609, "line_max": 79, "alpha_frac": 0.4865156419, "autogenerated": false, "ratio": 4.866141732283465, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6352657374183466, "avg_score": null, "num_lines": null }
"""An AdaNet evaluator implementation in Tensorflow using a single graph. Copyright 2018 The AdaNet Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from absl import logging from adanet import tf_compat import numpy as np import tensorflow.compat.v2 as tf # TODO: Remove uses of Evaluator once AdaNet Ranker is implemented. class Evaluator(object): """Evaluates candidate ensemble performance.""" class Objective(object): """The Evaluator objective for the metric being optimized. Two objectives are currently supported: - MINIMIZE: Lower is better for the metric being optimized. - MAXIMIZE: Higher is better for the metric being optimized. """ MINIMIZE = "minimize" MAXIMIZE = "maximize" def __init__(self, input_fn, metric_name="adanet_loss", objective=Objective.MINIMIZE, steps=None): """Initializes a new Evaluator instance. Args: input_fn: Input function returning a tuple of: features - Dictionary of string feature name to `Tensor`. labels - `Tensor` of labels. metric_name: The name of the evaluation metrics to use when choosing the best ensemble. Must refer to a valid evaluation metric. objective: Either `Objective.MINIMIZE` or `Objective.MAXIMIZE`. steps: Number of steps for which to evaluate the ensembles. If an `OutOfRangeError` occurs, evaluation stops. If set to None, will iterate the dataset until all inputs are exhausted. 
Returns: An :class:`adanet.Evaluator` instance. """ self._input_fn = input_fn self._steps = steps self._metric_name = metric_name self._objective = objective if objective == self.Objective.MINIMIZE: self._objective_fn = np.nanargmin elif objective == self.Objective.MAXIMIZE: self._objective_fn = np.nanargmax else: raise ValueError( "Evaluator objective must be one of MINIMIZE or MAXIMIZE.") @property def input_fn(self): """Return the input_fn.""" return self._input_fn @property def steps(self): """Return the number of evaluation steps.""" return self._steps @property def metric_name(self): """Returns the name of the metric being optimized.""" return self._metric_name @property def objective_fn(self): """Returns a fn which selects the best metric based on the objective.""" return self._objective_fn def evaluate(self, sess, ensemble_metrics): """Evaluates the given AdaNet objectives on the data from `input_fn`. The candidates are fed the same batches of features and labels as provided by `input_fn`, and their losses are computed and summed over `steps` batches. Args: sess: `Session` instance with most recent variable values loaded. ensemble_metrics: A list dictionaries of `tf.metrics` for each candidate ensemble. Returns: List of evaluated metrics. """ evals_completed = 0 if self.steps is None: logging_frequency = 1000 elif self.steps < 10: logging_frequency = 1 else: logging_frequency = math.floor(self.steps / 10.) 
objective_metrics = [em[self._metric_name] for em in ensemble_metrics] sess.run(tf_compat.v1.local_variables_initializer()) while True: if self.steps is not None and evals_completed == self.steps: break try: evals_completed += 1 if (evals_completed % logging_frequency == 0 or self.steps == evals_completed): logging.info("Ensemble evaluation [%d/%s]", evals_completed, self.steps or "??") sess.run(objective_metrics) except tf.errors.OutOfRangeError: logging.info("Encountered end of input after %d evaluations", evals_completed) break # Evaluating the first element is idempotent for metric tuples. return sess.run([metric[0] for metric in objective_metrics])
{ "repo_name": "tensorflow/adanet", "path": "adanet/core/evaluator.py", "copies": "1", "size": "4624", "license": "apache-2.0", "hash": 8247104374955044000, "line_mean": 32.0285714286, "line_max": 80, "alpha_frac": 0.6818771626, "autogenerated": false, "ratio": 4.230558096980787, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5412435259580787, "avg_score": null, "num_lines": null }
"""An AdaNet iteration implementation in Tensorflow using a single graph. Copyright 2018 The AdaNet Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import contextlib import copy import json import os from absl import logging from adanet import distributed from adanet import subnetwork from adanet import tf_compat from adanet.core.ensemble_builder import _EnsembleSpec from adanet.core.eval_metrics import _IterationMetrics import numpy as np import tensorflow.compat.v2 as tf from typing import Any class _TrainManager(object): """Manages the training of SubnetworkSpecs and EnsembleSpecs. This object maintains a dictionary of states for each SubnetworkSpec and EnsembleSpec to coordinate and manage training. Users can check the training status of a spec, or request that it stops training. It also persists metadata about specs to disk in order to be consistent across runs and robust to preemptions. """ def __init__(self, subnetwork_specs, ensemble_specs, train_manager_dir, is_chief): """Initializes a _TrainManager instance. Args: subnetwork_specs: List of `_SubnetworkSpec` instances to monitor. ensemble_specs: List of `EstimatorSpec` instances to monitor. train_manager_dir: Directory for storing metadata about training. 
When a spec should no longer be trained, a JSON file with its name and metadata is written to this directory, to persist across runs and preemptions. is_chief: Boolean whether the current worker is a chief. """ if not tf.io.gfile.exists(train_manager_dir): tf.io.gfile.makedirs(train_manager_dir) self._train_manager_dir = train_manager_dir self._is_training = { spec.name: not self._is_done_training(spec) for spec in subnetwork_specs + ensemble_specs } self._ensemble_specs = set([e.name for e in ensemble_specs]) self._is_chief = is_chief def should_train(self, spec): """Whether the given spec should keep training.""" return self._is_training[spec.name] def _is_done_training(self, spec): """If the file exists, then the candidate is done training.""" return tf.io.gfile.exists(self._filename_for(spec)) def _filename_for(self, spec): """Returns the filename to identify the spec.""" return os.path.join(self._train_manager_dir, "{}.json".format(spec.name)) def request_stop(self, spec, message): """Registers that given spec should no longer train.""" self._is_training[spec.name] = False # Only write to disk if chief worker, otherwise there is a risk of conflicts # and race conditions during writes. if self._is_chief and not self._is_done_training(spec): with tf.io.gfile.GFile(self._filename_for(spec), "w") as record_file: # TODO: Consider making these messages be some kind of Enum. # There # might be a case where we want to parse these files. For # example, in iteration n+1, maybe we no longer even want to build # NaN candidates. message = {"message": message} record_file.write(json.dumps(message)) def is_over(self): """Whether all specs are done training and the iteration is over.""" for k in sorted(self._is_training): if k in self._ensemble_specs: # In case the sub-estimator is done training (e.g. dataset ran out of # data without repeat) but the "max_iteration_steps" is not reached. continue if self._is_training[k]: # Still needs to train. 
        return False
    return True


class _NanLossHook(tf_compat.SessionRunHook):
  """Monitors a spec's loss tensor and warns when the loss becomes NaN.

  NOTE: stopping the spec's training on NaN is currently disabled (see the
  TODO in `after_run`); the hook only logs a warning.
  """

  def __init__(self, train_manager, spec):
    """Initializes a `_NanLossHook` instance.

    Args:
      train_manager: The current iteration's `_TrainManager`.
      spec: Either a `SubnetworkSpec` or `EnsembleSpec` to monitor.
    """

    self._train_manager = train_manager
    self._spec = spec

  def before_run(self, run_context):
    del run_context  # Unused

    # Only fetch the loss while the spec is still training; otherwise return
    # None so `after_run` sees no results.
    if self._train_manager.should_train(self._spec):
      return tf_compat.SessionRunArgs(self._spec.loss)

  def after_run(self, run_context, run_values):
    loss = run_values.results
    if loss is None or not np.isnan(loss):
      return
    logging.warning("'%s' diverged with loss = NaN.", self._spec.name)
    # TODO: Re-enable once we know that evaluation won't
    # fail from NaNs.
    # self._train_manager.request_stop(self._spec, "NaN loss during training.")


class _TrainingLimitHook(tf_compat.SessionRunHook):
  """Limits a given spec's training to a maximum number of steps.

  Is also responsible for incrementing the spec's step.
  """

  def __init__(self, train_manager, spec, max_steps, increment_step_op):
    """Initializes a _TrainingLimitHook instance.

    Args:
      train_manager: The current iteration's `_TrainManager`.
      spec: Either a `SubnetworkSpec` or `EnsembleSpec` to monitor.
      max_steps: Maximum number of steps to train the given spec, or None for
        unlimited training.
      increment_step_op: An op that increments the spec's step counter and
        runs one train op. None on TPU, where stepping happens inside the
        combined train op instead.
    """

    self._train_manager = train_manager
    self._spec = spec
    self._max_steps = max_steps
    self._increment_step_op = increment_step_op

  def after_create_session(self, session, coord):
    if not self._train_manager.should_train(self._spec):
      return
    if self._spec.step is None:
      # None for dummy candidates used during round-robin placement.
      self._train_manager.request_stop(self._spec,
                                       "Dummy candidate to ignore.")
      return
    # Resume support: if a restored checkpoint already has enough steps for
    # this spec, stop it before any new training happens.
    step_value = session.run(self._spec.step)
    if self._should_stop(step_value):
      logging.info("Skipping '%s' training which already trained %d steps",
                   self._spec.name, step_value)
      self._train_manager.request_stop(self._spec,
                                       "Training already complete.")

  def before_run(self, run_context):
    del run_context  # Unused

    if not self._train_manager.should_train(self._spec):
      return None
    if self._increment_step_op is None:
      # None on TPU.
      return tf_compat.SessionRunArgs(self._spec.step)
    return tf_compat.SessionRunArgs(self._increment_step_op)

  def after_run(self, run_context, run_values):
    # `results` is the spec's step after this run (the increment op yields the
    # incremented value), or None when the spec is no longer training.
    step_value = run_values.results
    if step_value is None:
      return
    if self._should_stop(step_value):
      logging.info("Now stopping '%s' training after %d steps",
                   self._spec.name, step_value)
      self._train_manager.request_stop(
          self._spec, "Training complete after {} steps.".format(step_value))

  def _should_stop(self, step):
    """Whether `step` has reached `max_steps` (never, when unlimited)."""
    return self._max_steps is not None and step >= self._max_steps


class _GlobalStepSetterHook(tf_compat.SessionRunHook):
  """A hook for setting the global step variable.

  Should only be run on CPU and GPU, but not TPU. TPUs run many training
  steps per hook run, so the global step should be incremented in an op along
  with the candidates' train ops.
  """

  def __init__(self, train_manager, subnetwork_specs, base_global_step,
               global_step_combiner_fn):
    """Initializes a _GlobalStepSetterHook instance.

    Args:
      train_manager: The current iteration's `_TrainManager`.
      subnetwork_specs: List of `_SubnetworkSpec` instances for this
        iteration.
      base_global_step: Integer global step at the beginning of this
        iteration.
      global_step_combiner_fn: Function for combining each subnetwork's
        iteration step into the global step.
    """

    self._train_manager = train_manager
    self._subnetwork_specs = subnetwork_specs
    self._base_global_step = base_global_step
    self._global_step_combiner_fn = global_step_combiner_fn

  def begin(self):
    logging.info("Starting iteration at global step %s",
                 self._base_global_step)
    # Build the assign op once at graph-construction time; it is re-run after
    # every session run in `after_run`.
    steps = [
        self._base_global_step + s.step.read_value()
        for s in self._subnetwork_specs
    ]
    updated_global_step = self._global_step_combiner_fn(steps)
    global_step = tf_compat.v1.train.get_global_step()
    self._assign_global_step_op = global_step.assign(updated_global_step)

  def after_run(self, run_context, run_values):
    # Global step cannot be retrieved via SessionRunArgs and before_run due to
    # race condition in hook execution.
    run_context.session.run(self._assign_global_step_op)


class _TrainingHookRunnerHook(tf_compat.SessionRunHook):
  """Hook wrapper for executing a spec's training hook.

  Will only run the hook according to the current TrainManager.
  """

  def __init__(self, train_manager, spec, hook):
    """Initializes a _TrainingHookRunnerHook instance.

    Only accepts a single hook, since merging hooks is complex and should be
    handled by the MonitoredTrainingSession instead.

    Args:
      train_manager: The current iteration's `_TrainManager`.
      spec: Either a `SubnetworkSpec` or `EnsembleSpec` to train.
      hook: The spec's training hook to execute.
""" self._train_manager = train_manager self._spec = spec self._hook = hook def begin(self): self._hook.begin() @contextlib.contextmanager def _session_run_context(self): """Intercepts input out of range errors to gracefully stop spec training.""" try: yield except (tf.errors.OutOfRangeError, StopIteration) as e: logging.info("Now stopping '%s' training after hitting end of input", self._spec.name) self._train_manager.request_stop(self._spec, "OutOfRangeError: {}".format(e)) def after_create_session(self, session, coord): with self._session_run_context(): self._hook.after_create_session(session, coord) def before_run(self, run_context): if self._train_manager.should_train(self._spec): # Use a tmp run context to intercept if the hook requests stop. tmp_run_context = tf_compat.v1.train.SessionRunContext( run_context.original_args, run_context.session) with self._session_run_context(): return self._hook.before_run(tmp_run_context) if tmp_run_context.stop_requested: self._train_manager.request_stop(self._spec, "Stop requested.") def after_run(self, run_context, run_values): if self._train_manager.should_train(self._spec): # Use a tmp run context to intercept if the hook requests stop. tmp_run_context = tf_compat.v1.train.SessionRunContext( run_context.original_args, run_context.session) with self._session_run_context(): self._hook.after_run(tmp_run_context, run_values) if tmp_run_context.stop_requested: self._train_manager.request_stop(self._spec, "Stop requested.") def end(self, session): with self._session_run_context(): self._hook.end(session) # TODO: Replace candidates with ensemble_specs. class _Iteration( collections.namedtuple("_Iteration", [ "number", "candidates", "subnetwork_specs", "estimator_spec", "best_candidate_index", "summaries", "train_manager", "subnetwork_reports", "checkpoint", "previous_iteration" ])): """An AdaNet iteration. 
An AdaNet iteration represents the simultaneous training of multiple candidates for one iteration of the AdaNet loop, and tracks the best candidate's loss, predictions, and evaluation metrics. There must be maximum one _Iteration per graph. """ def __new__(cls, number, candidates, subnetwork_specs, estimator_spec, best_candidate_index, summaries, train_manager, subnetwork_reports, checkpoint, previous_iteration): """Creates a validated `_Iteration` instance. Args: number: The iteration number. candidates: List of `_Candidate` instances to track. subnetwork_specs: List of `_SubnetworkSpec` instances. estimator_spec: `EstimatorSpec` instance. best_candidate_index: Int `Tensor` indicating the best candidate's index. summaries: List of `adanet.Summary` instances for each candidate. train_manager: The current `_TrainManager` for monitoring candidate per training. subnetwork_reports: Dict mapping string names to `subnetwork.Report`s, one per candidate. checkpoint: The `tf.train.Checkpoint` object associated with this iteration. previous_iteration: The iteration occuring before this one or None if this is the first iteration. Returns: A validated `_Iteration` object. Raises: ValueError: If validation fails. 
""" if not isinstance(number, (int, np.integer)): raise ValueError("number must be an integer") if number < 0: raise ValueError("number must be greater than 0 got %d" % (number)) if not isinstance(candidates, list) or not candidates: raise ValueError("candidates must be a non-empty list") if estimator_spec is None: raise ValueError("estimator_spec is required") if best_candidate_index is None: raise ValueError("best_candidate_index is required") if not isinstance(subnetwork_reports, dict): raise ValueError("subnetwork_reports must be a dict") return super(_Iteration, cls).__new__( cls, number=number, candidates=candidates, subnetwork_specs=subnetwork_specs, estimator_spec=estimator_spec, best_candidate_index=best_candidate_index, summaries=summaries, train_manager=train_manager, subnetwork_reports=subnetwork_reports, checkpoint=checkpoint, previous_iteration=previous_iteration) def _is_numeric(tensor): """Determines if given tensor is a float numeric.""" if not isinstance(tensor, tf.Tensor): return False return tensor.dtype in [tf.bfloat16, tf.float16, tf.float32, tf.float64] class _IterationBuilder(object): """Builds AdaNet iterations.""" def __init__(self, candidate_builder, subnetwork_manager, ensemble_builder, ensemblers, max_steps, summary_maker, global_step_combiner_fn=tf.math.reduce_mean, placement_strategy=distributed.ReplicationStrategy(), replicate_ensemble_in_training=False, use_tpu=False, debug=False, enable_ensemble_summaries=True, enable_subnetwork_summaries=True, enable_subnetwork_reports=True): """Creates an `_IterationBuilder` instance. Args: candidate_builder: A `_CandidateBuilder` instance. subnetwork_manager: A `_SubnetworkManager` instance. ensemble_builder: An `_EnsembleBuilder` instance. ensemblers: An iterable of :class:`adanet.ensemble.Ensembler` objects that define how to ensemble a group of subnetworks. max_steps: Maximum number of steps to train candidate subnetworks. 
summary_maker: A function that constructs an `adanet.Summary` instance from (namespace, scope, and skip_summary). global_step_combiner_fn: Function for combining each subnetwork's iteration step into the global step. placement_strategy: A `PlacementStrategy` for assigning subnetworks and ensembles to specific workers. replicate_ensemble_in_training: Whether to build the frozen subnetworks in `training` mode during training. use_tpu: Whether AdaNet is running on TPU. debug: Boolean to enable debug mode which will check features and labels for Infs and NaNs. enable_ensemble_summaries: Whether to record summaries to display in TensorBoard for each ensemble candidate. Disable to reduce memory and disk usage per run. enable_subnetwork_summaries: Whether to record summaries to display in TensorBoard for each subnetwork. Disable to reduce memory and disk usage per run. enable_subnetwork_reports: Whether to enable generating subnetwork reports. Returns: An `_IterationBuilder` object. """ if max_steps is not None and max_steps <= 0: raise ValueError("max_steps must be > 0 or None") self._candidate_builder = candidate_builder self._subnetwork_manager = subnetwork_manager self._ensemble_builder = ensemble_builder self._ensemblers = ensemblers self._max_steps = max_steps self._summary_maker = summary_maker self._global_step_combiner_fn = global_step_combiner_fn self._placement_strategy = placement_strategy self._replicate_ensemble_in_training = replicate_ensemble_in_training self._use_tpu = use_tpu self._debug = debug self._enable_ensemble_summaries = enable_ensemble_summaries self._enable_subnetwork_summaries = enable_subnetwork_summaries self._enable_subnetwork_reports = enable_subnetwork_reports super(_IterationBuilder, self).__init__() @property def placement_strategy(self): return self._placement_strategy @placement_strategy.setter def placement_strategy(self, new_placement_strategy): self._placement_strategy = new_placement_strategy def _check_numerics(self, features, 
labels): """Checks for NaNs and Infs in input features and labels. Args: features: Dictionary of `Tensor` objects keyed by feature name. labels: Labels `Tensor` or a dictionary of string label name to `Tensor` (for multi-head). Can be `None`. Returns: A features and labels tuple with same types and respective inputs, but with numeric check ops wrapping them. """ if not self._debug: return features, labels checked_features, checked_labels = {}, {} logging.info("DEBUG: Checking numerics of float features.") for name in sorted(features): if not _is_numeric(features[name]): continue logging.info("DEBUG: Checking numerics of float feature '%s'.", name) checked_features[name] = tf.debugging.check_numerics( features[name], "features '{}'".format(name)) if isinstance(labels, dict): for name in sorted(labels): if not _is_numeric(labels[name]): continue logging.info("DEBUG: Checking numerics of float label '%s'.", name) checked_labels[name] = tf.debugging.check_numerics( labels[name], "labels '{}'".format(name)) elif labels is not None and _is_numeric(labels): logging.info("DEBUG: Checking numerics of labels.") checked_labels = tf.debugging.check_numerics(labels, "'labels'") return checked_features, checked_labels def build_iteration(self, base_global_step, iteration_number, ensemble_candidates, subnetwork_builders, features, mode, config, labels=None, previous_ensemble_summary=None, rebuilding=False, rebuilding_ensembler_name=None, best_ensemble_index_override=None, previous_iteration=None): """Builds and returns AdaNet iteration t. This method uses the generated the candidate subnetworks given the ensemble at iteration t-1 and creates graph operations to train them. The returned `_Iteration` tracks the training of all candidates to know when the iteration is over, and tracks the best candidate's predictions and loss, as defined by lowest complexity-regularized loss on the train set. Args: base_global_step: Integer global step at the beginning of this iteration. 
      iteration_number: Integer iteration number.
      ensemble_candidates: Iterable of `adanet.ensemble.Candidate` instances.
      subnetwork_builders: A list of `Builders` for adding `Subnetworks` to
        the graph. Each subnetwork is then wrapped in a `_Candidate` to train.
      features: Dictionary of `Tensor` objects keyed by feature name.
      mode: Defines whether this is training, evaluation or prediction. See
        `ModeKeys`.
      config: The `tf.estimator.RunConfig` to use this iteration.
      labels: `Tensor` of labels. Can be `None`.
      previous_ensemble_summary: The `adanet.Summary` for the previous
        ensemble.
      rebuilding: Boolean whether the iteration is being rebuilt only to
        restore the previous best subnetworks and ensembles.
      rebuilding_ensembler_name: Optional ensembler to restrict to, only
        relevant when rebuilding is set as True.
      best_ensemble_index_override: Integer index to identify the best
        ensemble candidate instead of computing the best ensemble index
        dynamically conditional on the ensemble AdaNet losses.
      previous_iteration: The iteration occurring before this one or None if
        this is the first iteration.

    Returns:
      An _Iteration instance.

    Raises:
      ValueError: If subnetwork_builders is empty.
      ValueError: If two subnetworks share the same name.
      ValueError: If two ensembles share the same name.
    """

    self._placement_strategy.config = config
    logging.info("%s iteration %s",
                 "Rebuilding" if rebuilding else "Building", iteration_number)

    if not subnetwork_builders:
      raise ValueError("Each iteration must have at least one Builder.")

    # TODO: Consider moving builder mode logic to ensemble_builder.py.
    builder_mode = mode
    if rebuilding:
      # Build the subnetworks and ensembles in EVAL mode by default. This way
      # their outputs aren't affected by dropout etc.
      builder_mode = tf.estimator.ModeKeys.EVAL
      if mode == tf.estimator.ModeKeys.PREDICT:
        builder_mode = mode

      # Only replicate in training mode when the user requests it.
      if self._replicate_ensemble_in_training and (
          mode == tf.estimator.ModeKeys.TRAIN):
        builder_mode = mode

    features, labels = self._check_numerics(features, labels)

    replay_indices_for_all = {}
    training = mode == tf.estimator.ModeKeys.TRAIN
    skip_summaries = mode == tf.estimator.ModeKeys.PREDICT or rebuilding
    with tf_compat.v1.variable_scope("iteration_{}".format(iteration_number)):
      seen_builder_names = {}
      candidates = []
      summaries = []
      subnetwork_reports = {}
      previous_ensemble = None
      previous_ensemble_spec = None
      previous_iteration_checkpoint = None
      if previous_iteration:
        previous_iteration_checkpoint = previous_iteration.checkpoint
        previous_best_candidate = previous_iteration.candidates[-1]
        previous_ensemble_spec = previous_best_candidate.ensemble_spec
        previous_ensemble = previous_ensemble_spec.ensemble
        replay_indices_for_all[len(candidates)] = copy.copy(
            previous_ensemble_spec.architecture.replay_indices)
        # Include previous best subnetwork as a candidate so that its
        # predictions are returned until a new candidate outperforms.
        seen_builder_names = {previous_ensemble_spec.name: True}
        candidates.append(previous_best_candidate)

        if self._enable_ensemble_summaries:
          summaries.append(previous_ensemble_summary)

        # Generate subnetwork reports.
        if (self._enable_subnetwork_reports and
            mode == tf.estimator.ModeKeys.EVAL):
          metrics = previous_ensemble_spec.eval_metrics.eval_metrics_ops()
          subnetwork_report = subnetwork.Report(
              hparams={},
              attributes={},
              metrics=metrics,
          )
          subnetwork_report.metrics["adanet_loss"] = tf_compat.v1.metrics.mean(
              previous_ensemble_spec.adanet_loss)
          subnetwork_reports["previous_ensemble"] = subnetwork_report

      # Fail fast on duplicate builder names. `seen_builder_names` is seeded
      # with the previous ensemble's name above, so collisions with it are
      # also caught here.
      for subnetwork_builder in subnetwork_builders:
        if subnetwork_builder.name in seen_builder_names:
          raise ValueError("Two subnetworks have the same name '{}'".format(
              subnetwork_builder.name))
        seen_builder_names[subnetwork_builder.name] = True
      subnetwork_specs = []
      num_subnetworks = len(subnetwork_builders)
      skip_summary = skip_summaries or not self._enable_subnetwork_summaries
      for i, subnetwork_builder in enumerate(subnetwork_builders):
        if not self._placement_strategy.should_build_subnetwork(
            num_subnetworks, i) and not rebuilding:
          continue
        with self._placement_strategy.subnetwork_devices(num_subnetworks, i):
          subnetwork_name = "t{}_{}".format(iteration_number,
                                            subnetwork_builder.name)
          subnetwork_summary = self._summary_maker(
              namespace="subnetwork",
              scope=subnetwork_name,
              skip_summary=skip_summary)
          if not skip_summary:
            summaries.append(subnetwork_summary)
          logging.info("%s subnetwork '%s'",
                       "Rebuilding" if rebuilding else "Building",
                       subnetwork_builder.name)
          subnetwork_spec = self._subnetwork_manager.build_subnetwork_spec(
              name=subnetwork_name,
              subnetwork_builder=subnetwork_builder,
              summary=subnetwork_summary,
              features=features,
              mode=builder_mode,
              labels=labels,
              previous_ensemble=previous_ensemble,
              config=config)
          subnetwork_specs.append(subnetwork_spec)
          # Workers that don't build ensembles need a dummy candidate in order
          # to train the subnetwork.
          # Because only ensembles can be considered candidates, we need to
          # convert the subnetwork into a dummy ensemble and subsequently a
          # dummy candidate. However, this dummy candidate is never considered
          # a true candidate during candidate evaluation and selection.
          # TODO: Eliminate need for candidates.
          if not self._placement_strategy.should_build_ensemble(
              num_subnetworks) and not rebuilding:
            candidates.append(
                self._create_dummy_candidate(subnetwork_spec,
                                             subnetwork_builders,
                                             subnetwork_summary, training))
          # Generate subnetwork reports.
          if (self._enable_subnetwork_reports and
              mode != tf.estimator.ModeKeys.PREDICT):
            subnetwork_report = subnetwork_builder.build_subnetwork_report()
            if not subnetwork_report:
              subnetwork_report = subnetwork.Report(
                  hparams={}, attributes={}, metrics={})
            metrics = subnetwork_spec.eval_metrics.eval_metrics_ops()
            for metric_name in sorted(metrics):
              metric = metrics[metric_name]
              subnetwork_report.metrics[metric_name] = metric
            subnetwork_reports[subnetwork_builder.name] = subnetwork_report

      # Create (ensemble_candidate*ensembler) ensembles.
      skip_summary = skip_summaries or not self._enable_ensemble_summaries
      seen_ensemble_names = {}
      for ensembler in self._ensemblers:
        if rebuilding and rebuilding_ensembler_name and (
            ensembler.name != rebuilding_ensembler_name):
          continue
        for ensemble_candidate in ensemble_candidates:
          if not self._placement_strategy.should_build_ensemble(
              num_subnetworks) and not rebuilding:
            continue
          ensemble_name = "t{}_{}_{}".format(iteration_number,
                                             ensemble_candidate.name,
                                             ensembler.name)
          if ensemble_name in seen_ensemble_names:
            raise ValueError(
                "Two ensembles have the same name '{}'".format(ensemble_name))
          seen_ensemble_names[ensemble_name] = True
          summary = self._summary_maker(
              namespace="ensemble",
              scope=ensemble_name,
              skip_summary=skip_summary)
          if not skip_summary:
            summaries.append(summary)
          ensemble_spec = self._ensemble_builder.build_ensemble_spec(
              name=ensemble_name,
              candidate=ensemble_candidate,
              ensembler=ensembler,
              subnetwork_specs=subnetwork_specs,
              summary=summary,
              features=features,
              mode=builder_mode,
              iteration_number=iteration_number,
              labels=labels,
              my_ensemble_index=len(candidates),
              previous_ensemble_spec=previous_ensemble_spec,
              previous_iteration_checkpoint=previous_iteration_checkpoint)
          # TODO: Eliminate need for candidates.
          candidate = self._candidate_builder.build_candidate(
              ensemble_spec=ensemble_spec,
              training=training,
              summary=summary,
              rebuilding=rebuilding)
          replay_indices_for_all[len(candidates)] = copy.copy(
              ensemble_spec.architecture.replay_indices)
          candidates.append(candidate)
          # TODO: Move adanet_loss from subnetwork report to a new
          # ensemble report, since the adanet_loss is associated with an
          # ensemble, and only when using a ComplexityRegularizedEnsemblers.
          # Keep adanet_loss in subnetwork report for backwards compatibility.
          if len(ensemble_candidates) != len(subnetwork_builders):
            continue
          if len(ensemble_candidate.subnetwork_builders) > 1:
            continue
          if mode == tf.estimator.ModeKeys.PREDICT:
            continue
          builder_name = ensemble_candidate.subnetwork_builders[0].name
          if self._enable_subnetwork_reports:
            subnetwork_reports[builder_name].metrics[
                "adanet_loss"] = tf_compat.v1.metrics.mean(
                    ensemble_spec.adanet_loss)

      # Dynamically select the outputs of best candidate.
      best_candidate_index = self._best_candidate_index(
          candidates, best_ensemble_index_override)
      best_predictions = self._best_predictions(candidates,
                                                best_candidate_index)
      best_loss = self._best_loss(candidates, best_candidate_index, mode)
      best_export_outputs = self._best_export_outputs(candidates,
                                                      best_candidate_index,
                                                      mode, best_predictions)
      train_manager_dir = os.path.join(config.model_dir, "train_manager",
                                       "t{}".format(iteration_number))
      train_manager, training_chief_hooks, training_hooks = self._create_hooks(
          base_global_step, subnetwork_specs, candidates, num_subnetworks,
          rebuilding, train_manager_dir, config.is_chief)

      # Collect local init ops from the previous ensemble's subnetworks and
      # from this iteration's subnetworks so the Scaffold runs them all at
      # session startup.
      local_init_ops = []
      if previous_ensemble_spec:
        for s in previous_ensemble_spec.ensemble.subnetworks:
          if s.local_init_ops:
            local_init_ops.extend(s.local_init_ops)
      for subnetwork_spec in subnetwork_specs:
        if (subnetwork_spec and subnetwork_spec.subnetwork and
            subnetwork_spec.subnetwork.local_init_ops):
          local_init_ops.extend(subnetwork_spec.subnetwork.local_init_ops)

      summary = self._summary_maker(
          namespace=None, scope=None, skip_summary=skip_summaries)
      summaries.append(summary)
      with summary.current_scope():
        summary.scalar("iteration/adanet/iteration", iteration_number)
        if best_loss is not None:
          summary.scalar("loss", best_loss)
      iteration_metrics = _IterationMetrics(iteration_number, candidates,
                                            subnetwork_specs, self._use_tpu,
                                            replay_indices_for_all)
      checkpoint = self._make_checkpoint(candidates, subnetwork_specs,
                                         iteration_number, previous_iteration)
      if self._use_tpu:
        estimator_spec = tf_compat.v1.estimator.tpu.TPUEstimatorSpec(
            mode=mode,
            predictions=best_predictions,
            loss=best_loss,
            train_op=self._create_tpu_train_op(base_global_step,
                                               subnetwork_specs, candidates,
                                               mode, num_subnetworks, config),
            eval_metrics=iteration_metrics.best_eval_metrics_tuple(
                best_candidate_index, mode),
            export_outputs=best_export_outputs,
            training_hooks=training_hooks,
            scaffold_fn=self._get_scaffold_fn(local_init_ops))
      else:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=best_predictions,
            loss=best_loss,
            # All training happens in hooks so we don't need a train op.
            train_op=tf.no_op() if training else None,
            eval_metric_ops=iteration_metrics.best_eval_metric_ops(
                best_candidate_index, mode),
            export_outputs=best_export_outputs,
            training_chief_hooks=training_chief_hooks,
            training_hooks=training_hooks,
            scaffold=self._get_scaffold_fn(local_init_ops)())

      return _Iteration(
          number=iteration_number,
          candidates=candidates,
          subnetwork_specs=subnetwork_specs,
          estimator_spec=estimator_spec,
          best_candidate_index=best_candidate_index,
          summaries=summaries,
          train_manager=train_manager,
          subnetwork_reports=subnetwork_reports,
          checkpoint=checkpoint,
          previous_iteration=previous_iteration)

  def _get_scaffold_fn(self, local_init_ops):
    """Creates a method generating a scaffold.

    TODO: Make this code compatible with TPU estimators.

    Args:
      local_init_ops: List of tf.Operations to call during initialization.

    Returns:
      Method returning a `tf.train.Scaffold` whose local init op groups the
      given ops with the default local init op.
    """

    def get_scaffold():
      return tf_compat.v1.train.Scaffold(
          local_init_op=tf.group(
              local_init_ops +
              [tf_compat.v1.train.Scaffold.default_local_init_op()]))

    return get_scaffold

  def _create_dummy_candidate(self, subnetwork_spec, subnetwork_builders,
                              subnetwork_summary, training):
    """Returns a dummy candidate for the given SubnetworkSpec.

    AdaNet only considers ensembles as candidate models, and ensembles are
    represented as `_Candidates`. When training only subnetworks, such as on
    a subnetwork-worker in the RoundRobinStrategy, then we still need a
    candidate to manage the training of the subnetwork, even if it gets
    discarded, hence the dummy candidate.

    Args:
      subnetwork_spec: The subnetwork spec for the dummy candidate to wrap.
      subnetwork_builders: List of all subnetwork builders generated this
        iteration.
      subnetwork_summary: `_Summary` object to use for TensorBoard.
      training: Whether or not we are currently training.
""" dummy_ensemble_spec = _EnsembleSpec( name="dummy_{}".format(subnetwork_spec.name), ensemble=None, architecture=None, subnetwork_builders=subnetwork_builders, predictions=subnetwork_spec.predictions, loss=subnetwork_spec.loss, step=None, adanet_loss=0., variables=[]) return self._candidate_builder.build_candidate( ensemble_spec=dummy_ensemble_spec, training=training, summary=subnetwork_summary, track_moving_average=False) def _create_tpu_train_op(self, base_global_step, subnetwork_specs, candidates, mode, num_subnetworks, config): """Returns the train op for this set of candidates. This train op combines the train ops from all the candidates into a single train op. Additionally, it is responsible for incrementing the global step. The train op is only non-None during the `TRAIN` mode. Args: base_global_step: Integer global step at the beginning of this iteration. subnetwork_specs: List of `_SubnetworkSpec` instances for this iteration. candidates: List of `_Candidate` instances to train. mode: Defines whether this is training, evaluation or inference. The train op is only non-None during `TRAIN`. See `ModeKeys`. num_subnetworks: Integer number of subnetwork builders generated for the current iteration. config: The `tf.estimator.RunConfig` to use this iteration. Returns: A `Tensor` train op. """ if mode != tf.estimator.ModeKeys.TRAIN: return None ensemble_specs = [c.ensemble_spec for c in candidates] with tf_compat.v1.variable_scope("train_op"): train_ops = [] if self._placement_strategy.should_train_subnetworks(num_subnetworks): for subnetwork_spec in subnetwork_specs: if subnetwork_spec.train_op is not None: train_ops.append(subnetwork_spec.train_op.train_op) for ensemble_spec in ensemble_specs: if ensemble_spec.train_op is not None: # The train op of a previous ensemble is None even during `TRAIN`. train_ops.append(ensemble_spec.train_op.train_op) with tf.control_dependencies(train_ops): # Increment steps after train ops complete to avoid non-determinism. 
        increment_ops = [s.step.assign_add(1) for s in subnetwork_specs]
        increment_ops += [e.step.assign_add(1) for e in ensemble_specs]
        if not config.is_chief:
          return tf.group(*increment_ops)

        # AdaNet's chief worker is responsible for setting the global step,
        # not the candidates it trains. Assigning the global step is the
        # final action performed in the train op.
        with tf.control_dependencies(increment_ops):
          steps = [s.step.read_value() for s in subnetwork_specs]
          global_step = tf_compat.v1.train.get_global_step()
          return global_step.assign(
              tf.cast(
                  base_global_step + self._global_step_combiner_fn(steps),
                  dtype=tf.int64))

  def _create_hooks(self, base_global_step, subnetwork_specs, candidates,
                    num_subnetworks, rebuilding, train_manager_dir, is_chief):
    """Returns the hooks to monitor and train this iteration.

    Args:
      base_global_step: Integer global step at the beginning of this
        iteration.
      subnetwork_specs: List of `_SubnetworkSpec` instances.
      candidates: List of `_Candidate` instances to compare.
      num_subnetworks: Integer number of subnetwork builders generated for
        the current iteration.
      rebuilding: Boolean whether the iteration is being rebuilt only to
        restore the previous best subnetworks and ensembles.
      train_manager_dir: Directory for the TrainManager to store spec
        metadata.
      is_chief: Whether the current worker is chief.

    Returns:
      A 3-tuple of a _TrainManager for monitoring training, a list of
      `SessionRunHooks` to run on chief, and a list of `SessionRunHooks` to
      run on all workers.
    """

    training_chief_hooks, training_hooks = [], []
    ensemble_specs = [c.ensemble_spec for c in candidates]
    train_manager = _TrainManager(subnetwork_specs, ensemble_specs,
                                  train_manager_dir, is_chief)
    if not self._use_tpu:
      # On TPU, the global step gets incremented in an op since it doesn't
      # have hook run granularity of CPU and GPU training.
      training_chief_hooks.append(
          _GlobalStepSetterHook(train_manager, subnetwork_specs,
                                base_global_step,
                                self._global_step_combiner_fn))
    should_train_subnetworks = (
        self._placement_strategy.should_train_subnetworks(num_subnetworks))
    for spec in subnetwork_specs:
      if not self._use_tpu:
        training_hooks.append(_NanLossHook(train_manager, spec))
      # We increment the step along with the global step as part of the train
      # op on TPU, whereas on CPU and GPU we use hooks for fine grained
      # control.
      if self._use_tpu or not should_train_subnetworks or spec.train_op is None:
        increment_step_op = None
      else:
        with tf.control_dependencies([spec.train_op.train_op]):
          increment_step_op = spec.step.assign_add(1)
      # TPU also supports uneven training, but up to num_iterations_per_loop.
      training_hooks.append(
          _TrainingLimitHook(
              train_manager,
              spec,
              self._max_steps,
              increment_step_op=increment_step_op))
      if not should_train_subnetworks and not rebuilding:
        continue
      self._add_hooks(spec, train_manager, training_chief_hooks,
                      training_hooks)
    for spec in ensemble_specs:
      if not self._use_tpu:
        training_hooks.append(_NanLossHook(train_manager, spec))
      # See above comment about incrementing the step on CPU vs. TPU.
      if self._use_tpu or spec.train_op is None:
        increment_step_op = None
      else:
        with tf.control_dependencies([spec.train_op.train_op]):
          increment_step_op = spec.step.assign_add(1)
      training_hooks.append(
          _TrainingLimitHook(
              train_manager,
              spec,
              self._max_steps,
              increment_step_op=increment_step_op))
      self._add_hooks(spec, train_manager, training_chief_hooks,
                      training_hooks)
    return train_manager, training_chief_hooks, training_hooks

  def _add_hooks(self, spec, train_manager, training_chief_hooks,
                 training_hooks):
    """Appends spec train hooks to the given hook lists.

    Each of the spec's own hooks is wrapped in a `_TrainingHookRunnerHook` so
    it only runs while the TrainManager says the spec is still training.
    """

    if not spec.train_op:
      return
    for hook in spec.train_op.chief_hooks:
      training_chief_hooks.append(
          _TrainingHookRunnerHook(train_manager, spec, hook))
    for hook in spec.train_op.hooks:
      training_hooks.append(_TrainingHookRunnerHook(train_manager, spec, hook))

  def _best_candidate_index(self, candidates, best_ensemble_index_override):
    """Returns the index of the best candidate in the list.

    The best candidate is the one with the smallest AdaNet loss, unless
    `best_ensemble_index_override` is given.

    TODO: Best ensemble index should always be static during EVAL
    and PREDICT modes.

    In case a candidate has a NaN loss, its loss is replaced with negative
    infinity so that it is always selected, surfacing the divergence as a
    `NanLossDuringTrainingError` rather than silently ignoring the diverged
    candidate.

    Args:
      candidates: List of `_Candidate` instances to choose from.
      best_ensemble_index_override: Integer index to return instead of
        computing the best ensemble index dynamically.

    Returns:
      An integer `Tensor` representing the index of the best candidate.
""" with tf_compat.v1.variable_scope("best_candidate_index"): if best_ensemble_index_override is not None: return tf.constant(best_ensemble_index_override) if len(candidates) == 1: return tf.constant(0) adanet_losses = [candidate.adanet_loss for candidate in candidates] # Replace NaNs with -Infs so that NaN loss candidates are always chosen, # causing tf.estimator.Estimator to raise a NanLossDuringTrainingError. adanet_losses = tf.where( tf_compat.v1.is_nan(adanet_losses), tf.ones_like(adanet_losses) * -np.inf, adanet_losses) return tf.argmin(input=adanet_losses, axis=0) def _best_predictions(self, candidates, best_candidate_index): """Returns the best predictions from a set of candidates. Args: candidates: List of `_Candidate` instances to compare. best_candidate_index: `Tensor` index of the best candidate in the list. Returns: A `Tensor` or dictionary of `Tensor`s representing the best candidate's predictions (depending on what the subnetworks return). """ if len(candidates) == 1: return candidates[0].ensemble_spec.predictions with tf_compat.v1.variable_scope("best_predictions"): if isinstance(candidates[0].ensemble_spec.predictions, dict): predictions = {} for candidate in candidates: ensemble_spec = candidate.ensemble_spec for key in sorted(ensemble_spec.predictions): tensor = ensemble_spec.predictions[key] if key in predictions: predictions[key].append(tensor) else: predictions[key] = [tensor] else: predictions = [] for candidate in candidates: ensemble_spec = candidate.ensemble_spec predictions.append(ensemble_spec.predictions) if isinstance(predictions, dict): best_predictions = {} for key in sorted(predictions): tensor_list = predictions[key] best_predictions[key] = tf.stack(tensor_list)[best_candidate_index] else: best_predictions = tf.stack(predictions)[best_candidate_index] return best_predictions def _best_loss(self, candidates, best_candidate_index, mode): """Returns the best loss from a set of candidates. 
Args: candidates: List of `_Candidate` instances to compare. best_candidate_index: `Tensor` index of the best candidate in the list. mode: Defines whether this is training, evaluation or inference. Loss is always None during inference. See `ModeKeys`. Returns: Float `Tensor` of the best candidate's loss. """ if mode == tf.estimator.ModeKeys.PREDICT: return None if len(candidates) == 1: return candidates[0].ensemble_spec.loss with tf_compat.v1.variable_scope("best_loss"): losses = [c.ensemble_spec.loss for c in candidates] loss = tf.slice(tf.stack(losses), [best_candidate_index], [1]) return tf.reshape(loss, []) def _best_export_outputs(self, candidates, best_candidate_index, mode, best_predictions): """Returns the best `SavedModel` export outputs from a set of candidates. Assumes that all candidate ensembles have identical export output keys and `ExportOutput` types. Args: candidates: List of `_Candidate` instances to compare. best_candidate_index: `Tensor` index of the best candidate in the list. mode: Defines whether this is training, evaluation or inference. Export outputs are always None during training and evaluation. See `ModeKeys`. best_predictions: A `Tensor` or dictionary of `Tensor`s representing the best candidate's predictions (depending on what the subnetworks return). Returns: A `Tensor` dictionary representing the best candidate's export outputs. Raises: TypeError: If the `ExportOutput` type is not supported. """ if mode != tf.estimator.ModeKeys.PREDICT: return None if len(candidates) == 1: return candidates[0].ensemble_spec.export_outputs with tf_compat.v1.variable_scope("best_export_outputs"): # Group tensors by export output key and ExportOutput type. 
export_outputs = {} # type: Any for candidate in candidates: ensemble_spec = candidate.ensemble_spec for key in sorted(ensemble_spec.export_outputs): export_output = ensemble_spec.export_outputs[key] if isinstance(export_output, tf.estimator.export.ClassificationOutput): if key not in export_outputs: export_outputs[key] = ([], []) if export_output.scores is not None: export_outputs[key][0].append(export_output.scores) if export_output.classes is not None: export_outputs[key][1].append(export_output.classes) elif isinstance(export_output, tf.estimator.export.RegressionOutput): if key not in export_outputs: export_outputs[key] = [] export_outputs[key].append(export_output.value) elif isinstance(export_output, tf.estimator.export.PredictOutput): # Use self._best_predictions() below to get prediction output. continue else: raise TypeError( "Values in export_outputs must be ClassificationOutput, " "RegressionOutput, or PredictOutput objects. Given: {}".format( export_output)) # Stack tensor lists into correct ExportOutput type, outputting the # correct values based on the best candidate index. 
best_export_outputs = {} for key in sorted(candidates[0].ensemble_spec.export_outputs): export_output = candidates[0].ensemble_spec.export_outputs[key] if isinstance(export_output, tf.estimator.export.ClassificationOutput): scores, classes = None, None if export_outputs[key][0]: scores = tf.stack(export_outputs[key][0])[best_candidate_index] if export_outputs[key][1]: classes = tf.stack(export_outputs[key][1])[best_candidate_index] output = tf.estimator.export.ClassificationOutput( scores=scores, classes=classes) elif isinstance(export_output, tf.estimator.export.RegressionOutput): value = tf.stack(export_outputs[key])[best_candidate_index] output = tf.estimator.export.RegressionOutput(value) else: predictions = copy.copy(export_output.outputs) predictions.update(best_predictions) output = tf.estimator.export.PredictOutput(predictions) best_export_outputs[key] = output return best_export_outputs def _make_checkpoint(self, candidates, subnetwork_specs, iteration_number, previous_iteration): """Returns a `tf.train.Checkpoint` for the iteration.""" # TODO: Handle hook created variables. # TODO: Handle TPU embedding variables. trackable = {} for candidate in candidates: for ensemble_var in candidate.ensemble_spec.variables: trackable["{}_{}".format(candidate.ensemble_spec.name, ensemble_var.name)] = ensemble_var for candidate_var in candidate.variables: trackable["candidate_{}_{}".format(candidate.ensemble_spec.name, candidate_var.name)] = candidate_var for subnetwork_spec in subnetwork_specs: for subnetwork_var in subnetwork_spec.variables: trackable["{}_{}".format(subnetwork_spec.name, subnetwork_var.name)] = subnetwork_var global_step = tf_compat.v1.train.get_global_step() # TODO: Currently, TPUEstimator has no global_step set when # exporting the saved model. 
if global_step is not None: trackable[tf_compat.v1.GraphKeys.GLOBAL_STEP] = global_step trackable["iteration_number"] = tf_compat.v1.get_variable( "iteration_number", dtype=tf.int64, # Lambda initializer required for TPU. initializer=lambda: tf.constant(iteration_number, dtype=tf.int64), trainable=False) if previous_iteration: trackable["previous_iteration"] = previous_iteration.checkpoint logging.info("TRACKABLE: %s", trackable) checkpoint = tf_compat.v2.train.Checkpoint(**trackable) # Make the save counter to satisfy the assert_consumed() assertion later. # This property creates variables the first time it is called. checkpoint.save_counter # pylint: disable=pointless-statement return checkpoint
{ "repo_name": "tensorflow/adanet", "path": "adanet/core/iteration.py", "copies": "1", "size": "52252", "license": "apache-2.0", "hash": -8760696437804264000, "line_mean": 41.481300813, "line_max": 80, "alpha_frac": 0.6580609355, "autogenerated": false, "ratio": 4.292097913586332, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5450158849086332, "avg_score": null, "num_lines": null }
"""An AdaNet subnetwork definition in Tensorflow using a single graph. Copyright 2018 The AdaNet Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import six def _validate_nested_persisted_tensors(persisted_tensors): """Raises a ValueError when a nested dict is empty in persisted_tensors.""" for key, entry in persisted_tensors.items(): if not isinstance(entry, dict): continue if not entry: raise ValueError("Got empty nested dictionary for key: '{}'".format(key)) _validate_nested_persisted_tensors(entry) class TrainOpSpec( collections.namedtuple("TrainOpSpec", ["train_op", "chief_hooks", "hooks"])): """A data structure for specifying training operations. Args: train_op: Op for the training step. chief_hooks: Iterable of :class:`tf.train.SessionRunHook` objects to run on the chief worker during training. hooks: Iterable of :class:`tf.train.SessionRunHook` objects to run on all workers during training. Returns: A :class:`adanet.subnetwork.TrainOpSpec` object. """ def __new__(cls, train_op, chief_hooks=None, hooks=None): # Make hooks immutable. 
chief_hooks = tuple(chief_hooks) if chief_hooks else () hooks = tuple(hooks) if hooks else () return super(TrainOpSpec, cls).__new__(cls, train_op, chief_hooks, hooks) class Subnetwork( collections.namedtuple("Subnetwork", [ "last_layer", "logits", "complexity", "persisted_tensors", "shared", "local_init_ops" ])): # pyformat: disable """An AdaNet subnetwork. In the AdaNet paper, an :class:`adanet.subnetwork.Subnetwork` is are called a *subnetwork*, and indicated by *h*. A collection of weighted subnetworks form an AdaNet ensemble. Args: last_layer: :class:`tf.Tensor` output or dict of string to :class:`tf.Tensor` outputs (for multi-head) of the last layer of the subnetwork, i.e the layer before the logits layer. When the mixture weight type is :class:`MATRIX`, the AdaNet algorithm takes care of computing ensemble mixture weights matrices (one per subnetwork) that multiply the various last layers of the ensemble's subnetworks, and regularize them using their subnetwork's complexity. This field is represented by *h* in the AdaNet paper. logits: :class:`tf.Tensor` logits or dict of string to :class:`tf.Tensor` logits (for multi-head) for training the subnetwork. These logits are not used in the ensemble's outputs if the mixture weight type is :class:`MATRIX`, instead AdaNet learns its own logits (mixture weights) from the subnetwork's `last_layers` with complexity regularization. The logits are used in the ensemble only when the mixture weights type is :class:`SCALAR` or :class:`VECTOR`. Even though the logits are not used in the ensemble in some cases, they should always be supplied as adanet uses the logits to train the subnetworks. complexity: A scalar :class:`tf.Tensor` representing the complexity of the subnetwork's architecture. It is used for choosing the best subnetwork at each iteration, and for regularizing the weighted outputs of more complex subnetworks. persisted_tensors: DEPRECATED. See `shared`. 
Optional nested dictionary of string to :class:`tf.Tensor` to persist across iterations. At the end of an iteration, the :class:`tf.Tensor` instances will be available to subnetworks in the next iterations, whereas others that are not part of the `Subnetwork` will be pruned. This allows later :class:`adanet.subnetwork.Subnetwork` instances to dynamically build upon arbitrary :class:`tf.Tensors` from previous :class:`adanet.subnetwork.Subnetwork` instances. shared: Optional Python object(s), primitive(s), or function(s) to share with subnetworks within the same iteration or in future iterations. local_init_ops: Iterable of :class:`tf.Operation` objects to run to initialize local variables. Returns: A validated :class:`adanet.subnetwork.Subnetwork` object. Raises: ValueError: If last_layer is None. ValueError: If logits is None. ValueError: If logits is a dict but last_layer is not. ValueError: If last_layer is a dict but logits is not. ValueError: If complexity is None. ValueError: If persisted_tensors is present but not a dictionary. ValueError: If persisted_tensors contains an empty nested dictionary. """ # pyformat: enable # Import here to avoid strict BUILD deps check. 
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @deprecation.deprecated_args( None, "`persisted_tensors` is deprecated, please use `shared` instead.", "persisted_tensors") def __new__(cls, last_layer, logits, complexity, persisted_tensors=None, shared=None, local_init_ops=None): if last_layer is None: raise ValueError("last_layer not provided") if logits is None: raise ValueError("logits not provided") if isinstance(logits, dict) and not isinstance(last_layer, dict): raise ValueError("if logits is a dict last_layer must also be a dict") if isinstance(last_layer, dict) and not isinstance(logits, dict): raise ValueError("if last_layer is a dict logits must also be a dict") if complexity is None: raise ValueError("complexity not provided") if persisted_tensors is not None: if not isinstance(persisted_tensors, dict): raise ValueError("persisted_tensors must be a dict") _validate_nested_persisted_tensors(persisted_tensors) local_init_ops = tuple(local_init_ops) if local_init_ops else () return super(Subnetwork, cls).__new__( cls, last_layer=last_layer, logits=logits, complexity=complexity, persisted_tensors=persisted_tensors, shared=shared, local_init_ops=local_init_ops) @six.add_metaclass(abc.ABCMeta) class Builder(object): """Interface for a subnetwork builder. Given features, labels, and the best ensemble of subnetworks at iteration t-1, a `Builder` creates a `Subnetwork` to add to a candidate ensemble at iteration t. These candidate ensembles are evaluated against one another at the end of the iteration, and the best one is selected based on its complexity-regularized loss. """ @abc.abstractproperty def name(self): r"""Returns the unique name of this subnetwork within an iteration. Returns: String name of this subnetwork. 
""" # TODO: Validate name matches ^[A-Za-z0-9_.\\-/]*$ @abc.abstractmethod def build_subnetwork(self, features, labels, logits_dimension, training, iteration_step, summary, previous_ensemble=None): # pyformat: disable """Returns the candidate `Subnetwork` to add to the ensemble. This method will be called only once before :meth:`build_subnetwork_train_op`. This method should construct the candidate subnetwork's graph operations and variables. Accessing the global step via :meth:`tf.train.get_or_create_global_step()` or :meth:`tf.train.get_global_step()` within this scope will return an incrementable iteration step since the beginning of the iteration. Args: features: Input `dict` of :class:`tf.Tensor` objects. labels: Labels :class:`tf.Tensor` or a dictionary of string label name to :class:`tf.Tensor` (for multi-head). Can be `None`. logits_dimension: Size of the last dimension of the logits :class:`tf.Tensor`. Typically, logits have for shape `[batch_size, logits_dimension]`. training: A python boolean indicating whether the graph is in training mode or prediction mode. iteration_step: Integer :class:`tf.Tensor` representing the step since the beginning of the current iteration, as opposed to the global step. summary: An :class:`adanet.Summary` for scoping summaries to individual subnetworks in Tensorboard. Using :meth:`tf.summary` within this scope will use this :class:`adanet.Summary` under the hood. previous_ensemble: The best :class:`adanet.Ensemble` from iteration t-1. The created subnetwork will extend the previous ensemble to form the :class:`adanet.Ensemble` at iteration t. Returns: An :class:`adanet.subnetwork.Subnetwork` instance. """ # pyformat: enable @abc.abstractmethod def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels, iteration_step, summary, previous_ensemble): """Returns an op for training a new subnetwork. This method will be called once after :meth:`build_subnetwork`. 
Accessing the global step via :meth:`tf.train.get_or_create_global_step()` or :meth:`tf.train.get_global_step()` within this scope will return an incrementable iteration step since the beginning of the iteration. Args: subnetwork: Newest subnetwork, that is not part of the `previous_ensemble`. loss: A :class:`tf.Tensor` containing the subnetwork's loss to minimize. var_list: List of subnetwork :class:`tf.Variable` parameters to update as part of the training operation. labels: Labels :class:`tf.Tensor` or a dictionary of string label name to :class:`tf.Tensor` (for multi-head). iteration_step: Integer :class:`tf.Tensor` representing the step since the beginning of the current iteration, as opposed to the global step. summary: An :class:`adanet.Summary` for scoping summaries to individual subnetworks in Tensorboard. Using `tf.summary` within this scope will use this :class:`adanet.Summary` under the hood. previous_ensemble: The best `Ensemble` from iteration t-1. The created subnetwork will extend the previous ensemble to form the `Ensemble` at iteration t. Is None for iteration 0. Returns: Either a train op or an :class:`adanet.subnetwork.TrainOpSpec`. """ def build_subnetwork_report(self): """Returns a `subnetwork.Report` to materialize and record. This method will be called once after :meth:`build_subnetwork`. Do NOT depend on variables created in :meth:`build_subnetwork_train_op`, because they are not called before :meth:`build_subnetwork_report` is called. If it returns None, AdaNet records the name and standard eval metrics. """ return None @six.add_metaclass(abc.ABCMeta) class Generator(object): """Interface for a candidate subnetwork generator. Given the ensemble of subnetworks at iteration t-1, this object is responsible for generating the set of candidate subnetworks for iteration t that minimize the objective as part of an ensemble. 
""" @abc.abstractmethod def generate_candidates(self, previous_ensemble, iteration_number, previous_ensemble_reports, all_reports, config): # pyformat: disable """Generates :class:`adanet.subnetwork.Builder` instances for an iteration. NOTE: Every call to :meth:`generate_candidates` must be deterministic for the given arguments. Args: previous_ensemble: The best :class:`adanet.Ensemble` from iteration t-1. DEPRECATED. We are transitioning away from the use of previous_ensemble in generate_candidates. New Generators should *not* use previous_ensemble in their implementation of generate_candidates -- please only use iteration_number, previous_ensemble_reports and all_reports. iteration_number: Python integer AdaNet iteration t, starting from 0. previous_ensemble_reports: List of :class:`adanet.subnetwork.MaterializedReport` instances corresponding to the Builders composing :class:`adanet.Ensemble` from iteration t-1. The first element in the list corresponds to the Builder added in the first iteration. If a :class:`adanet.subnetwork.MaterializedReport` is not supplied to the estimator, previous_ensemble_report is `None`. all_reports: List of :class:`adanet.subnetwork.MaterializedReport` instances. If an :class:`adanet.subnetwork.ReportMaterializer` is not supplied to the estimator, `all_reports` is `None`. If :class:`adanet.subnetwork.ReportMaterializer` is supplied to the estimator and t=0, `all_reports` is an empty List. Otherwise, `all_reports` is a sequence of Lists. Each element of the sequence is a List containing all the :class:`adanet.subnetwork.MaterializedReport` instances in an AdaNet iteration, starting from iteration 0, and ending at iteration t-1. config: The current :class:`tf.estimator.RunConfig` object to configure the runtime settings. Returns: A list of :class:`adanet.subnetwork.Builder` instances. """ # pyformat: enable class SimpleGenerator(Generator): """Always generates the given :class:`adanet.subnetwork.Builder` instances. 
Args: subnetwork_builders: List of :class:`adanet.subnetwork.Builder` instances to return at each iteration when `generate_candidates` is called. Returns: A :class:`adanet.SimpleGenerator` instance. """ def __init__(self, subnetwork_builders): self._subnetwork_builders = subnetwork_builders def generate_candidates(self, previous_ensemble, iteration_number, previous_ensemble_reports, all_reports): return self._subnetwork_builders
{ "repo_name": "tensorflow/adanet", "path": "adanet/subnetwork/generator.py", "copies": "1", "size": "14469", "license": "apache-2.0", "hash": -3540229054644324400, "line_mean": 41.6814159292, "line_max": 114, "alpha_frac": 0.6996336996, "autogenerated": false, "ratio": 4.193913043478261, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5393546743078261, "avg_score": null, "num_lines": null }
""" An adaptation of pickle that directly links py2 str to py3 bytes, both when writing and when reading. This means it is possible to write protocol=2 pickles containing byte objects (that will be written as python2 str), as well as reading py2 str with non-ascii characters (and instead of being converted to py3 string, they will be read as py3 bytes. (C) 2012, Merlijn van Deen <valhallasw@gmail.com>. Licensed under the MIT license. BytestrPickler.save_bytes was based on _Pickler.save_bytes in Python 2, (C) 2001-2012 Python Software Foundation """ import pickle try: from pickle import _Pickler as Pickler, _Unpickler as Unpickler #py3 except ImportError: from pickle import Pickler, Unpickler import pickletools import struct class BytestrUnpickler(Unpickler): """ An adaptation of pickle._Unpickler that loads python 2 "str" as python 3 "bytes". """ def __init__(self, file, fix_imports=True): super().__init__(file, fix_imports=fix_imports, \ encoding="latin-1", errors="strict") self.dispatch = self.dispatch.copy() opcodes_to_wrap = [x.code.encode('ascii')[0] for x in pickletools.opcodes if x.stack_after == [pickletools.pystring]] for opcode in opcodes_to_wrap: self.dispatch[opcode] = self._byteify(self.dispatch[opcode]) def _byteify(self, dispatcher): def load_bytetype(obj): dispatcher(obj) obj.stack[-1] = obj.stack[-1].encode('latin-1') return load_bytetype class BytestrPickler(Pickler): """ An adaptation of pickle._Pickler that writes 'bytes' as py2 'str's """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.dispatch = self.dispatch.copy() self.dispatch[bytes] = self.save_bytes def save_bytes(s, self, obj, pack=struct.pack): if self.proto >= 3: super().save_bytes(obj, pack) return # The following has been directly based on the pickle code in python 2 if self.bin: n = len(obj) if n < 256: self.write(pickle.SHORT_BINSTRING + chr(n).encode('latin-1') + obj) else: self.write(pickle.BINSTRING + pack("<i", n) + obj) else: 
self.write(pickle.STRING + repr(obj).lstrip('b').encode('ascii') + b'\n') self.memoize(obj) try: from pickle import _Pickler as Pickler, _Unpickler as Unpickler #py3 except ImportError: from pickle import Pickler as BytestrPickler, Unpickler as BytestrUnpickler #py2
{ "repo_name": "valhallasw/py2", "path": "bytestrpickle.py", "copies": "1", "size": "2639", "license": "mit", "hash": 4845610135048010000, "line_mean": 37.2463768116, "line_max": 125, "alpha_frac": 0.6331943918, "autogenerated": false, "ratio": 3.7971223021582734, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.990135809342125, "avg_score": 0.005791720107404645, "num_lines": 69 }
"""An adaptation of Py2.3/2.4's Queue module which supports reentrant behavior, using RLock instead of Lock for its mutex object. This is to support the connection pool's usage of ``__del__`` to return connections to the underlying Queue, which can apparently in extremely rare cases be invoked within the ``get()`` method of the Queue itself, producing a ``put()`` inside the ``get()`` and therefore a reentrant condition.""" from time import time as _time try: # py2.4 deque class from collections import deque except: # roll our own... class deque(list): def popleft(self): return self.pop(0) __all__ = ['Empty', 'Full', 'Queue'] class Empty(Exception): "Exception raised by Queue.get(block=0)/get_nowait()." pass class Full(Exception): "Exception raised by Queue.put(block=0)/put_nowait()." pass class Queue: def __init__(self, maxsize=0): """Initialize a queue object with a given maximum size. If `maxsize` is <= 0, the queue size is infinite. """ try: import threading except ImportError: import dummy_threading as threading self._init(maxsize) # mutex must be held whenever the queue is mutating. All methods # that acquire mutex must release it before returning. mutex # is shared between the two conditions, so acquiring and # releasing the conditions also acquires and releases mutex. self.mutex = threading.RLock() # Notify not_empty whenever an item is added to the queue; a # thread waiting to get is notified then. self.not_empty = threading.Condition(self.mutex) # Notify not_full whenever an item is removed from the queue; # a thread waiting to put is notified then. 
self.not_full = threading.Condition(self.mutex) def qsize(self): """Return the approximate size of the queue (not reliable!).""" self.mutex.acquire() n = self._qsize() self.mutex.release() return n def empty(self): """Return True if the queue is empty, False otherwise (not reliable!).""" self.mutex.acquire() n = self._empty() self.mutex.release() return n def full(self): """Return True if the queue is full, False otherwise (not reliable!).""" self.mutex.acquire() n = self._full() self.mutex.release() return n def put(self, item, block=True, timeout=None): """Put an item into the queue. If optional args `block` is True and `timeout` is None (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the ``Full`` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the ``Full`` exception (`timeout` is ignored in that case). """ self.not_full.acquire() try: if not block: if self._full(): raise Full elif timeout is None: while self._full(): self.not_full.wait() else: if timeout < 0: raise ValueError("'timeout' must be a positive number") endtime = _time() + timeout while self._full(): remaining = endtime - _time() if remaining <= 0.0: raise Full self.not_full.wait(remaining) self._put(item) self.not_empty.notify() finally: self.not_full.release() def put_nowait(self, item): """Put an item into the queue without blocking. Only enqueue the item if a free slot is immediately available. Otherwise raise the ``Full`` exception. """ return self.put(item, False) def get(self, block=True, timeout=None): """Remove and return an item from the queue. If optional args `block` is True and `timeout` is None (the default), block if necessary until an item is available. 
If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the ``Empty`` exception if no item was available within that time. Otherwise (`block` is false), return an item if one is immediately available, else raise the ``Empty`` exception (`timeout` is ignored in that case). """ self.not_empty.acquire() try: if not block: if self._empty(): raise Empty elif timeout is None: while self._empty(): self.not_empty.wait() else: if timeout < 0: raise ValueError("'timeout' must be a positive number") endtime = _time() + timeout while self._empty(): remaining = endtime - _time() if remaining <= 0.0: raise Empty self.not_empty.wait(remaining) item = self._get() self.not_full.notify() return item finally: self.not_empty.release() def get_nowait(self): """Remove and return an item from the queue without blocking. Only get an item if one is immediately available. Otherwise raise the ``Empty`` exception. """ return self.get(False) # Override these methods to implement other queue organizations # (e.g. stack or priority queue). # These will only be called with appropriate locks held # Initialize the queue representation def _init(self, maxsize): self.maxsize = maxsize self.queue = deque() def _qsize(self): return len(self.queue) # Check whether the queue is empty def _empty(self): return not self.queue # Check whether the queue is full def _full(self): return self.maxsize > 0 and len(self.queue) == self.maxsize # Put a new item in the queue def _put(self, item): self.queue.append(item) # Get an item from the queue def _get(self): return self.queue.popleft()
{ "repo_name": "ralfonso/theory", "path": "theory/model/mpdqueue.py", "copies": "6", "size": "6453", "license": "mit", "hash": -1002302427001135200, "line_mean": 32.2628865979, "line_max": 81, "alpha_frac": 0.5792654579, "autogenerated": false, "ratio": 4.51890756302521, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.809817302092521, "avg_score": null, "num_lines": null }
"""An adaptation of Py2.3/2.4's Queue module which supports reentrant behavior, using RLock instead of Lock for its mutex object. This is to support the connection pool's usage of weakref callbacks to return connections to the underlying Queue, which can in extremely rare cases be invoked within the ``get()`` method of the Queue itself, producing a ``put()`` inside the ``get()`` and therefore a reentrant condition.""" from collections import deque from time import time as _time from sqlalchemy.util import threading __all__ = ['Empty', 'Full', 'Queue'] class Empty(Exception): "Exception raised by Queue.get(block=0)/get_nowait()." pass class Full(Exception): "Exception raised by Queue.put(block=0)/put_nowait()." pass class Queue: def __init__(self, maxsize=0): """Initialize a queue object with a given maximum size. If `maxsize` is <= 0, the queue size is infinite. """ self._init(maxsize) # mutex must be held whenever the queue is mutating. All methods # that acquire mutex must release it before returning. mutex # is shared between the two conditions, so acquiring and # releasing the conditions also acquires and releases mutex. self.mutex = threading.RLock() # Notify not_empty whenever an item is added to the queue; a # thread waiting to get is notified then. self.not_empty = threading.Condition(self.mutex) # Notify not_full whenever an item is removed from the queue; # a thread waiting to put is notified then. 
self.not_full = threading.Condition(self.mutex) def qsize(self): """Return the approximate size of the queue (not reliable!).""" self.mutex.acquire() n = self._qsize() self.mutex.release() return n def empty(self): """Return True if the queue is empty, False otherwise (not reliable!).""" self.mutex.acquire() n = self._empty() self.mutex.release() return n def full(self): """Return True if the queue is full, False otherwise (not reliable!).""" self.mutex.acquire() n = self._full() self.mutex.release() return n def put(self, item, block=True, timeout=None): """Put an item into the queue. If optional args `block` is True and `timeout` is None (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the ``Full`` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the ``Full`` exception (`timeout` is ignored in that case). """ self.not_full.acquire() try: if not block: if self._full(): raise Full elif timeout is None: while self._full(): self.not_full.wait() else: if timeout < 0: raise ValueError("'timeout' must be a positive number") endtime = _time() + timeout while self._full(): remaining = endtime - _time() if remaining <= 0.0: raise Full self.not_full.wait(remaining) self._put(item) self.not_empty.notify() finally: self.not_full.release() def put_nowait(self, item): """Put an item into the queue without blocking. Only enqueue the item if a free slot is immediately available. Otherwise raise the ``Full`` exception. """ return self.put(item, False) def get(self, block=True, timeout=None): """Remove and return an item from the queue. If optional args `block` is True and `timeout` is None (the default), block if necessary until an item is available. 
If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the ``Empty`` exception if no item was available within that time. Otherwise (`block` is false), return an item if one is immediately available, else raise the ``Empty`` exception (`timeout` is ignored in that case). """ self.not_empty.acquire() try: if not block: if self._empty(): raise Empty elif timeout is None: while self._empty(): self.not_empty.wait() else: if timeout < 0: raise ValueError("'timeout' must be a positive number") endtime = _time() + timeout while self._empty(): remaining = endtime - _time() if remaining <= 0.0: raise Empty self.not_empty.wait(remaining) item = self._get() self.not_full.notify() return item finally: self.not_empty.release() def get_nowait(self): """Remove and return an item from the queue without blocking. Only get an item if one is immediately available. Otherwise raise the ``Empty`` exception. """ return self.get(False) # Override these methods to implement other queue organizations # (e.g. stack or priority queue). # These will only be called with appropriate locks held # Initialize the queue representation def _init(self, maxsize): self.maxsize = maxsize self.queue = deque() def _qsize(self): return len(self.queue) # Check whether the queue is empty def _empty(self): return not self.queue # Check whether the queue is full def _full(self): return self.maxsize > 0 and len(self.queue) == self.maxsize # Put a new item in the queue def _put(self, item): self.queue.append(item) # Get an item from the queue def _get(self): return self.queue.popleft()
{ "repo_name": "obeattie/sqlalchemy", "path": "lib/sqlalchemy/queue.py", "copies": "1", "size": "6223", "license": "mit", "hash": -4869716493296165000, "line_mean": 33.0054644809, "line_max": 81, "alpha_frac": 0.5838020247, "autogenerated": false, "ratio": 4.532410779315367, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0010197202482124263, "num_lines": 183 }
"""An adapted copy of relevant site-packages functionality from Python stdlib. This file contains some functions related to handling site-packages in Python with jedi-specific modifications: - the functions operate on sys_path argument rather than global sys.path - in .pth files "import ..." lines that allow execution of arbitrary code are skipped to prevent code injection into jedi interpreter """ # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved from __future__ import print_function import sys import os def makepath(*paths): dir = os.path.join(*paths) try: dir = os.path.abspath(dir) except OSError: pass return dir, os.path.normcase(dir) def _init_pathinfo(sys_path): """Return a set containing all existing directory entries from sys_path""" d = set() for dir in sys_path: try: if os.path.isdir(dir): dir, dircase = makepath(dir) d.add(dircase) except TypeError: continue return d def addpackage(sys_path, sitedir, name, known_paths): """Process a .pth file within the site-packages directory: For each line in the file, either combine it with sitedir to a path and add that to known_paths, or execute it if it starts with 'import '. """ if known_paths is None: known_paths = _init_pathinfo(sys_path) reset = 1 else: reset = 0 fullname = os.path.join(sitedir, name) try: f = open(fullname, "r") except OSError: return with f: for n, line in enumerate(f): if line.startswith("#"): continue try: if line.startswith(("import ", "import\t")): # Change by immerrr: don't evaluate import lines to prevent # code injection into jedi through pth files. 
# # exec(line) continue line = line.rstrip() dir, dircase = makepath(sitedir, line) if not dircase in known_paths and os.path.exists(dir): sys_path.append(dir) known_paths.add(dircase) except Exception: print("Error processing line {:d} of {}:\n".format(n+1, fullname), file=sys.stderr) import traceback for record in traceback.format_exception(*sys.exc_info()): for line in record.splitlines(): print(' '+line, file=sys.stderr) print("\nRemainder of file ignored", file=sys.stderr) break if reset: known_paths = None return known_paths def addsitedir(sys_path, sitedir, known_paths=None): """Add 'sitedir' argument to sys_path if missing and handle .pth files in 'sitedir'""" if known_paths is None: known_paths = _init_pathinfo(sys_path) reset = 1 else: reset = 0 sitedir, sitedircase = makepath(sitedir) if not sitedircase in known_paths: sys_path.append(sitedir) # Add path component known_paths.add(sitedircase) try: names = os.listdir(sitedir) except OSError: return names = [name for name in names if name.endswith(".pth")] for name in sorted(names): addpackage(sys_path, sitedir, name, known_paths) if reset: known_paths = None return known_paths
{ "repo_name": "nitin-cherian/LifeLongLearning", "path": "Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/jedi/evaluate/site.py", "copies": "16", "size": "3592", "license": "mit", "hash": 171748310812535140, "line_mean": 31.6545454545, "line_max": 82, "alpha_frac": 0.5865812918, "autogenerated": false, "ratio": 4.133486766398159, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": null, "num_lines": null }
"""An adapter for Java DOM implementations that makes it possible to access them through the same interface as the Python DOM implementations. Supports: - Sun's Java Project X - Xerces - David Brownell's SAX 2.0 Utilities / DOM2 - Indelv DOM - SXP - OpenXML $Id: javadom.py,v 1.7 2001/02/19 15:21:50 fdrake Exp $ """ # Todo: # - extend test suite # - start using _set_up_attributes, or give up as too slow? # - support level 2 import string # --- Supported Java DOM implementations class BaseDomImplementation: """An abstract DomImplementation with some reusable implementations of build* methods that depend on a lower-level _parse_from_source method.""" def buildDocumentString(self, string): from java.io import StringReader from org.xml.sax import InputSource return self._parse_from_source(InputSource(StringReader(string))) def buildDocumentUrl(self, url): return self._parse_from_source(url) def buildDocumentFile(self, filename): return self.buildDocumentUrl(filetourl(filename)) class SunDomImplementation: def createDocument(self): from com.sun.xml.tree import XmlDocument return Document(XmlDocument()) def buildDocumentString(self, string): from com.sun.xml.tree import XmlDocumentBuilder return Document(XmlDocumentBuilder.createXmlDocument(string)) def buildDocumentUrl(self, url): from com.sun.xml.tree import XmlDocument return Document(XmlDocument.createXmlDocument(url)) def buildDocumentFile(self, filename): return self.buildDocumentUrl(filetourl(filename)) class XercesDomImplementation(BaseDomImplementation): def createDocument(self): from org.apache.xerces.dom import DocumentImpl return Document(DocumentImpl()) def _parse_from_source(self, source): from org.apache.xerces.parsers import DOMParser p = DOMParser() p.parse(source) return Document(p.getDocument()) class BrownellDomImplementation(BaseDomImplementation): def createDocument(self): from org.brownell.xml.dom import DomDocument return Document(DomDocument()) def _parse_from_source(self, source): from 
org.brownell.xml import DomBuilder return Document(DomBuilder.createDocument(source)) class IndelvDomImplementation(BaseDomImplementation): def createDocument(self): from com.indelv.dom import DOMImpl return Document(DOMImpl.createNewDocument()) def _parse_from_source(self, source): from com.indelv.dom.util import XMLReader from org.xml.sax import InputSource return Document(XMLReader.parseDocument(InputSource(source))) class SxpDomImplementation(BaseDomImplementation): def createDocument(self): from fr.loria.xml import DOMFactory return Document(DOMFactory().createDocument()) def _parse_from_source(self, source): from fr.loria.xml import DocumentLoader loader = DocumentLoader() if type(source) == type(""): doc = loader.loadDocument(source) elif source.getCharacterStream() != None: doc = loader.loadDocument(source.getCharacterStream()) elif source.getByteStream() != None: doc = loader.loadDocument(source.getByteStream()) elif source.getSystemId() != None: doc = loader.loadDocument(source.getSystemId()) return Document(doc) class OpenXmlDomImplementation(BaseDomImplementation): def createDocument(self): from org.openxml.dom import DocumentImpl return Document(DocumentImpl()) def _parse_from_source(self, source): from org.openxml.dom import SAXBuilder from org.openxml.parser import XMLSAXParser builder = SAXBuilder() parser = XMLSAXParser() parser.setDocumentHandler(builder) parser.parse(source) return Document(builder.getDocument()) # ===== Utilities def filetourl(file): # A Python port of James Clark's fileToURL from XMLTest.java. 
from java.io import File from java.net import URL from java.lang import System file = File(file).getAbsolutePath() sep = System.getProperty("file.separator") if sep != None and len(sep) == 1: file = file.replace(sep[0], '/') if len(file) > 0 and file[0] != '/': file = '/' + file return URL('file', None, file).toString() def _wrap_node(node): if node == None: return None return NODE_CLASS_MAP[node.getNodeType()] (node) # ===== Constants ELEMENT_NODE = 1 ATTRIBUTE_NODE = 2 TEXT_NODE = 3 CDATA_SECTION_NODE = 4 ENTITY_REFERENCE_NODE = 5 ENTITY_NODE = 6 PROCESSING_INSTRUCTION_NODE = 7 COMMENT_NODE = 8 DOCUMENT_NODE = 9 DOCUMENT_TYPE_NODE = 10 DOCUMENT_FRAGMENT_NODE = 11 NOTATION_NODE = 12 # ===== DOMException try: from org.w3c.dom import DOMException except ImportError, e: pass # ===== DOMImplementation class DOMImplementation: def __init__(self, impl): self._impl = impl def hasFeature(self, feature, version): if version == None or version == "1.0": return string.lower(feature) == "xml" and \ self._impl.hasFeature(feature, version) else: return 0 def __repr__(self): return "<DOMImplementation javadom.py, using '%s'>" % self._impl # ===== Node class Node: def __init__(self, impl): self.__dict__['_impl'] = impl # attributes def _get_nodeName(self): return self._impl.getNodeName() def _get_nodeValue(self): return self._impl.getNodeValue() def _get_nodeType(self): return self._impl.getNodeType() def _get_parentNode(self): return _wrap_node(self._impl.getParentNode()) def _get_childNodes(self): children = self._impl.getChildNodes() if children is None: return children else: return NodeList(children) def _get_firstChild(self): return _wrap_node(self._impl.getFirstChild()) def _get_lastChild(self): return _wrap_node(self._impl.getLastChild()) def _get_previousSibling(self): return _wrap_node(self._impl.getPreviousSibling()) def _get_nextSibling(self): return _wrap_node(self._impl.getNextSibling()) def _get_ownerDocument(self): return _wrap_node(self._impl.getOwnerDocument()) 
def _get_attributes(self): atts = self._impl.getAttributes() if atts is None: return None else: return NamedNodeMap(atts) # methods def insertBefore(self, new, neighbour): self._impl.insertBefore(new._impl, neighbour._impl) def replaceChild(self, new, old): self._impl.replaceChild(new._impl, old._impl) return old def removeChild(self, old): self._impl.removeChild(old._impl) return old def appendChild(self, new): return self._impl.appendChild(new._impl) def hasChildNodes(self): return self._impl.hasChildNodes() def cloneNode(self): return _wrap_node(self._impl.cloneNode()) # python def __getattr__(self, name): if name[ : 5] != '_get_': return getattr(self, '_get_' + name) () raise AttributeError, name def __setattr__(self, name, value): getattr(self, '_set_' + name) (value) # ===== Document class Document(Node): def __init__(self, impl): Node.__init__(self, impl) # methods def createTextNode(self, data): return Text(self._impl.createTextNode(data)) def createEntityReference(self, name): return EntityReference(self._impl.createEntityReference(name)) def createElement(self, name): return Element(self._impl.createElement(name)) def createDocumentFragment(self): return DocumentFragment(self._impl.createDocumentFragment()) def createComment(self, data): return Comment(self._impl.createComment(data)) def createCDATASection(self, data): return CDATASection(self._impl.createCDATASection(data)) def createProcessingInstruction(self, target, data): return ProcessingInstruction(self._impl.createProcessingInstruction(target, data)) def createAttribute(self, name): return Attr(self._impl.createAttribute(name)) def getElementsByTagName(self, name): return NodeList(self._impl.getElementsByTagName(name)) # attributes def _get_doctype(self): return self._impl.getDoctype() def _get_implementation(self): return DOMImplementation(self._impl.getImplementation()) def _get_documentElement(self): return _wrap_node(self._impl.getDocumentElement()) # python def __repr__(self): docelm = 
self._impl.getDocumentElement() if docelm: return "<Document with root '%s'>" % docelm.getTagName() else: return "<Document with no root>" # ===== Element class Element(Node): def __init__(self, impl): Node.__init__(self, impl) self.__dict__['_get_tagName'] = self._impl.getTagName self.__dict__['getAttribute'] = self._impl.getAttribute self.__dict__['setAttribute'] = self._impl.setAttribute self.__dict__['removeAttribute'] = self._impl.removeAttribute self.__dict__['normalize'] = self._impl.normalize # methods def getAttributeNode(self, name): node = self._impl.getAttributeNode(name) if node == None: return node else: return Attr(node) def setAttributeNode(self, attr): self._impl.setAttributeNode(attr._impl) def removeAttributeNode(self, attr): self._impl.removeAttributeNode(attr._impl) def getElementsByTagName(self, name): return NodeList(self._impl.getElementsByTagName(name)) # python def __repr__(self): return "<Element '%s' with %d attributes and %d children>" % \ (self._impl.getTagName(), self._impl.getAttributes().getLength(), self._impl.getChildNodes().getLength()) # ===== CharacterData class CharacterData(Node): def __init__(self, impl): Node.__init__(self, impl) self.__dict__['_get_data'] = self._impl.getData self.__dict__['_set_data'] = self._impl.setData self.__dict__['_get_length'] = self._impl.getLength self.__dict__['substringData'] = self._impl.substringData self.__dict__['appendData'] = self._impl.appendData self.__dict__['insertData'] = self._impl.insertData self.__dict__['deleteData'] = self._impl.deleteData self.__dict__['replaceData'] = self._impl.replaceData # ===== Comment class Comment(CharacterData): def __repr__(self): return "<Comment of length %d>" % self.getLength() # ===== ProcessingInstruction class ProcessingInstruction(Node): def __init__(self, impl): Node.__init__(self, impl) self.__dict__['_get_target'] = self._impl.getTarget self.__dict__['_get_data'] = self._impl.getData self.__dict__['_set_data'] = self._impl.setData def 
__repr__(self): return "<PI with target '%s'>" % self._impl.getTarget() # ===== Text class Text(CharacterData): def splitText(self, offset): return Text(self._impl.splitText(offset)) def __repr__(self): return "<Text of length %d>" % self._impl.getLength() # ===== CDATASection class CDATASection(Text): def __repr__(self): return "<CDATA section of length %d>" % self._impl.getLength() # ===== Attr class Attr(Node): def __init__(self, impl): Node.__init__(self, impl) self.__dict__['_get_name'] = self._impl.getName self.__dict__['_get_specified'] = self._impl.getSpecified self.__dict__['_get_value'] = self._impl.getValue self.__dict__['_set_value'] = self._impl.setValue def __repr__(self): return "<Attr '%s'>" % self._impl.getName() # ===== EntityReference class EntityReference(Node): def __repr__(self): return "<EntityReference '%s'>" % self.getNodeName() # ===== DocumentType class DocumentType(Node): def __init__(self, impl): Node.__init__(self, impl) self.__dict__['_get_name'] = self._impl.getName def _get_entities(self): return NamedNodeMap(self._impl.getEntities()) def _get_notations(self): return NamedNodeMap(self._impl.getNotations()) def __repr__(self): return "<DocumentType '%s'>" % self._impl.getNodeName() # ===== Notation class Notation(Node): def __init__(self, impl): Node.__init__(self, impl) self.__dict__['_get_publicId'] = self._impl.getPublicId self.__dict__['_get_systemId'] = self._impl.getSystemId def __repr__(self): return "<Notation '%s'>" % self._impl.getNodeName() # ===== Entity class Entity(Node): def __init__(self, impl): Node.__init__(self, impl) self.__dict__['_get_publicId'] = self._impl.getPublicId self.__dict__['_get_systemId'] = self._impl.getSystemId self.__dict__['_get_notationName'] = self._impl.getNotationName def __repr__(self): return "<Entity '%s'>" % self._impl.getNodeName() # ===== DocumentFragment class DocumentFragment(Node): def __repr__(self): return "<DocumentFragment>" # ===== NodeList class NodeList: def __init__(self, 
impl): self._impl = impl self.__dict__['__len__'] = self._impl.getLength self.__dict__['_get_length'] = self._impl.getLength self.__dict__['item'] = self._impl.item # Python list methods def __getitem__(self, ix): if ix < 0: ix = len(self) + ix node = self._impl.item(ix) if node == None: raise IndexError, ix else: return _wrap_node(node) def __setitem__(self, ix, item): raise TypeError, "NodeList instances don't support item assignment" def __delitem__(self, ix, item): raise TypeError, "NodeList instances don't support item deletion" def __setslice__(self, i, j, list): raise TypeError, "NodeList instances don't support slice assignment" def __delslice__(self, i, j): raise TypeError, "NodeList instances don't support slice deletion" def append(self, item): raise TypeError, "NodeList instances don't support .append()" def insert(self, i, item): raise TypeError, "NodeList instances don't support .insert()" def pop(self, i=-1): raise TypeError, "NodeList instances don't support .pop()" def remove(self, item): raise TypeError, "NodeList instances don't support .remove()" def reverse(self): raise TypeError, "NodeList instances don't support .reverse()" def sort(self, *args): raise TypeError, "NodeList instances don't support .sort()" def __add__(self, *args): raise TypeError, "NodeList instances don't support +" def __radd__(self, *args): raise TypeError, "NodeList instances don't support +" def __mul__(self, *args): raise TypeError, "NodeList instances don't support *" def __rmul__(self, *args): raise TypeError, "NodeList instances don't support *" def count(self, *args): raise TypeError, "NodeList instances can't support count without equality" def count(self, *args): raise TypeError, "NodeList instances can't support index without equality" def __getslice__(self, i, j): if i < len(self): i = len(self) + i if j < len(self): j = len(self) + j slice = [] for ix in range(i, min(j, len(self))): slice.append(self[ix]) return slice def __repr__(self): return "<NodeList [ %s 
]>" % string.join(map(repr, self), ", ") # ===== NamedNodeMap class NamedNodeMap: def __init__(self, impl): self._impl = impl self.__dict__['_get_length'] = self._impl.getLength self.__dict__['__len__'] = self._impl.getLength # methods def getNamedItem(self, name): return _wrap_node(self._impl.getNamedItem(name)) def setNamedItem(self, node): return _wrap_node(self._impl.setNamedItem(node._impl)) def removeNamedItem(self, name): return _wrap_node(self._impl.removeNamedItem(name)) def item(self, index): return _wrap_node(self._impl.item(index)) # Python dictionary methods def __getitem__(self, key): node = self._impl.getNamedItem(key) if node is None: raise KeyError, key else: return _wrap_node(node) def get(self, key, alternative = None): node = self._impl.getNamedItem(key) if node is None: return alternative else: return _wrap_node(node) def has_key(self, key): return self._impl.getNamedItem(key) != None def items(self): list = [] for ix in range(self._impl.getLength()): node = self._impl.item(ix) list.append((node.getNodeName(), _wrap_node(node))) return list def keys(self): list = [] for ix in range(self._impl.getLength()): list.append(self._impl.item(ix)._get_nodeName()) return list def values(self): list = [] for ix in range(self._impl.getLength()): list.append(_wrap_node(self._impl.item(ix))) return list def __setitem__(self, key, item): assert key == item._impl._get_nodeName() self._impl.setNamedItem(item._impl) def update(self, nnm): for v in nnm.values(): self._impl.setNamedItem(v._impl) def __repr__(self): pairs = [] for pair in self.items(): pairs.append("'%s' : %s" % pair) return "<NamedNodeMap { %s }>" % string.join(pairs, ", ") # ===== Various stuff NODE_CLASS_MAP = { ELEMENT_NODE : Element, ATTRIBUTE_NODE : Attr, TEXT_NODE : Text, CDATA_SECTION_NODE : CDATASection, ENTITY_REFERENCE_NODE : EntityReference, ENTITY_NODE : Entity, PROCESSING_INSTRUCTION_NODE : ProcessingInstruction, COMMENT_NODE : Comment, DOCUMENT_NODE : Document, DOCUMENT_TYPE_NODE : 
DocumentType, DOCUMENT_FRAGMENT_NODE : DocumentFragment, NOTATION_NODE : Notation } # ===== Self-test if __name__ == "__main__": impl = BrownellDomImplementation() #XercesDomImplementation() #SunDomImplementation() doc2 = impl.createDocument() print doc2 print doc2._get_implementation() root = doc2.createElement("doc") print root doc2.appendChild(root) txt = doc2.createTextNode("This is a simple sample \n") print txt root.appendChild(txt) print root._get_childNodes()[0] print root._get_childNodes() root.setAttribute("huba", "haba") print root print root._get_attributes()
{ "repo_name": "StephenHamilton/gini", "path": "backend/src/gloader/xml/dom/javadom.py", "copies": "12", "size": "19282", "license": "mit", "hash": 3166134834478810000, "line_mean": 26.7040229885, "line_max": 90, "alpha_frac": 0.6123846074, "autogenerated": false, "ratio": 3.934299122628035, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": null, "num_lines": null }
"""An adapter for the standard blocking requests library.""" from __future__ import unicode_literals, absolute_import import aiohttp from collections import AsyncIterable, deque from json import loads as load_json, dumps as dump_json from helium.__about__ import __version__ from helium.session import Response, CB class LiveIterator(AsyncIterable): """Iterable over a live endpoint.""" _FIELD_SEPARATOR = ':' def __init__(self, response, session, resource_class, resource_args): """Construct a live endpoint async iterator. Keyword Args: response(Response): The response to a live endpoint request session(Session): The session with Helium resource_class(Resource): The class of resource to construct """ self._response = response self._session = session self._resource_class = resource_class self._resource_args = resource_args def __aiter__(self): """Create an async iterator.""" return self async def __anext__(self): """Iterate over lines looking for resources.""" resource_class = self._resource_class resource_args = self._resource_args session = self._session response = self._response async for line in response.content: line = line.decode('utf-8') if len(line) == 0 and response.at_eof: raise StopAsyncIteration if len(line.strip()) > 0: field, data = line.split(self._FIELD_SEPARATOR, 1) if field.strip() == 'data': json = load_json(data).get('data') return resource_class(json, session, **resource_args) def take(self, n): """Return the next n datapoints. Args: n(int): The number of datapoints to retrieve Returns: A list of at most `n` datapoints. 
""" return self._session.adapter.take(self, n) def close(self): """Close the live iterator.""" self._response.close() async def __aenter__(self): """Enter context.""" # Get the actual response response = await self._response CB.boolean(200)(Response(response.status, response.headers, response.content, 'GET', response.url)) self._response = response # and enter its' context await self._response.__aenter__() return self async def __aexit__(self, *args): """Exit context and close iterator.""" self.close() await self._response.__aexit__(*args) class DatapointIterator(AsyncIterable): """Iterator over a timeseries endpoint.""" def __init__(self, timeseries, loop=None): """Construct an iterator. Args: timeseries: the timeseries to iterate over loop: The asyncio loop to use for iterating """ self.timeseries = timeseries self.queue = deque() self.continuation_url = timeseries._base_url def __aiter__(self): """Async iterator over data points in a timeseries.""" return self async def __anext__(self): """Return the next datapoint.""" timeseries = self.timeseries session = timeseries._session is_aggregate = timeseries._is_aggregate if len(self.queue) == 0: if self.continuation_url is None: raise StopAsyncIteration def _process(json): data = json.get('data') links = json.get('links') self.continuation_url = links.get(timeseries._direction, None) self.queue.extend(data) await session.get(self.continuation_url, CB.json(200, _process), params=timeseries._params) if len(self.queue) == 0: raise StopAsyncIteration json = self.queue.popleft() return timeseries._datapoint_class(json, session, is_aggregate=is_aggregate) class Adapter(aiohttp.client.ClientSession): """A asynchronous adapter based on the `aiohttp` library.""" def __init__(self, loop=None): """Construct a basic requests session with the Helium API.""" super(Adapter, self).__init__(headers={ 'Accept': 'application/json', 'Accept-Charset': 'utf-8', 'Content-Type': "application/json", 'User-Agent': 
'helium-python/{0}'.format(__version__) }, loop=loop) @property def api_token(self): """The API token to use.""" return self._default_headers.get('Authorization', None) @api_token.setter def api_token(self, api_token): self._default_headers.update({ 'Authorization': api_token }) async def _http(self, callback, method, url, params=None, json=None, headers=None, files=None): data = None if files: data = files elif json: data = dump_json(json) data = files if files else data async with self.request(method, url, params=params, headers=headers, data=data) as response: body = await response.text(encoding='utf-8') return callback(Response(response.status, response.headers, body, method, url)) def get(self, url, callback, params=None, json=None, headers=None): # noqa: D102 return self._http(callback, 'GET', url, params=params, json=json, headers=headers) def put(self, url, callback, params=None, json=None, headers=None): # noqa: D102 return self._http(callback, 'PUT', url, params=params, json=json, headers=headers) def post(self, url, callback, params=None, json=None, headers=None, files=None): # noqa: D102 return self._http(callback, 'POST', url, params=params, json=json, headers=headers, files=files) def patch(self, url, callback, params=None, json=None, headers=None): # noqa: D102 return self._http(callback, 'PATCH', url, params=params, json=json, headers=headers) def delete(self, url, callback, json=None): # noqa: D102 return self._http(callback, 'DELETE', url, json=json) def live(self, session, url, resource_class, resource_args, params=None): # noqa: D102 headers = { 'Accept': 'text/event-stream', } response = super(Adapter, self).get(url, read_until_eof=False, params=params, headers=headers) return LiveIterator(response, session, resource_class, resource_args) def datapoints(self, timeseries): # noqa: D102 return DatapointIterator(timeseries) async def take(self, aiter, n): # noqa: D102 result = [] if n == 0: return result async for entry in aiter: 
result.append(entry) n -= 1 if n == 0: break return result
{ "repo_name": "helium/helium-python", "path": "helium/adapter/aiohttp.py", "copies": "1", "size": "7570", "license": "bsd-3-clause", "hash": -5396225908550262000, "line_mean": 32.4955752212, "line_max": 78, "alpha_frac": 0.5443857332, "autogenerated": false, "ratio": 4.562989752863171, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 226 }
"""An adapter for the standard blocking requests library.""" from __future__ import unicode_literals, absolute_import import requests from collections import Iterable, Iterator, deque from json import loads as load_json from helium.__about__ import __version__ from helium.session import Response, CB from itertools import islice class LiveIterator(Iterable): """Iterable over a live endpoint.""" _FIELD_SEPARATOR = ':' def __init__(self, response, session, resource_class, resource_args): """Construct a live endpoint represented as an Iterable. Keyword Args: response(Response): The response to a live endpoint request session(Session): The session with Helium resource_class(Resource): The class of resource to construct """ self._response = response self._session = session self._resource_class = resource_class self._resource_args = resource_args def _read(self, response): for line in response.iter_lines(decode_unicode=True): yield line def __iter__(self): """Iterate over lines looking for resources.""" resource_class = self._resource_class resource_args = self._resource_args session = self._session response = self._response for chunk in self._read(response): event_data = "" for line in chunk.splitlines(): # Ignore empty lines if not line.strip(): continue data = line.split(self._FIELD_SEPARATOR, 1) field = data[0] data = data[1] if field == 'data': event_data += data if not event_data: # Don't report on events with no data continue event_data = load_json(event_data).get('data') yield resource_class(event_data, session, **resource_args) def take(self, n): """Return the next n datapoints. Args: n(int): The number of datapoints to retrieve Returns: A list of at most `n` datapoints. 
""" return self._session.adapter.take(self, n) def close(self): """Close the live session.""" self._response.close() def __enter__(self): """Enter context.""" return self def __exit__(self, *args): """Exit context.""" self.close() return False class DatapointIterator(Iterator): """Iterator over a timeseries endpoint.""" def __init__(self, timeseries): """Construct an iterator. Args: timeseries: the timeseries to iterate over loop: The asyncio loop to use for iterating """ self.timeseries = timeseries self.queue = deque() self.continuation_url = timeseries._base_url def __iter__(self): """Iterator for data points in a timeseries.""" return self # pragma: no cover def __next__(self): """Return the next data point.""" timeseries = self.timeseries session = timeseries._session is_aggregate = timeseries._is_aggregate if len(self.queue) == 0: if self.continuation_url is None: raise StopIteration def _process(json): data = json.get('data') links = json.get('links') self.continuation_url = links.get(timeseries._direction, None) self.queue.extend(data) session.get(self.continuation_url, CB.json(200, _process), params=timeseries._params) if len(self.queue) == 0: raise StopIteration json = self.queue.popleft() return timeseries._datapoint_class(json, session, is_aggregate=is_aggregate) def next(self): """Python 2 iterator compatibility.""" # We remove coverage here to pacify coverage since this method # used in python 2.7 but no longer in python 3.5 return self.__next__() # pragma: no cover class Adapter(requests.Session): """A synchronous adapter based on the `requests` library.""" def __init__(self): """Construct a basic requests session with the Helium API.""" super(Adapter, self).__init__() self.headers.update({ 'Accept': 'application/json', 'Accept-Charset': 'utf-8', 'Content-Type': "application/json", 'User-Agent': 'helium-python/{0}'.format(__version__) }) @property def api_token(self): """The API token to use.""" return self.headers.get('Authorization', None) 
@api_token.setter def api_token(self, api_token): self.headers.update({ 'Authorization': api_token }) def _http(self, callback, method, url, params=None, json=None, headers=None, files=None): response = super(Adapter, self).request(method, url, params=params, json=json, headers=headers, files=files) if not response.encoding: response.encoding = 'utf8' body = response.text request = response.request return callback(Response(response.status_code, response.headers, body, request.method, request.url)) def get(self, url, callback, params=None, json=None, headers=None): # noqa: D102 return self._http(callback, 'GET', url, params=params, headers=headers, json=json) def put(self, url, callback, params=None, json=None, headers=None): # noqa: D102 return self._http(callback, 'PUT', url, params=params, json=json, headers=headers) def post(self, url, callback, params=None, json=None, headers=None, files=None): # noqa: D102 return self._http(callback, 'POST', url, params=params, json=json, headers=headers, files=files) def patch(self, url, callback, params=None, json=None, headers=None): # noqa: D102 return self._http(callback, 'PATCH', url, params=params, json=json, headers=headers) def delete(self, url, callback, json=None): # noqa: D102 return self._http(callback, 'DELETE', url, json=json) def datapoints(self, timeseries): # noqa: D102 return DatapointIterator(timeseries) def take(self, iter, n): # noqa: D102 return list(islice(iter, n)) def live(self, session, url, resource_class, resource_args, params=None): # noqa: D102 headers = { 'Accept': 'text/event-stream', } response = super(Adapter, self).get(url, stream=True, headers=headers, params=params) # Validate the response code CB.boolean(200)(Response(response.status_code, response.headers, None, response.request.method, url)) return LiveIterator(response, session, resource_class, resource_args)
{ "repo_name": "helium/helium-python", "path": "helium/adapter/requests.py", "copies": "1", "size": "7559", "license": "bsd-3-clause", "hash": 3513103072996349400, "line_mean": 32.5955555556, "line_max": 91, "alpha_frac": 0.5459716894, "autogenerated": false, "ratio": 4.62041564792176, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 225 }
"""An adaptor between :class:`ControllerBlock` and the Klamp't C++ serial controller interface (SerialController). """ import asyncore,socket import errno import json import time from .. import controller headerlen = 4 def packStrlen(s): l = len(s) assert(l <= 0xffffffff) bytes = [None]*4 bytes[0] = chr(l&0xff) bytes[1] = chr((l>>8)&0xff) bytes[2] = chr((l>>16)&0xff) bytes[3] = chr((l>>24)&0xff) return ''.join(bytes) def unpackStrlen(s): assert len(s)==headerlen return (ord(s[3])<<24)|(ord(s[2])<<16)|(ord(s[1])<<8)|ord(s[0]) def writeSocket(socket,msg): totalsent = 0 while totalsent < len(msg): sent = socket.send(msg[totalsent:]) if sent == 0: raise IOError("socket connection broken") totalsent = totalsent + sent return def readSocket(socket,length): chunk = socket.recv(length) msg = chunk while len(msg) < length: chunk = socket.recv(length-len(msg)) if chunk == '': raise IOError("socket connection broken") msg = msg + chunk return msg class JsonClient(asyncore.dispatcher): """An asyncore client that transmits JSON messages in the Klamp't simple serial interface. Sends/receives variable-length messages such that the first 4 bytes are the length of the message (in binary) and the remainder is the payload. Subclasses should override onMessage, which accepts with arbitrary Python objects that can be serialized by the json module. Subclasses should use sendMessage to send a message. To run, call asyncore.loop(). """ def __init__(self, addr): if isinstance(addr,socket.socket): asyncore.dispatcher.__init__(self,s) else: asyncore.dispatcher.__init__(self) self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.connect( addr ) self.buffer = "" def handle_connect(self): """Called on socket connect. May be overridden.""" pass def handle_close(self): """Called on socket close. May be overridden.""" self.close() def handle_read(self): """Called on read. 
Do not override; override onMessage instead.""" lenstr = self.read(headerlen) msglen = unpackStrlen(lenstr) msg = self.read(msglen) try: output = json.loads(msg) except ValueError: print("Error parsing JSON object from message '"+msg+"'") return self.onMessage(output) def writable(self): """Called to determine whether there's any data left to be sent. Do not override.""" return (len(self.buffer) > 0) def handle_write(self): """Called to send data when available. Do not override.""" sent = self.send(self.buffer) self.buffer = self.buffer[sent:] def onMessage(self,msg): """Override this to handle an incoming message""" pass def sendMessage(self,msg): """Call this to send an outgoing message""" smsg = json.dumps(msg) #print("JSON message:",smsg) self.buffer = self.buffer + packStrlen(smsg) + smsg #print("buffer now:",self.buffer) def read(self,length): chunk = self.recv(length) msg = chunk while len(msg) < length: chunk = self.recv(length-len(msg)) if chunk == '': raise IOError("socket connection broken") msg = msg + chunk return msg def recv(self, buffer_size): """Fix for windows sockets throwing EAGAIN crashing asyncore""" while True: try: data = self.socket.recv(buffer_size) if not data: # a closed connection is indicated by signaling # a read condition, and having recv() return 0. print("JsonClient: Socket closed...") self.handle_close() return '' else: return data except socket.error as why: # winsock sometimes throws ENOTCONN if why.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK): #print "EGAIN or EWOULDBLOCK returned... spin waiting" time.sleep(0.001) continue elif why.args[0] == errno.ENOTCONN: self.handle_close() return '' else: raise class ControllerClient(JsonClient): """An asyncore client that relays Klampt :class:`ControllerBlock` I/O to some receiver via a JSON-based serial interface. For example, this can be connected to a :class:`SerialController` or to the SimTest app. 
The interface simply translates messages back and forth using the raw ControllerBlock input / output dictionaries. This uses the asyncore module. To run, pass it an address and a :class:`ControllerBlock` interface. Then, call ``asyncore.loop()``. The calling convention looks like this:: import asyncore from klampt.control.io.serialcontroller import ControllerClient from klampt.control.controller import ControllerBlock class MyController(ControllerBlock): ...define your controller here... #open up a client on localhost:3456 client = ControllerClient(('localhost',3456),'MyController()) asyncore.loop() Arguments: addr: a (host,port) pair or """ def __init__(self,addr,controller): self.connecting = True JsonClient.__init__(self,addr) self.controller = controller def handle_connect(self): print("Handle connect") JsonClient.handle_connect(self) def handle_expt(self): self.close() def handle_error(self): JsonClient.handle_error(self) if self.connecting: print() print("(Did you forget to start up a Klamp't controller server?)") else: print() print("(Did the Klamp't controller server shut down?)") def handle_connect(self): self.connecting = False; self.controller.signal('enter') return def onMessage(self,msg): #print "receiving message",msg try: res = self.controller.output_and_advance(**msg) if res==None: return except Exception as e: print("Exception",e,"on read") return try: #print "sending message",res self.sendMessage(res) except IOError as e: print("Exception",e,"on send") return class JsonSerialController(controller.ControllerBlock): """A controller that maintains a server to write/read messages every output_and_advance cycle. It simply translates messages back and forth to a client via a JSON-based serial interface. 
""" def __init__(self,addr=('localhost',3456)): self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.bind( addr ) self.sock.listen(1) print("JsonSerialController: Listening on port",addr[1]) self.clientsock = None def accept(self): """Get a new connection, if there isn't one""" if self.clientsock == None: pair = self.sock.accept() if pair != None: sock, addr = pair print('JsonSerialController: Incoming connection from %s' % repr(addr)) self.clientsock = sock return def advance(self,**inputs): self.accept() if self.clientsock == None: return None #Convert inputs to JSON message smsg = json.dumps(inputs) msg = packStrlen(smsg) + smsg try: writeSocket(self.clientsock,msg) #Read response from serial client lenstr = readSocket(self.clientsock,headerlen) msglen = unpackStrlen(lenstr) msg = readSocket(self.clientsock,msglen) except IOError: print("JsonSerialController: Error writing or reading socket...") self.clientsock.close() self.clientsock = None return None try: output = json.loads(msg) return output except ValueError: #didn't parse properly print("JsonSerialController: Couldn't read Python object from JSON message '"+msg+"'") return None if __name__ == "__main__": import sys from ..blocks import trajectory_tracking from klampt import io host = 'localhost' port = 3456 if len(sys.argv)==1: print("Usage: %s [linear_path_file]\n"%(sys.argv[0],)) print("By default connects to localhost:3456") exit() #by default, runs a trajectory controller pathfn = sys.argv[1] traj = io.load(pathfn) pycontroller = trajectory_tracking.TrajectoryPositionController(traj) s = ControllerClient((host,port),pycontroller) asyncore.loop() def make(robot): return JsonSerialController()
{ "repo_name": "krishauser/Klampt", "path": "Python/klampt/control/io/serialcontroller.py", "copies": "1", "size": "9226", "license": "bsd-3-clause", "hash": 2202591038443523800, "line_mean": 32.0681003584, "line_max": 98, "alpha_frac": 0.590071537, "autogenerated": false, "ratio": 4.2555350553505535, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5345606592350554, "avg_score": null, "num_lines": null }
"""An adaptor between python controllers and the Klamp't serial controller interface (SerialController). """ import asyncore,socket import errno import json import time import controller headerlen = 4 def packStrlen(s): l = len(s) assert(l <= 0xffffffff) bytes = [None]*4 bytes[0] = chr(l&0xff) bytes[1] = chr((l>>8)&0xff) bytes[2] = chr((l>>16)&0xff) bytes[3] = chr((l>>24)&0xff) return ''.join(bytes) def unpackStrlen(s): assert len(s)==headerlen return (ord(s[3])<<24)|(ord(s[2])<<16)|(ord(s[1])<<8)|ord(s[0]) def writeSocket(socket,msg): totalsent = 0 while totalsent < len(msg): sent = socket.send(msg[totalsent:]) if sent == 0: raise IOError("socket connection broken") totalsent = totalsent + sent return def readSocket(socket,length): chunk = socket.recv(length) msg = chunk while len(msg) < length: chunk = socket.recv(length-len(msg)) if chunk == '': raise IOError("socket connection broken") msg = msg + chunk return msg class JsonClient(asyncore.dispatcher): """A client that transmits JSON messages in the Klamp't simple serial interface. Sends/receives variable-length messages such that the first 4 bytes are the length of the message (in binary) and the remainder is the payload. Subclasses should override onMessage, which accepts with arbitrary Python objects that can be serialized by the json module. Subclasses should use sendMessage to send a message. To run, call asyncore.loop(). """ def __init__(self, addr): if isinstance(addr,socket.socket): asyncore.dispatcher.__init__(self,s) else: asyncore.dispatcher.__init__(self) self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.connect( addr ) self.buffer = "" def handle_connect(self): """Called on socket connect. May be overridden.""" pass def handle_close(self): """Called on socket close. May be overridden.""" self.close() def handle_read(self): """Called on read. 
Do not override; override onMessage instead.""" lenstr = self.read(headerlen) msglen = unpackStrlen(lenstr) msg = self.read(msglen) try: output = json.loads(msg) except ValueError: print "Error parsing JSON object from message '"+msg+"'" return self.onMessage(output) def writable(self): """Called to determine whether there's any data left to be sent. Do not override.""" return (len(self.buffer) > 0) def handle_write(self): """Called to send data when available. Do not override.""" sent = self.send(self.buffer) self.buffer = self.buffer[sent:] def onMessage(self,msg): """Override this to handle an incoming message""" pass def sendMessage(self,msg): """Call this to send an outgoing message""" smsg = json.dumps(msg) #print "JSON message:",smsg self.buffer = self.buffer + packStrlen(smsg) + smsg #print "buffer now:",self.buffer def read(self,length): chunk = self.recv(length) msg = chunk while len(msg) < length: chunk = self.recv(length-len(msg)) if chunk == '': raise IOError("socket connection broken") msg = msg + chunk return msg def recv(self, buffer_size): """Fix for windows sockets throwing EAGAIN crashing asyncore""" while True: try: data = self.socket.recv(buffer_size) if not data: # a closed connection is indicated by signaling # a read condition, and having recv() return 0. print "Socket closed..." self.handle_close() return '' else: return data except socket.error, why: # winsock sometimes throws ENOTCONN if why.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK): #print "EGAIN or EWOULDBLOCK returned... spin waiting" time.sleep(0.001) continue elif why.args[0] == errno.ENOTCONN: self.handle_close() return '' else: raise class ControllerClient(JsonClient): """A client that relays Python BaseController object to a SerialController. The interface simply translates messages back and forth using the standard BaseController messages. To run, pass it an address and a control.BaseController interface. Then, call asyncore.loop(). 
""" def __init__(self,addr,controller): """Sends the output of a controller to a SerialController. controller is assumed to follow the control.BaseController interface. """ self.connecting = True JsonClient.__init__(self,addr) self.controller = controller def handle_connect(self): print "Handle connect" JsonClient.handle_connect(self) def handle_expt(self): self.close() def handle_error(self): JsonClient.handle_error(self) if self.connecting: print print "(Did you forget to start up a Klamp't controller server?)" else: print print "(Did the Klamp't controller server shut down?)" def handle_connect(self): self.connecting = False; self.controller.signal('enter') return def onMessage(self,msg): #print "receiving message",msg try: res = self.controller.output_and_advance(**msg) if res==None: return except Exception as e: print "Exception",e,"on read" return try: #print "sending message",res self.sendMessage(res) except IOError as e: print "Exception",e,"on send" return class SerialController(controller.BaseController): """A controller that maintains a server to write/read messages. It simply translates messages back and forth to a client via the serial interface. 
""" def __init__(self,addr=('localhost',3456)): self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.bind( addr ) self.sock.listen(1) print "SerialController: Listening on port",addr[1] self.clientsock = None def accept(self): """Get a new connection, if there isn't one""" if self.clientsock == None: pair = self.sock.accept() if pair != None: sock, addr = pair print 'SerialController: Incoming connection from %s' % repr(addr) self.clientsock = sock return def output(self,**inputs): self.accept() if self.clientsock == None: return None #Convert inputs to JSON message smsg = json.dumps(inputs) msg = packStrlen(smsg) + smsg try: writeSocket(self.clientsock,msg) #Read response from serial client lenstr = readSocket(self.clientsock,headerlen) msglen = unpackStrlen(lenstr) msg = readSocket(self.clientsock,msglen) except IOError: print "SerialController: Error writing or reading socket..." self.clientsock.close() self.clientsock = None return None try: output = json.loads(msg) return output except ValueError: #didn't parse properly print "Couldn't read Python object from JSON message '"+msg+"'" return None if __name__ == "__main__": import sys import trajectory_controller host = 'localhost' port = 3456 if len(sys.argv)==1: print "Usage: %s [linear_path_file]\n"%(sys.argv[0],) print "By default connects to localhost:3456" exit() #by default, runs a trajectory controller pathfn = sys.argv[1] pycontroller = trajectory_controller.make(None,pathfn) s = ControllerClient((host,port),pycontroller) asyncore.loop() def make(robot): return SerialController()
{ "repo_name": "hpbader42/Klampt", "path": "Python/control/serialcontroller.py", "copies": "11", "size": "8460", "license": "bsd-3-clause", "hash": -9158912142291061000, "line_mean": 31.4137931034, "line_max": 82, "alpha_frac": 0.5823877069, "autogenerated": false, "ratio": 4.261964735516373, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": null, "num_lines": null }
# An ad hoc script to convert an lng file to the arclite format import sys def read_header(): with open("../bootstrap/msg.h", encoding="utf-8") as f: RawContent = f.read() content = RawContent.splitlines() return [i[len("#define MSG_"):].split(" ") for i in content] def read_lng(name): with open(name, encoding="utf-8") as f: RawContent = f.read() content = RawContent.splitlines() return content[0], filter(lambda x: x.startswith('"'), content) def read_en(): with open("../en.msg", encoding="utf-8") as f: RawContent = f.read() return RawContent.splitlines() def to_key(string): return string.split("=")[0].strip().upper().replace(".", "_") + " = " if __name__ == "__main__": hdr = read_header() lng_tuple = read_lng(sys.argv[1]) lng = list(lng_tuple[1]) if len(lng) != len(hdr): raise "Wrong number of lines" out = read_en() out[0] = lng_tuple[0] lines_replaced = 0 for i in range(len(lng)): if int(hdr[i][1]) != i: raise "Wrong line number" begin = hdr[i][0] + " = " for n in range(len(out)): if to_key(out[n]).startswith(begin): out[n] = out[n][0:len(begin)] + lng[i][1:-1] lines_replaced += 1 break print(lines_replaced) with open(sys.argv[2], "w", encoding="utf-8") as f: f.write("\n".join(out))
{ "repo_name": "data-man/FarAS", "path": "plugins/arclite/tools/add_lng.py", "copies": "2", "size": "1280", "license": "bsd-3-clause", "hash": 8574222488666837000, "line_mean": 21.4561403509, "line_max": 70, "alpha_frac": 0.61171875, "autogenerated": false, "ratio": 2.689075630252101, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4300794380252101, "avg_score": null, "num_lines": null }
# We add scoring: each ball bumps a shared counter when it bounces off the
# opponent's edge, and moving a racket slowly drains that player's score.

import pygame
from pong_v0 import SharedState
from pong_v1 import PlayerRacket
from pong_v2 import Pong_v2
from pong_v2 import CPURacket
import lib.colors as Color
from pong_v0 import Ball

# Indices into the (width, height) display-size tuple.
WIDTH = 0
HEIGHT = 1


class ScoreBall(Ball):
    """A ball that credits points on the shared scoreboard."""

    def ball_hits_bottom(self):
        """Bounce off the bottom edge: the CPU scores."""
        super().ball_hits_bottom()
        SharedState.CPU_score += 10

    def ball_hits_top(self):
        """Bounce off the top edge: the player scores."""
        super().ball_hits_top()
        SharedState.player_score += 10


class PlayerRacketMotion(PlayerRacket):
    """Player racket whose movement costs a small amount of score."""

    def update(self):
        super().update()
        # Moving costs 1/1000 of the traveled distance; clamp at zero.
        SharedState.player_score -= abs(self.motion)/1000
        if SharedState.player_score < 0:
            SharedState.player_score = 0


class CPURacketMotion(CPURacket):
    """CPU racket whose movement costs a small amount of score."""

    def update(self):
        super().update()
        # Same movement cost as the player's racket; clamp at zero.
        SharedState.CPU_score -= abs(self.motion)/1000
        if SharedState.CPU_score < 0:
            SharedState.CPU_score = 0


class Pong_v3(Pong_v2):
    """Pong with on-screen scores for both players."""

    def __init__(self, width = 800, height = 600, caption = "A version of Pong"):
        """Build the v2 game, then swap in the scoring ball and rackets."""
        super().__init__(width, height, caption)

        # Replace the plain ball with the scoring ball.
        self.all_sprites_list.remove(self.ball)
        self.ball_width = 16
        self.ball_height = 16
        self.initial_x_coordinate = self.display_size[WIDTH]//2 - self.ball_width//2
        self.initial_y_coordinate = 3*self.display_size[HEIGHT]//4 - self.ball_height//2
        self.ball_color = Color.white
        self.ball = ScoreBall(
            color = self.ball_color,
            width = self.ball_width,
            height = self.ball_height,
            initial_x_coordinate = self.initial_x_coordinate,
            initial_y_coordinate = self.initial_y_coordinate,
            display_size = self.display_size
        )
        self.all_sprites_list.add(self.ball)

        # Replace the player's racket with the score-draining variant.
        self.all_sprites_list.remove(self.player_racket)
        self.racket_width = 128
        self.racket_height = 2
        self.racket_color = Color.green
        self.player_racket = PlayerRacketMotion(
            color = self.racket_color,
            width = self.racket_width,
            height = self.racket_height,
            display_size = self.display_size)
        self.all_sprites_list.add(self.player_racket)

        # Replace the CPU's racket with the score-draining variant.
        self.all_sprites_list.remove(self.CPU_racket)
        self.CPU_racket = CPURacketMotion(
            color = self.racket_color,
            width = self.racket_width,
            height = self.racket_height,
            display_size = self.display_size)
        self.all_sprites_list.add(self.CPU_racket)

    def draw_frame(self):
        """Draw the regular frame, then overlay both scores."""
        super().draw_frame()
        # NOTE(review): the font is re-created every frame; caching it in
        # __init__ would be cheaper -- left as-is to keep behavior identical.
        font = pygame.font.Font(None, 74)
        text = font.render(str(int(SharedState.CPU_score)), 1, Color.white)
        self.display.blit(text, (10, 10))
        text = font.render(str(int(SharedState.player_score)), 1, Color.white)
        self.display.blit(text, (10, self.display_size[1] - 50))


if __name__ == "__main__":
    display = Pong_v3()
    display.run()
{ "repo_name": "vicente-gonzalez-ruiz/YAPT", "path": "workshops/programacion_python_ESO/pong_v3.py", "copies": "2", "size": "3838", "license": "cc0-1.0", "hash": -4633555864500381000, "line_mean": 31.5254237288, "line_max": 105, "alpha_frac": 0.5914538822, "autogenerated": false, "ratio": 3.517873510540788, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.01839961556128414, "num_lines": 118 }
#An administrator is a special kind of user. Write a class called #Admin that inherits from the User class you wrote in Exercise 9-3 (page 166) #or Exercise 9-5 (page 171). Add an attribute, privileges, that stores a list #of strings like "can add post", "can delete post", "can ban user", and so on. #Write a method called show_privileges() that lists the administrator’s set of #privileges. Create an instance of Admin, and call your method. class User(): def __init__(self, first_name, last_name, username, email, location): """Iniciar usuario""" self.first_name = first_name.title() self.last_name = last_name.title() self.username = username self.email = email self.location = location.title() self.login_attempts = 0 def describe_user(self): """Informacion del usuario""" print("\n" + self.first_name + " " + self.last_name) print(" Username: " + self.username) print(" Email: " + self.email) print(" Location: " + self.location) def greet_user(self): """Saludo al usuario""" print("\nBienvenido/a, " + self.username + "!") def increment_login_attempts(self): """Incrementando cuentas""" self.login_attempts += 1 def reset_login_attempts(self): """Restablecer cuentas a 0""" self.login_attempts = 0 class Admin(User): """Usuario con privilegios administrativos""" def __init__(self, first_name, last_name, username, email, location): """Iniciando administrador.""" super().__init__(first_name, last_name, username, email, location) self.privileges = [] def show_privileges(self): """Mostrando privilegios de administrador""" print("\nPrivilegios:") for privilege in self.privileges: print("- " + privilege) noemi = Admin('noemi', 'flores', 'noemi_flores', 'noemi_flores@uadec.edu.mx', 'Saltillo') noemi.describe_user() noemi.privileges = [ 'Restablecer contraseñas ', 'Cancelar cuentas', ] noemi.greet_user() noemi.show_privileges()
{ "repo_name": "AnhellO/DAS_Sistemas", "path": "Ene-Jun-2019/NoemiFlores/PrimerParcial/PrimeraPractica/Admin.py", "copies": "1", "size": "2104", "license": "mit", "hash": -2912615444296082000, "line_mean": 33.4590163934, "line_max": 89, "alpha_frac": 0.6316039981, "autogenerated": false, "ratio": 3.478476821192053, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4610080819292053, "avg_score": null, "num_lines": null }
""" An administrator is a special kind of user. Write a class called Admin that inherits from the User class you wrote in Exercise 9-3 (page 166) or Exercise 9-5 (page 171). Add an attribute, privileges, that stores a list of strings like "can add post", "can delete post", "can ban user", and so on. Write a method called show_privileges() that lists the administrator’s set of privileges. Create an instance of Admin, and call your method. """ class User(object): """Representa un usuario.""" def __init__(self, first_name, last_name, age): self.first_name = first_name self.last_name = last_name self.age = age self.login_attempts = 0 def describe_user(self): """ method that prints a summary of the user’s information """ description = "Nombre: " + self.first_name.title()+ "\nApellido: "+ self.last_name.title()+ "\nEdad: "+ str(self.age) return description def greet_user(self): """ method that prints a personalized greeting to the user""" print("¡Bienvenid@ " + self. first_name.title()+ ' '+self.last_name.title()+'!') def increment_login_attempts(self): self.login_attempts+=1 def reset_login_attempts(self): self.login_attempts=0 class Admin(User): """Representa un tipo de usuario, especificamente un administrador .""" def __init__(self, first_name, last_name, age): super(Admin, self).__init__(first_name, last_name, age) self.privileges = [] def set_privileges(self, *privileges): self.privileges=privileges def show_privileges(self): """ method that lists the administrator’s set of privileges """ print("\nEl administrador tiene los siguienes privilegios: ") for privilege in self.privileges: print("- "+ privilege) usuario = User('karla', 'berlanga', 21) print(usuario.describe_user()) usuario.greet_user() print("\n") print("Administrador") administrador = Admin('pablo', 'garza', 21) print(administrador.describe_user()) administrador.set_privileges('can add post', 'can delete post', 'can ban user') administrador.show_privileges()
{ "repo_name": "AnhellO/DAS_Sistemas", "path": "Ene-Jun-2019/Karla Berlanga/Practica 1/admin.py", "copies": "1", "size": "2161", "license": "mit", "hash": 4444334431379872000, "line_mean": 36.7894736842, "line_max": 125, "alpha_frac": 0.6638811513, "autogenerated": false, "ratio": 3.5427631578947367, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.47066443091947363, "avg_score": null, "num_lines": null }
"""An advanced association proxy example which illustrates nesting of association proxies to produce multi-level Python collections, in this case a dictionary with string keys and sets of integers as values, which conceal the underlying mapped classes. This is a three table model which represents a parent table referencing a dictionary of string keys and sets as values, where each set stores a collection of integers. The association proxy extension is used to hide the details of this persistence. The dictionary also generates new collections upon access of a non-existent key, in the same manner as Python's "collections.defaultdict" object. """ import operator from sqlalchemy import Column from sqlalchemy import create_engine from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import String from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship from sqlalchemy.orm import Session from sqlalchemy.orm.collections import MappedCollection class Base(object): id = Column(Integer, primary_key=True) Base = declarative_base(cls=Base) class GenDefaultCollection(MappedCollection): def __missing__(self, key): self[key] = b = B(key) return b class A(Base): __tablename__ = "a" associations = relationship( "B", collection_class=lambda: GenDefaultCollection( operator.attrgetter("key") ), ) collections = association_proxy("associations", "values") """Bridge the association from 'associations' over to the 'values' association proxy of B. 
""" class B(Base): __tablename__ = "b" a_id = Column(Integer, ForeignKey("a.id"), nullable=False) elements = relationship("C", collection_class=set) key = Column(String) values = association_proxy("elements", "value") """Bridge the association from 'elements' over to the 'value' element of C.""" def __init__(self, key, values=None): self.key = key if values: self.values = values class C(Base): __tablename__ = "c" b_id = Column(Integer, ForeignKey("b.id"), nullable=False) value = Column(Integer) def __init__(self, value): self.value = value if __name__ == "__main__": engine = create_engine("sqlite://", echo=True) Base.metadata.create_all(engine) session = Session(engine) # only "A" is referenced explicitly. Using "collections", # we deal with a dict of key/sets of integers directly. session.add_all([A(collections={"1": set([1, 2, 3])})]) session.commit() a1 = session.query(A).first() print(a1.collections["1"]) a1.collections["1"].add(4) session.commit() a1.collections["2"].update([7, 8, 9]) session.commit() print(a1.collections["2"])
{ "repo_name": "kawamon/hue", "path": "desktop/core/ext-py/SQLAlchemy-1.3.17/examples/association/dict_of_sets_with_default.py", "copies": "7", "size": "2880", "license": "apache-2.0", "hash": 5106265876678727000, "line_mean": 27.5148514851, "line_max": 76, "alpha_frac": 0.6927083333, "autogenerated": false, "ratio": 4.090909090909091, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 101 }
# An adventure game
#
# A text adventure driven by mutually-recursive "room" functions.  The player
# fights three enemies (rat, bear, skeleton) and finally chooses between
# riches and escape.  Control never returns normally: every ending funnels
# into credit(), which calls quit().
#
# NOTE(review): this file was recovered from a whitespace-mangled copy; the
# line breaks inside the one triple-quoted ASCII-art string (the bear in
# room5) were lost and have been reflowed on a best-effort basis.

# Global game state, mutated by almost every function below.
health = 10        # player hit points
enemy = 10         # current enemy hit points
gold = 0           # gold collected
strength = 1       # damage multiplier of the current enemy
rat = False        # True once the rat fight is finished
bear = False       # True once the bear encounter is finished
skeleton = False   # True once the skeleton fight is finished

from random import randint
from time import sleep

inventory = []
newinventory = ""  # NOTE(review): never used anywhere; kept for compatibility


def healthbar():
    # Prints the status bar: HP, gold and inventory.
    global health, gold, strength, inventory
    print("-----------------------------------------------------------------------------------")
    print("You have", health, "HP", gold, "gold and in your inventory is: ", ", ".join(inventory))
    print("-----------------------------------------------------------------------------------")


def loseHP(hp, message):
    # Removes health from the player along with a message.
    # Triggers gameover() (which never returns) when health drops to 0.
    global health, strength
    print(message)
    health = health - hp
    if health <= 0:
        print(" YOU DID NOT SUCCEED!GAME OVER!")
        sleep(1)
        print("YOU HAVE LOST ALL YOUR HP")
        gameover()
    else:
        print(" Your health is now", health, "HP")


def pos_check():
    # Checks where the player is in the game in order to send them to the
    # next room after winning a fight: rat -> room3, bear -> room6,
    # skeleton -> room9.
    global rat, bear, skeleton
    if rat == False:
        rat = True
        room3()
    elif bear == False:
        bear = True
        room6()
    elif skeleton == False:
        skeleton = True
        room9()


def question():
    # Asks the player how they want to respond to the current enemy.
    # SLASH is only offered once the sword is in the inventory.
    global enemy
    if enemy == 0:
        # Guard: never start a battle round against a dead enemy.
        enemy = 15
    if "sword" in inventory:
        answer = input("Attack,Dodge or Slash? ")
        if answer.lower() == "attack":
            attack()
        elif answer.lower() == "dodge":
            block()
        elif answer.lower() == "slash":
            slash()
        else:
            # Fix: the sword branch used to fall through silently on a typo,
            # unwinding the battle loop; re-prompt instead, matching the
            # no-sword branch.
            print("Not a valid response")
            question()
    else:
        answer = input("Attack or Dodge? ")
        if answer.lower() == "attack":
            attack()
        elif answer.lower() == "dodge":
            block()
        else:
            print("Not a valid response")
            question()


def enemyattack():
    # Enemy's turn: random miss / critical / normal hit, then hand control
    # back to the battle prompt.  Damage is base damage * the enemy's
    # strength stat.
    global enemy
    global strength
    sleep(1)
    print(" Your enemy is now attacking")
    y = randint(1, 6)
    sleep(1)
    if y == 1:
        print(" Your enemy missed")
    elif y == 2 or y == 3:
        # str() converts the damage formula's output for concatenation
        loseHP(2 * strength, " Critical Damage!You have lost " + str(2 * strength) + "HP")
    else:
        loseHP(1 * strength, " Hit! You lose " + str(1 * strength) + "HP")
    # Fix: always return to the battle prompt; the critical-damage branch
    # used to drop out of the battle loop entirely, ending the game.
    question()


def attack():
    # Controls damage of the move ATTACK: miss / critical (+4) / hit (+2).
    global enemy
    x = randint(1, 8)
    if x == 1:
        sleep(1)
        print("You missed, no damage is done")
        sleep(1)
        print("Your enemy's health is still", enemy, "HP")
        sleep(2)
        enemyattack()
    elif x == 2 or x == 3:
        sleep(1)
        print("Critical hit +4DMG to your enemy")
        enemy = enemy - 4
        print("Your enemy's health is now", enemy, "HP")
        if enemy <= 0:
            sleep(1)
            print("You killed it!")
            pos_check()
        else:
            sleep(1)
            enemyattack()
    else:
        print("You hit! +2DMG to your enemy")
        enemy = enemy - 2
        print("Your enemy's health is now", enemy, "HP")
        if enemy <= 0:
            print("You killed it!")
            pos_check()
        else:
            sleep(1)
            enemyattack()


def block():
    # Controls effectiveness of the move DODGE: 50/50 success.
    blk = randint(1, 4)
    if blk < 3:
        sleep(1)
        print("Successful Dodge! Your enemy couldn't attack you")
        question()
    elif blk >= 3:
        sleep(1)
        print("Dodge Failed!")
        enemyattack()


def slash():
    # Controls effectiveness of the sword move SLASH: miss / critical (+6) /
    # hit (+3).
    global enemy
    x = randint(1, 8)
    if x == 1:
        sleep(1)
        print("You missed, no damage is done")
        sleep(2)
        print("Your enemy's health is still", enemy, "HP")
        enemyattack()
    elif x == 2 or x == 3:
        sleep(1)
        print("Critical hit +6DMG to your enemy")
        enemy = enemy - 6
        sleep(1)
        print("Your enemy's health is now", enemy, "HP")
        if enemy <= 0:
            sleep(1)
            print("You killed it!")
            pos_check()
        else:
            sleep(1)
            enemyattack()
    else:
        sleep(1)
        print("You hit! +3DMG to your enemy")
        enemy = enemy - 3
        print("Your enemy's health is now", enemy, "HP")
        if enemy <= 0:
            sleep(1)
            print("You killed it!")
            pos_check()
        else:
            sleep(1)
            enemyattack()


def room1():
    # First room: entrance to the cave.
    global enemy
    print("------------")
    print("+ The Cave +")
    print("------------")
    print("You have entered a dark and desolate cave, a landslide has blocked the exit")
    sleep(.5)
    print("A.Go forward,who knows what lies ahead?")
    sleep(.5)
    print("B.Try as hard as possible to get out")
    sleep(.5)
    answer = input("What is your decision? ")
    if answer.lower() == "a":
        room2()
    elif answer.lower() == "b":
        # With 10 starting HP this is always fatal.
        loseHP(10, "Resistance is futile,you have wasted your energy,-10HP")
    else:
        print("Not a valid response")
        room1()


def room2():
    # Second room: the rat fight.
    global enemy
    enemy = 10
    print("A creature?")
    sleep(1)
    print("You see a small rat(1 strength) ahead,harmles but it has to be dealt with")
    sleep(2)
    print("      _..----.._    _")
    print("    .'  .--.    '-.(0)_")
    print("    '-.__.-'''=:|  , _)_ \__ . c\'-..")
    print("      '''------'---''---'-")
    sleep(3)
    print("You have 2 options Attack or Dodge")
    sleep(1)
    question()


def room3():
    # Third room: corridor with a trap door and a ladder to the sword.
    print("-----------------------------------------------------------------------")
    if "sword" not in inventory:
        print("You enter a small corridor,Ahead is a trap door and to your right")
        print("Is an old ladder")
        answer = input("Ladder or Door? ")
        if answer.lower() == "ladder":
            ladder1()
        elif answer.lower() == "door":
            room4()
        else:
            print("Not a valid response")
            sleep(4)
            room3()
    else:
        # Returning from the attic with the sword: go straight on.
        sleep(2)
        print("You climb back down the ladder and go through the trap door")
        sleep(2)
        room4()


def ladder1():
    # Optional attic: pick up the sword, which unlocks SLASH.
    global inventory
    print("-------------------------------------------------------------------------")
    print("You find yourself in an attic, nothing is here except a sword")
    answer = input("Do you want to pick it up, yes or no ")
    if answer.lower() == "yes":
        print("You pick up the sword, you have learned SLASH")
        sleep(1)
        print("You strut back to the corridor feeling mightier than ever")
        sleep(1)
        inventory.append("sword")
        room3()
    elif answer.lower() == "no":
        sleep(1)
        print("The sword is left untouched and you retreat back to the corridor")
        room3()
    else:
        print("Not a valid response")
        ladder1()


def room4():
    # Fourth room: healing cloud and the rucksack.
    global health
    global inventory
    global gold
    if "sword" in inventory:
        healthbar()
    else:
        print("-----------------------------------------------------------------------------------")
        print("You have", health, "HP and in your inventory is nothing")
        print("-----------------------------------------------------------------------------------")
    sleep(2)
    print("You fall down but a magical cloud cushions your fall and heals you +10HP")
    health = health + 10
    sleep(2)
    print("You hear crunching under the soles of your feet,the skeletal remains of")
    sleep(3)
    print("past adventurers litter your vision,one has an old leather rucksack with worn straps")
    sleep(3)
    print("Would you like to take the rucksack")
    question1()


def question1():
    # Rucksack prompt; a separate function so an invalid answer re-asks the
    # question rather than re-running room4 (which would add +10HP each time).
    global health
    global inventory
    global gold
    answer = input("Yes or No? ")
    if answer.lower() == "yes":
        print("You have found a ring of healing(+10HP),a rotten apple and 3 gold")
        health = health + 10
        inventory.append("ring of healing(+10HP)")
        inventory.append("a rotten apple")
        gold = gold + 3
        sleep(1)
        print("You continue on your journey,oblivious to how you will get out")
        room5()
    elif answer.lower() == "no":
        print("You can't help but think something may have been in there and continue along your journey")
        sleep(1)
        room5()
    else:
        print("not a valid response")
        sleep(1)
        question1()


def room5():
    # Fifth room: the bear.  A rotten apple bypasses the fight entirely.
    global inventory, strength, enemy, bear
    healthbar()
    sleep(3)
    print("A bear (1.5 strength) walks forward out of the shadows,it looks hungry")
    sleep(2)
    # NOTE(review): original art line breaks were lost; reflowed best-effort.
    print("""     .'''.    ___,,,___
   .'``." : (\ `.'''``` ```'''-' /)
  ; :      \ `./ .'       `. :.'
  /  _ _    \   | 0}   {0 |
  | /     \  |  | /  \  |
  | /       \ | \ | .-. | /
  `. | . . /  \ . . |
   .' `-._\.'.( ).'./_.-' `\'
    `._.' '/'  `. --'-- .'
             `-...-'""")
    if "a rotten apple" in inventory:
        # Having the apple lets the player poison the bear instead of fighting.
        sleep(3)
        print("You throw the rotten apple and luckily it is stupid enough to fall for your trap")
        bear = True
        sleep(3)
        inventory.remove("a rotten apple")  # the apple is consumed
        print("It dies and you are left with 1 less apple")
        room6()
    else:
        enemy = 15
        strength = 1.5
        question()


def room6():
    # Sixth room: interlude after the bear.
    healthbar()
    print("You are shaken after the encounter with the bear,")
    sleep(2)
    print("You waddle around,regretting your decision to stay here")
    sleep(1)
    print("You continue down your path with little to no hope of escaping")
    sleep(2)
    print("SHRIEK")
    sleep(1)
    print("*shudder*")
    room7()


def room7():
    # Seventh room: the man on the floor; befriending him changes room8.
    global inventory
    global health
    healthbar()
    print("You see a man lying on the floor")
    sleep(2)
    print("the light is flickering too much to see his face but you faintly here deathly groans")
    sleep(2)
    answer = input("Do you want to approach the figure? Yes or No ")
    if answer.lower() == "yes":
        print("The man welcomes you and is grateful for your kindness")
        inventory.append("a friend")
        sleep(2)
        room8()
    elif answer.lower() == "no":
        print("You leave the man where he lies")
        sleep(2)
        room8()
    else:
        print("Not a Valid Response")
        sleep(2)
        room7()


def room8():
    # Eighth room: the skeleton ambush.  Having "a friend" avoids the
    # surprise-attack damage.
    global enemy
    global strength
    global health
    healthbar()
    print("You have been travelling and feel worn out")
    sleep(2)
    print("You begin")
    sleep(2)
    print("to feel")
    sleep(2)
    print("Tired")
    sleep(3)
    print("ZzZzZzZzZzZzZzZzZzZ")
    sleep(4)
    enemy = 20
    if "a friend" in inventory:
        print("You are awoken by your friend")
        sleep(2)
        print("He screams at you and at the corner of your eye you see")
        sleep(2)
        print("a large skeleton with a sword in his hand")
        sleep(2)
        question()
    else:
        sleep(2)
        loseHP(3, "You are caught off guard by a sword wielding skeleton and lose 3HP")
        sleep(2)
        question()


def room9():
    # Ninth room: the useless map, leading to the final choice.
    sleep(1)
    sleep(3)
    print("You stumble forward,barely able to walk")
    sleep(2)
    print("You find a map")
    sleep(2)
    print("It reads...")
    sleep(2)
    print("YOU ARE LOST")
    sleep(2)
    print("It doesn't help you")
    sleep(2)
    endchoice()


def endchoice():
    # The player's final choice: gold (bad ending) or escape (good ending).
    healthbar()
    sleep(1)
    print("You see a light up ahead, a golden glow shines below you,but you see")
    sleep(2)
    print("sunlight above you")
    sleep(1)
    print("It could be your only way out but enough gold to last a lifetime lies below")
    sleep(3)
    print("It's your choice")
    sleep(1)
    print("A.Go lay claim to the riches that lie below")
    sleep(1)
    print("B.Leave the gold and escape back to your miserable life")
    answer = input("It's your choice ")
    if answer.lower() == "a":
        print("You frolick in your fortune and lay claim to millions")
        sleep(1)
        print("Congratulations! You now are a millionaire")
        sleep(3)
        print("However")
        sleep(2)
        print("You Grap the gold,filling your pockets when you trip over")
        sleep(1)
        print("an old lever on the floor, everything seems OK unitil you")
        sleep(1)
        print("see the entrance blocked,you are trapped")
        sleep(1)
        print("But hey, at least you've got all that gold")
        sleep(1)
        badending()
    elif answer.lower() == "b":
        print("You escape, the cave crashes down behind you")
        sleep(2)
        print("Thankfully you got out of there in time")
        sleep(2)
        print("You leave the cave not entirely sure how you got there")
        sleep(2)
        print("or what happpened")
        goodending()
    else:
        print("Not a Valid Respnse")
        endchoice()


def goodending():
    # Mountain-sunrise art for the escape ending.
    print("                                                  _")
    print("                    .-.    /  \\                  _")
    print("       ^^          /   \\  /^./\\__             _/  \\")
    print("  _    .--'\\/\\_   \\__/.  \\      /  \\  ^^      ___")
    print(" / \\_   _/ ^     \\/  __  :'   /\\/\\  /\\  __/  \\")
    print("/    \\ /    .'   _/  /  \\  ^ /  \\/  \\/ .`'\\_/\\")
    print("/\\/\\  /\\/ :' __  ^/  ^/    `--./.'  ^  `-.\\ _  _:\\ _")
    print("/    \\/  \\  _/  \\-' __/.' ^ _   \\_   .'\\  _/ \\ .  __/ \\")
    print("/\\  .-   `. \\/     \\ / -.   _/ \\ -. `_/   \\ /    `._/  ^  \\")
    print("/ `-.__ ^   / .-'.--'    . /    `--./ .-'  `-.  `-. `.  -  `.")
    print("@/        `.  / /      `-.   /  .-'   / .   .'   \\    \\  \\  .-  \\%")
    print("@(88%@)@%% @)&@&(88&@.-_=_-=_-=_-=_-=_.8@% &@&&8(8%@%8)(8@%8 8%@)%")
    print("@88:::&(&8&&8::JGS:&`.~-_~~-~~_~-~_~-~~=.'@(&%::::%@8&8)::&#@8::::")
    print("`::::::8%@@%:::::@%&8:`.=~~-.~~-.~~=..~'8::::::::&@8:::::&8::::::'")
    print(" `::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::'")
    credit()


def badending():
    # Skull art for the trapped-with-the-gold ending.
    print("                         ;   ,")
    sleep(.5)
    print("                      ,;   '.")
    sleep(.5)
    print("                     ;:      :;")
    sleep(.5)
    print("                    ::        ::")
    sleep(.5)
    print("                    ::        ::")
    sleep(.5)
    print("                    ':        :")
    sleep(.5)
    print("                     :.       :")
    sleep(.5)
    print("                  ;' ::       ::  '")
    sleep(.5)
    print("                 .'   ';     ;'   '.")
    sleep(.5)
    print("                ::     :;   ;:     ::")
    sleep(.5)
    print("                ;       :;. ,;:     ::")
    sleep(.5)
    print("                :;       :;: ,;'    ::")
    sleep(.5)
    print("                ::.        ':;  ..,.;  ;:'   ,.;:")
    sleep(.5)
    print("                 '''...      '::,::::: ;:   .;.;'''")
    sleep(.5)
    print("                     ''''....    '''':::::;,;.;'''")
    sleep(.5)
    print("               .:::.....''':::::::'',...;::::;.")
    sleep(.5)
    print("              ;:'  '''''';.,;:::::;.'''''''  ':;")
    sleep(.5)
    print("             ::'          ;::;:::;::..         :;")
    sleep(.5)
    print("            ::         ,;:::::::::::;:..       ::")
    sleep(.5)
    print("            ;'     ,;;:;::::::::::::::;';..    ':.")
    sleep(.5)
    print("           ::     ;:   ::::::''''::::::  ':    ::")
    sleep(.5)
    print("            :.    ::   ::::::;    :::::::  :   ;")
    sleep(.5)
    print("             ;    ::   :::::::    :::::::  :   ;")
    sleep(.5)
    print("              '   ::   ::::::....::::::'  ,:  '")
    sleep(.5)
    print("               '  ::    :::::::::::::'    ::")
    sleep(.5)
    print("                  ::     ':::::::::''     ::")
    sleep(.5)
    print("                  ':       ''''''''       ::")
    sleep(.5)
    print("                   ::                     ;:")
    sleep(.5)
    print("                   ':;                   ;:'")
    sleep(.5)
    print("                     ';                 ,;'")
    sleep(.5)
    print("                       ''             ''")
    sleep(.5)
    print("                         '           '")
    sleep(2)
    gameover()


def gameover():
    # "Game Over" banner, then the credits.
    print(" ________                        ________")
    sleep(.5)
    print(" / _____/_____    _____   ____   \\_____  \\___  __ ___________")
    sleep(.5)
    print("/   \\  ___\\__  \\  /     \\_/ __ \\   /   |   \\  \\/ // __ \\_  __ \\ ")
    sleep(.5)
    print("\\    \\_\\  \\/ __ \\|  Y Y  \\  ___/  /    |    \\   /\\  ___/|  | \\/")
    sleep(.5)
    print(" \\______  (____  /__|_|  /\\___  > \\_______  /\\_/  \\___  >__|")
    sleep(.5)
    print("        \\/     \\/      \\/     \\/          \\/          \\/")
    sleep(3)
    credit()


def credit():
    # Credits roll; quit() here terminates the whole recursive call chain.
    print("Credits:")
    sleep(2)
    print("ASCII Art - ascii.com")
    sleep(1)
    print("Programming - Luke B")
    sleep(1)
    print("ASCII Text - Luke B")
    print("---------------------")
    print("       TESTERS       ")
    print("+Isaac       +Calum")
    print("+Matt A")
    print("---------------------")
    sleep(2)
    print(" |   |           |                  ,-            |         o")
    print(" |-  |-. ,-: ;-. | , . . ,-. .  .   |  ,-. ;-. ;-. | ,-: . . . ;-. ,-:")
    print(" |   | | | | | | |<  | | | | |  |   |- | | |   | | | | | | | | | | | |")
    print(" `-' ' ' `-` ' ' ' ` `-| `-' `-` |  |  `-' '   |-' ' `-` `-| ' ' ' `-|")
    print("                     `-'        -'  '          '          `-'      `-'")
    sleep(10)
    quit()


# Leave this at the bottom - it makes room1 run automatically when you
# run your code.
if __name__ == "__main__":
    room1()
{ "repo_name": "CodeLuke/quiz_adventure", "path": "adventure.py", "copies": "1", "size": "20085", "license": "mit", "hash": -2896831781872262700, "line_mean": 28.2941176471, "line_max": 127, "alpha_frac": 0.4016928056, "autogenerated": false, "ratio": 3.6738613499176878, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.45755541555176876, "avg_score": null, "num_lines": null }
"""An Agenda is a list-like container of Appt (appointment). Author: Megan McMillan for CIS 399, U. Oregon Each Appt has a date, a start time, an end time, and a textual description. They can be converted to and from strings, using the from_string class method and the __str__ method. An Agenda can be read from a file using the from_file class method. Intersecting Agendas produces a new Agenda whose Appts are periods that are in the overlap of Appts in the first and second Agenda. """ # Date handling import arrow # Replacement for datetime, based on moment.js import datetime # But we still need time from dateutil import tz # For interpreting local times import unit_tests class Appt: """ A single appointment, starting on a particular date and time, and ending at a later time the same day. """ def __init__(self, begin, end): """Create an appointment on date from begin time to end time. Arguments: begin: An arrow datetime object. When the appointment starts. end: An arrow datetime object, after begin. When the appointments ends. 
desc: A string describing the appointment Raises: ValueError if appointment ends before it begins """ self.begin = begin self.end = end if begin > end : raise ValueError("Appointment end must be after begin") return @classmethod def from_string(cls, txt): """Factory parses a string to create an Appt""" fields = txt.split() if len(fields) != 3: raise ValueError("Appt literal must start with date, time, time, separated by blanks") appt_date_text = fields[0] appt_begin_text = fields[1] appt_end_text = fields[2] fields = appt_date_text.split("-") try: year = int(fields[0].strip()) month = int(fields[1].strip()) day = int(fields[2].strip()) except: raise ValueError("Date in Appt literal should be 9999-99-99 (Year-Month-Day)") new_begin = "{}-{}-{} {}".format(year, month, day, appt_begin_text) new_end = "{}-{}-{} {}".format(year, month, day, appt_end_text) begin = arrow.get(new_begin, 'YYYY-M-D HH:mm') end = arrow.get(new_end, 'YYYY-M-D HH:mm') result = Appt(begin, end) return result def __lt__(self, other): """Does this appointment finish before other begins? Arguments: other: another Appt Returns: True iff this Appt is done by the time other begins. """ return self.end <= other.begin def __gt__(self, other): """Does other appointment finish before this begins? Arguments: other: another Appt Returns: True iff other is done by the time this Appt begins """ return other.end < self.begin def overlaps(self, other): """Is there a non-zero overlap between this appointment and the other appointment? Arguments: other is an Appt Returns: True iff there exists some duration (greater than zero) between this Appt and other. """ return ((self.begin < other.end) & (other.begin < self.end)) def intersect(self, other): """Return an appointment representing the period in common between this appointment and another. Requires self.overlaps(other). Arguments: other: Another Appt Returns: An appointment representing the time period in common between self and other. 
""" assert(self.overlaps(other)) # We know the day must be the same. # Find overlap of times: # Later of two begin times, earlier of two end times begin_time = max(self.begin.time(), other.begin.time()) end_time = min(self.end.time(), other.end.time()) new_begin = "{} {}".format(self.begin.date(), begin_time) new_end = "{} {}".format(self.begin.date(), end_time) begin = arrow.get(new_begin, 'YYYY-MM-DD HH:mm') end = arrow.get(new_end, 'YYYY-MM-DD HH:mm') result = Appt(begin, end) return result def union(self, other): """Return an appointment representing the combined period in common between this appointment and another. Requires self.overlaps(other). Arguments: other: Another Appt Returns: An appointment representing the time period spanning both self and other. """ assert(self.overlaps(other)) # We know the day must be the same. # Find overlap of times: # Earlier of two begin times, later of two end times begin = min(self.begin, other.begin) end = max(self.end, other.end) begin = arrow.get(begin) end = arrow.get(end) return Appt(begin, end) def __str__(self): """String representation of appointment. Example: 2012.10.31 13:00 13:50 This format is designed to be easily divided into parts: split on whitespace, then split date on '.' and times on ':'. """ begstr = self.begin.strftime("%Y-%m-%d %H:%M ") endstr = self.end.strftime("%H:%M") return begstr + endstr class Agenda: """An Agenda is essentially a list of appointments, with some agenda-specific methods. """ def __init__(self): """An empty agenda.""" self.appts = [ ] @classmethod def from_file(cls, f): """Factory: Read an agenda from a file. Arguments: f: A file object (as returned by io.open) or an object that emulates a file (like stringio). 
returns: An Agenda object """ agenda = cls() for line in f: line = line.strip() if line == "" or line.startswith("#"): # Skip blank lines and comments pass else: try: agenda.append(Appt.from_string(line)) except ValueError as err: print("Failed on line: ", line) print(err) return agenda def append(self,appt): """Add an Appt to the agenda.""" self.appts.append(appt) def intersect(self,other): """Return a new agenda containing appointments that are overlaps between appointments in this agenda and appointments in the other agenda. Arguments: other: Another Agenda, to be intersected with this one """ result = Agenda() for thisappt in self.appts: for otherappt in other.appts: if thisappt.overlaps(otherappt): result.append(thisappt.intersect(otherappt)) return result def normalize(self): """Merge overlapping events in an agenda. For example, if the first appointment is from 1pm to 3pm, and the second is from 2pm to 4pm, these two are merged into an appt from 1pm to 4pm, with a combination description. After normalize, the agenda is in order by date and time, with no overlapping appointments. """ if len(self.appts) == 0: return ordering = lambda ap: ap.begin self.appts.sort(key=ordering) normalized = [ ] # print("Starting normalization") cur = self.appts[0] for appt in self.appts[1:]: if cur.overlaps(appt): # Overlapping # print("Merging ", cur, "\n"+ # "with ", appt) cur = cur.union(appt) # print("New cur: ", cur) else: # Not overlapping # print("Gap - emitting ", cur) normalized.append(cur) cur = appt # print("Last appt: ", cur) normalized.append(cur) self.appts = normalized def normalized(self): """ A non-destructive normalize (like "sorted(l)" vs "l.sort()"). Returns a normalized copy of this agenda. """ copy = Agenda() copy.appts = self.appts copy.normalize() return copy def complement(self, freeblock): """Produce the complement of an agenda within the span of a timeblock represented by an appointment. 
For example, if this agenda is a set of appointments, produce a new agenda of the times *not* in appointments in a given time period. Args: freeblock: Looking for time blocks in this period that are not conflicting with appointments in this agenda. Returns: A new agenda containing exactly the times that are within the period of freeblock and not within appointments in this agenda. The description of the resulting appointments comes from freeblock.desc. """ copy = self.normalized() comp = Agenda() cur_time = freeblock.begin for appt in copy.appts: if appt < freeblock: continue if appt > freeblock: if cur_time < freeblock.end: comp.append(Appt(cur_time,freeblock.end)) cur_time = freeblock.end break if cur_time < appt.begin: # print("Creating free time from", cur_time, "to", appt.begin) comp.append(Appt(cur_time, appt.begin)) cur_time = max(appt.end,cur_time) if cur_time < freeblock.end: # print("Creating final free time from", cur_time, "to", freeblock.end) comp.append(Appt(cur_time, freeblock.end)) return comp def __len__(self): """Number of appointments, callable as built-in len() function""" return len(self.appts) def __iter__(self): """An iterator through the appointments in this agenda.""" return self.appts.__iter__() def __str__(self): """String representation of a whole agenda""" rep = "" for appt in self.appts: rep += str(appt) + "\n" return rep[:-1] def __eq__(self,other): """Equality, ignoring descriptions --- just equal blocks of time""" if len(self.appts) != len(other.appts): return False for i in range(len(self.appts)): mine = self.appts[i] theirs = other.appts[i] if not (mine.begin == theirs.begin and mine.end == theirs.end): return False return True
{ "repo_name": "m364nm/final-project-cis399", "path": "agenda.py", "copies": "1", "size": "10817", "license": "artistic-2.0", "hash": -4411812768681411000, "line_mean": 31.8784194529, "line_max": 98, "alpha_frac": 0.567994823, "autogenerated": false, "ratio": 4.142857142857143, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5210851965857143, "avg_score": null, "num_lines": null }
"""An Agenda is a list-like container of Appt (appointment). """ # Date handling import arrow # Replacement for datetime, based on moment.js class Appt: """ A single appointment, starting on a particular date and time, and ending at a later time the same day. """ def __init__(self, begin, end, desc): """Create an appointment on date from begin time to end time. Arguments: begin: A arrow object. When the appointment starts. end: A arrow object object, after begin. When the appointments ends. desc: A string describing the appointment Raises: ValueError if appointment ends before it begins Example: Appt( arrow obj: 03/01/2016 12:30pm arrow obj: 03/03/2016 1230pm desc: classes ) """ self.begin = begin self.end = end if begin >= end : raise ValueError("Appointment end must be after begin " + begin.format('MM/DD/YYYY h:mm') + " " + end.format('MM/DD/YYYY h:mm')) self.desc = desc return @classmethod def from_string(cls, txt): """Factory parses a string to create an Appt""" fields = txt.split("|") if len(fields) != 2: raise ValueError("Appt literal requires exactly one '|' before description") time = fields[0].strip() desc = fields[1].strip() fields = time.split(" to ") start = fields[0] finish = fields[1] begin = arrow.get(start, 'MM-DD-YYYY h:mm A') end = arrow.get(finish, 'MM-DD-YYYY h:mm A') result = Appt(begin, end, desc) return result @classmethod def from_dict(cls, dict): begin = arrow.get(dict["start"], "MM/DD/YYYY h:mm A") end = arrow.get(dict["end"], "MM/DD/YYYY h:mm A") desc = dict["desc"] return Appt(begin, end, desc) def convert_dict(self): """ Converts an appt to a dict so we can pass back to the web page :return: temp: a dict version of the appointment """ temp = { "start": self.begin.isoformat(), "end": self.end.isoformat(), "desc": self.desc } return temp def __lt__(self, other): """Does this appointment finish before other begins? Arguments: other: another Appt Returns: True iff this Appt is done by the time other begins. 
""" return self.end <= other.begin def __gt__(self, other): """Does other appointment finish before this begins? Arguments: other: another Appt Returns: True iff other is done by the time this Appt begins """ return other < self def overlaps(self, other): """Is there a non-zero overlap between this appointment and the other appointment? Arguments: other is an Appt Returns: True iff there exists some duration (greater than zero) between this Appt and other. """ return not (self < other or other < self) def intersect(self, other, desc=""): """Return an appointment representing the period in common between this appointment and another. Requires self.overlaps(other). Arguments: other: Another Appt desc: (optional) description text for this appointment. Returns: An appointment representing the time period in common between self and other. Description of returned Appt is copied from this (self), unless a non-null string is provided as desc. """ if desc=="": desc = self.desc assert(self.overlaps(other)) # We know the day must be the same. # Find overlap of times: # Later of two begin times, earlier of two end times begin = max(self.begin, other.begin) end = min(self.end, other.end) return Appt(begin, end, desc) def union(self, other, desc=""): """Return an appointment representing the combined period in common between this appointment and another. Requires self.overlaps(other). Arguments: other: Another Appt desc: (optional) description text for this appointment. Returns: An appointment representing the time period spanning both self and other. Description of returned Appt is concatenation of two unless a non-null string is provided as desc. """ if desc=="": desc = self.desc + " " + other.desc assert(self.overlaps(other)) # We know the day must be the same. 
# Find overlap of times: # Earlier of two begin times, later of two end times begin = min(self.begin, other.begin) end = max(self.end, other.end) return Appt(begin, end, desc) def __str__(self): """String representation of appointment. Example: 2012.10.31 13:00 13:50 | CIS 210 lecture This format is designed to be easily divided into parts: Split on '|', then split on whitespace, then split date on '.' and times on ':'. """ begstr = self.begin.format("MM/DD/YYYY h:mm A") endstr = self.end.strftime("MM/DD/YYYY h:mm A") return begstr + " to " + endstr + "| " + self.desc class Agenda: """An Agenda is essentially a list of appointments, with some agenda-specific methods. """ def __init__(self): """An empty agenda.""" self.appts = [ ] @classmethod def from_file(cls, f): """Factory: Read an agenda from a file. Arguments: f: A file object (as returned by io.open) or an object that emulates a file (like stringio). returns: An Agenda object """ agenda = cls() for line in f: line = line.strip() if line == "" or line.startswith("#"): # Skip blank lines and comments pass else: try: agenda.append(Appt.from_string(line)) except ValueError as err: print("Failed on line: ", line) print(err) return agenda @classmethod def from_dict(cls, dict): agenda = cls() for appt in dict: agenda.append(Appt.from_dict(appt)) return agenda def list_convert(self): """ Converts the agenda to a list that can be past around using json :return: list: a list object """ list = [] for appt in self: temp = appt.convert_dict() list.append(temp) return list def append(self,appt): """Add an Appt to the agenda.""" self.appts.append(appt) def intersect(self,other,desc=""): """Return a new agenda containing appointments that are overlaps between appointments in this agenda and appointments in the other agenda. Titles of appointments in the resulting agenda are taken from this agenda, unless they are overridden with the "desc" argument. 
Arguments: other: Another Agenda, to be intersected with this one desc: If provided, this string becomes the title of all the appointments in the result. """ default_desc = (desc == "") result = Agenda() for thisappt in self.appts: if default_desc: desc = thisappt.desc for otherappt in other.appts: if thisappt.overlaps(otherappt): result.append(thisappt.intersect(otherappt,desc)) return result def normalize(self): """Merge overlapping events in an agenda. For example, if the first appointment is from 1pm to 3pm, and the second is from 2pm to 4pm, these two are merged into an appt from 1pm to 4pm, with a combination description. After normalize, the agenda is in order by date and time, with no overlapping appointments. """ if len(self.appts) == 0: return ordering = lambda ap: ap.begin self.appts.sort(key=ordering) normalized = [ ] # print("Starting normalization") cur = self.appts[0] for appt in self.appts[1:]: if appt > cur: # Not overlapping # print("Gap - emitting ", cur) normalized.append(cur) cur = appt else: # Overlapping # print("Merging ", cur, "\n"+ # "with ", appt) cur = cur.union(appt) # print("New cur: ", cur) # print("Last appt: ", cur) normalized.append(cur) self.appts = normalized def normalized(self): """ A non-destructive normalize (like "sorted(l)" vs "l.sort()"). Returns a normalized copy of this agenda. """ copy = Agenda() copy.appts = self.appts copy.normalize() return copy def complement(self, freeblock): """Produce the complement of an agenda within the span of a timeblock represented by an appointment. For example, if this agenda is a set of appointments, produce a new agenda of the times *not* in appointments in a given time period. Args: freeblock: Looking for time blocks in this period that are not conflicting with appointments in this agenda. Returns: A new agenda containing exactly the times that are within the period of freeblock and not within appointments in this agenda. 
The description of the resulting appointments comes from freeblock.desc. """ copy = self.normalized() comp = Agenda() desc = freeblock.desc cur_time = freeblock.begin for appt in copy.appts: if appt < freeblock: continue if appt > freeblock: if cur_time < freeblock.end: comp.append(Appt(cur_time,freeblock.end, desc)) cur_time = freeblock.end break if cur_time < appt.begin: # print("Creating free time from", cur_time, "to", appt.begin) comp.append(Appt(cur_time, appt.begin, desc)) cur_time = max(appt.end,cur_time) if cur_time < freeblock.end: # print("Creating final free time from", cur_time, "to", freeblock.end) comp.append(Appt(cur_time, freeblock.end, desc)) return comp def __len__(self): """Number of appointments, callable as built-in len() function""" return len(self.appts) def __iter__(self): """An iterator through the appointments in this agenda.""" return self.appts.__iter__() def __str__(self): """String representation of a whole agenda""" rep = "" for appt in self.appts: rep += str(appt) + "\n" return rep[:-1] def __eq__(self, other): """Equality, ignoring descriptions --- just equal blocks of time""" if len(self.appts) != len(other.appts): return False for i in range(len(self.appts)): mine = self.appts[i] theirs = other.appts[i] if not (mine.begin == theirs.begin and mine.end == theirs.end): return False return True
{ "repo_name": "Iseis/meetMe", "path": "agenda.py", "copies": "1", "size": "11848", "license": "artistic-2.0", "hash": 7486518665533952000, "line_mean": 30.5106382979, "line_max": 140, "alpha_frac": 0.5535111411, "autogenerated": false, "ratio": 4.235967107615302, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00831980175776613, "num_lines": 376 }
""" An agent does actions in a game. """ import sys class Base: """ Agent interface. """ def optional_enemy(self, visible, enemy): """ Returns boolean """ raise NotImplementedError() def select_enemy(self, visible, enemies): """ Returns index of selected enemy. """ raise NotImplementedError() def select_card(self, visible, cards): """ Returns index of selected card. """ raise NotImplementedError() def battle(self, visible, enemy): """ Generates battle actions. """ raise NotImplementedError() class File(Base): """ An agent by given input file and output file. """ def __init__(self, in_file, out_file): super().__init__() self._i_file = in_file self._o_file = out_file def _input(self): """ Get trimmed input line. """ line = self._i_file.readline() return line.strip() if line is not None else None def _print(self, string): """ Print string in a line. """ self._o_file.write("{}\n".format(string)) def optional_enemy(self, visible, enemy): self._print("Optional enemy [Y/N]:") # TODO Print enemy information. string = self._input() if string == "Y": return True elif string == "N": return False else: self._print("Not a valid option.") return None def _select(self, visible, items): """ Select an item. """ # TODO Print item information. 
try: idx = int(self._input()) items[idx] return idx except ValueError: self._print("Not a valid index.") return None except IndexError: self._print("Index out of range.") return None def select_enemy(self, visible, enemies): for idx, enemy in enumerate(enemies): self._print("[{}] {}".format(idx, enemy)) self._print("Select enemy <index>:") return self._select(visible, enemies) def select_card(self, visible, cards): self._print("Select card <index>:") return self._select(visible, cards) def battle(self, visible, enemy): while True: self._print("Select action (draw/use/end):") action = self._input() if action == "draw": self._print("Draw a card, not yet implemented.") yield None elif action == "use": idx = self.select_card(visible, visible.cards) visible.cards[idx].use() self._print("Use a card.") elif action == "end": self._print("End the battle.") self._print("life = {}".format(visible.life)) break else: self._print("Invalid action \"{}\"".format(action)) def console(): return File(sys.stdin, sys.stdout)
{ "repo_name": "cwahbong/tgif-py", "path": "tgif/agent.py", "copies": "1", "size": "3029", "license": "mit", "hash": 971841065123777500, "line_mean": 27.5754716981, "line_max": 67, "alpha_frac": 0.5216242984, "autogenerated": false, "ratio": 4.29645390070922, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0014150943396226414, "num_lines": 106 }
"""An agent is responbible for making a move decision based on a board. Players will make two moves (unless it is the first turn, in which case only one move is allowed) This module will have support for getting all possible moves (or ranges for all types of moves as there are many similar moves of the same type such as placing buildings or stables). A agent only needs to be the following function: make_decision(board, player_index, players, tile_supply, num_moves) - board is the game board - player_index is the index of the current player - current is the player making the move (index) - players are the players in the game - tile_supply the game's tile supply - num_moves is the number of moves the agent can make This function must return a list of moves that the player makes.""" import Move import Board import Market import Player import Tower import Building import Tile import Location import GameConstants TEA_COLOR = GameConstants.BUILDINGS_COLORS[1] def get_all_possible_moves(player, board): """Gets all the differnt VALID moves that a player can be made in a board. 
If there are no valid moves, a move of NONE_POSSIBLE will be returned.""" possible = [] for color in GameConstants.BUILDINGS_COLORS: for loc in Board.get_building_piece_locations(board, color): if(Player.get_held_buildings_of_color(player, color) > 0): possible.append(Move.make_move(Player.get_player_name(player), Move.NORMAL, Move.BUILDING, loc, color)) building = Board.get_active_building(board, color) has_claimed = False for building in Board.get_buildings_by_color(board, color): if Building.get_owner(building) == Player.get_player_name(player): has_claimed = True if not has_claimed and Board.get_active_building(board, color) != None and \ not Building.has_owner(Board.get_active_building(board, color)): for loc in Building.get_building_locations(building): possible.append(Move.make_move(Player.get_player_name(player), Move.NORMAL, Move.ROOFTOP, loc, color)) for loc in Board.get_stable_piece_location(board): if(Player.get_num_stables(player) > 0): possible.append(Move.make_move(Player.get_player_name(player), Move.NORMAL, Move.STABLE, loc, color)) for loc in Board.get_merchant_place_locations(board): if(Player.get_held_merchants(player) > 0): possible.append(Move.make_move(Player.get_player_name(player), Move.NORMAL, Move.MERCHANT, loc)) for loc in Tower.get_possible_wall_additions(Board.get_towers(board)): if(Player.get_held_walls(player) > 0): possible.append(Move.make_move(Player.get_player_name(player), Move.NORMAL, Move.WALL, loc)) has_tea = False for tile in Player.get_tiles(player): if Tile.get_tile_type(tile) == Tile.TEA_TILE: has_tea = True if has_tea: possible.append(Move.make_move(Player.get_player_name(player), Move.PASS)) if not possible: possible.append(Move.make_move(Player.get_player_name(player), Move.NONE_POSSIBLE)) return possible def can_make_move(board, player): """Checks if a player can make a move""" for color in GameConstants.BUILDINGS_COLORS: if len(Board.get_building_piece_locations(board, color)): 
if(Player.get_held_buildings_of_color(player, color) > 0): return True building = Board.get_active_building(board, color) has_claimed = False for building in Board.get_buildings_by_color(board, color): if Building.get_owner(building) == Player.get_player_name(player): has_claimed = True if not has_claimed and building != None: return True if len(Board.get_stable_piece_location(board)): if(Player.get_num_stables(player) > 0): return True if len(Board.get_merchant_place_locations(board)): if(Player.get_held_merchants(player) > 0): return True if len(Tower.get_possible_wall_additions(Board.get_towers(board))): if(Player.get_held_walls(player) > 0): return True if len(Player.get_tiles_of_type(player, Tile.TEA_TILE)): return True return False def is_valid_move(move, board, player): """Checks if a move is valid for a given player""" move_type = Move.get_move_type(move) if move_type == Move.NONE_POSSIBLE: return not can_make_move(board, player) if move_type == Move.PASS: return len(Player.get_tiles_of_type(player, Tile.TEA_TILE)) > 0 else: piece = Move.get_piece(move) move_loc = Move.get_location(move) if piece == Move.BUILDING: move_color = Move.get_move_color(move) locs = list(Board.get_building_piece_locations(board, move_color)) locs.sort() return Player.get_held_buildings_of_color(player, move_color) > 0 and \ move_loc in Board.get_building_piece_locations(board, move_color) elif piece == Move.STABLE: return Player.get_num_stables(player) > 0 and \ move_loc in Board.get_stable_piece_location(board) elif piece == Move.MERCHANT: return Player.get_held_merchants(player) > 0 and \ move_loc in Board.get_merchant_place_locations(board) elif piece == Move.ROOFTOP: if Player.get_held_rooftops(player) <= 0: return False move_color = Move.get_move_color(move) claimed_building = Board.get_active_building(board, move_color) if claimed_building == None: return False if move_loc not in Building.get_building_locations(claimed_building): return False for building in 
Board.get_buildings_by_color(board, move_color): if Building.get_owner(building) == Player.get_player_name(player): return False return True elif piece == Move.WALL: return Player.get_held_walls(player) > 0 and move_loc in Tower.get_possible_wall_additions(Board.get_towers(board)) return False def apply_move(move, board, tile_supply, player_index, players): """Applys a given move to a board and returns a new board with the move applyed to it. The original board remains unchanged.""" def get_tile_from_supply(tile_supply, tile_type, value=0): """Gets a tile from the supply""" for i in range(len(tile_supply)): tile = tile_supply[i] if Tile.get_tile_value(tile) == value and Tile.get_tile_type(tile) == tile_type: return tile_supply.pop(i) return None def get_tile_from_all(players, tile_type, value=0): """Gets a tile from any of the players, returns the first tile found""" for other in players: taken = Player.take_tile(other, tile_type, value) if taken != None: return taken return None def get_tile_from_others(players, player_index, tile_type, value=0): """Gets a tile from other players, player_index is the current player""" for other in players: if other != players[player_index]: taken = Player.take_tile(other, tile_type, value) if taken != None: return taken return None def get_buildings_adjacent_to_tower(towers, num, board): """Gets all the buildings adjacent to the tower's walls""" walls = set(Tower.get_wall_locations_for_tower(towers, num - 1)) def is_building_adj(building): """Checks if a building is adjacent to the tower""" for loc in Building.get_building_stable_orthogonal(building): if loc in walls: return True return False adj = [] for building in Board.get_buildings(board): if is_building_adj(building): adj.append(building) return adj def get_player_with_name(players, name): """Gets a player with a given name and returns none if there are no players with the given name.""" for player in players: if Player.get_player_name(player) == name: return player return 
None def get_adj_towers_to_building(towers, building): """Gets all the towers adjacent to the building. Returns the tower numbers of the adjacent towers as a list.""" building_adj = set(Building.get_building_stable_orthogonal(building)) def is_adj_to_tower(num): """Checks if a given building is adjacent to the tower of number tower_num from towers""" tower_adj = Tower.get_wall_locations_for_tower(towers, num - 1) for wall in tower_adj: if wall in building_adj: return True return False connected = [] for num in range(1, 5): if is_adj_to_tower(num): connected.append(num) return connected board = Board.clone_board(board) tile_supply = [Tile.clone_tile(tile) for tile in tile_supply] players = [Player.clone_player(player) for player in players] player = players[player_index] if Move.get_move_type(move) == Move.NONE_POSSIBLE: pass elif Move.get_move_type(move) == Move.PASS: Player.lose_tile(player, Tile.TEA_TILE, 0) else: piece = Move.get_piece(move) loc = Move.get_location(move) if piece == Move.BUILDING: color = Move.get_move_color(move) Player.play_building(player, color) if Board.get_active_building(board, color) == None: Board.start_new_building(board, loc, color) else: Building.attach_building_locations(Board.get_active_building(board, color), loc) elif piece == Move.STABLE: Player.play_stable(player) for building in Board.get_buildings(board): if loc in Building.get_building_peice_attach(building): before_adj = get_adj_towers_to_building(Board.get_towers(board), building) Building.attach_stable_location(building, loc) after_adj = get_adj_towers_to_building(Board.get_towers(board), building) new_towers = [] for t in after_adj: if t not in before_adj: new_towers.append(t) if Building.has_owner(building): new_owner = Building.get_owner(building) for num in set(new_towers): #we have a new owner tile = get_tile_from_supply(tile_supply, Tile.TOWER_TILE, num) if tile == None: tile = get_tile_from_all(players, Tile.TOWER_TILE, num) if new_owner == Building.NEUTRAL_OWNER: 
tile_supply.append(tile) else: new_owner = get_player_with_name(players, new_owner) Player.give_tile(new_owner, tile) elif piece == Move.MERCHANT: Player.play_merchant(player) Market.add_merchant_to_market(Board.get_market(board), loc) elif piece == Move.ROOFTOP: color = Move.get_move_color(move) Player.play_rooftop(player) claimed_building = Board.get_active_building(board, color) Building.assign_owner(claimed_building, Player.get_player_name(player), Player.get_player_color(player), loc) Board.get_buildings_by_color(board, color) claimed = [] size = len(Building.get_building_locations(claimed_building)) is_largest = True for building in Board.get_buildings_by_color(board, color): if Building.has_owner(building) and Building.get_owner(building) != Building.NEUTRAL_OWNER: claimed.append(Building.get_owner(building)) if claimed_building != building and len(Building.get_building_locations(building)) >= size: is_largest = False if len(claimed) == len(players): for player in players: Player.remove_all_buildings_of_color(player, color) if is_largest: from_supply = get_tile_from_supply(tile_supply, Tile.PALACE_TILE, Tile.PALACE_VALUES[color]) if from_supply != None: Player.give_tile(player, from_supply) else: from_others = get_tile_from_others(players, player_index, Tile.PALACE_TILE, Tile.PALACE_VALUES[color]) Player.give_tile(player, from_others) if color == TEA_COLOR: num = len(Board.get_buildings_by_color(board, TEA_COLOR)) for i in range(4 - num): Player.give_tile(player, get_tile_from_supply(tile_supply, Tile.TEA_TILE)) adj = set(get_adj_towers_to_building(Board.get_towers(board), claimed_building)) for tower_num in adj: tile = get_tile_from_supply(tile_supply, Tile.TOWER_TILE, tower_num) if tile == None: tile = get_tile_from_all(players, Tile.TOWER_TILE, tower_num) Player.give_tile(player, tile) elif piece == Move.WALL: towers = Board.get_towers(board) Player.play_wall(player) for num in range(1, 5): tower = Tower.get_tower(towers, num) added = False before_adj = 
set() if loc == (Tower.get_tower_addition_c(towers, num)): before_adj = get_buildings_adjacent_to_tower(Board.get_towers(board), num, board) Tower.add_tower_c(tower) added = True elif loc == (Tower.get_tower_addition_r(towers, num)): before_adj = get_buildings_adjacent_to_tower(Board.get_towers(board), num, board) Tower.add_tower_r(tower) added = True if added: buildings = get_buildings_adjacent_to_tower(Board.get_towers(board), num, board) new_buildings = [] for b in buildings: if b not in before_adj: new_buildings.append(b) if len(new_buildings) > 0: #we have a new owner new_building = new_buildings[0] if Building.has_owner(new_building): new_owner = Building.get_owner(new_building) tile = get_tile_from_supply(tile_supply, Tile.TOWER_TILE, num) if tile == None: tile = get_tile_from_all(players, Tile.TOWER_TILE, num) if new_owner == Building.NEUTRAL_OWNER: tile_supply.append(tile) else: new_owner = get_player_with_name(players, new_owner) Player.give_tile(new_owner, tile) return board, players, tile_supply def get_agent_moves(agent, board, current, tile_supply, players, num_moves=2): """Gets the moves made by an agent for his/her/it's turn.""" return agent(board, current, players, tile_supply, num_moves) def get_random_agent(): """A random agent for testing the functionality of the agent.""" import random def make_moves(board, player_index, players, tile_supply, num_moves): moves = [] for i in range(num_moves): move = random.choice(get_all_possible_moves(players[player_index], board)) board, players, tile_supply = apply_move(move, board, tile_supply, player_index, players) moves.append(move) return moves; return make_moves;
{ "repo_name": "nicholas-maltbie/Medina", "path": "Agent.py", "copies": "1", "size": "16312", "license": "mit", "hash": 4664186159593956000, "line_mean": 46.9764705882, "line_max": 127, "alpha_frac": 0.5891981363, "autogenerated": false, "ratio": 3.845355964167845, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4934554100467845, "avg_score": null, "num_lines": null }
"""An agent that can restore and run a policy learned by PPO.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from pybullet_envs.agents.ppo import normalize from pybullet_envs.agents import utility class SimplePPOPolicy(object): """A simple PPO policy that is independent to the PPO infrastructure. This class restores the policy network from a tensorflow checkpoint that was learned from PPO training. The purpose of this class is to conveniently visualize a learned policy or deploy the learned policy on real robots without need to change the PPO evaluation infrastructure: https://cs.corp.google.com/piper///depot/google3/robotics/reinforcement_learning/agents/scripts/visualize.py. """ def __init__(self, sess, env, network, policy_layers, value_layers, checkpoint): self.env = env self.sess = sess observation_size = len(env.observation_space.low) action_size = len(env.action_space.low) self.observation_placeholder = tf.placeholder(tf.float32, [None, observation_size], name="Input") self._observ_filter = normalize.StreamingNormalize(self.observation_placeholder[0], center=True, scale=True, clip=5, name="normalize_observ") self._restore_policy(network, policy_layers=policy_layers, value_layers=value_layers, action_size=action_size, checkpoint=checkpoint) def _restore_policy(self, network, policy_layers, value_layers, action_size, checkpoint): """Restore the PPO policy from a TensorFlow checkpoint. Args: network: The neural network definition. policy_layers: A tuple specify the number of layers and number of neurons of each layer for the policy network. value_layers: A tuple specify the number of layers and number of neurons of each layer for the value network. action_size: The dimension of the action space. checkpoint: The checkpoint path. 
""" observ = self._observ_filter.transform(self.observation_placeholder) with tf.variable_scope("network/rnn"): self.network = network(policy_layers=policy_layers, value_layers=value_layers, action_size=action_size) with tf.variable_scope("temporary"): self.last_state = tf.Variable(self.network.zero_state(1, tf.float32), False) self.sess.run(self.last_state.initializer) with tf.variable_scope("network"): (mean_action, _, _), new_state = tf.nn.dynamic_rnn(self.network, observ[:, None], tf.ones(1), self.last_state, tf.float32, swap_memory=True) self.mean_action = mean_action self.update_state = self.last_state.assign(new_state) saver = utility.define_saver(exclude=(r"temporary/.*",)) saver.restore(self.sess, checkpoint) def get_action(self, observation): normalized_observation = self._normalize_observ(observation) normalized_action, _ = self.sess.run( [self.mean_action, self.update_state], feed_dict={self.observation_placeholder: normalized_observation}) action = self._denormalize_action(normalized_action) return action[:, 0] def _denormalize_action(self, action): min_ = self.env.action_space.low max_ = self.env.action_space.high action = (action + 1) / 2 * (max_ - min_) + min_ return action def _normalize_observ(self, observ): min_ = self.env.observation_space.low max_ = self.env.observation_space.high observ = 2 * (observ - min_) / (max_ - min_) - 1 return observ
{ "repo_name": "MTASZTAKI/ApertusVR", "path": "plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/envs/simple_ppo_agent.py", "copies": "2", "size": "4198", "license": "mit", "hash": -5634956141573160000, "line_mean": 44.1397849462, "line_max": 111, "alpha_frac": 0.595521677, "autogenerated": false, "ratio": 4.382045929018789, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5977567606018789, "avg_score": null, "num_lines": null }
# Anagram another word. import collections ANAGRAM_INDICATORS = frozenset([ 'about', 'awful', 'bad', 'badly', 'break', 'round', 'crash', 'crazy', 'confuse', 'destruction', 'downfall', 'from', 'kink', 'make', 'mess', 'odd', 'oddly', 'off', 'out', 'put', 'roll', 'round', 'shake', 'transform', 'turn', 'upset', 'wander', 'wandering', ]) # Solution appears in clue. EMBEDDED_INDICATORS = frozenset([ 'bit', 'circling', 'in', 'part', 'partly', ]) # Synonym sounds like solution. HOMOPHONE_INDICATORS = frozenset([ 'hear', 'say', ]) # First letter. INITIAL_INDICATORS = frozenset([ 'first', 'initial', 'initially', 'lead', ]) # Join words. CONCATENATE_INDICATORS = frozenset([ 'and', 'by', 'put', 'with', ]) # Insert a word into another word. INSERT_INDICATORS = frozenset([ 'stuck', 'in', ]) # Front and back letters. EDGES_INDICATORS = frozenset([ 'edge', ]) # Reversing words. REVERSAL_INDICATORS = frozenset([ 'turn', 'turning', 'back', 'backward', ]) # Ambiguous definition (e.g., double entendre). AMBIGUOUS_INDICATORS = frozenset([ '?' ]) # Shorthand conversions. # TODO: This doesn't scale. Need a synonym list. SHORTHAND_CONVERSIONS = { 'american': ['us', 'usa'], 'attempt': ['try'], 'father': ['pa'], 'fathers': ['pas'], 'foot': ['ft'], 'good': ['g'], 'right': ['r'], 'left': ['l'], 'microphone': ['mic'], } # "Set of books" = OT (old testament) or NT (new testament). ALL_INDICATORS = collections.defaultdict(list) for group in [ ANAGRAM_INDICATORS, EMBEDDED_INDICATORS, HOMOPHONE_INDICATORS, INITIAL_INDICATORS, CONCATENATE_INDICATORS, INSERT_INDICATORS, EDGES_INDICATORS, REVERSAL_INDICATORS, ]: for indicator in group: ALL_INDICATORS[indicator].append(group)
{ "repo_name": "PhilHarnish/forge", "path": "src/data/alphabets/cryptic_keywords.py", "copies": "1", "size": "1802", "license": "mit", "hash": -7530059613889115000, "line_mean": 16.4951456311, "line_max": 64, "alpha_frac": 0.6182019978, "autogenerated": false, "ratio": 2.8467614533965246, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.3964963451196525, "avg_score": null, "num_lines": null }
"""Anagram detecting problem. Two strings are anagram if one is simply a rearrangement of the other. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function def anagram_iter(s1, s2): """Anagram by iteration. Time complexity: O(n^2). Space complexity: O(n). """ if len(s1) != len(s2): return False # Make list of s2 for search memoization. l2 = list(s2) for c1 in s1: is_found = False for i2, c2 in enumerate(s2): if c2 == c1: # If c1 is found as a char c2 in s2, # update its l2 for not duplicating search. l2[i2] = None is_found = True break # Confirm c1 is found in any char of s2. if not is_found: return False return True def anagram_sort(s1, s2): """Anagram by sorting. Time complexity: O(nlogn). Space complexity: O(n). """ if len(s1) != len(s2): return False # Sort lists of s1 and s2. l1 = list(s1) l2 = list(s2) l1.sort() l2.sort() for i in range(len(l1)): if l1[i] != l2[i]: return False return True def anagram_count(s1, s2): """Anagram by counting. Time complexity: O(n). Space complexity: O(1). """ if len(s1) != len(s2): return False # Use 26 chars from a to z to store s1/s2's char numbers. c1 = [0] * 26 c2 = [0] * 26 for i in range(len(s1)): pos = ord(s1[i]) - ord('a') c1[pos] += 1 for i in range(len(s2)): pos = ord(s2[i]) - ord('a') c2[pos] += 1 for i in range(26): if c1[i] != c2[i]: return False return True def main(): import time # Output: True. s1 = 'abcd' s2 = 'dcba' start_time = time.time() print('By iter: {}'.format(anagram_iter(s1, s2))) print('Time: {}'.format(time.time() - start_time)) start_time = time.time() print('By sort: {}'.format(anagram_sort(s1, s2))) print('Time: {}'.format(time.time() - start_time)) start_time = time.time() print('By count: {}'.format(anagram_count(s1, s2))) print('Time: {}'.format(time.time() - start_time)) # Output: False. 
# s1 = 'abcd' # s2 = 'aabc' s1 = 'abcd' s2 = 'abc' start_time = time.time() print('By iter: {}'.format(anagram_iter(s1, s2))) print('Time: {}'.format(time.time() - start_time)) start_time = time.time() print('By sort: {}'.format(anagram_sort(s1, s2))) print('Time: {}'.format(time.time() - start_time)) start_time = time.time() print('By count: {}'.format(anagram_count(s1, s2))) print('Time: {}'.format(time.time() - start_time)) if __name__ == '__main__': main()
{ "repo_name": "bowen0701/algorithms_data_structures", "path": "alg_anagram.py", "copies": "1", "size": "2843", "license": "bsd-2-clause", "hash": 7038626322420975000, "line_mean": 21.0387596899, "line_max": 70, "alpha_frac": 0.5290186423, "autogenerated": false, "ratio": 3.0935799782372144, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9090065603023576, "avg_score": 0.006506603502727534, "num_lines": 129 }
"""Anagram finding functions.""" from nagaram.scrabble import blank_tiles, word_list, word_score def _letter_map(word): """Creates a map of letter use in a word. Args: word: a string to create a letter map from Returns: a dictionary of {letter: integer count of letter in word} """ lmap = {} for letter in word: try: lmap[letter] += 1 except KeyError: lmap[letter] = 1 return lmap def anagrams_in_word(word, sowpods=False, start="", end=""): """Finds anagrams in word. Args: word: the string to base our search off of sowpods: boolean to declare TWL or SOWPODS words file start: a string of starting characters to find anagrams based on end: a string of ending characters to find anagrams based on Yields: a tuple of (word, score) that can be made with the input_word """ input_letters, blanks, questions = blank_tiles(word) for tile in start + end: input_letters.append(tile) for word in word_list(sowpods, start, end): lmap = _letter_map(input_letters) used_blanks = 0 for letter in word: if letter in lmap: lmap[letter] -= 1 if lmap[letter] < 0: used_blanks += 1 if used_blanks > (blanks + questions): break else: used_blanks += 1 if used_blanks > (blanks + questions): break else: yield (word, word_score(word, input_letters, questions))
{ "repo_name": "a-tal/nagaram", "path": "nagaram/anagrams.py", "copies": "1", "size": "1632", "license": "bsd-3-clause", "hash": 686752848658290400, "line_mean": 26.6610169492, "line_max": 72, "alpha_frac": 0.5575980392, "autogenerated": false, "ratio": 3.9420289855072466, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9999627024707247, "avg_score": 0, "num_lines": 59 }
"""Anagramic squares Problem 98 By replacing each of the letters in the word CARE with 1, 2, 9, and 6 respectively, we form a square number: 1296 = 362. What is remarkable is that, by using the same digital substitutions, the anagram, RACE, also forms a square number: 9216 = 962. We shall call CARE (and RACE) a square anagram word pair and specify further that leading zeroes are not permitted, neither may a different letter have the same digital value as another letter. Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, find all the square anagram word pairs (a palindromic word is NOT considered to be an anagram of itself). What is the largest square number formed by any member of such a pair? NOTE: All anagrams formed must be contained in the given text file. """ from eulerlib import generateCombinations, isPerfectSquare import itertools dict = {} def loadDict(): # Read words from file. All are on a single line, delimited by " and split by , file = open("words.txt") words = file.readline().strip().replace("\"","").split(",") file.close() # Create a hash table mapping all words that are anagrams for w in words: k = "".join(sorted(list(w))) if k in dict: dict[k].append(w) else: dict[k] = [w] # Remove words that have no anagrams for k in list(dict.keys()): if len(dict[k]) == 1: dict.pop(k) elif len(dict[k]) == 3: #special case, convert it into pairs a, b, c = dict[k] dict.pop(k) dict[a] = [a,b] dict[b] = [b,c] dict[c] = [a,c] def checkPair(a, b): # get letters to substitute letters = "".join(sorted(list(a))) # generate all permutations for replacing permutations = itertools.permutations(list(range(10)), len(letters)) for numbers in permutations: # replace each letter for a correspondingnumber s = a t = b for i in range(len(letters)): s = s.replace(letters[i], str(numbers[i])) t = t.replace(letters[i], str(numbers[i])) i = int(s) j = int(t) # check if numbers have same length (to eliminate 
leading zeroes) if len(str(i)) == len(str(j)): if isPerfectSquare(i) and isPerfectSquare(j): print(a, b, i, j) return max(i,j) return None loadDict() m = 0 for p in dict.values(): a = p[0] b = p[1] r = checkPair(a,b) if r != None and r > m: m = r print(m)
{ "repo_name": "feliposz/project-euler-solutions", "path": "python/euler98.py", "copies": "1", "size": "2628", "license": "mit", "hash": 9150874705929923000, "line_mean": 30.2857142857, "line_max": 83, "alpha_frac": 0.6133942161, "autogenerated": false, "ratio": 3.5370121130551815, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4650406329155181, "avg_score": null, "num_lines": null }
from itertools import permutations import argparse wordSet = set() # Strip whitespace from a string and lowercase it def strip_and_lower(s): return s.lower().strip() # Return true if the lowercased/stripped inputWord appears # in the global wordSet def dictionaryLookup(inputWord): return(strip_and_lower(inputWord) in wordSet) # Given an input word, returns all permutations of the word found # in the input dictionary def findAnagrams(inputWord): # Get a list of permutations of the characters in the input word perms = set(''.join(p) for p in permutations(inputWord)) perms.remove(inputWord) # anagrams is just the sublist of perms that returns true # on a call to dictionaryLookup() anagrams = list(p for p in perms if dictionaryLookup(p)) return anagrams def main(words, dictFilename): # Read the words from our dictionary file into wordSet dictFile = open(dictFilename, 'r') global wordSet wordSet = set(strip_and_lower(w) for w in dictFile.read().splitlines()) dictFile.close() # Find the anagrams for every string given as input to the script for w in words: print(w + ": ", end="") anagrams = findAnagrams(w) for a in anagrams: print(a, end=", ") # Get rid of the dangling ", " if necessary if len(anagrams) > 0: print("\b\b ", end="") print("") if __name__ == "__main__": parser = argparse.ArgumentParser( description='Prints the case-insensitive anagrams of words.') parser.add_argument('words', metavar='W', type=str, nargs='+', help='word you want the anagrams for') parser.add_argument('-d', dest='dictionary', type=str, default="/usr/share/dict/words", help='path to dictionary file (default: /usr/share/dict/words)') args = parser.parse_args() main(args.words, args.dictionary)
{ "repo_name": "skotchandsoda/yay-anagrams", "path": "anagram.py", "copies": "1", "size": "2096", "license": "bsd-3-clause", "hash": -3369552010673821700, "line_mean": 33.3606557377, "line_max": 76, "alpha_frac": 0.6626908397, "autogenerated": false, "ratio": 3.7562724014336917, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9902079966589354, "avg_score": 0.0033766549088673856, "num_lines": 61 }
"""An aircraft electric system specification generator. Generation for Yices and TuLiP were presented in a HSCC 2013 paper. Huan Xu (mumu@caltech.edu) October 30, 2012 Generation into SMT-LIB v2 language by SCL, 27 January 2013 """ import sys, os import re, copy import numpy as np import scipy import scipy.io import networkx as nx import itertools import time #************************************************************************************************************************************** def write_envgen(genlist): """Declares generator environment variable Parameters ---------- genlist : list of all generators """ for i in genlist: f.write('env_vars['"'"'g'+str(i)+"'"+'] = [0,1]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_envru(rulist): """Declares rectifier unit environment variable Parameters ---------- rulist : list of all rectifier units """ for i in rulist: f.write('env_vars['"'"'ru'+str(i)+"'"+'] = [0,1]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_essbusspec(essbuslist,time): """Writes counter for essential buses Parameters ---------- essbuslist : list of buses with essential loads time: int max time bus can be unpowered """ for i in essbuslist: f.write('guarantees += '"'"'&\\n\\t[]((b'+str(i)+'=0) -> (next(countb'+str(i)+') = countb'+str(i)+'+1))'"'") f.write('\n') f.write('guarantees += '"'"'&\\n\\t[]((b'+str(i)+'=1) -> (next(countb'+str(i)+') = 0))'"'") f.write('\n') f.write('guarantees += '"'"'&\\n\\t[](countb'+str(i)+' <= '+str(time)+')'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_essbusdisc(essbuslist,time): """Declares bus counter system variable Parameters ---------- essbuslist : list of all essential buses time: int max time bus can be unpowered """ for i in essbuslist: f.write('disc_sys_vars['"'"'countb'+str(i)+"'"+'] = [x for x in 
range(0,'+str(time+1)+')]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_discbus(buslist): """Declares bus system variables Parameters ---------- buslist : list of all buses """ for i in buslist: f.write('disc_sys_vars['"'"'b'+str(i)+"'"+'] = [0,1]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_discnull(nullist): """Declares null node system variables Parameters ---------- nullist : list of all null nodes """ for i in nullist: f.write('disc_sys_vars['"'"'b'+str(i)+"'"+'] = [1]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_discdc_con(G,rulist,dcbuslist,nullist): """Declares contactors (removes contactors between rus and dcbuses) Parameters ---------- G : networkX graph rulist: list of rectifier units dcbuslist: list of all dc buses """ remove = [] remove2 = [] for i in rulist: for j in dcbuslist: remove.append((i,j)) L = copy.deepcopy(G) L.remove_edges_from(remove) remove2 = all_pairs(nullist) L.remove_edges_from(remove2) edges = L.edges() # print edges for i in range(0,len(edges)): # print edges[i][0] # print edges[i][1] f.write('disc_sys_vars['"'"'c'+str(edges[i][0])+str(edges[i][1])+"'"'] = [0,1]\n') for j in range(0,len(remove2)): f.write('disc_sys_vars['"'"'c'+str(remove2[j][0])+str(remove2[j][1])+"'"'] = [1]\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_discac_con(G,nullist): """Declares contactors (removes contactors between rus and dcbuses) Parameters ---------- G : networkX graph rulist: list of rectifier units dcbuslist: list of all dc buses """ remove = [] H = copy.deepcopy(G) if len(nullist) >= 2: remove = all_pairs(nullist) H.remove_edges_from(remove) edges = H.edges() for i in range(0,len(edges)): if edges[i][0] < edges[i][1]: f.write('disc_sys_vars['"'"'c'+str(edges[i][0])+str(edges[i][1])+"'"'] = [0,1]\n') else: 
f.write('disc_sys_vars['"'"'c'+str(edges[i][1])+str(edges[i][0])+"'"'] = [0,1]\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def g_disconnect(G,genlist,buslist): """Writes specification disconnecting contactor if generator is unhealthy Parameters ---------- G : networkX graph genlist : list of all generators buslist: list of all ac buses """ for i in genlist: for j in buslist: for e in G.edges(): if i in e and j in e: if i < j: f.write('guarantees += '"'"'&\\n\\t[]((g'+str(i)+'=0) -> (c'+str(i)+str(j)+'=0))'"'") else: f.write('guarantees += '"'"'&\\n\\t[]((g'+str(i)+'=0) -> (c'+str(j)+str(i)+'=0))'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def ru_disconnect(G,rulist,buslist): """Writes specification disconnecting contactor if rectifier is unhealthy Parameters ---------- G : networkX graph rulist : list of all rectifiers buslist: list of all ac buses """ for i in rulist: for j in buslist: for e in G.edges(): if i in e and j in e: if i < j: f.write('guarantees += '"'"'&\\n\\t[]((ru'+str(i)+'=0) -> (c'+str(i)+str(j)+'=0))'"'") else: f.write('guarantees += '"'"'&\\n\\t[]((ru'+str(i)+'=0) -> (c'+str(j)+str(i)+'=0))'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def has_path(G, source, target): """Return True if G has a path from source to target, False otherwise. Parameters ---------- G : NetworkX graph source : node Starting node for path target : node Ending node for path """ try: sp = nx.shortest_path(G,source, target) except nx.NetworkXNoPath: return False return True #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def all_pairs(gens): """Returns list of all generator pairs (potential parallels). 
Parameters ---------- gens : list list of all generator nodes """ answer = [] for i in range(len(gens)): for j in range(i+1, len(gens)): if (gens[i],gens[j]) not in answer: answer.append((gens[i],gens[j])) return answer #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def all_gens(list,G): """Finds all generator pairs that are connected through graph. Parameters ---------- G : NetworkX graph list : tuples list of all generator pairs """ pgens = [] for i in range(len(list)): if has_path(G,list[i][0], list[i][1]) is True: pgens.append(list[i]) return pgens #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def ppaths(i,j,G): """Finds all contactors between two parallel sources. Converts to LTL. Parameters ---------- G : NetworkX graph source i : node Starting node for path target j : node Ending node for path """ result = nx.shortest_path(G,source=i,target=j) C = {} guarantees = 'guarantees += '"'"'&\\n\\t[](!(' for k in range(0,len(result)-1): if result[k] < result[k+1]: C[k] = 'c'+str(result[k])+str(result[k+1]) else: C[k] = 'c'+str(result[k+1])+str(result[k]) guarantees = guarantees + '('+str(C[0])+'=1)' for m in range(1,len(C)): guarantees = guarantees+ ' & ('+str(C[m])+'=1)' guarantees = guarantees + '))'"'" return guarantees #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def noparallel(G,list): """Writes non-paralleling specifications to file. Parameters ---------- G : NetworkX graph list : tuples list of all generator pairs with paths in G """ sourcetemp = (all_pairs(list)) source = all_gens(sourcetemp,G) for i in range(0,len(source)): mat = ppaths(source[i][0], source[i][1], G) f.write(mat) f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def faulttol(prob,allgens,genfail): """Finds all combinations of failures. 
Parameters ---------- prob: int probability up to which failure can occur allgens: list list of all components that can fail genfail: int probability of failure of single component """ tuples = int(prob/genfail) fails = [] temp = [] if tuples <= 1: fails = allgens[:] else: fails = allgens[:] for i in range(2,tuples+1): for temp in itertools.combinations(allgens,i): fails.append(temp) return fails #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_genassump(genfail,genlist): """Writes generator environment assumption Parameters ---------- genfail : int how many generators may fail at once time genlist : list of all generators """ f.write('assumptions += '"'"'&\\n\\t[]((g'+str(genlist[0])) for i in range(1,len(genlist)): f.write(' + g'+str(genlist[i])) f.write(') >= '+str(len(genlist)-genfail)+')'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_ruassump(rufail,rulist): """Writes rectifier environment assumption Parameters ---------- rufail : int how many rectifiers may fail at once time genlist : list of all generators """ if len(rulist) > 0: f.write('assumptions += '"'"'&\\n\\t[]((ru'+str(rulist[0])) for i in range(1,len(rulist)): f.write(' + ru'+str(rulist[i])) f.write(') >= '+str(len(rulist)-rufail)+')'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def remove_ru_edges(G,buslist,rulist): pairs = [] H = copy.deepcopy(G) if len(buslist) > 0: for i in busac: for j in rus: pairs.append((i,j)) return pairs #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def remove_rus(G,buslist,rulist): pairs = [] H = copy.deepcopy(G) if len(buslist) > 0: for i in busac: for j in rus: pairs.append((i,j)) H.remove_edges_from(pairs) return H #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def buspathnodes(G,busno,source): """Finds determines if path exists 
from bus to generator. Parameters ---------- busno: int node number for bus sources: list list of all generators G: NetworkX graph """ buspaths = [] if has_path(G,busno,source): buspaths.append((busno,source)) return buspaths #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def acbusprop(G,source,target): paths = [] C = [] temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) gens2.remove(target) for m in gens2: temp.append((source,m)) D.remove_edges_from(temp) edges = remove_ru_edges(G,busac,rus) D.remove_edges_from(edges) for path in nx.all_simple_paths(D,source,target): paths.append(path) for p in range(0,len(paths)): for i in range(0,len(paths[p])-1): C.append((paths[p][i],paths[p][i+1])) f.write('disc_props['"'"'B'+str(source)+str(target)+str(p)+"'"'] = '"'") if paths[p][1] in gens: f.write('(g'+str(paths[p][1])+'=1)') elif paths[p][1] in busac: f.write('(b'+str(paths[p][1])+'=1)') elif paths[p][1] in null: f.write('(b'+str(paths[p][1])+'=1)') else: pass if len(paths[p]) > 2: for j in range(2,len(paths[p])): if paths[p][j] in gens: f.write(' & (g'+str(paths[p][j])+'=1)') elif paths[p][j] in busac: f.write(' & (b'+str(paths[p][j])+'=1)') elif paths[p][j] in null: f.write(' & (b'+str(paths[p][j])+'=1)') else: pass for k in range(0,len(C)): if C[k][0] < C[k][1]: f.write(' & (c'+str(C[k][0])+str(C[k][1])+'=1)') else: f.write(' & (c'+str(C[k][1])+str(C[k][0])+'=1)') f.write("'"'\n') C = [] #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_acbusprop(G,buslist,genlist): """Writes dc bus properties Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ for i in buslist: for j in genlist: acbusprop(G,i,j) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def acbusspec(G,busno,gen): temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) gens2.remove(gen) for m in gens2: 
temp.append((busno,m)) D.remove_edges_from(temp) edges = remove_ru_edges(G,busac,rus) D.remove_edges_from(edges) paths = [] for path in nx.all_simple_paths(D,busno,gen,cutoff=None): paths.append(path) for j in range(0,len(paths)): f.write('guarantees += '"'"'&\\n\\t[]((B'+str(busno)+str(gen)+str(j)+') -> (b'+str(busno)+'=1))'"'"'\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_acbusspec(G,buslist,genlist): """Writes dc bus specifications Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ for i in buslist: for j in genlist: acbusspec(G,i,j) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_acbusspec2(G,buslist,genlist): """Writes specifications for dc bus unpowered conditions Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ paths = [] temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) edges = remove_ru_edges(G,busac,rus) D.remove_edges_from(edges) for i in buslist: f.write('guarantees += '"'"'&\\n\\t[](!((0=1)') for j in genlist: gens2.remove(j) D.remove_nodes_from(gens2) for path in nx.all_simple_paths(D,i,j): paths.append(path) f.write(' | (B' + str(i) + str(j) + str(len(paths)-1)+')') paths = [] gens2 = copy.deepcopy(gens) D = copy.deepcopy(G) f.write(') -> (b'+str(i)+'=0))'"'"'\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_dcbusalways(buslist): """Writes dc bus specification must always be powered Parameters ---------- buslist : list of all dc buses """ for i in buslist: f.write('guarantees += '"'"'&\\n\\t[](b'+str(i)+' = 1)'"'"+'\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def all_simple_paths_graph(G, source, target, cutoff=None): if cutoff is None: cutoff = len(G)-1 if cutoff < 1: yield [] else: visited = [source] stack = 
[iter(G[source])] while stack: children = stack[-1] child = next(children, None) if child is None: stack.pop() visited.pop() elif len(visited) < cutoff: if child == target: yield visited + [target] elif child not in visited: visited.append(child) stack.append(iter(G[child])) else: #len(visited) == cutoff: if child == target or target in children: yield visited + [target] stack.pop() visited.pop() def all_simple_paths_multigraph(G, source, target, cutoff=None): if cutoff is None: cutoff = len(G)-1 if cutoff < 1: yield [] else: visited = [source] stack = [(v for u,v in G.edges(source))] while stack: children = stack[-1] child = next(children, None) if child is None: stack.pop() visited.pop() elif len(visited) < cutoff: if child == target: yield visited + [target] elif child not in visited: visited.append(child) stack.append((v for u,v in G.edges(child))) else: #len(visited) == cutoff: count = ([child]+list(children)).count(target) for i in range(count): yield visited + [target] stack.pop() visited.pop() def all_simple_paths(G, source, target, cutoff=None): """Generate all simple paths in the graph G from source to target. Parameters ---------- G : NetworkX graph source : node Starting node for path. target : node Ending node for path. cutoff : integer, optional Depth to stop the search. Only paths of length <= cutoff are returned. Returns ------- path_generator: generator A generator that produces lists of simple paths. 
""" if G.is_multigraph(): return all_simple_paths_multigraph(G, source, target, cutoff=cutoff) else: return all_simple_paths_graph(G, source, target, cutoff=cutoff) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def dcbusprop(G,source,target): """Creates discrete properties for power status of dc buses Parameters ---------- G : networkX graph source : node dc bus target : node generator """ temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) gens2.remove(target) D.remove_nodes_from(gens2) paths = [] C = [] for path in nx.all_simple_paths(D,source,target,cutoff=None): paths.append(path) for p in range(0,len(paths)): for i in range(0,len(paths[p])-1): if paths[p][i] in busdc and paths[p][i+1] in rus: pass elif paths[p][i] in rus and paths[p][i+1] in busdc: pass else: C.append((paths[p][i],paths[p][i+1])) f.write('disc_props['"'"'B'+str(source)+str(target)+str(p)+"'"'] = '"'") if paths[p][1] in gens: f.write('(g'+str(paths[p][1])+'=1)') elif paths[p][1] in busac: f.write('(b'+str(paths[p][1])+'=1)') elif paths[p][1] in rus: f.write('(ru'+str(paths[p][1])+'=1)') elif paths[p][1] in busdc: f.write('(b'+str(paths[p][1])+'=1)') elif paths[p][1] in null: f.write('(b'+str(paths[p][1])+'=1)') else: pass for j in range(2,len(paths[p])): if paths[p][j] in gens: f.write(' & (g'+str(paths[p][j])+'=1)') elif paths[p][j] in busac: f.write(' & (b'+str(paths[p][j])+'=1)') elif paths[p][j] in rus: f.write(' & (ru'+str(paths[p][j])+'=1)') elif paths[p][1] in busdc: f.write(' & (b'+str(paths[p][1])+'=1)') elif paths[p][1] in null: f.write(' & (b'+str(paths[p][1])+'=1)') else: pass for k in range(0,len(C)): if C[k][0] < C[k][1]: f.write(' & (c'+str(C[k][0])+str(C[k][1])+'=1)') else: f.write(' & (c'+str(C[k][1])+str(C[k][0])+'=1)') f.write("'"'\n') C = [] def write_dcbusprop(G,buslist,genlist): """Writes dc bus properties Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ for 
i in buslist: for j in genlist: dcbusprop(G,i,j) def dcbusspec(G,busno,gen): """Creates specifications for when DC bus gets powered Parameters ---------- G : networkX graph busno : node dc bus gen : node generator """ paths = [] C = [] temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) gens2.remove(gen) D.remove_nodes_from(gens2) for path in nx.all_simple_paths(D,busno,gen,cutoff=None): paths.append(path) for j in range(0,len(paths)): f.write('guarantees += '"'"'&\\n\\t[]((B'+str(busno)+str(gen)+str(j)+') -> (b'+str(busno)+'=1))'"'"'\n') def write_dcbusspec(G,buslist,genlist): """Writes dc bus specifications Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ paths = [] for i in buslist: for j in genlist: dcbusspec(G,i,j) def write_dcbusspec2(G,buslist,genlist): """Writes specifications for dc bus unpowered conditions Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ paths = [] temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) for i in buslist: f.write('guarantees += '"'"'&\\n\\t[](!((0=1)') for j in genlist: gens2.remove(j) D.remove_nodes_from(gens2) for path in nx.all_simple_paths(D,i,j): paths.append(path) f.write(' | (B' + str(i) + str(j) + str(len(paths)-1)+')') paths = [] gens2 = copy.deepcopy(gens) D = copy.deepcopy(G) f.write(') -> (b'+str(i)+'=0))'"'"'\n') #************************************************************************************************ def write_sat_bool(complist): """Defines boolean components (not including contactors) Parameters ---------- complist : list of all components """ for i in complist: if i in gens: f.write('(define g'+str(i)+'::bool)\n') elif i in busac: f.write('(define b'+str(i)+'::bool)\n') elif i in busdc: f.write('(define b'+str(i)+'::bool)\n') elif i in null: f.write('(define b'+str(i)+'::bool)\n') elif i in rus: f.write('(define r'+str(i)+'::bool)\n') 
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_con(G,rulist,dcbuslist,nullist): """Defines contactors (removes contactors between rus and dcbuses) Parameters ---------- G : networkX graph rulist: list of rectifier units dcbuslist: list of all dc buses """ remove = [] remove2 = [] for i in rulist: for j in dcbuslist: remove.append((i,j)) L = copy.deepcopy(G) L.remove_edges_from(remove) remove2 = all_pairs(nullist) L.remove_edges_from(remove2) edges = L.edges() for i in range(0,len(edges)): f.write('(define c'+str(edges[i][0])+str(edges[i][1])+'::bool)\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_bool_smtlib2(complist): """Define boolean components (not including contactors) in SMT-LIB Parameters ---------- complist : list of all components """ for i in complist: if i in gens: f.write('(declare-fun g'+str(i)+' () Bool)\n') elif i in busac: f.write('(declare-fun b'+str(i)+' () Bool)\n') elif i in busdc: f.write('(declare-fun b'+str(i)+' () Bool)\n') elif i in null: f.write('(declare-fun b'+str(i)+' () Bool)\n') elif i in rus: f.write('(declare-fun r'+str(i)+' () Bool)\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_con_smtlib2(G,rulist,dcbuslist,nullist): """Define contactors (removes contactors between rus and dcbuses) in SMT-LIB Parameters ---------- G : networkX graph rulist: list of rectifier units dcbuslist: list of all dc buses """ remove = [] remove2 = [] for i in rulist: for j in dcbuslist: remove.append((i,j)) L = copy.deepcopy(G) L.remove_edges_from(remove) remove2 = all_pairs(nullist) L.remove_edges_from(remove2) edges = L.edges() for i in range(0,len(edges)): f.write('(declare-fun c'+str(edges[i][0])+str(edges[i][1])+' () Bool)\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_always(complist): """Asserts boolean components always on/powered 
Parameters ---------- complist : list of all components """ remove2 = [] for i in complist: if i in busac: f.write('(assert (= b'+str(i)+' true))\n') elif i in busdc: f.write('(assert (= b'+str(i)+' true))\n') elif i in null: f.write('(assert (= b'+str(i)+' true))\n') remove2 = all_pairs(null) edges = G.edges() for j in range(0,len(remove2)): if remove2[j] in edges: f.write('(assert (= c'+str(remove2[j][0])+str(remove2[j][1])+' true))\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_disconnectgen(G,genlist): """Writes specification disconnecting contactor if generator is unhealthy Parameters ---------- G : networkX graph genlist : list of all generators """ neighbor = [] for i in genlist: neighbor = G.neighbors(i) for j in range(0, len(neighbor)): if i < neighbor[j]: f.write('(assert (=> (= g'+str(i)+' false) (= c'+str(i)+str(neighbor[j])+' false)))\n') elif i > neighbor[j]: f.write('(assert (=> (= g'+str(i)+' false) (= c'+str(neighbor[j])+str(i)+' false)))\n') neighbor = [] #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_disconnectru(G,rulist): """Writes specification disconnecting contactor if rectifier is unhealthy Parameters ---------- G : networkX graph genlist : list of all generators """ neighbor = [] H = copy.deepcopy(G) H.remove_nodes_from(busdc) for i in rulist: neighbor = H.neighbors(i) for j in range(0, len(neighbor)): if i < neighbor[j]: f.write('(assert (=> (= r'+str(i)+' false) (= c'+str(i)+str(neighbor[j])+' false)))\n') elif i > neighbor[j]: f.write('(assert (=> (= r'+str(i)+' false) (= c'+str(neighbor[j])+str(i)+' false)))\n') neighbor = [] #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_noparallel(G,genlist): pairs = all_pairs(genlist) H = copy.deepcopy(G) H.remove_nodes_from(rus) for i in range(0, len(pairs)): for path in nx.all_simple_paths(H, pairs[i][0], pairs[i][1]): f.write('(assert (not 
(and ') for j in range(0,len(path)-1): if path[j] < path[j+1]: f.write('(= c'+str(path[j])+str(path[j+1])+' true) ') else: f.write('(= c'+str(path[j+1])+str(path[j])+' true) ') f.write(')))\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_acbusprop1(G,buslist, genlist): H = copy.deepcopy(G) H.remove_nodes_from(rus) for i in buslist: for j in genlist: gen_temp = copy.deepcopy(gens) gen_temp.remove(j) H.remove_nodes_from(gen_temp) H.remove_nodes_from(rus) for path in nx.all_simple_paths(H,i,j): f.write('(assert (=> (and ') for k in range(1,len(path)): if path[k] in gens: f.write(' (= g'+str(path[k])+' true)') elif path[k] in busac: f.write(' (= b'+str(path[k])+' true)') elif path[k] in null: f.write(' (= b'+str(path[k])+' true)') for m in range(0,len(path)-1): if path[m] < path[m+1]: f.write(' (= c'+str(path[m])+str(path[m+1])+' true)') else: f.write(' (= c'+str(path[m+1])+str(path[m])+' true)') f.write(') (= b'+str(i)+' true)))\n') H = copy.deepcopy(G) H.remove_nodes_from(rus) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_acbusprop2(G,buslist, genlist): H = copy.deepcopy(G) H.remove_nodes_from(rus) # i = buslist[7] # j = genlist[5] for i in buslist: f.write('(assert (=> (not (or ') for j in genlist: gen_temp = copy.deepcopy(gens) gen_temp.remove(j) H.remove_nodes_from(gen_temp) H.remove_nodes_from(rus) for path in nx.all_simple_paths(H,i,j): f.write(' (and') for k in range(1,len(path)): if path[k] in gens: f.write(' (= g'+str(path[k])+' true)') elif path[k] in busac: f.write(' (= b'+str(path[k])+' true)') elif path[k] in null: f.write(' (= b'+str(path[k])+' true)') for m in range(0,len(path)-1): if path[m] < path[m+1]: f.write(' (= c'+str(path[m])+str(path[m+1])+' true)') else: f.write(' (= c'+str(path[m+1])+str(path[m])+' true)') f.write(')') H = copy.deepcopy(G) H.remove_nodes_from(rus) f.write(')) (= b'+str(i)+ ' false)))\n') 
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_dcbusprop1(G,buslist, genlist): H = copy.deepcopy(G) for i in buslist: for j in genlist: gen_temp = copy.deepcopy(gens) gen_temp.remove(j) H.remove_nodes_from(gen_temp) for path in nx.all_simple_paths(H,i,j): f.write('(assert (=> (and ') for k in range(1,len(path)): if path[k] in gens: f.write(' (= g'+str(path[k])+' true)') elif path[k] in busac: f.write(' (= b'+str(path[k])+' true)') elif path[k] in null: f.write(' (= b'+str(path[k])+' true)') elif path[k] in rus: f.write(' (= r'+str(path[k])+' true)') for m in range(0,len(path)-1): if path[m] in busdc and path[m+1] in rus: pass elif path[m] in rus and path[m+1] in busdc: pass elif path[m] < path[m+1]: f.write(' (= c'+str(path[m])+str(path[m+1])+' true)') elif path[m+1] < path[m]: f.write(' (= c'+str(path[m+1])+str(path[m])+' true)') f.write(') (= b'+str(i)+' true)))\n') H = copy.deepcopy(G) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_dcbusprop2(G,buslist, genlist): H = copy.deepcopy(G) for i in buslist: f.write('(assert (=> (not (or ') for j in genlist: gen_temp = copy.deepcopy(gens) gen_temp.remove(j) H.remove_nodes_from(gen_temp) for path in nx.all_simple_paths(H,i,j): f.write(' (and') for k in range(1,len(path)): if path[k] in gens: f.write(' (= g'+str(path[k])+' true)') elif path[k] in busac: f.write(' (= b'+str(path[k])+' true)') elif path[k] in null: f.write(' (= b'+str(path[k])+' true)') elif path[k] in rus: f.write(' (= r'+str(path[k])+' true)') for m in range(0,len(path)-1): if path[m] in busdc and path[m+1] in rus: pass elif path[m] in rus and path[m+1] in busdc: pass elif path[m] < path[m+1]: f.write(' (= c'+str(path[m])+str(path[m+1])+' true)') elif path[m+1] < path[m]: f.write(' (= c'+str(path[m+1])+str(path[m])+' true)') f.write(')') H = copy.deepcopy(G) f.write(')) (= b'+str(i)+' false)))\n') 
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_env(gfail, rfail, lang="yices"): if lang == "smtlib2": postface = ["(check-sat)", "(exit)"] else: # Assume Yices otherwise postface = ["(check)"] gentemp = [x for x in range(0,gfail+1)] rutemp = [x for x in range(0,rfail+1)] allgens = [] allrus = [] env_filename = 'env' count = 0 for i in gentemp: for j in itertools.combinations(gens,len(gens)-i): if len(gens)-i == 1: for k in gens: allgens.append(k) else: allgens.append(j) allgens = list(set(allgens)) if len(rus) == 0: pass else: for i in rutemp: for j in itertools.combinations(rus,len(rus)-i): if len(rus)-i == 1: for k in rus: allrus.append(k) else: allrus.append(j) allrus = list(set(allrus)) for i in range(0,len(allgens)): for j in range(0,len(allrus)): if lang == "smtlib2": env_filename = 'env'+str(count)+'.smt2' else: # Assume Yices otherwise env_filename = 'env'+str(count)+'.ys' f2 = open(env_filename,"w") for k in gens: if isinstance(allgens[i],int): if k == allgens[i]: f2.write('(assert (= g'+str(k)+' true))\n') else: f2.write('(assert (= g'+str(k)+' false))\n') elif k in allgens[i]: f2.write('(assert (= g'+str(k)+' true))\n') else: f2.write('(assert (= g'+str(k)+' false))\n') for m in rus: if isinstance(allrus[j],int): if m == allrus[j]: f2.write('(assert (= r'+str(m)+' true))\n') else: f2.write('(assert (= r'+str(m)+' false))\n') elif m in allrus[j]: f2.write('(assert (= r'+str(m)+' true))\n') else: f2.write('(assert (= r'+str(m)+' false))\n') count = count+1 f2.write("\n".join(postface)+"\n") f2.close() #************************************************************************************************ start = time.time() file_name = 'test_spec' #Load adjacency matrix from matfile data = scipy.io.loadmat('SLD.mat') datatemp = data['A'] # create matrix from matfile A = np.matrix(datatemp) #Failure Probabilities genfail = 1 rufail = 1 busfail = 0 #Node definitions busac = [2,3] busess = [2,3] busdc = [6,7] null = 
[] rus = [4,5] gens = [0,1] #Bus time nptime = 0 #Create networkx graph from adjacency matrix G=nx.from_numpy_matrix(A) print 'number of edges ' + str(len(G.edges())) print 'number of nodes ' + str(len(G.nodes())) #sets of failure states # fails = faulttol(10,gens,genfail) ################################################################ # Synthesize ################################################################ if ('tulip' in sys.argv): file_name = file_name+'.py' f = open(file_name, "w") # #environment variables print 'writing environment variables' write_envgen(gens) write_envru(rus) #discrete system variables print 'writing discrete system variables: buses' write_discbus(busac) write_discbus(busdc) write_discnull(null) print time.time()-start print 'writing discrete system variables: contactors' # write_discac_con(G,null) #only use this when there are NO DC components write_discdc_con(G,rus,busdc,null) print time.time()-start print 'writing discrete system variables: bus counters' write_essbusdisc(busess,nptime) #acbus discrete properties print 'removing ru paths' H = remove_rus(G,busac,rus) print time.time()-start print 'writing discrete bus properties: AC' write_acbusprop(H,busac,gens) print time.time()-start print 'writing discrete bus properties: DC' write_dcbusprop(G,busdc,gens) print time.time()-start #Environment assumptions print 'writing environment assumptions' write_genassump(genfail,gens) write_ruassump(rufail,rus) print time.time()-start #acbus power guarantees print 'writing bus power specifications: AC' write_acbusspec(H,busac,gens) write_acbusspec2(H,busac,gens) write_essbusspec(busess,nptime) print time.time()-start #disconnect unhealthy guarantees print 'disconnecting unhealthy generators' g_disconnect(G,gens,busac) g_disconnect(G,gens,null) print time.time()-start print 'disconnecting unhealthy rus' ru_disconnect(G,rus,busac) ru_disconnect(G,rus,null) print time.time()-start ##non-parallel guarantees print 'writing no paralleling specs' 
noparallel(G,gens) print time.time()-start #dc bus power specifications print 'writing bus power specifications: DC' write_dcbusspec(G,busdc,gens) write_dcbusspec2(G,busdc,gens) write_dcbusalways(busdc) f.close() print 'It took', time.time()-start, 'seconds.' ################################################################ # Synthesize ################################################################ if ('yices' in sys.argv) or ('smtlib' in sys.argv): if "smtlib" in sys.argv: file_name = file_name+'.smt2' write_sat_bool = write_sat_bool_smtlib2 write_sat_con = write_sat_con_smtlib2 preface = ["(set-option :print-success false)", "(set-option :produce-models true)", "(set-logic QF_LIA)"] else: file_name = file_name+'.ys' preface = [] f = open(file_name, "w") if len(preface) > 0: f.write("\n".join(preface)+"\n") #Writing component definitions write_sat_bool(gens) write_sat_bool(busac) write_sat_bool(busdc) write_sat_bool(null) write_sat_bool(rus) #Writing contactor definitions write_sat_con(G,rus,busdc,null) #Writing always power bus and null assertions write_sat_always(busac) write_sat_always(busdc) write_sat_always(null) #Writing disconnect implications write_sat_disconnectgen(G,gens) write_sat_disconnectru(G,rus) write_sat_noparallel(G,gens) write_sat_acbusprop1(G,busac,gens) write_sat_acbusprop2(G,busac,gens) write_sat_dcbusprop1(G,busdc, gens) write_sat_dcbusprop2(G,busdc, gens) #write_environment assumptions if "smtlib" in sys.argv: write_sat_env(genfail, rufail, lang="smtlib2") else: write_sat_env(genfail,rufail) f.close() print 'It took', time.time()-start, 'seconds.'
{ "repo_name": "pombredanne/nTLP", "path": "contrib/AES/AES_specgen.py", "copies": "1", "size": "41531", "license": "bsd-3-clause", "hash": 3171836456135973400, "line_mean": 31.44609375, "line_max": 135, "alpha_frac": 0.4561652741, "autogenerated": false, "ratio": 3.6704374723817943, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9543462551334416, "avg_score": 0.01662803902947558, "num_lines": 1280 }
""" An aircraft electric system specification generator presented in the HSCC 2013 paper. Huan Xu (mumu@caltech.edu) October 30, 2012 """ from __future__ import division from __future__ import print_function import sys, os import re, copy import numpy as np import scipy import scipy.io import networkx as nx import itertools import time #************************************************************************************************************************************** def write_envgen(genlist): """Declares generator environment variable Parameters ---------- genlist : list of all generators """ for i in genlist: f.write('env_vars['"'"'g'+str(i)+"'"+'] = [0,1]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_envru(rulist): """Declares rectifier unit environment variable Parameters ---------- rulist : list of all rectifier units """ for i in rulist: f.write('env_vars['"'"'ru'+str(i)+"'"+'] = [0,1]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_essbusspec(essbuslist,time): """Writes counter for essential buses Parameters ---------- essbuslist : list of buses with essential loads time: int max time bus can be unpowered """ for i in essbuslist: f.write('guarantees += '"'"'&\\n\\t[]((b'+str(i)+'=0) -> (next(countb'+str(i)+') = countb'+str(i)+'+1))'"'") f.write('\n') f.write('guarantees += '"'"'&\\n\\t[]((b'+str(i)+'=1) -> (next(countb'+str(i)+') = 0))'"'") f.write('\n') f.write('guarantees += '"'"'&\\n\\t[](countb'+str(i)+' <= '+str(time)+')'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_essbusdisc(essbuslist,time): """Declares bus counter system variable Parameters ---------- essbuslist : list of all essential buses time: int max time bus can be unpowered """ for i in essbuslist: f.write('disc_sys_vars['"'"'countb'+str(i)+"'"+'] = [x for x in range(0,'+str(time+1)+')]') 
f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_discbus(buslist): """Declares bus system variables Parameters ---------- buslist : list of all buses """ for i in buslist: f.write('disc_sys_vars['"'"'b'+str(i)+"'"+'] = [0,1]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_discnull(nullist): """Declares null node system variables Parameters ---------- nullist : list of all null nodes """ for i in nullist: f.write('disc_sys_vars['"'"'b'+str(i)+"'"+'] = [1]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_discdc_con(G,rulist,dcbuslist,nullist): """Declares contactors (removes contactors between rus and dcbuses) Parameters ---------- G : networkX graph rulist: list of rectifier units dcbuslist: list of all dc buses """ remove = [] remove2 = [] for i in rulist: for j in dcbuslist: remove.append((i,j)) L = copy.deepcopy(G) L.remove_edges_from(remove) remove2 = all_pairs(nullist) L.remove_edges_from(remove2) edges = L.edges() # print(edges) for i in range(0,len(edges)): # print(edges[i][0]) # print(edges[i][1]) f.write('disc_sys_vars['"'"'c'+str(edges[i][0])+str(edges[i][1])+"'"'] = [0,1]\n') for j in range(0,len(remove2)): f.write('disc_sys_vars['"'"'c'+str(remove2[j][0])+str(remove2[j][1])+"'"'] = [1]\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_discac_con(G,nullist): """Declares contactors (removes contactors between rus and dcbuses) Parameters ---------- G : networkX graph rulist: list of rectifier units dcbuslist: list of all dc buses """ remove = [] H = copy.deepcopy(G) if len(nullist) >= 2: remove = all_pairs(nullist) H.remove_edges_from(remove) edges = H.edges() for i in range(0,len(edges)): if edges[i][0] < edges[i][1]: f.write('disc_sys_vars['"'"'c'+str(edges[i][0])+str(edges[i][1])+"'"'] = [0,1]\n') else: 
f.write('disc_sys_vars['"'"'c'+str(edges[i][1])+str(edges[i][0])+"'"'] = [0,1]\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def g_disconnect(G,genlist,buslist): """Writes specification disconnecting contactor if generator is unhealthy Parameters ---------- G : networkX graph genlist : list of all generators buslist: list of all ac buses """ for i in genlist: for j in buslist: for e in G.edges(): if i in e and j in e: if i < j: f.write('guarantees += '"'"'&\\n\\t[]((g'+str(i)+'=0) -> (c'+str(i)+str(j)+'=0))'"'") else: f.write('guarantees += '"'"'&\\n\\t[]((g'+str(i)+'=0) -> (c'+str(j)+str(i)+'=0))'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def ru_disconnect(G,rulist,buslist): """Writes specification disconnecting contactor if rectifier is unhealthy Parameters ---------- G : networkX graph rulist : list of all rectifiers buslist: list of all ac buses """ for i in rulist: for j in buslist: for e in G.edges(): if i in e and j in e: if i < j: f.write('guarantees += '"'"'&\\n\\t[]((ru'+str(i)+'=0) -> (c'+str(i)+str(j)+'=0))'"'") else: f.write('guarantees += '"'"'&\\n\\t[]((ru'+str(i)+'=0) -> (c'+str(j)+str(i)+'=0))'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def all_pairs(gens): """Returns list of all generator pairs (potential parallels). Parameters ---------- gens : list list of all generator nodes """ answer = [] for i in range(len(gens)): for j in range(i+1, len(gens)): if (gens[i],gens[j]) not in answer: answer.append((gens[i],gens[j])) return answer #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def all_gens(list,G): """Finds all generator pairs that are connected through graph. 
Parameters ---------- G : NetworkX graph list : tuples list of all generator pairs """ pgens = [] for i in range(len(list)): if nx.has_path(G,list[i][0], list[i][1]) is True: pgens.append(list[i]) return pgens #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def ppaths(i,j,G): """Finds all contactors between two parallel sources. Converts to LTL. Parameters ---------- G : NetworkX graph source i : node Starting node for path target j : node Ending node for path """ result = nx.shortest_path(G,source=i,target=j) C = {} guarantees = 'guarantees += '"'"'&\\n\\t[](!(' for k in range(0,len(result)-1): if result[k] < result[k+1]: C[k] = 'c'+str(result[k])+str(result[k+1]) else: C[k] = 'c'+str(result[k+1])+str(result[k]) guarantees = guarantees + '('+str(C[0])+'=1)' for m in range(1,len(C)): guarantees = guarantees+ ' & ('+str(C[m])+'=1)' guarantees = guarantees + '))'"'" return guarantees #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def noparallel(G,list): """Writes non-paralleling specifications to file. Parameters ---------- G : NetworkX graph list : tuples list of all generator pairs with paths in G """ sourcetemp = (all_pairs(list)) source = all_gens(sourcetemp,G) for i in range(0,len(source)): mat = ppaths(source[i][0], source[i][1], G) f.write(mat) f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def faulttol(prob,allgens,genfail): """Finds all combinations of failures. 
Parameters ---------- prob: int probability up to which failure can occur allgens: list list of all components that can fail genfail: int probability of failure of single component """ tuples = int(prob / genfail) fails = [] temp = [] if tuples <= 1: fails = allgens[:] else: fails = allgens[:] for i in range(2,tuples+1): for temp in itertools.combinations(allgens,i): fails.append(temp) return fails #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_genassump(genfail,genlist): """Writes generator environment assumption Parameters ---------- genfail : int how many generators may fail at once time genlist : list of all generators """ f.write('assumptions += '"'"'&\\n\\t[]((g'+str(genlist[0])) for i in range(1,len(genlist)): f.write(' + g'+str(genlist[i])) f.write(') >= '+str(len(genlist)-genfail)+')'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_ruassump(rufail,rulist): """Writes rectifier environment assumption Parameters ---------- rufail : int how many rectifiers may fail at once time genlist : list of all generators """ if len(rulist) > 0: f.write('assumptions += '"'"'&\\n\\t[]((ru'+str(rulist[0])) for i in range(1,len(rulist)): f.write(' + ru'+str(rulist[i])) f.write(') >= '+str(len(rulist)-rufail)+')'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def remove_ru_edges(G,buslist,rulist): pairs = [] H = copy.deepcopy(G) if len(buslist) > 0: for i in busac: for j in rus: pairs.append((i,j)) return pairs #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def remove_rus(G,buslist,rulist): pairs = [] H = copy.deepcopy(G) if len(buslist) > 0: for i in busac: for j in rus: pairs.append((i,j)) H.remove_edges_from(pairs) return H #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def buspathnodes(G,busno,source): """Finds determines if path exists 
from bus to generator. Parameters ---------- busno: int node number for bus sources: list list of all generators G: NetworkX graph """ buspaths = [] if nx.has_path(G,busno,source): buspaths.append((busno,source)) return buspaths #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def acbusprop(G,source,target): paths = [] C = [] temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) gens2.remove(target) for m in gens2: temp.append((source,m)) D.remove_edges_from(temp) edges = remove_ru_edges(G,busac,rus) D.remove_edges_from(edges) for path in nx.all_simple_paths(D,source,target): paths.append(path) for p in range(0,len(paths)): for i in range(0,len(paths[p])-1): C.append((paths[p][i],paths[p][i+1])) f.write('disc_props['"'"'B'+str(source)+str(target)+str(p)+"'"'] = '"'") if paths[p][1] in gens: f.write('(g'+str(paths[p][1])+'=1)') elif paths[p][1] in busac: f.write('(b'+str(paths[p][1])+'=1)') elif paths[p][1] in null: f.write('(b'+str(paths[p][1])+'=1)') else: pass if len(paths[p]) > 2: for j in range(2,len(paths[p])): if paths[p][j] in gens: f.write(' & (g'+str(paths[p][j])+'=1)') elif paths[p][j] in busac: f.write(' & (b'+str(paths[p][j])+'=1)') elif paths[p][j] in null: f.write(' & (b'+str(paths[p][j])+'=1)') else: pass for k in range(0,len(C)): if C[k][0] < C[k][1]: f.write(' & (c'+str(C[k][0])+str(C[k][1])+'=1)') else: f.write(' & (c'+str(C[k][1])+str(C[k][0])+'=1)') f.write("'"'\n') C = [] #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_acbusprop(G,buslist,genlist): """Writes dc bus properties Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ for i in buslist: for j in genlist: acbusprop(G,i,j) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def acbusspec(G,busno,gen): temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) gens2.remove(gen) for m in gens2: 
temp.append((busno,m)) D.remove_edges_from(temp) edges = remove_ru_edges(G,busac,rus) D.remove_edges_from(edges) paths = [] for path in nx.all_simple_paths(D,busno,gen,cutoff=None): paths.append(path) for j in range(0,len(paths)): f.write('guarantees += '"'"'&\\n\\t[]((B'+str(busno)+str(gen)+str(j)+') -> (b'+str(busno)+'=1))'"'"'\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_acbusspec(G,buslist,genlist): """Writes dc bus specifications Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ for i in buslist: for j in genlist: acbusspec(G,i,j) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_acbusspec2(G,buslist,genlist): """Writes specifications for dc bus unpowered conditions Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ paths = [] temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) edges = remove_ru_edges(G,busac,rus) D.remove_edges_from(edges) for i in buslist: f.write('guarantees += '"'"'&\\n\\t[](!((0=1)') for j in genlist: gens2.remove(j) D.remove_nodes_from(gens2) for path in nx.all_simple_paths(D,i,j): paths.append(path) f.write(' | (B' + str(i) + str(j) + str(len(paths)-1)+')') paths = [] gens2 = copy.deepcopy(gens) D = copy.deepcopy(G) f.write(') -> (b'+str(i)+'=0))'"'"'\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_dcbusalways(buslist): """Writes dc bus specification must always be powered Parameters ---------- buslist : list of all dc buses """ for i in buslist: f.write('guarantees += '"'"'&\\n\\t[](b'+str(i)+' = 1)'"'"+'\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def dcbusprop(G,source,target): """Creates discrete properties for power status of dc buses Parameters ---------- G : networkX graph source : node dc bus target : node 
generator """ temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) gens2.remove(target) D.remove_nodes_from(gens2) paths = [] C = [] for path in nx.all_simple_paths(D,source,target,cutoff=None): paths.append(path) for p in range(0,len(paths)): for i in range(0,len(paths[p])-1): if paths[p][i] in busdc and paths[p][i+1] in rus: pass elif paths[p][i] in rus and paths[p][i+1] in busdc: pass else: C.append((paths[p][i],paths[p][i+1])) f.write('disc_props['"'"'B'+str(source)+str(target)+str(p)+"'"'] = '"'") if paths[p][1] in gens: f.write('(g'+str(paths[p][1])+'=1)') elif paths[p][1] in busac: f.write('(b'+str(paths[p][1])+'=1)') elif paths[p][1] in rus: f.write('(ru'+str(paths[p][1])+'=1)') elif paths[p][1] in busdc: f.write('(b'+str(paths[p][1])+'=1)') elif paths[p][1] in null: f.write('(b'+str(paths[p][1])+'=1)') else: pass for j in range(2,len(paths[p])): if paths[p][j] in gens: f.write(' & (g'+str(paths[p][j])+'=1)') elif paths[p][j] in busac: f.write(' & (b'+str(paths[p][j])+'=1)') elif paths[p][j] in rus: f.write(' & (ru'+str(paths[p][j])+'=1)') elif paths[p][1] in busdc: f.write(' & (b'+str(paths[p][1])+'=1)') elif paths[p][1] in null: f.write(' & (b'+str(paths[p][1])+'=1)') else: pass for k in range(0,len(C)): if C[k][0] < C[k][1]: f.write(' & (c'+str(C[k][0])+str(C[k][1])+'=1)') else: f.write(' & (c'+str(C[k][1])+str(C[k][0])+'=1)') f.write("'"'\n') C = [] def write_dcbusprop(G,buslist,genlist): """Writes dc bus properties Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ for i in buslist: for j in genlist: dcbusprop(G,i,j) def dcbusspec(G,busno,gen): """Creates specifications for when DC bus gets powered Parameters ---------- G : networkX graph busno : node dc bus gen : node generator """ paths = [] C = [] temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) gens2.remove(gen) D.remove_nodes_from(gens2) for path in nx.all_simple_paths(D,busno,gen,cutoff=None): 
paths.append(path) for j in range(0,len(paths)): f.write('guarantees += '"'"'&\\n\\t[]((B'+str(busno)+str(gen)+str(j)+') -> (b'+str(busno)+'=1))'"'"'\n') def write_dcbusspec(G,buslist,genlist): """Writes dc bus specifications Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ paths = [] for i in buslist: for j in genlist: dcbusspec(G,i,j) def write_dcbusspec2(G,buslist,genlist): """Writes specifications for dc bus unpowered conditions Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ paths = [] temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) for i in buslist: f.write('guarantees += '"'"'&\\n\\t[](!((0=1)') for j in genlist: gens2.remove(j) D.remove_nodes_from(gens2) for path in nx.all_simple_paths(D,i,j): paths.append(path) f.write(' | (B' + str(i) + str(j) + str(len(paths)-1)+')') paths = [] gens2 = copy.deepcopy(gens) D = copy.deepcopy(G) f.write(') -> (b'+str(i)+'=0))'"'"'\n') #************************************************************************************************ def write_sat_bool(complist): """Defines boolean components (not including contactors) Parameters ---------- complist : list of all components """ for i in complist: if i in gens: f.write('(define g'+str(i)+'::bool)\n') elif i in busac: f.write('(define b'+str(i)+'::bool)\n') elif i in busdc: f.write('(define b'+str(i)+'::bool)\n') elif i in null: f.write('(define b'+str(i)+'::bool)\n') elif i in rus: f.write('(define r'+str(i)+'::bool)\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_con(G,rulist,dcbuslist,nullist): """Defines contactors (removes contactors between rus and dcbuses) Parameters ---------- G : networkX graph rulist: list of rectifier units dcbuslist: list of all dc buses """ remove = [] remove2 = [] for i in rulist: for j in dcbuslist: remove.append((i,j)) L = copy.deepcopy(G) 
L.remove_edges_from(remove) remove2 = all_pairs(nullist) L.remove_edges_from(remove2) edges = L.edges() for i in range(0,len(edges)): f.write('(define c'+str(edges[i][0])+str(edges[i][1])+'::bool)\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_always(complist): """Asserts boolean components always on/powered Parameters ---------- complist : list of all components """ remove2 = [] for i in complist: if i in busac: f.write('(assert (= b'+str(i)+' true))\n') elif i in busdc: f.write('(assert (= b'+str(i)+' true))\n') elif i in null: f.write('(assert (= b'+str(i)+' true))\n') remove2 = all_pairs(null) edges = G.edges() for j in range(0,len(remove2)): if remove2[j] in edges: f.write('(assert (= c'+str(remove2[j][0])+str(remove2[j][1])+' true))\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_disconnectgen(G,genlist): """Writes specification disconnecting contactor if generator is unhealthy Parameters ---------- G : networkX graph genlist : list of all generators """ neighbor = [] for i in genlist: neighbor = G.neighbors(i) for j in range(0, len(neighbor)): if i < neighbor[j]: f.write('(assert (=> (= g'+str(i)+' false) (= c'+str(i)+str(neighbor[j])+' false)))\n') elif i > neighbor[j]: f.write('(assert (=> (= g'+str(i)+' false) (= c'+str(neighbor[j])+str(i)+' false)))\n') neighbor = [] #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_disconnectru(G,rulist): """Writes specification disconnecting contactor if rectifier is unhealthy Parameters ---------- G : networkX graph genlist : list of all generators """ neighbor = [] H = copy.deepcopy(G) H.remove_nodes_from(busdc) for i in rulist: neighbor = H.neighbors(i) for j in range(0, len(neighbor)): if i < neighbor[j]: f.write('(assert (=> (= r'+str(i)+' false) (= c'+str(i)+str(neighbor[j])+' false)))\n') elif i > neighbor[j]: f.write('(assert (=> (= r'+str(i)+' false) (= 
c'+str(neighbor[j])+str(i)+' false)))\n') neighbor = [] #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_noparallel(G,genlist): pairs = all_pairs(genlist) H = copy.deepcopy(G) H.remove_nodes_from(rus) for i in range(0, len(pairs)): for path in nx.all_simple_paths(H, pairs[i][0], pairs[i][1]): f.write('(assert (not (and ') for j in range(0,len(path)-1): if path[j] < path[j+1]: f.write('(= c'+str(path[j])+str(path[j+1])+' true) ') else: f.write('(= c'+str(path[j+1])+str(path[j])+' true) ') f.write(')))\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_acbusprop1(G,buslist, genlist): H = copy.deepcopy(G) H.remove_nodes_from(rus) for i in buslist: for j in genlist: gen_temp = copy.deepcopy(gens) gen_temp.remove(j) H.remove_nodes_from(gen_temp) H.remove_nodes_from(rus) for path in nx.all_simple_paths(H,i,j): f.write('(assert (=> (and ') for k in range(1,len(path)): if path[k] in gens: f.write(' (= g'+str(path[k])+' true)') elif path[k] in busac: f.write(' (= b'+str(path[k])+' true)') elif path[k] in null: f.write(' (= b'+str(path[k])+' true)') for m in range(0,len(path)-1): if path[m] < path[m+1]: f.write(' (= c'+str(path[m])+str(path[m+1])+' true)') else: f.write(' (= c'+str(path[m+1])+str(path[m])+' true)') f.write(') (= b'+str(i)+' true)))\n') H = copy.deepcopy(G) H.remove_nodes_from(rus) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_acbusprop2(G,buslist, genlist): H = copy.deepcopy(G) H.remove_nodes_from(rus) # i = buslist[7] # j = genlist[5] for i in buslist: f.write('(assert (=> (not (or ') for j in genlist: gen_temp = copy.deepcopy(gens) gen_temp.remove(j) H.remove_nodes_from(gen_temp) H.remove_nodes_from(rus) for path in nx.all_simple_paths(H,i,j): f.write(' (and') for k in range(1,len(path)): if path[k] in gens: f.write(' (= g'+str(path[k])+' true)') elif path[k] in busac: f.write(' (= b'+str(path[k])+' true)') 
elif path[k] in null: f.write(' (= b'+str(path[k])+' true)') for m in range(0,len(path)-1): if path[m] < path[m+1]: f.write(' (= c'+str(path[m])+str(path[m+1])+' true)') else: f.write(' (= c'+str(path[m+1])+str(path[m])+' true)') f.write(')') H = copy.deepcopy(G) H.remove_nodes_from(rus) f.write(')) (= b'+str(i)+ ' false)))\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_dcbusprop1(G,buslist, genlist): H = copy.deepcopy(G) for i in buslist: for j in genlist: gen_temp = copy.deepcopy(gens) gen_temp.remove(j) H.remove_nodes_from(gen_temp) for path in nx.all_simple_paths(H,i,j): f.write('(assert (=> (and ') for k in range(1,len(path)): if path[k] in gens: f.write(' (= g'+str(path[k])+' true)') elif path[k] in busac: f.write(' (= b'+str(path[k])+' true)') elif path[k] in null: f.write(' (= b'+str(path[k])+' true)') elif path[k] in rus: f.write(' (= r'+str(path[k])+' true)') for m in range(0,len(path)-1): if path[m] in busdc and path[m+1] in rus: pass elif path[m] in rus and path[m+1] in busdc: pass elif path[m] < path[m+1]: f.write(' (= c'+str(path[m])+str(path[m+1])+' true)') elif path[m+1] < path[m]: f.write(' (= c'+str(path[m+1])+str(path[m])+' true)') f.write(') (= b'+str(i)+' true)))\n') H = copy.deepcopy(G) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_dcbusprop2(G,buslist, genlist): H = copy.deepcopy(G) for i in buslist: f.write('(assert (=> (not (or ') for j in genlist: gen_temp = copy.deepcopy(gens) gen_temp.remove(j) H.remove_nodes_from(gen_temp) for path in nx.all_simple_paths(H,i,j): f.write(' (and') for k in range(1,len(path)): if path[k] in gens: f.write(' (= g'+str(path[k])+' true)') elif path[k] in busac: f.write(' (= b'+str(path[k])+' true)') elif path[k] in null: f.write(' (= b'+str(path[k])+' true)') elif path[k] in rus: f.write(' (= r'+str(path[k])+' true)') for m in range(0,len(path)-1): if path[m] in busdc and path[m+1] in rus: pass elif 
path[m] in rus and path[m+1] in busdc: pass elif path[m] < path[m+1]: f.write(' (= c'+str(path[m])+str(path[m+1])+' true)') elif path[m+1] < path[m]: f.write(' (= c'+str(path[m+1])+str(path[m])+' true)') f.write(')') H = copy.deepcopy(G) f.write(')) (= b'+str(i)+' false)))\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_sat_env(gfail,rfail): gentemp = [x for x in range(0,gfail+1)] rutemp = [x for x in range(0,rfail+1)] allgens = [] allrus = [] env_filename = 'env' count = 0 for i in gentemp: for j in itertools.combinations(gens,len(gens)-i): if len(gens)-i == 1: for k in gens: allgens.append(k) else: allgens.append(j) allgens = list(set(allgens)) if len(rus) == 0: pass else: for i in rutemp: for j in itertools.combinations(rus,len(rus)-i): if len(rus)-i == 1: for k in rus: allrus.append(k) else: allrus.append(j) allrus = list(set(allrus)) for i in range(0,len(allgens)): for j in range(0,len(allrus)): env_filename = 'env'+str(count)+'.ys' f2 = open(env_filename,"w") for k in gens: if isinstance(allgens[i],int): if k == allgens[i]: f2.write('(assert (= g'+str(k)+' true))\n') else: f2.write('(assert (= g'+str(k)+' false))\n') elif k in allgens[i]: f2.write('(assert (= g'+str(k)+' true))\n') else: f2.write('(assert (= g'+str(k)+' false))\n') for m in rus: if isinstance(allrus[j],int): if m == allrus[j]: f2.write('(assert (= r'+str(m)+' true))\n') else: f2.write('(assert (= r'+str(m)+' false))\n') elif m in allrus[j]: f2.write('(assert (= r'+str(m)+' true))\n') else: f2.write('(assert (= r'+str(m)+' false))\n') count = count+1 f2.write('(check)\n') f2.close() #************************************************************************************************ start = time.time() file_name = 'test_spec' #Load adjacency matrix from matfile data = scipy.io.loadmat('SLD.mat') datatemp = data['A'] #Failure Probabilities genfail = 1 rufail = 1 busfail = 0 #Node definitions busac = [2,3] busess = [2,3] busdc = [6,7] null = [] rus = 
[4,5] gens = [0,1] #Bus time nptime = 0 #Create networkx graph from adjacency matrix G = nx.from_numpy_array(datatemp) print('number of edges ' + str(len(G.edges()))) print('number of nodes ' + str(len(G.nodes()))) #sets of failure states # fails = faulttol(10,gens,genfail) ################################################################ # Synthesize ################################################################ if ('tulip' in sys.argv): file_name = file_name+'.py' f = open(file_name, "w") # #environment variables print('writing environment variables') write_envgen(gens) write_envru(rus) #discrete system variables print('writing discrete system variables: buses') write_discbus(busac) write_discbus(busdc) write_discnull(null) print(time.time()-start) print('writing discrete system variables: contactors') # write_discac_con(G,null) #only use this when there are NO DC components write_discdc_con(G,rus,busdc,null) print(time.time()-start) print('writing discrete system variables: bus counters') write_essbusdisc(busess,nptime) #acbus discrete properties print('removing ru paths') H = remove_rus(G,busac,rus) print(time.time()-start) print('writing discrete bus properties: AC') write_acbusprop(H,busac,gens) print(time.time()-start) print('writing discrete bus properties: DC') write_dcbusprop(G,busdc,gens) print(time.time()-start) #Environment assumptions print('writing environment assumptions') write_genassump(genfail,gens) write_ruassump(rufail,rus) print(time.time()-start) #acbus power guarantees print('writing bus power specifications: AC') write_acbusspec(H,busac,gens) write_acbusspec2(H,busac,gens) write_essbusspec(busess,nptime) print(time.time()-start) #disconnect unhealthy guarantees print('disconnecting unhealthy generators') g_disconnect(G,gens,busac) g_disconnect(G,gens,null) print(time.time()-start) print('disconnecting unhealthy rus') ru_disconnect(G,rus,busac) ru_disconnect(G,rus,null) print(time.time()-start) ##non-parallel guarantees print('writing no 
paralleling specs') noparallel(G,gens) print(time.time()-start) #dc bus power specifications print('writing bus power specifications: DC') write_dcbusspec(G,busdc,gens) write_dcbusspec2(G,busdc,gens) write_dcbusalways(busdc) f.close() print('It took', time.time()-start, 'seconds.') ################################################################ # Synthesize ################################################################ if ('yices' in sys.argv): file_name = file_name+'.ys' f = open(file_name, "w") #Writing component definitions write_sat_bool(gens) write_sat_bool(busac) write_sat_bool(busdc) write_sat_bool(null) write_sat_bool(rus) #Writing contactor definitions write_sat_con(G,rus,busdc,null) #Writing always power bus and null assertions write_sat_always(busac) write_sat_always(busdc) write_sat_always(null) #Writing disconnect implications write_sat_disconnectgen(G,gens) write_sat_disconnectru(G,rus) write_sat_noparallel(G,gens) write_sat_acbusprop1(G,busac,gens) write_sat_acbusprop2(G,busac,gens) write_sat_dcbusprop1(G,busdc, gens) write_sat_dcbusprop2(G,busdc, gens) #write_environment assumptions write_sat_env(genfail,rufail) f.close() print('It took', time.time()-start, 'seconds.')
{ "repo_name": "tulip-control/tulip-control", "path": "contrib/AES/AES_specgen.py", "copies": "1", "size": "35673", "license": "bsd-3-clause", "hash": -3006080899344952300, "line_mean": 31.0800359712, "line_max": 135, "alpha_frac": 0.455750848, "autogenerated": false, "ratio": 3.5848658426288815, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.45406166906288814, "avg_score": null, "num_lines": null }
""" An aircraft electric system specification generator presented in the HSCC 2013 paper. Huan Xu (mumu@caltech.edu) October 30, 2012 """ import sys, os import re, copy import numpy as np import scipy import scipy.io import networkx as nx import itertools import time #************************************************************************************************************************************** def write_envgen(genlist): """Declares generator environment variable Parameters ---------- genlist : list of all generators """ for i in genlist: f.write('env_vars['"'"'g'+str(i)+"'"+'] = [0,1]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_envru(rulist): """Declares rectifier unit environment variable Parameters ---------- rulist : list of all rectifier units """ for i in rulist: f.write('env_vars['"'"'ru'+str(i)+"'"+'] = [0,1]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_essbusspec(essbuslist,time): """Writes counter for essential buses Parameters ---------- essbuslist : list of buses with essential loads time: int max time bus can be unpowered """ for i in essbuslist: f.write('guarantees += '"'"'&\\n\\t[]((b'+str(i)+'=0) -> (next(countb'+str(i)+') = countb'+str(i)+'+1))'"'") f.write('\n') f.write('guarantees += '"'"'&\\n\\t[]((b'+str(i)+'=1) -> (next(countb'+str(i)+') = 0))'"'") f.write('\n') f.write('guarantees += '"'"'&\\n\\t[](countb'+str(i)+' <= '+str(time)+')'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_essbusdisc(essbuslist,time): """Declares bus counter system variable Parameters ---------- essbuslist : list of all essential buses time: int max time bus can be unpowered """ for i in essbuslist: f.write('disc_sys_vars['"'"'countb'+str(i)+"'"+'] = [x for x in range(0,'+str(time+1)+')]') f.write('\n') 
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_discbus(buslist): """Declares bus system variables Parameters ---------- buslist : list of all buses """ for i in buslist: f.write('disc_sys_vars['"'"'b'+str(i)+"'"+'] = [0,1]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_discnull(nullist): """Declares null node system variables Parameters ---------- nullist : list of all null nodes """ for i in nullist: f.write('disc_sys_vars['"'"'b'+str(i)+"'"+'] = [1]') f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_discdc_con(G,rulist,dcbuslist,nullist): """Declares contactors (removes contactors between rus and dcbuses) Parameters ---------- G : networkX graph rulist: list of rectifier units dcbuslist: list of all dc buses """ remove = [] remove2 = [] for i in rulist: for j in dcbuslist: remove.append((i,j)) L = copy.deepcopy(G) L.remove_edges_from(remove) remove2 = all_pairs(nullist) L.remove_edges_from(remove2) edges = L.edges() # print edges for i in range(0,len(edges)): # print edges[i][0] # print edges[i][1] f.write('disc_sys_vars['"'"'c'+str(edges[i][0])+str(edges[i][1])+"'"'] = [0,1]\n') for j in range(0,len(remove2)): f.write('disc_sys_vars['"'"'c'+str(remove2[j][0])+str(remove2[j][1])+"'"'] = [1]\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_discac_con(G,nullist): """Declares contactors (removes contactors between rus and dcbuses) Parameters ---------- G : networkX graph rulist: list of rectifier units dcbuslist: list of all dc buses """ remove = [] H = copy.deepcopy(G) if len(nullist) >= 2: remove = all_pairs(nullist) H.remove_edges_from(remove) edges = H.edges() for i in range(0,len(edges)): if edges[i][0] < edges[i][1]: f.write('disc_sys_vars['"'"'c'+str(edges[i][0])+str(edges[i][1])+"'"'] = [0,1]\n') else: 
f.write('disc_sys_vars['"'"'c'+str(edges[i][1])+str(edges[i][0])+"'"'] = [0,1]\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def g_disconnect(G,genlist,buslist): """Writes specification disconnecting contactor if generator is unhealthy Parameters ---------- G : networkX graph genlist : list of all generators buslist: list of all ac buses """ for i in genlist: for j in buslist: for e in G.edges(): if i in e and j in e: if i < j: f.write('guarantees += '"'"'&\\n\\t[]((g'+str(i)+'=0) -> (c'+str(i)+str(j)+'=0))'"'") else: f.write('guarantees += '"'"'&\\n\\t[]((g'+str(i)+'=0) -> (c'+str(j)+str(i)+'=0))'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def ru_disconnect(G,rulist,buslist): """Writes specification disconnecting contactor if rectifier is unhealthy Parameters ---------- G : networkX graph rulist : list of all rectifiers buslist: list of all ac buses """ for i in rulist: for j in buslist: for e in G.edges(): if i in e and j in e: if i < j: f.write('guarantees += '"'"'&\\n\\t[]((ru'+str(i)+'=0) -> (c'+str(i)+str(j)+'=0))'"'") else: f.write('guarantees += '"'"'&\\n\\t[]((ru'+str(i)+'=0) -> (c'+str(j)+str(i)+'=0))'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def all_pairs(gens): """Returns list of all generator pairs (potential parallels). Parameters ---------- gens : list list of all generator nodes """ answer = [] for i in range(len(gens)): for j in range(i+1, len(gens)): if (gens[i],gens[j]) not in answer: answer.append((gens[i],gens[j])) return answer #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def all_gens(list,G): """Finds all generator pairs that are connected through graph. 
Parameters ---------- G : NetworkX graph list : tuples list of all generator pairs """ pgens = [] for i in range(len(list)): if nx.has_path(G,list[i][0], list[i][1]) is True: pgens.append(list[i]) return pgens #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def ppaths(i,j,G): """Finds all contactors between two parallel sources. Converts to LTL. Parameters ---------- G : NetworkX graph source i : node Starting node for path target j : node Ending node for path """ result = nx.shortest_path(G,source=i,target=j) C = {} guarantees = 'guarantees += '"'"'&\\n\\t[](!(' for k in range(0,len(result)-1): if result[k] < result[k+1]: C[k] = 'c'+str(result[k])+str(result[k+1]) else: C[k] = 'c'+str(result[k+1])+str(result[k]) guarantees = guarantees + '('+str(C[0])+'=1)' for m in range(1,len(C)): guarantees = guarantees+ ' & ('+str(C[m])+'=1)' guarantees = guarantees + '))'"'" return guarantees #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def noparallel(G,list): """Writes non-paralleling specifications to file. Parameters ---------- G : NetworkX graph list : tuples list of all generator pairs with paths in G """ sourcetemp = (all_pairs(list)) source = all_gens(sourcetemp,G) for i in range(0,len(source)): mat = ppaths(source[i][0], source[i][1], G) f.write(mat) f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def faulttol(prob,allgens,genfail): """Finds all combinations of failures. 
Parameters ---------- prob: int probability up to which failure can occur allgens: list list of all components that can fail genfail: int probability of failure of single component """ tuples = int(prob/genfail) fails = [] temp = [] if tuples <= 1: fails = allgens[:] else: fails = allgens[:] for i in range(2,tuples+1): for temp in itertools.combinations(allgens,i): fails.append(temp) return fails #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_genassump(genfail,genlist): """Writes generator environment assumption Parameters ---------- genfail : int how many generators may fail at once time genlist : list of all generators """ f.write('assumptions += '"'"'&\\n\\t[]((g'+str(genlist[0])) for i in range(1,len(genlist)): f.write(' + g'+str(genlist[i])) f.write(') >= '+str(len(genlist)-genfail)+')'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def write_ruassump(rufail,rulist): """Writes rectifier environment assumption Parameters ---------- rufail : int how many rectifiers may fail at once time genlist : list of all generators """ if len(rulist) > 0: f.write('assumptions += '"'"'&\\n\\t[]((ru'+str(rulist[0])) for i in range(1,len(rulist)): f.write(' + ru'+str(rulist[i])) f.write(') >= '+str(len(rulist)-rufail)+')'"'") f.write('\n') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def remove_ru_edges(G,buslist,rulist): pairs = [] H = copy.deepcopy(G) if len(buslist) > 0: for i in busac: for j in rus: pairs.append((i,j)) return pairs #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def remove_rus(G,buslist,rulist): pairs = [] H = copy.deepcopy(G) if len(buslist) > 0: for i in busac: for j in rus: pairs.append((i,j)) H.remove_edges_from(pairs) return H #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def buspathnodes(G,busno,source): """Finds determines if path exists 
    from bus to generator.

    Parameters
    ----------
    busno: int
        node number for bus
    source: node
        generator to test reachability for (fixed: doc said "sources: list")
    G: NetworkX graph
    """
    buspaths = []
    if nx.has_path(G,busno,source):
        buspaths.append((busno,source))
    return buspaths
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def acbusprop(G,source,target):
    """Writes the disc_props entries B<source><target><p> describing every
    simple AC path from bus ``source`` to generator ``target`` (other
    generators and all rectifier edges removed first)."""
    paths = []
    C = []
    temp = []
    edges = []
    D = copy.deepcopy(G)
    gens2 = copy.deepcopy(gens)
    gens2.remove(target)
    # Cut direct bus-to-other-generator edges so paths only reach `target`.
    for m in gens2:
        temp.append((source,m))
    D.remove_edges_from(temp)
    # AC analysis: drop all bus-rectifier edges.
    edges = remove_ru_edges(G,busac,rus)
    D.remove_edges_from(edges)
    for path in nx.all_simple_paths(D,source,target):
        paths.append(path)
    for p in range(0,len(paths)):
        # C collects the contactor (edge) pairs along path p.
        for i in range(0,len(paths[p])-1):
            C.append((paths[p][i],paths[p][i+1]))
        f.write('disc_props['"'"'B'+str(source)+str(target)+str(p)+"'"'] = '"'")
        # First hop after the bus: generator, AC bus, or null node.
        if paths[p][1] in gens:
            f.write('(g'+str(paths[p][1])+'=1)')
        elif paths[p][1] in busac:
            f.write('(b'+str(paths[p][1])+'=1)')
        elif paths[p][1] in null:
            f.write('(b'+str(paths[p][1])+'=1)')
        else:
            pass
        # Remaining hops, if any.
        if len(paths[p]) > 2:
            for j in range(2,len(paths[p])):
                if paths[p][j] in gens:
                    f.write(' & (g'+str(paths[p][j])+'=1)')
                elif paths[p][j] in busac:
                    f.write(' & (b'+str(paths[p][j])+'=1)')
                elif paths[p][j] in null:
                    f.write(' & (b'+str(paths[p][j])+'=1)')
                else:
                    pass
        # Contactor names always carry the smaller node id first.
        for k in range(0,len(C)):
            if C[k][0] < C[k][1]:
                f.write(' & (c'+str(C[k][0])+str(C[k][1])+'=1)')
            else:
                f.write(' & (c'+str(C[k][1])+str(C[k][0])+'=1)')
        f.write("'"'\n')
        C = []
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_acbusprop(G,buslist,genlist):
    """Writes ac bus properties (fixed: docstring previously said "dc")

    Parameters
    ----------
    G : networkX graph
    buslist : list
        of all ac buses
    genlist : list
        of all generators
    """
    for i in buslist:
        for j in genlist:
            acbusprop(G,i,j)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def acbusspec(G,busno,gen):
    """Writes the guarantees tying each B<busno><gen><j> path property to
    bus ``busno`` being powered."""
    temp = []
    edges = []
    D = copy.deepcopy(G)
    gens2 = copy.deepcopy(gens)
    gens2.remove(gen)
    for m in gens2:
        temp.append((busno,m))
    D.remove_edges_from(temp)
    edges = remove_ru_edges(G,busac,rus)
    D.remove_edges_from(edges)
    paths = []
    for path in nx.all_simple_paths(D,busno,gen,cutoff=None):
        paths.append(path)
    # []((B<bus><gen><j>) -> (b<bus>=1)): a live path powers the bus.
    for j in range(0,len(paths)):
        f.write('guarantees += '"'"'&\\n\\t[]((B'+str(busno)+str(gen)+str(j)+') -> (b'+str(busno)+'=1))'"'"'\n')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_acbusspec(G,buslist,genlist):
    """Writes ac bus specifications (fixed: docstring previously said "dc")

    Parameters
    ----------
    G : networkX graph
    buslist : list
        of all ac buses
    genlist : list
        of all generators
    """
    for i in buslist:
        for j in genlist:
            acbusspec(G,i,j)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_acbusspec2(G,buslist,genlist):
    """Writes specifications for ac bus unpowered conditions
    (fixed: docstring previously said "dc")

    Parameters
    ----------
    G : networkX graph
    buslist : list
        of all ac buses
    genlist : list
        of all generators
    """
    paths = []
    temp = []
    edges = []
    D = copy.deepcopy(G)
    gens2 = copy.deepcopy(gens)
    edges = remove_ru_edges(G,busac,rus)
    D.remove_edges_from(edges)
    for i in buslist:
        f.write('guarantees += '"'"'&\\n\\t[](!((0=1)')
        for j in genlist:
            gens2.remove(j)
            D.remove_nodes_from(gens2)
            for path in nx.all_simple_paths(D,i,j):
                paths.append(path)
            # NOTE(review): only the LAST path index per (i, j) is emitted
            # here; confirm the intent is one representative property.
            f.write(' | (B' + str(i) + str(j) + str(len(paths)-1)+')')
            paths = []
            gens2 = copy.deepcopy(gens)
            # NOTE(review): D is reset to G without re-removing the RU
            # edges, unlike the initial setup above -- verify.
            D = copy.deepcopy(G)
        f.write(') -> (b'+str(i)+'=0))'"'"'\n')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_dcbusalways(buslist):
    """Writes dc bus specification must always be powered

    Parameters
    ----------
    buslist : list
        of all dc buses
    """
    for i in buslist:
        f.write('guarantees += '"'"'&\\n\\t[](b'+str(i)+' = 1)'"'"+'\n')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def dcbusprop(G,source,target):
    """Creates discrete properties for power status of dc buses

    Parameters
    ----------
    G : networkX graph
    source : node
        dc bus
    target : node
generator """ temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) gens2.remove(target) D.remove_nodes_from(gens2) paths = [] C = [] for path in nx.all_simple_paths(D,source,target,cutoff=None): paths.append(path) for p in range(0,len(paths)): for i in range(0,len(paths[p])-1): if paths[p][i] in busdc and paths[p][i+1] in rus: pass elif paths[p][i] in rus and paths[p][i+1] in busdc: pass else: C.append((paths[p][i],paths[p][i+1])) f.write('disc_props['"'"'B'+str(source)+str(target)+str(p)+"'"'] = '"'") if paths[p][1] in gens: f.write('(g'+str(paths[p][1])+'=1)') elif paths[p][1] in busac: f.write('(b'+str(paths[p][1])+'=1)') elif paths[p][1] in rus: f.write('(ru'+str(paths[p][1])+'=1)') elif paths[p][1] in busdc: f.write('(b'+str(paths[p][1])+'=1)') elif paths[p][1] in null: f.write('(b'+str(paths[p][1])+'=1)') else: pass for j in range(2,len(paths[p])): if paths[p][j] in gens: f.write(' & (g'+str(paths[p][j])+'=1)') elif paths[p][j] in busac: f.write(' & (b'+str(paths[p][j])+'=1)') elif paths[p][j] in rus: f.write(' & (ru'+str(paths[p][j])+'=1)') elif paths[p][1] in busdc: f.write(' & (b'+str(paths[p][1])+'=1)') elif paths[p][1] in null: f.write(' & (b'+str(paths[p][1])+'=1)') else: pass for k in range(0,len(C)): if C[k][0] < C[k][1]: f.write(' & (c'+str(C[k][0])+str(C[k][1])+'=1)') else: f.write(' & (c'+str(C[k][1])+str(C[k][0])+'=1)') f.write("'"'\n') C = [] def write_dcbusprop(G,buslist,genlist): """Writes dc bus properties Parameters ---------- G : networkX graph buslist : list of all dc buses genlist : list of all generators """ for i in buslist: for j in genlist: dcbusprop(G,i,j) def dcbusspec(G,busno,gen): """Creates specifications for when DC bus gets powered Parameters ---------- G : networkX graph busno : node dc bus gen : node generator """ paths = [] C = [] temp = [] edges = [] D = copy.deepcopy(G) gens2 = copy.deepcopy(gens) gens2.remove(gen) D.remove_nodes_from(gens2) for path in nx.all_simple_paths(D,busno,gen,cutoff=None): 
        paths.append(path)
    # []((B<bus><gen><j>) -> (b<bus>=1)): a live path powers the bus.
    for j in range(0,len(paths)):
        f.write('guarantees += '"'"'&\\n\\t[]((B'+str(busno)+str(gen)+str(j)+') -> (b'+str(busno)+'=1))'"'"'\n')
def write_dcbusspec(G,buslist,genlist):
    """Writes dc bus specifications

    Parameters
    ----------
    G : networkX graph
    buslist : list
        of all dc buses
    genlist : list
        of all generators
    """
    paths = []
    for i in buslist:
        for j in genlist:
            dcbusspec(G,i,j)
def write_dcbusspec2(G,buslist,genlist):
    """Writes specifications for dc bus unpowered conditions

    Parameters
    ----------
    G : networkX graph
    buslist : list
        of all dc buses
    genlist : list
        of all generators
    """
    paths = []
    temp = []
    edges = []
    D = copy.deepcopy(G)
    gens2 = copy.deepcopy(gens)
    for i in buslist:
        f.write('guarantees += '"'"'&\\n\\t[](!((0=1)')
        for j in genlist:
            gens2.remove(j)
            D.remove_nodes_from(gens2)
            for path in nx.all_simple_paths(D,i,j):
                paths.append(path)
            # NOTE(review): only the LAST path index per (i, j) is emitted;
            # confirm this matches write_acbusspec2's intent.
            f.write(' | (B' + str(i) + str(j) + str(len(paths)-1)+')')
            paths = []
            gens2 = copy.deepcopy(gens)
            D = copy.deepcopy(G)
        f.write(') -> (b'+str(i)+'=0))'"'"'\n')
#************************************************************************************************
def write_sat_bool(complist):
    """Defines boolean components (not including contactors)

    Parameters
    ----------
    complist : list
        of all components
    """
    # Yices variable prefix depends on component kind: g=generator,
    # b=bus (AC, DC or null), r=rectifier unit.
    for i in complist:
        if i in gens:
            f.write('(define g'+str(i)+'::bool)\n')
        elif i in busac:
            f.write('(define b'+str(i)+'::bool)\n')
        elif i in busdc:
            f.write('(define b'+str(i)+'::bool)\n')
        elif i in null:
            f.write('(define b'+str(i)+'::bool)\n')
        elif i in rus:
            f.write('(define r'+str(i)+'::bool)\n')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_sat_con(G,rulist,dcbuslist,nullist):
    """Defines contactors (removes contactors between rus and dcbuses)

    Parameters
    ----------
    G : networkX graph
    rulist: list
        of rectifier units
    dcbuslist: list
        of all dc buses
    nullist: list
        of null nodes (their mutual edges carry no contactor either)
    """
    remove = []
    remove2 = []
    for i in rulist:
        for j in dcbuslist:
            remove.append((i,j))
    L = copy.deepcopy(G)
    L.remove_edges_from(remove)
    remove2 = all_pairs(nullist)
    L.remove_edges_from(remove2)
    edges = L.edges()
    for i in range(0,len(edges)):
        f.write('(define c'+str(edges[i][0])+str(edges[i][1])+'::bool)\n')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_sat_always(complist):
    """Asserts boolean components always on/powered

    NOTE(review): relies on the module globals ``G`` and ``null`` in
    addition to the ``complist`` parameter -- consider passing them in.

    Parameters
    ----------
    complist : list
        of all components
    """
    remove2 = []
    for i in complist:
        if i in busac:
            f.write('(assert (= b'+str(i)+' true))\n')
        elif i in busdc:
            f.write('(assert (= b'+str(i)+' true))\n')
        elif i in null:
            f.write('(assert (= b'+str(i)+' true))\n')
    # Contactors between null nodes are always closed.
    remove2 = all_pairs(null)
    edges = G.edges()
    for j in range(0,len(remove2)):
        if remove2[j] in edges:
            f.write('(assert (= c'+str(remove2[j][0])+str(remove2[j][1])+' true))\n')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_sat_disconnectgen(G,genlist):
    """Writes specification disconnecting contactor if generator is unhealthy

    Parameters
    ----------
    G : networkX graph
    genlist : list
        of all generators
    """
    neighbor = []
    for i in genlist:
        neighbor = G.neighbors(i)
        for j in range(0, len(neighbor)):
            # Contactor names always carry the smaller node id first.
            if i < neighbor[j]:
                f.write('(assert (=> (= g'+str(i)+' false) (= c'+str(i)+str(neighbor[j])+' false)))\n')
            elif i > neighbor[j]:
                f.write('(assert (=> (= g'+str(i)+' false) (= c'+str(neighbor[j])+str(i)+' false)))\n')
        neighbor = []
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_sat_disconnectru(G,rulist):
    """Writes specification disconnecting contactor if rectifier is unhealthy

    Parameters
    ----------
    G : networkX graph
    rulist : list
        of all rectifier units (fixed: docstring previously said generators)
    """
    neighbor = []
    # DC buses are dropped first: RU-to-DC-bus links have no contactor.
    H = copy.deepcopy(G)
    H.remove_nodes_from(busdc)
    for i in rulist:
        neighbor = H.neighbors(i)
        for j in range(0, len(neighbor)):
            if i < neighbor[j]:
                f.write('(assert (=> (= r'+str(i)+' false) (= c'+str(i)+str(neighbor[j])+' false)))\n')
            elif i > neighbor[j]:
                f.write('(assert (=> (= r'+str(i)+' false) (= c'+str(neighbor[j])+str(i)+' false)))\n')
        neighbor = []
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_sat_noparallel(G,genlist):
    """For every generator pair, forbids closing all contactors along any
    simple AC path between them (no paralleling of sources)."""
    pairs = all_pairs(genlist)
    H = copy.deepcopy(G)
    H.remove_nodes_from(rus)
    for i in range(0, len(pairs)):
        for path in nx.all_simple_paths(H, pairs[i][0], pairs[i][1]):
            f.write('(assert (not (and ')
            for j in range(0,len(path)-1):
                if path[j] < path[j+1]:
                    f.write('(= c'+str(path[j])+str(path[j+1])+' true) ')
                else:
                    f.write('(= c'+str(path[j+1])+str(path[j])+' true) ')
            f.write(')))\n')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_sat_acbusprop1(G,buslist, genlist):
    """Asserts: if every component and contactor along an AC path from bus
    i to generator j is healthy/closed, then bus i is powered."""
    H = copy.deepcopy(G)
    H.remove_nodes_from(rus)
    for i in buslist:
        for j in genlist:
            # Keep only generator j reachable for this pass.
            gen_temp = copy.deepcopy(gens)
            gen_temp.remove(j)
            H.remove_nodes_from(gen_temp)
            H.remove_nodes_from(rus)
            for path in nx.all_simple_paths(H,i,j):
                f.write('(assert (=> (and ')
                for k in range(1,len(path)):
                    if path[k] in gens:
                        f.write(' (= g'+str(path[k])+' true)')
                    elif path[k] in busac:
                        f.write(' (= b'+str(path[k])+' true)')
                    elif path[k] in null:
                        f.write(' (= b'+str(path[k])+' true)')
                for m in range(0,len(path)-1):
                    if path[m] < path[m+1]:
                        f.write(' (= c'+str(path[m])+str(path[m+1])+' true)')
                    else:
                        f.write(' (= c'+str(path[m+1])+str(path[m])+' true)')
                f.write(') (= b'+str(i)+' true)))\n')
            H = copy.deepcopy(G)
            H.remove_nodes_from(rus)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_sat_acbusprop2(G,buslist, genlist):
    """Asserts the converse: if no AC path to any generator is fully live,
    bus i is unpowered."""
    H = copy.deepcopy(G)
    H.remove_nodes_from(rus)
    # i = buslist[7]
    # j = genlist[5]
    for i in buslist:
        f.write('(assert (=> (not (or ')
        for j in genlist:
            gen_temp = copy.deepcopy(gens)
            gen_temp.remove(j)
            H.remove_nodes_from(gen_temp)
            H.remove_nodes_from(rus)
            for path in nx.all_simple_paths(H,i,j):
                f.write(' (and')
                for k in range(1,len(path)):
                    if path[k] in gens:
                        f.write(' (= g'+str(path[k])+' true)')
                    elif path[k] in busac:
                        f.write(' (= b'+str(path[k])+' true)')
                    elif path[k] in null:
                        f.write(' (= b'+str(path[k])+' true)')
                for m in range(0,len(path)-1):
                    if path[m] < path[m+1]:
                        f.write(' (= c'+str(path[m])+str(path[m+1])+' true)')
                    else:
                        f.write(' (= c'+str(path[m+1])+str(path[m])+' true)')
                f.write(')')
            H = copy.deepcopy(G)
            H.remove_nodes_from(rus)
        f.write(')) (= b'+str(i)+ ' false)))\n')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_sat_dcbusprop1(G,buslist, genlist):
    """Asserts: a fully live path (through rectifiers) from DC bus i to
    generator j powers bus i.  DC-bus/rectifier hops carry no contactor."""
    H = copy.deepcopy(G)
    for i in buslist:
        for j in genlist:
            gen_temp = copy.deepcopy(gens)
            gen_temp.remove(j)
            H.remove_nodes_from(gen_temp)
            for path in nx.all_simple_paths(H,i,j):
                f.write('(assert (=> (and ')
                for k in range(1,len(path)):
                    if path[k] in gens:
                        f.write(' (= g'+str(path[k])+' true)')
                    elif path[k] in busac:
                        f.write(' (= b'+str(path[k])+' true)')
                    elif path[k] in null:
                        f.write(' (= b'+str(path[k])+' true)')
                    elif path[k] in rus:
                        f.write(' (= r'+str(path[k])+' true)')
                for m in range(0,len(path)-1):
                    if path[m] in busdc and path[m+1] in rus:
                        pass
                    elif path[m] in rus and path[m+1] in busdc:
                        pass
                    elif path[m] < path[m+1]:
                        f.write(' (= c'+str(path[m])+str(path[m+1])+' true)')
                    elif path[m+1] < path[m]:
                        f.write(' (= c'+str(path[m+1])+str(path[m])+' true)')
                f.write(') (= b'+str(i)+' true)))\n')
            H = copy.deepcopy(G)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_sat_dcbusprop2(G,buslist, genlist):
    """Asserts the converse: no fully live path to any generator means DC
    bus i is unpowered."""
    H = copy.deepcopy(G)
    for i in buslist:
        f.write('(assert (=> (not (or ')
        for j in genlist:
            gen_temp = copy.deepcopy(gens)
            gen_temp.remove(j)
            H.remove_nodes_from(gen_temp)
            for path in nx.all_simple_paths(H,i,j):
                f.write(' (and')
                for k in range(1,len(path)):
                    if path[k] in gens:
                        f.write(' (= g'+str(path[k])+' true)')
                    elif path[k] in busac:
                        f.write(' (= b'+str(path[k])+' true)')
                    elif path[k] in null:
                        f.write(' (= b'+str(path[k])+' true)')
                    elif path[k] in rus:
                        f.write(' (= r'+str(path[k])+' true)')
                for m in range(0,len(path)-1):
                    if path[m] in busdc and path[m+1] in rus:
                        pass
                    elif path[m] in rus and path[m+1] in busdc:
                        pass
                    elif path[m] < path[m+1]:
                        f.write(' (= c'+str(path[m])+str(path[m+1])+' true)')
                    elif path[m+1] < path[m]:
                        f.write(' (= c'+str(path[m+1])+str(path[m])+' true)')
                f.write(')')
            H = copy.deepcopy(G)
        f.write(')) (= b'+str(i)+' false)))\n')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def write_sat_env(gfail,rfail):
    """Writes one env<count>.ys file per generator/rectifier health
    combination with up to gfail generators and rfail rectifiers failed.

    NOTE(review): when ``rus`` is empty, ``allrus`` stays empty and the
    nested output loop below never runs, so NO env files are written at
    all -- the earlier `if len(rus) == 0: pass` does not cover this case.
    """
    gentemp = [x for x in range(0,gfail+1)]
    rutemp = [x for x in range(0,rfail+1)]
    allgens = []
    allrus = []
    env_filename = 'env'
    count = 0
    # Every healthy-generator subset of size len(gens)-i, i = 0..gfail.
    for i in gentemp:
        for j in itertools.combinations(gens,len(gens)-i):
            if len(gens)-i == 1:
                for k in gens:
                    allgens.append(k)
            else:
                allgens.append(j)
    allgens = list(set(allgens))
    if len(rus) == 0:
        pass
    else:
        for i in rutemp:
            for j in itertools.combinations(rus,len(rus)-i):
                if len(rus)-i == 1:
                    for k in rus:
                        allrus.append(k)
                else:
                    allrus.append(j)
        allrus = list(set(allrus))
    # One env file per (generator subset, rectifier subset) combination.
    for i in range(0,len(allgens)):
        for j in range(0,len(allrus)):
            env_filename = 'env'+str(count)+'.ys'
            f2 = open(env_filename,"w")
            for k in gens:
                # Entries are either a single int (one survivor) or a tuple.
                if isinstance(allgens[i],int):
                    if k == allgens[i]:
                        f2.write('(assert (= g'+str(k)+' true))\n')
                    else:
                        f2.write('(assert (= g'+str(k)+' false))\n')
                elif k in allgens[i]:
                    f2.write('(assert (= g'+str(k)+' true))\n')
                else:
                    f2.write('(assert (= g'+str(k)+' false))\n')
            for m in rus:
                if isinstance(allrus[j],int):
                    if m == allrus[j]:
                        f2.write('(assert (= r'+str(m)+' true))\n')
                    else:
                        f2.write('(assert (= r'+str(m)+' false))\n')
                elif m in allrus[j]:
                    f2.write('(assert (= r'+str(m)+' true))\n')
                else:
                    f2.write('(assert (= r'+str(m)+' false))\n')
            count = count+1
            f2.write('(check)\n')
            f2.close()
#************************************************************************************************
# ---- Script entry: build the graph, then emit a TuLiP spec ('tulip' in
# ---- argv) and/or a Yices SAT problem ('yices' in argv).
start = time.time()
file_name = 'test_spec'
#Load adjacency matrix from matfile
data = scipy.io.loadmat('SLD.mat')
datatemp = data['A']
# create matrix from matfile
A = np.matrix(datatemp)
#Failure Probabilities
genfail = 1
rufail = 1
busfail = 0
#Node definitions (node ids in the adjacency matrix)
busac = [2,3]
busess = [2,3]
busdc = [6,7]
null = []
rus = [4,5]
gens = [0,1]
#Bus time
nptime = 0
#Create networkx graph from adjacency matrix
G=nx.from_numpy_matrix(A)
print 'number of edges ' + str(len(G.edges()))
print 'number of nodes ' + str(len(G.nodes()))
#sets of failure states
# fails = faulttol(10,gens,genfail)
################################################################
# Synthesize
################################################################
if ('tulip' in sys.argv):
    file_name = file_name+'.py'
    f = open(file_name, "w")
    # #environment variables
    print 'writing environment variables'
    write_envgen(gens)
    write_envru(rus)
    #discrete system variables
    print 'writing discrete system variables: buses'
    write_discbus(busac)
    write_discbus(busdc)
    write_discnull(null)
    print time.time()-start
    print 'writing discrete system variables: contactors'
    # write_discac_con(G,null) #only use this when there are NO DC components
    write_discdc_con(G,rus,busdc,null)
    print time.time()-start
    print 'writing discrete system variables: bus counters'
    write_essbusdisc(busess,nptime)
    #acbus discrete properties
    print 'removing ru paths'
    H = remove_rus(G,busac,rus)
    print time.time()-start
    print 'writing discrete bus properties: AC'
    write_acbusprop(H,busac,gens)
    print time.time()-start
    print 'writing discrete bus properties: DC'
    write_dcbusprop(G,busdc,gens)
    print time.time()-start
    #Environment assumptions
    print 'writing environment assumptions'
    write_genassump(genfail,gens)
    write_ruassump(rufail,rus)
    print time.time()-start
    #acbus power guarantees
    print 'writing bus power specifications: AC'
    write_acbusspec(H,busac,gens)
    write_acbusspec2(H,busac,gens)
    write_essbusspec(busess,nptime)
    print time.time()-start
    #disconnect unhealthy guarantees
    print 'disconnecting unhealthy generators'
    g_disconnect(G,gens,busac)
    g_disconnect(G,gens,null)
    print time.time()-start
    print 'disconnecting unhealthy rus'
    ru_disconnect(G,rus,busac)
    ru_disconnect(G,rus,null)
    print time.time()-start
    ##non-parallel guarantees
    print 'writing no paralleling specs'
    noparallel(G,gens)
    print time.time()-start
    #dc bus power specifications
    print 'writing bus power specifications: DC'
    write_dcbusspec(G,busdc,gens)
    write_dcbusspec2(G,busdc,gens)
    write_dcbusalways(busdc)
    f.close()
    print 'It took', time.time()-start, 'seconds.'
################################################################
# Synthesize
################################################################
if ('yices' in sys.argv):
    file_name = file_name+'.ys'
    f = open(file_name, "w")
    #Writing component definitions
    write_sat_bool(gens)
    write_sat_bool(busac)
    write_sat_bool(busdc)
    write_sat_bool(null)
    write_sat_bool(rus)
    #Writing contactor definitions
    write_sat_con(G,rus,busdc,null)
    #Writing always power bus and null assertions
    write_sat_always(busac)
    write_sat_always(busdc)
    write_sat_always(null)
    #Writing disconnect implications
    write_sat_disconnectgen(G,gens)
    write_sat_disconnectru(G,rus)
    write_sat_noparallel(G,gens)
    write_sat_acbusprop1(G,busac,gens)
    write_sat_acbusprop2(G,busac,gens)
    write_sat_dcbusprop1(G,busdc, gens)
    write_sat_dcbusprop2(G,busdc, gens)
    #write_environment assumptions
    write_sat_env(genfail,rufail)
    f.close()
    print 'It took', time.time()-start, 'seconds.'
{ "repo_name": "necozay/tulip-control", "path": "contrib/AES/AES_specgen.py", "copies": "1", "size": "35890", "license": "bsd-3-clause", "hash": 376596043563363840, "line_mean": 31.2751798561, "line_max": 135, "alpha_frac": 0.4524658679, "autogenerated": false, "ratio": 3.608848667672197, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9481320426900565, "avg_score": 0.015998821734326478, "num_lines": 1112 }
"""A naive benchmark script.""" from __future__ import print_function import textwrap import timeit def time_hashing(message_size): setup = textwrap.dedent(''' import random import csiphash key = ''.join(chr(random.randint(0, 255)) for _ in xrange(16)) message = ''.join(chr(random.randint(0, 255)) for _ in xrange({message_size})) ''').format(message_size=message_size) statement = 'csiphash.siphash24(key, message)' results = [] for _ in xrange(3): results.append(timeit.timeit(setup=setup, stmt=statement, number=1000000)) return sum(results) / 3.0 # From the SipHash paper (http://cr.yp.to/siphash/siphash-20120620.pdf): # # > A fair rule-of-thumb for the distribution on message-sizes on an Internet # > backbone is that roughly one-third of messages are 43 bytes (TCP ACKs), # > one-third are about 256 bytes (common PPP dialup MTU), and one-third are 1500 # > bytes (common Ethernet MTU). # # We're adding a 1MiB message size too, just for fun. MESSAGE_SIZES = [43, 256, 1500, (1024 * 1024)] if __name__ == '__main__': for message_size in MESSAGE_SIZES: average_time = time_hashing(message_size) print('siphash24(char[{message_size}]) = {average_time:0.2f} microseconds'.format(message_size=message_size, average_time=average_time))
{ "repo_name": "zacharyvoase/python-csiphash", "path": "test/perf_test.py", "copies": "1", "size": "1317", "license": "unlicense", "hash": 5908028136633492000, "line_mean": 33.6578947368, "line_max": 144, "alpha_frac": 0.6810933941, "autogenerated": false, "ratio": 3.2121951219512197, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.439328851605122, "avg_score": null, "num_lines": null }
"""A naive example of dependency injection on Python. Example implementation of dependency injection in Python from Martin Fowler's article about dependency injection and inversion of control: http://www.martinfowler.com/articles/injection.html This mini application uses ``movies`` library, that is configured to work with csv file movies database. """ import movies import movies.finders import example.db import example.main import settings import fixtures import dependency_injector.containers as containers import dependency_injector.providers as providers @containers.override(movies.MoviesModule) class MyMoviesModule(containers.DeclarativeContainer): """IoC container for overriding movies module component providers.""" finder = providers.Factory(movies.finders.CsvMovieFinder, csv_file_path=settings.MOVIES_CSV_PATH, delimiter=',', **movies.MoviesModule.finder.kwargs) class CsvApplication(containers.DeclarativeContainer): """IoC container of csv application component providers.""" main = providers.Callable(example.main.main, movie_lister=movies.MoviesModule.lister) init_db = providers.Callable(example.db.init_csv, movies_data=fixtures.MOVIES_SAMPLE_DATA, csv_file_path=settings.MOVIES_CSV_PATH, delimiter=',') if __name__ == '__main__': CsvApplication.init_db() CsvApplication.main()
{ "repo_name": "ets-labs/dependency_injector", "path": "examples/miniapps/movie_lister/app_csv.py", "copies": "1", "size": "1566", "license": "bsd-3-clause", "hash": 2833055148077426700, "line_mean": 30.9591836735, "line_max": 78, "alpha_frac": 0.6743295019, "autogenerated": false, "ratio": 4.423728813559322, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 49 }
# A naive Python implementation of LIS problem

"""
To make use of recursive calls, this function must return two things:
1) Length of LIS ending with element arr[n-1]. We use max_ending_here for
this purpose
2) Overall maximum as the LIS may end with an element before arr[n-1]
max_ref is used this purpose. The value of LIS of full array of size n is
stored in *max_ref which is our final result
"""

# global variable to store the maximum
global maximum


def _lis(arr, n):
    """Return the length of the LIS ending exactly at arr[n-1], updating
    the module-level ``maximum`` with the best value seen so far.

    Exponential time -- this is the deliberately naive recursion.
    FIX: uses ``range`` instead of the Python-2-only ``xrange``.
    """
    global maximum

    # Base Case: a single element is an increasing subsequence of length 1.
    if n == 1:
        return 1

    # max_ending_here is the length of LIS ending with arr[n-1].
    max_ending_here = 1

    # Recursively get all LIS ending with arr[0], arr[1]..arr[n-2]; if
    # arr[i-1] < arr[n-1], arr[n-1] can extend that subsequence.
    for i in range(1, n):
        res = _lis(arr, i)
        if arr[i - 1] < arr[n - 1] and res + 1 > max_ending_here:
            max_ending_here = res + 1

    # Keep the overall maximum up to date (the LIS may end earlier).
    maximum = max(maximum, max_ending_here)
    return max_ending_here


def lis(arr):
    """Return the length of the longest increasing subsequence of ``arr``.

    FIX: returns 0 for an empty sequence (the original returned 1).
    """
    global maximum

    n = len(arr)
    if n == 0:
        return 0

    # maximum holds the result; _lis() stores its answer there.
    maximum = 1
    _lis(arr, n)
    return maximum


# Driver program to test the above function.
# FIX: print() form works on Python 2 and 3 (the original used the
# Python-2-only statement form).
arr = [10, 22, 9, 33, 21, 50, 41, 60]
n = len(arr)
print("Length of lis is " + str(lis(arr)))
{ "repo_name": "maazsq/Algorithms_Example", "path": "Longest-Common-Subsequence/Python/Longest_increasing _subsequence.py", "copies": "9", "size": "1596", "license": "apache-2.0", "hash": -5368068176726014000, "line_mean": 25.6, "line_max": 67, "alpha_frac": 0.6522556391, "autogenerated": false, "ratio": 3.477124183006536, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8629379822106537, "avg_score": null, "num_lines": null }
# A naive recursive Python program to find the minimum number of
# operations to convert str1 to str2
def editDistance(str1, str2, m, n):
    """Return the edit distance between str1[:m] and str2[:n] using plain
    recursion (exponential time; kept naive on purpose for the demo --
    see editDistDP below for the O(m*n) version)."""
    # If the first string is empty, the only option is to insert all
    # characters of the second string.
    if m == 0:
        return n

    # If the second string is empty, the only option is to remove all
    # characters of the first string.
    if n == 0:
        return m

    # Matching last characters cost nothing: ignore them and recurse.
    if str1[m - 1] == str2[n - 1]:
        return editDistance(str1, str2, m - 1, n - 1)

    # Otherwise try all three edits on the last character and take the
    # cheapest.
    return 1 + min(editDistance(str1, str2, m, n - 1),      # Insert
                   editDistance(str1, str2, m - 1, n),      # Remove
                   editDistance(str1, str2, m - 1, n - 1))  # Replace


# A dynamic programming based Python program for the edit distance problem
def editDistDP(str1, str2, m, n):
    """Return the edit distance between str1[:m] and str2[:n] in O(m*n)
    time and space via bottom-up dynamic programming."""
    # dp[i][j] = edit distance between str1[:i] and str2[:j].
    dp = [[0 for x in range(n + 1)] for x in range(m + 1)]

    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0:
                # Empty first string: insert all of str2[:j].
                dp[i][j] = j
            elif j == 0:
                # Empty second string: remove all of str1[:i].
                dp[i][j] = i
            elif str1[i - 1] == str2[j - 1]:
                # Last characters match: no extra operation needed.
                dp[i][j] = dp[i - 1][j - 1]
            else:
                dp[i][j] = 1 + min(dp[i][j - 1],      # Insert
                                   dp[i - 1][j],      # Remove
                                   dp[i - 1][j - 1])  # Replace
    return dp[m][n]


# Driver program to test the above function.
# FIX: print() with a single argument works identically on Python 2 and 3;
# the original used the Python-2-only statement form (a syntax error on 3).
str1 = "sunday"
str2 = "saturday"
print(editDistance(str1, str2, len(str1), len(str2)))
{ "repo_name": "Evanc123/interview_prep", "path": "dynamic_programming/edit_distance.py", "copies": "1", "size": "2307", "license": "mit", "hash": 5823822797611899000, "line_mean": 32.9411764706, "line_max": 66, "alpha_frac": 0.5886432596, "autogenerated": false, "ratio": 3.638801261829653, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4727444521429653, "avg_score": null, "num_lines": null }
#Ana Jessye 1615310046
#Franklin Yuri 1615310033
#Bruno de Oliveira Freire- 1615310030
import random

# Outcome message for every winning/losing (player, computer) move pair.
# Moves: 1 = pedra, 2 = tesoura, 3 = papel.
_RESULTADOS = {
    (1, 2): " ganhou miseravel pois pedra vence tesoura \n computador colocou tesoura",
    (1, 3): " voce perdeu miseravel pois papel vence pedra\n computador colocou papel",
    (2, 1): " perdeu miseravel pois tesoura perde para pedra\n computador colocou pedra",
    (2, 3): " ganhou miseravel pois tesoura vence papel\n computador colocou papel",
    (3, 1): " voce ganhou pois papel vence pedra\n computador colocou pedra",
    (3, 2): " perdeu miseravel pois papel perde para tesoura\n computador colocou tesoura",
}


def resultado(resposta, numero):
    """Return the round's outcome message for player move ``resposta``
    against computer move ``numero``; empty string for an invalid pair
    (matching the original, which printed nothing in that case)."""
    if resposta == numero:
        return "empate miseravel pois computador jogou: " + str(numero)
    return _RESULTADOS.get((resposta, numero), "")


def main():
    """Interactive game loop (Python 2: uses raw_input)."""
    nome = raw_input("digite seu nome:")
    x = 0
    while x != 1:
        print("\n escolha o numero\n digite 1 para pedra \n digite 2 para tesoura \n digite 3 para papel")
        resposta = int(input("digite sua resposta:"))
        # BUG FIX: draw a fresh computer move every round; the original
        # drew `numero` once before the loop, so the computer always
        # played the same move for the whole session.
        numero = random.randint(1, 3)
        msg = resultado(resposta, numero)
        if msg:
            print(msg)
        x = int(input("digite 1 para sair e outro numero para continuar:"))
    print("\n-----------------Obrigado por jogar!!!!!--------------------")


if __name__ == '__main__':
    main()
{ "repo_name": "any1m1c/ipc20161", "path": "jokenpo/equipe1/jokenpo.py", "copies": "1", "size": "1368", "license": "apache-2.0", "hash": 7478189164922612000, "line_mean": 37, "line_max": 102, "alpha_frac": 0.6564327485, "autogenerated": false, "ratio": 2.809034907597536, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.39654676560975355, "avg_score": null, "num_lines": null }
#Ana Jessye 1615310046
#Franklin Yuri 1615310033
#Bruno de Oliveira Freire- 1615310030
# Rock-paper-scissors-lizard-Spock ("jokenpo sheldon") against the computer
# (Python 2 script: uses raw_input). Loops until the player types 1 at the
# exit prompt.
import random

nome = raw_input("digite seu nome:")  # player name; read for the prompt only
x = 0
while x != 1:
    # BUG FIX: the computer's move was drawn once, before the loop, so it
    # played the same move every round of the session. Draw a fresh move
    # each round. 1=pedra, 2=tesoura, 3=papel, 4=lagarto, 5=spock.
    numero = random.randint(1, 5)
    print("\n escolha o numero\n digite 1 para pedra \n digite 2 para tesoura \n digite 3 para papel \n digite 4 para lagarto \n digite 5 para spock")
    resposta = int(input("digite sua resposta:"))
    if resposta == 1 and numero == 2:
        print(" ganhou miseravel pois pedra vence tesoura \n computador colocou tesoura")
    if resposta == 1 and numero == 3:
        print(" voce perdeu miseravel pois papel vence pedra\n computador colocou papel")
    if resposta == 2 and numero == 1:
        print(" perdeu miseravel pois tesoura perde para pedra\n computador colocou pedra")
    if resposta == 2 and numero == 3:
        print(" ganhou miseravel pois tesoura vence papel\n computador colocou papel")
    if resposta == 3 and numero == 1:
        print(" voce ganhou pois papel vence pedra\n computador colocou pedra")
    if resposta == 3 and numero == 2:
        print(" perdeu miseravel pois papel perde para tesoura\n computador colocou tesoura")
    if resposta == numero:
        # BUG FIX: the original passed two arguments to print; under the
        # Python 2 print statement that outputs a tuple. Build one string.
        print("empate miseravel pois computador jogou: " + str(numero))
    if resposta == 1 and numero == 4:
        print(" ganhou miseravel pois pedra esmaga lagarto\n computador colocou lagarto")
    if resposta == 4 and numero == 1:
        print(" perdeu miseravel pois pedra esmaga lagarto\n computador colocou pedra")
    if resposta == 4 and numero == 5:
        print(" ganhou miseravel pois lagarto envenena spock\n computador colocou spock")
    if resposta == 5 and numero == 4:
        print(" perdeu miseravel pois spock e envenenado por lagarto\n computador colocou lagarto")
    if resposta == 5 and numero == 2:
        print("ganhou miseravel pois spock quebra tesoura\n computador colocou tesoura")
    if resposta == 2 and numero == 5:
        print(" perdeu miseravel pois tesoura e quebrada por spock\n computador colocou spock")
    if resposta == 2 and numero == 4:
        print("ganhou miseravel pois tesoura corta lagarto\n computador colocou lagarto")
    if resposta == 4 and numero == 2:
        print(" perdeu miseravel pois lagarto e cortada por tesoura\n computador colocou tesoura")
    if resposta == 4 and numero == 3:
        print(" ganhou miseravel pois lagarto come papel\n computador colocou papel")
    if resposta == 3 and numero == 4:
        print(" perdeu miseravel pois papel e comido por lagarto\n computador colocou lagarto")
    if resposta == 3 and numero == 5:
        print(" ganhou miseravel pois papel refuta spock\n computador colocou spock")
    if resposta == 5 and numero == 3:
        print(" perdeu miseravel pois spock e refutado por papel\n computador colocou papel")
    if resposta == 5 and numero == 1:
        print(" ganhou miseravel pois spock vaporiza pedra\n computador colocou pedra")
    if resposta == 1 and numero == 5:
        print(" perdeu miseravel pois pedra e vaporizada por spock\n computador colocou spock")
    x = int(input("digite 1 para sair e outro numero para continuar:"))
    if x == 1:
        print("\n-----------------Obrigado por jogar!!!!!--------------------")
{ "repo_name": "any1m1c/ipc20161", "path": "jokenpo/equipe1/jokenpo_sheldon.py", "copies": "1", "size": "3214", "license": "apache-2.0", "hash": 489276649272721600, "line_mean": 49.21875, "line_max": 150, "alpha_frac": 0.6795270691, "autogenerated": false, "ratio": 2.802092414995641, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8616490399469834, "avg_score": 0.07302581692516154, "num_lines": 64 }
# An algorithm to work out the number of end points of a 'long' cluster (beta, etc) in order to detect crossed or divergent tracks
# Note: This is currently only accurate for clusters of radius > ~20
# TODO Develop a similar algorithm for shorter blobs
# Author: Cal Hewitt

import numpy as np


def is_single_track(blob):
    """Return True if the blob looks like a single track.

    A single straight (or gently curved) track has at most two end points;
    more than two indicates crossed or divergent tracks.
    """
    return num_end_points(blob) <= 2


def num_end_points(blob):
    """Count the end points of a long cluster.

    Steps along the blob's best-fit line one pixel at a time, rasterising the
    normal at each step with Bresenham's algorithm to slice the cluster into
    per-row sub-clusters, then counts sub-clusters that have exactly one
    adjacent neighbour (those are track end points).

    ``blob`` is expected to expose ``pixels``, ``best_fit_line`` (slope,
    intercept), ``radius`` and ``centroid`` attributes.
    NOTE(review): divides by the slope ``m`` below, so a perfectly
    horizontal best-fit line (m == 0) would raise ZeroDivisionError —
    presumably never produced upstream; verify against the caller.
    """
    cluster, best_fit_line, radius, centroid = blob.pixels, blob.best_fit_line, blob.radius, blob.centroid
    m, c = best_fit_line
    radius = int(np.ceil(radius)) # Make radius into an integer, bigger is better for avoiding errors
    # Define constants and initialise arrays which we will use a lot later
    pixel_ch_x = 1 / np.sqrt( (m**2) + 1) # For efficiency, change in x between sample points
    m_normal = (-1)*(1/m) # Gradient of the normal to the line of best fit
    all_pixel_clusters = []
    num_end_points = 0
    # To begin the process, we are going to step along line of best fit from c - r to c + r, 1 pixel at a time
    # For simplicity we call this 'left to right'
    # First, find the leftmost point
    ch_x = radius / np.sqrt( (m**2) + 1 ) # Change in x between centroid and leftmost point
    start_point = ( centroid[0] - ch_x, centroid[1] - m*ch_x )
    # Now start stepping along the line of best fit, with i between 0 and diameter, 1 pixel at a time...
    for i in range( (radius*2) + 1):
        # First we locate the point on the line of best fit which corresponds to i
        current_point = (start_point[0] + (i*pixel_ch_x), start_point[1] + (m*i*pixel_ch_x))
        # We want to check for pixels which 'correspond' to this point by seeing if the normal at this point intersects them
        # Use Bresenham's Algorithm to rasterise the normal r either side of current_point, and then check for clusters
        # Make up bresenham start and end (more points than are actually needed, but probs computationally easier this way as B's alg is very light)
        p1 = (int(current_point[0] - radius), int(current_point[1] - np.ceil(m_normal*radius)))
        p2 = (int(current_point[0] + radius), int(current_point[1] + np.ceil(m_normal*radius)))
        relevant_pixels = bresenham(p1, p2)
        # Make a list of 'clusters' of these relevant pixels, which are from separate branches
        relevant_pixel_clusters = []
        last_active_pixel = None
        current_cluster = None
        for pixel in relevant_pixels:
            # Check that the pixel has been hit
            if pixel in cluster:
                if not current_cluster:
                    current_cluster = [pixel]
                else:
                    if pixels_adjacent(pixel, last_active_pixel):
                        current_cluster.append(pixel)
                    else:
                        # Gap along the normal: start a new sub-cluster
                        relevant_pixel_clusters.append(current_cluster)
                        current_cluster = [pixel]
                last_active_pixel = pixel
        # If a cluster has been partially formed by the end of the loop, still use it
        if current_cluster:
            relevant_pixel_clusters.append(current_cluster)
        if relevant_pixel_clusters:
            all_pixel_clusters.append(relevant_pixel_clusters)
    # By this point, all_pixel_clusters contains a list of rows, each of these a list of clusters
    # Check for clusters with only one neighbour, as these will be end points
    for i in range(len(all_pixel_clusters)):
        active_row = all_pixel_clusters[i]
        for active_cluster in active_row:
            neighbours = 0
            # Neighbours within the same row (excluding the cluster itself)...
            for check_cluster in all_pixel_clusters[i]:
                if clusters_adjacent(active_cluster, check_cluster) and (active_cluster != check_cluster):
                    neighbours += 1
            # ...and in the previous and next rows, when they exist
            if i > 0:
                for check_cluster in all_pixel_clusters[i-1]:
                    if clusters_adjacent(active_cluster, check_cluster):
                        neighbours += 1
            if i < (len(all_pixel_clusters) - 1):
                for check_cluster in all_pixel_clusters[i+1]:
                    if clusters_adjacent(active_cluster, check_cluster):
                        neighbours += 1
            if neighbours == 1:
                num_end_points += 1
    return num_end_points


def pixels_adjacent(pixel1, pixel2, distance = 1):
    """Return True if two (x, y) pixels lie within ``distance`` of each other
    on both axes (Chebyshev / chessboard adjacency)."""
    return abs(pixel2[0] - pixel1[0]) <= distance and abs(pixel2[1] - pixel1[1]) <= distance


def clusters_adjacent(cluster1, cluster2):
    """Return True if any pixel of ``cluster1`` is within 2 of any pixel of
    ``cluster2``."""
    for p1 in cluster1:
        for p2 in cluster2:
            if pixels_adjacent(p1, p2, 2): # Hack as sometimes Bresenham lines will miss a pixel
                return True
    return False


# An implementation of Bresenham's line algorithm, thanks to roguebasin.com
def bresenham(start, end):
    """Return the list of integer (x, y) points on the line from ``start`` to
    ``end`` (inclusive), in order from ``start`` to ``end``."""
    x1, y1 = start
    x2, y2 = end
    dx = x2 - x1
    dy = y2 - y1
    is_steep = abs(dy) > abs(dx)
    # For steep lines, iterate over y by transposing the coordinates
    if is_steep:
        x1, y1 = y1, x1
        x2, y2 = y2, x2
    swapped = False
    if x1 > x2:
        x1, x2 = x2, x1
        y1, y2 = y2, y1
        swapped = True
    dx = x2 - x1
    dy = y2 - y1
    error = int(dx / 2.0)
    ystep = 1 if y1 < y2 else -1
    y = y1
    points = []
    for x in range(x1, x2 + 1):
        coord = (y, x) if is_steep else (x, y)
        points.append(coord)
        error -= abs(dy)
        if error < 0:
            y += ystep
            error += dx
    # Restore original direction if the endpoints were swapped above
    if swapped:
        points.reverse()
    return points
{ "repo_name": "calhewitt/lucid-utils", "path": "lucid_utils/classification/end_detection.py", "copies": "2", "size": "5414", "license": "mit", "hash": -6095883016788638000, "line_mean": 44.8813559322, "line_max": 148, "alpha_frac": 0.6056520133, "autogenerated": false, "ratio": 3.650708024275118, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5256360037575118, "avg_score": null, "num_lines": null }
""" Analize test results for finding bottlenecks """ import re import sys import csv import time import bisect import os.path import argparse import collections import yaml import texttable try: import pygraphviz as pgv except ImportError: pgv = None sys.path.append("/mnt/other/work/disk_perf_test_tool") from wally.run_test import load_data_from from wally.utils import b2ssize, b2ssize_10 class SensorInfo(object): def __init__(self, name, print_name, native_ext, to_bytes_coef): self.name = name self.print_name = print_name self.native_ext = native_ext self.to_bytes_coef = to_bytes_coef _SINFO = [ SensorInfo('recv_bytes', 'net_recv', 'B', 1), SensorInfo('send_bytes', 'net_send', 'B', 1), SensorInfo('sectors_written', 'hdd_write', 'Sect', 512), SensorInfo('sectors_read', 'hdd_read', 'Sect', 512), SensorInfo('reads_completed', 'read_op', 'OP', None), SensorInfo('writes_completed', 'write_op', 'OP', None), SensorInfo('procs_blocked', 'blocked_procs', 'P', None), ] SINFO_MAP = dict((sinfo.name, sinfo) for sinfo in _SINFO) to_bytes = dict((sinfo.name, sinfo.to_bytes_coef) for sinfo in _SINFO if sinfo.to_bytes_coef is not None) class NodeSensorsData(object): def __init__(self, source_id, hostname, headers, values): self.source_id = source_id self.hostname = hostname self.headers = headers self.values = values self.times = None def finalize(self): self.times = [v[0] for v in self.values] def get_data_for_interval(self, beg, end): p1 = bisect.bisect_left(self.times, beg) p2 = bisect.bisect_right(self.times, end) obj = self.__class__(self.source_id, self.hostname, self.headers, self.values[p1:p2]) obj.times = self.times[p1:p2] return obj def __getitem__(self, name): idx = self.headers.index(name.split('.')) # +1 as first is a time return [val[idx] for val in self.values] def load_results_csv(fd): data = fd.read() results = {} for block in data.split("NEW_DATA"): block = block.strip() if len(block) == 0: continue it = csv.reader(block.split("\n")) headers = next(it) sens_data 
= [map(float, vals) for vals in it] source_id, hostname = headers[:2] headers = [(None, 'time')] + \ [header.split('.') for header in headers[2:]] assert set(map(len, headers)) == set([2]) results[source_id] = NodeSensorsData(source_id, hostname, headers, sens_data) return results def load_test_timings(fname, max_diff=1000): raw_map = collections.defaultdict(lambda: []) class data(object): pass load_data_from(fname)(None, data) for test_type, test_results in data.results.items(): if test_type == 'io': for tests_res in test_results: raw_map[tests_res.config.name].append(tests_res.run_interval) result = {} for name, intervals in raw_map.items(): intervals.sort() curr_start, curr_stop = intervals[0] curr_result = [] for (start, stop) in intervals[1:]: if abs(curr_start - start) < max_diff: # if abs(curr_stop - stop) > 2: # print abs(curr_stop - stop) assert abs(curr_stop - stop) < max_diff else: assert start + max_diff >= curr_stop assert stop > curr_stop curr_result.append((curr_start, curr_stop)) curr_start, curr_stop = start, stop curr_result.append((curr_start, curr_stop)) merged_res = [] curr_start, curr_stop = curr_result[0] for start, stop in curr_result[1:]: if abs(curr_stop - start) < max_diff: curr_stop = stop else: merged_res.append((curr_start, curr_stop)) curr_start, curr_stop = start, stop merged_res.append((curr_start, curr_stop)) result[name] = merged_res return result critical_values = dict( io_queue=1, usage_percent=0.8, procs_blocked=1, procs_queue=1) class AggregatedData(object): def __init__(self, sensor_name): self.sensor_name = sensor_name # (node, device): count self.per_device = collections.defaultdict(lambda: 0) # node: count self.per_node = collections.defaultdict(lambda: 0) # role: count self.per_role = collections.defaultdict(lambda: 0) # (role_or_node, device_or_*): count self.all_together = collections.defaultdict(lambda: 0) def __str__(self): res = "<AggregatedData({0})>\n".format(self.sensor_name) for (role_or_node, device), val in 
self.all_together.items(): res += " {0}:{1} = {2}\n".format(role_or_node, device, val) return res def total_consumption(sensors_data, roles_map): result = {} for name, sensor_data in sensors_data.items(): for pos, (dev, sensor) in enumerate(sensor_data.headers): if 'time' == sensor: continue try: ad = result[sensor] except KeyError: ad = result[sensor] = AggregatedData(sensor) val = sum(vals[pos] for vals in sensor_data.values) ad.per_device[(sensor_data.hostname, dev)] += val # vals1 = sensors_data['localhost:22']['sdc.sectors_read'] # vals2 = sensors_data['localhost:22']['sdb.sectors_written'] # from matplotlib import pyplot as plt # plt.plot(range(len(vals1)), vals1) # plt.plot(range(len(vals2)), vals2) # plt.show() # exit(1) for ad in result.values(): for (hostname, dev), val in ad.per_device.items(): ad.per_node[hostname] += val for role in roles_map[hostname]: ad.per_role[role] += val ad.all_together[(hostname, dev)] = val for role, val in ad.per_role.items(): ad.all_together[(role, '*')] = val for node, val in ad.per_node.items(): ad.all_together[(node, '*')] = val return result def avg_load(sensors_data): load = collections.defaultdict(lambda: 0) min_time = 0xFFFFFFFFFFF max_time = 0 for sensor_data in sensors_data.values(): min_time = min(min_time, min(sensor_data.times)) max_time = max(max_time, max(sensor_data.times)) for name, max_val in critical_values.items(): for pos, (dev, sensor) in enumerate(sensor_data.headers): if sensor == name: for vals in sensor_data.values: if vals[pos] > max_val: load[(sensor_data.hostname, dev, sensor)] += 1 return load, max_time - min_time def print_bottlenecks(sensors_data, max_bottlenecks=15): load, duration = avg_load(sensors_data) if not load: return "\n*** No bottlenecks found *** \n" rev_items = ((v, k) for (k, v) in load.items()) res = sorted(rev_items, reverse=True)[:max_bottlenecks] max_name_sz = max(len(name) for _, name in res) frmt = "{{0:>{0}}} | {{1:>4}}".format(max_name_sz) table = [frmt.format("Component", 
"% times load > 100%")] for (v, k) in res: table.append(frmt.format(k, int(v * 100.0 / duration + 0.5))) return "\n".join(table) def print_consumption(agg, min_transfer=None): rev_items = [] for (node_or_role, dev), v in agg.all_together.items(): rev_items.append((int(v), node_or_role + ':' + dev)) res = sorted(rev_items, reverse=True) if min_transfer is not None: res = [(v, k) for (v, k) in res if v >= min_transfer] if len(res) == 0: return None res = [(b2ssize(v) + "B", k) for (v, k) in res] max_name_sz = max(len(name) for _, name in res) max_val_sz = max(len(val) for val, _ in res) frmt = " {{0:>{0}}} | {{1:>{1}}} ".format(max_name_sz, max_val_sz) table = [frmt.format("Component", "Usage")] for (v, k) in res: table.append(frmt.format(k, v)) return "\n".join(table) def make_roles_mapping(source_id_mapping, source_id2hostname): result = {} for ssh_url, roles in source_id_mapping.items(): if '@' in ssh_url: source_id = ssh_url.split('@')[1] else: source_id = ssh_url.split('://')[1] if source_id.count(':') == 2: source_id = source_id.rsplit(":", 1)[0] if source_id.endswith(':'): source_id += "22" if source_id in source_id2hostname: result[source_id] = roles result[source_id2hostname[source_id]] = roles for testnode_src in (set(source_id2hostname) - set(result)): result[testnode_src] = ['testnode'] result[source_id2hostname[testnode_src]] = ['testnode'] return result def get_testdata_size(consumption): max_data = 0 for name, sens in SINFO_MAP.items(): if sens.to_bytes_coef is not None: agg = consumption.get(name) if agg is not None: cdt = agg.per_role.get('testnode', 0) * sens.to_bytes_coef max_data = max(max_data, cdt) return max_data def get_testop_cout(consumption): max_op = 0 for name, sens in SINFO_MAP.items(): if sens.to_bytes_coef is None: agg = consumption.get(name) if agg is not None: max_op = max(max_op, agg.per_role.get('testnode', 0)) return max_op def get_data_for_intervals(data, intervals): res = {} for begin, end in intervals: for name, node_data in 
data.items(): ndata = node_data.get_data_for_interval(begin, end) res[name] = ndata return res class Host(object): def __init__(self, name=None): self.name = name self.hdd_devs = {} self.net_devs = None def plot_consumption(per_consumer_table, fields, refload): if pgv is None: return hosts = {} storage_sensors = ('sectors_written', 'sectors_read') for (hostname, dev), consumption in per_consumer_table.items(): if hostname not in hosts: hosts[hostname] = Host(hostname) host = hosts[hostname] cons_map = dict(zip(fields, consumption)) for sn in storage_sensors: vl = cons_map.get(sn, 0) if vl > 0: host.hdd_devs.setdefault(dev, {})[sn] = vl p = pgv.AGraph(name='system', directed=True) net = "Network" p.add_node(net) in_color = 'red' out_color = 'green' for host in hosts.values(): g = p.subgraph(name="cluster_" + host.name, label=host.name, color="blue") g.add_node(host.name, shape="diamond") p.add_edge(host.name, net) p.add_edge(net, host.name) for dev_name, values in host.hdd_devs.items(): if dev_name == '*': continue to = values.get('sectors_written', 0) frm = values.get('sectors_read', 0) to_pw = 7 * to / refload frm_pw = 7 * frm / refload min_with = 0.1 if to_pw > min_with or frm_pw > min_with: dev_fqn = host.name + "." 
+ dev_name g.add_node(dev_fqn) if to_pw > min_with: g.add_edge(host.name, dev_fqn, label=b2ssize(to) + "B", penwidth=to_pw, fontcolor=out_color, color=out_color) if frm_pw > min_with: g.add_edge(dev_fqn, host.name, label=b2ssize(frm) + "B", penwidth=frm_pw, color=in_color, fontcolor=in_color) return p.string() def parse_args(args): parser = argparse.ArgumentParser() parser.add_argument('-t', '--time_period', nargs=2, type=int, default=None, help="Begin and end time for tests") parser.add_argument('-m', '--max-bottlenek', type=int, default=15, help="Max bottleneck to show") parser.add_argument('-x', '--max-diff', type=int, default=10, help="Max bottleneck to show in" + "0.1% from test nodes summ load") parser.add_argument('-d', '--debug-ver', action='store_true', help="Full report with original data") parser.add_argument('-u', '--user-ver', action='store_true', default=True, help="Avg load report") parser.add_argument('-s', '--select-loads', nargs='*', default=[]) parser.add_argument('-f', '--fields', nargs='*', default=[]) parser.add_argument('results_folder') return parser.parse_args(args[1:]) def main(argv): opts = parse_args(argv) stor_dir = os.path.join(opts.results_folder, 'sensor_storage') data = {} source_id2hostname = {} csv_files = os.listdir(stor_dir) for fname in csv_files: assert re.match(r"\d+_\d+.csv$", fname) csv_files.sort(key=lambda x: int(x.split('_')[0])) for fname in csv_files: with open(os.path.join(stor_dir, fname)) as fd: for name, node_sens_data in load_results_csv(fd).items(): if name in data: assert data[name].hostname == node_sens_data.hostname assert data[name].source_id == node_sens_data.source_id assert data[name].headers == node_sens_data.headers data[name].values.extend(node_sens_data.values) else: data[name] = node_sens_data for nd in data.values(): assert nd.source_id not in source_id2hostname source_id2hostname[nd.source_id] = nd.hostname nd.finalize() roles_file = os.path.join(opts.results_folder, 'nodes.yaml') src2roles = 
yaml.load(open(roles_file)) timings = load_test_timings(opts.results_folder) roles_map = make_roles_mapping(src2roles, source_id2hostname) max_diff = float(opts.max_diff) / 1000 fields = ('recv_bytes', 'send_bytes', 'sectors_read', 'sectors_written', 'reads_completed', 'writes_completed') if opts.fields != []: fields = [field for field in fields if field in opts.fields] for test_name, intervals in sorted(timings.items()): if opts.select_loads != []: if test_name not in opts.select_loads: continue data_chunks = get_data_for_intervals(data, intervals) consumption = total_consumption(data_chunks, roles_map) bottlenecks = print_bottlenecks(data_chunks) testdata_sz = get_testdata_size(consumption) * max_diff testop_count = get_testop_cout(consumption) * max_diff per_consumer_table = {} per_consumer_table_str = {} all_consumers = set()#consumption.values()[0].all_together) for value in consumption.values(): all_consumers = all_consumers | set(value.all_together) fields = [field for field in fields if field in consumption] all_consumers_sum = [] for consumer in all_consumers: tb_str = per_consumer_table_str[consumer] = [] tb = per_consumer_table[consumer] = [] vl = 0 for name in fields: val = consumption[name].all_together[consumer] if SINFO_MAP[name].to_bytes_coef is None: if val < testop_count: tb_str.append('0') else: tb_str.append(b2ssize_10(int(val))) else: val = int(val) * SINFO_MAP[name].to_bytes_coef if val < testdata_sz: tb_str.append('-') else: tb_str.append(b2ssize(val) + "B") tb.append(int(val)) vl += int(val) all_consumers_sum.append((vl, consumer)) all_consumers_sum.sort(reverse=True) plot_consumption(per_consumer_table, fields, testdata_sz / max_diff) tt = texttable.Texttable(max_width=130) tt.set_cols_align(["l"] + ["r"] * len(fields)) header = ["Name"] for fld in fields: if fld in SINFO_MAP: header.append(SINFO_MAP[fld].print_name) else: header.append(fld) tt.header(header) for summ, consumer in all_consumers_sum: if summ > 0: 
tt.add_row([":".join(consumer)] + per_consumer_table_str[consumer]) tt.set_deco(texttable.Texttable.VLINES | texttable.Texttable.HEADER) res = tt.draw() max_len = max(map(len, res.split("\n"))) print test_name.center(max_len) print res print bottlenecks if __name__ == "__main__": exit(main(sys.argv))
{ "repo_name": "Mirantis/disk_perf_test_tool", "path": "scripts/postprocessing/bottleneck.py", "copies": "1", "size": "17445", "license": "apache-2.0", "hash": -6338950476395135000, "line_mean": 30.4324324324, "line_max": 77, "alpha_frac": 0.5450272284, "autogenerated": false, "ratio": 3.588767743262703, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4633794971662703, "avg_score": null, "num_lines": null }
"""Analog analysis module.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import datetime from analog.exceptions import MissingFormatError from analog.formats import LogFormat from analog.report import Report #: Default verbs to monitor if unconfigured. DEFAULT_VERBS = ['DELETE', 'GET', 'PATCH', 'POST', 'PUT'] #: Default status codes to monitor if unconfigured. DEFAULT_STATUS_CODES = [1, 2, 3, 4, 5] #: Default paths (all) to monitor if unconfigured. DEFAULT_PATHS = [] class Analyzer: """Log analysis utility. Scan a logfile for logged requests and analyze calculate statistical metrics in a :py:class:`analog.report.Report`. """ def __init__(self, log, format, pattern=None, time_format=None, verbs=DEFAULT_VERBS, status_codes=DEFAULT_STATUS_CODES, paths=DEFAULT_PATHS, max_age=None, path_stats=False): """Configure log analyzer. :param log: handle on logfile to read and analyze. :type log: :py:class:`io.TextIOWrapper` :param format: log format identifier or 'custom'. :type format: ``str`` :param pattern: custom log format pattern expression. :type pattern: ``str`` :param time_format: log entry timestamp format (strftime compatible). :type time_format: ``str`` :param verbs: HTTP verbs to be tracked. Defaults to :py:data:`analog.analyzer.DEFAULT_VERBS`. :type verbs: ``list`` :param status_codes: status_codes to be tracked. May be prefixes, e.g. ["100", "2", "3", "4", "404" ]. Defaults to :py:data:`analog.analyzer.DEFAULT_STATUS_CODES`. :type status_codes: ``list`` :param paths: Paths to explicitly analyze. If not defined, paths are detected automatically. Defaults to :py:data:`analog.analyzer.DEFAULT_PATHS`. :type paths: ``list`` of ``str`` :param max_age: Max. age of log entries to analyze in minutes. Unlimited by default. :type max_age: ``int`` :raises: :py:class:`analog.exceptions.MissingFormatError` if no ``format`` is specified. 
""" self._log = log formats = LogFormat.all_formats() if format in formats: self._format = formats[format] elif format == 'custom': self._format = LogFormat('custom', pattern=pattern, time_format=time_format) else: raise MissingFormatError( "Require log format. Specify format name or custom regex " "pattern and timestamp format.") self._verbs = verbs self._status_codes = status_codes self._pathconf = paths self._max_age = max_age # execution time self.execution_time = None def _monitor_path(self, path): """Convert full request path to monitored path. If no path groups are configured to be monitored, all full paths are. :param path: the full request path. :type path: ``str`` :returns: the monitored path (part of ``path``) or ``None`` if not monitored. :rtype: ``str`` or ``None`` """ if not self._pathconf: return path for monitored in self._pathconf: if path.startswith(monitored): return monitored return None def _timestamp(self, time_str): """Convert timestamp strings from nginx to datetime objects. Format is "15/Jan/2014:14:12:50 +0000". :returns: request timestamp datetime. :rtype: :py:class:`datetime.datetime` """ return datetime.datetime.strptime(time_str, self._format.time_format) def __call__(self): """Analyze defined logfile. :returns: log analysis report object. 
:rtype: :py:class:`analog.report.Report` """ if self._max_age is not None: self._now = datetime.datetime.now() self._now = self._now.replace(second=0, microsecond=0) self._min_time = ( self._now - datetime.timedelta(minutes=self._max_age)) report = Report(self._verbs, self._status_codes) # read lines from logfile for the last max_age minutes for line in self._log: # parse line match = self._format.pattern.search(line) if match is None: continue log_entry = self._format.entry(match) if self._max_age is not None: # don't process anything older than max_age timestamp = self._timestamp(log_entry.timestamp) if timestamp < self._min_time: continue # stop processing when now was reached if timestamp > self._now: break # parse request path = self._monitor_path(log_entry.path) if path is None: continue # collect the numbers report.add( path=path, verb=log_entry.verb, status=int(log_entry.status), time=float(log_entry.request_time), upstream_time=float(log_entry.upstream_response_time), body_bytes=int(log_entry.body_bytes_sent)) # end timestamp report.finish() return report def analyze(log, format, pattern=None, time_format=None, verbs=DEFAULT_VERBS, status_codes=DEFAULT_STATUS_CODES, paths=DEFAULT_PATHS, max_age=None, path_stats=False, timing=False, output_format=None): """Convenience wrapper around :py:class:`analog.analyzer.Analyzer`. :param log: handle on logfile to read and analyze. :type log: :py:class:`io.TextIOWrapper` :param format: log format identifier or 'custom'. :type format: ``str`` :param pattern: custom log format pattern expression. :type pattern: ``str`` :param time_format: log entry timestamp format (strftime compatible). :type time_format: ``str`` :param verbs: HTTP verbs to be tracked. Defaults to :py:data:`analog.analyzer.DEFAULT_VERBS`. :type verbs: ``list`` :param status_codes: status_codes to be tracked. May be prefixes, e.g. ["100", "2", "3", "4", "404" ]. Defaults to :py:data:`analog.analyzer.DEFAULT_STATUS_CODES`. 
:type status_codes: ``list`` :param paths: Paths to explicitly analyze. If not defined, paths are detected automatically. Defaults to :py:data:`analog.analyzer.DEFAULT_PATHS`. :type paths: ``list`` of ``str`` :param max_age: Max. age of log entries to analyze in minutes. Unlimited by default. :type max_age: ``int`` :param path_stats: Print per-path analysis report. Default off. :type path_stats: ``bool`` :param timing: print analysis timing information? :type timing: ``bool`` :param output_format: report output format. :type output_format: ``str`` :returns: log analysis report object. :rtype: :py:class:`analog.report.Report` """ analyzer = Analyzer(log=log, format=format, pattern=pattern, time_format=time_format, verbs=verbs, status_codes=status_codes, paths=paths, max_age=max_age, path_stats=path_stats) report = analyzer() # print timing information if timing and report.execution_time: print("Analyzed logs in {:.3f}s.\n".format(report.execution_time)) # print report in requested output format print(report.render(path_stats=path_stats, output_format=output_format)) return report
{ "repo_name": "fabianbuechler/analog", "path": "analog/analyzer.py", "copies": "1", "size": "7830", "license": "mit", "hash": 5325047117719476000, "line_mean": 35.7605633803, "line_max": 78, "alpha_frac": 0.5946360153, "autogenerated": false, "ratio": 4.164893617021277, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 213 }
# AnalogClock constants
# E. A. Tacao <e.a.tacao |at| estadao.com.br>
# http://j.domaindlx.com/elements28/wxpython/
# 15 Fev 2006, 22:00 GMT-03:00
# Distributed under the wxWidgets license.

# Bit flags controlling the general clock appearance; combine with `|`
# and pass to SetClockStyle.
SHOW_QUARTERS_TICKS = 1 << 0
SHOW_HOURS_TICKS    = 1 << 1
SHOW_MINUTES_TICKS  = 1 << 2
ROTATE_TICKS        = 1 << 3
SHOW_HOURS_HAND     = 1 << 4
SHOW_MINUTES_HAND   = 1 << 5
SHOW_SECONDS_HAND   = 1 << 6
SHOW_SHADOWS        = 1 << 7
OVERLAP_TICKS       = 1 << 8
SHOW_SWEEP_HAND     = 1 << 9

DEFAULT_CLOCK_STYLE = (SHOW_HOURS_TICKS | SHOW_MINUTES_TICKS |
                       SHOW_HOURS_HAND | SHOW_MINUTES_HAND |
                       SHOW_SECONDS_HAND | SHOW_SHADOWS | ROTATE_TICKS)

# Bit flags selecting the tick-mark rendering style; pass to SetTickStyle.
TICKS_NONE    = 1 << 0
TICKS_SQUARE  = 1 << 1
TICKS_CIRCLE  = 1 << 2
TICKS_POLY    = 1 << 3
TICKS_DECIMAL = 1 << 4
TICKS_ROMAN   = 1 << 5
TICKS_BINARY  = 1 << 6
TICKS_HEX     = 1 << 7

# Hand selectors usable as the 'target' keyword in the Get/Set methods.
HOUR   = 1 << 0
MINUTE = 1 << 1
SECOND = 1 << 2
SWEEP  = 1 << 3

ALL = HOUR | MINUTE | SECOND | SWEEP

#
##
### eof
{ "repo_name": "CarlFK/clocky", "path": "analogclock/styles.py", "copies": "1", "size": "1304", "license": "mit", "hash": -7314648424241791000, "line_mean": 24.612244898, "line_max": 76, "alpha_frac": 0.5805214724, "autogenerated": false, "ratio": 2.716666666666667, "config_test": false, "has_no_keywords": true, "few_assignments": false, "quality_score": 0.3797188139066667, "avg_score": null, "num_lines": null }
# AnalogClock demo
# E. A. Tacao <e.a.tacao |at| estadao.com.br>
# http://j.domaindlx.com/elements28/wxpython/
# 12 Fev 2006, 22:00 GMT-03:00
# Distributed under the wxWidgets license.

import wx
import wx.lib.analogclock as ac

#----------------------------------------------------------------------

class TestPanel(wx.Panel):
    """Demo panel showing six differently-styled AnalogClock widgets
    laid out in a 2x3 grid."""
    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent)

        # A mostly default clock
        c1 = ac.AnalogClock(self, size=(200,200))

        if True:
            # for a simpler test case just set this to False and
            # only the one clock will be created

            # A plain clock, with square hour and round minute marks, no
            # shadow, static border
            c2 = ac.AnalogClock(self, style=wx.STATIC_BORDER,
                                hoursStyle=ac.TICKS_SQUARE,
                                minutesStyle=ac.TICKS_CIRCLE,
                                clockStyle=ac.SHOW_HOURS_TICKS| \
                                           ac.SHOW_MINUTES_TICKS|
                                           ac.SHOW_HOURS_HAND| \
                                           ac.SHOW_MINUTES_HAND| \
                                           ac.SHOW_SECONDS_HAND)
            c2.SetTickSize(12, target=ac.HOUR)

            # No minute tick marks
            c3 = ac.AnalogClock(self, hoursStyle=ac.TICKS_CIRCLE,
                                clockStyle=ac.SHOW_HOURS_TICKS| \
                                           ac.SHOW_HOURS_HAND| \
                                           ac.SHOW_MINUTES_HAND| \
                                           ac.SHOW_SECONDS_HAND| \
                                           ac.SHOW_SHADOWS)
            c3.SetTickSize(12)

            # A clock with hex numbers no seconds hand and different colours.
            c4 = ac.AnalogClock(self, hoursStyle=ac.TICKS_HEX,
                                clockStyle=ac.SHOW_HOURS_TICKS| \
                                           ac.SHOW_HOURS_HAND| \
                                           ac.SHOW_MINUTES_HAND| \
                                           ac.SHOW_SHADOWS)
            colour = wx.Colour(0, 255, 255)
            c4.SetForegroundColour(colour)
            colour = wx.Colour(0, 132, 132)
            c4.SetShadowColour(colour)
            c4.SetTickFont(wx.Font(10, wx.FONTFAMILY_MODERN, wx.NORMAL, wx.BOLD))
            c4.SetBackgroundColour(wx.BLACK)
            c4.SetFaceBorderColour(wx.BLACK)
            c4.SetFaceFillColour(wx.BLACK)

            # A clock with binary numbers shown only at the quarter tick marks,
            # no minutes ticks and different colours.
            c5 = ac.AnalogClock(self, style = wx.RAISED_BORDER,
                                hoursStyle=ac.TICKS_BINARY,
                                clockStyle=ac.SHOW_QUARTERS_TICKS| \
                                           ac.SHOW_HOURS_HAND| \
                                           ac.SHOW_MINUTES_HAND| \
                                           ac.SHOW_SECONDS_HAND| \
                                           ac.SHOW_SHADOWS)
            colour = wx.Colour(0, 128, 0)
            c5.SetHandFillColour(colour, target=ac.SECOND)
            c5.SetHandBorderColour(colour, target=ac.SECOND)
            c5.SetBackgroundColour(colour)
            colour = wx.Colour(128, 0, 64)
            c5.SetTickFillColour(colour)
            c5.SetFaceBorderColour(colour)
            c5.SetFaceBorderWidth(1)
            colour = wx.Colour(0, 198, 0)
            c5.SetFaceFillColour(colour)
            c5.SetShadowColour(wx.WHITE)

            # A clock with a sunken border, roman numerals shown only at the
            # quarter tick marks with a roman font, circular minutes ticks,
            # no seconds hand, no shadows, tick overlapping and different colours.
            c6 = ac.AnalogClock(self, style = wx.SUNKEN_BORDER,
                                hoursStyle=ac.TICKS_ROMAN,
                                minutesStyle=ac.TICKS_CIRCLE,
                                clockStyle=ac.SHOW_QUARTERS_TICKS| \
                                           ac.SHOW_MINUTES_TICKS| \
                                           ac.SHOW_HOURS_HAND| \
                                           ac.SHOW_MINUTES_HAND| \
                                           ac.OVERLAP_TICKS)
            colour = wx.Colour(128, 0, 0)
            c6.SetHandFillColour(colour)
            colour = wx.Colour(179, 0, 89)
            c6.SetHandBorderColour(colour)
            c6.SetTickFillColour(colour)
            c6.SetTickBorderColour(colour)
            colour = wx.Colour(225, 255, 255)
            c6.SetFaceBorderColour(colour)
            c6.SetBackgroundColour(colour)
            colour = wx.Colour(249, 255, 255)
            c6.SetFaceFillColour(colour)
            colour = wx.Colour(255, 213, 213)
            c6.SetShadowColour(colour)
            c6.SetTickFont(wx.Font(10, wx.FONTFAMILY_ROMAN, wx.NORMAL, wx.BOLD))

        # layout the clocks in a grid
        gs = wx.GridSizer(2, 3, 4, 4)
        gs.Add(c1, 0, wx.EXPAND)
        gs.Add(c2, 0, wx.EXPAND)
        gs.Add(c3, 0, wx.EXPAND)
        gs.Add(c4, 0, wx.EXPAND)
        gs.Add(c5, 0, wx.EXPAND)
        gs.Add(c6, 0, wx.EXPAND)

        # put it in another sizer for a border
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(gs, 1, wx.EXPAND|wx.ALL, 10)
        self.SetSizerAndFit(sizer)


#----------------------------------------------------------------------

def runTest(frame, nb, log):
    """Entry point used by the wxPython demo framework: create and return
    the demo panel."""
    win = TestPanel(nb, log)
    return win

#----------------------------------------------------------------------

# Demo-framework overview text: the module docstring with angle brackets
# stripped so it renders inside the HTML <PRE> block.
overview = """<html>
<PRE><FONT SIZE=-1>
""" + ac.__doc__.replace("<", "").replace(">", "") + """
</FONT></PRE>"""


if __name__ == '__main__':
    import sys,os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
{ "repo_name": "walterfan/snippets", "path": "python/exam/count_down_clock.py", "copies": "1", "size": "6002", "license": "apache-2.0", "hash": 5762391147293910000, "line_mean": 41.5673758865, "line_max": 82, "alpha_frac": 0.467844052, "autogenerated": false, "ratio": 3.9564930784442978, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49243371304442973, "avg_score": null, "num_lines": null }
# Analog Clock Demo # Chapter 5 import sys, random, math, pygame from pygame.locals import * from datetime import datetime, date, time def print_text(font, x, y, text, color=(255, 255, 255)): imgText = font.render(text, True, color) screen.blit(imgText, (x, y)) def wrap_angle(angle): return angle % 360 # main program begins pygame.init() screen = pygame.display.set_mode((600, 500)) pygame.display.set_caption("Analog Clock Demo") font = pygame.font.Font(None, 36) orange = 220, 180, 0 white = 255, 255, 255 yellow = 255, 255, 0 pink = 255, 100, 100 pos_x = 300 pos_y = 250 radius = 250 angle = 360 # repeating loop while True: for event in pygame.event.get(): if event.type == QUIT: sys.exit() keys = pygame.key.get_pressed() if keys[K_ESCAPE]: sys.exit() screen.fill((0, 0, 100)) # draw one step around the circle pygame.draw.circle(screen, white, (pos_x, pos_y), radius, 6) # draw the clock numbers 1-12 for n in range(1, 13): angle = math.radians(n * (360 / 12) - 90) x = math.cos(angle) * (radius - 20) - 10 y = math.sin(angle) * (radius - 20) - 10 print_text(font, pos_x + x, pos_y + y, str(n)) # get the time of day today = datetime.today() hours = today.hour % 12 minutes = today.minute seconds = today.second # draw the hours hand hour_angle = wrap_angle(hours * (360 / 12) - 90) hour_angle = math.radians(hour_angle) hour_x = math.cos(hour_angle) * (radius - 80) hour_y = math.sin(hour_angle) * (radius - 80) target = (pos_x + hour_x, pos_y + hour_y) pygame.draw.line(screen, pink, (pos_x, pos_y), target, 25) # draw the minutes hand min_angle = wrap_angle(minutes * (360 / 60) - 90) min_angle = math.radians(min_angle) min_x = math.cos(min_angle) * (radius - 60) min_y = math.sin(min_angle) * (radius - 60) target = (pos_x + min_x, pos_y + min_y) pygame.draw.line(screen, orange, (pos_x, pos_y), target, 12) # draw the seconds hand sec_angle = wrap_angle(seconds * (360 / 60) - 90) sec_angle = math.radians(sec_angle) sec_x = math.cos(sec_angle) * (radius - 40) sec_y = 
math.sin(sec_angle) * (radius - 40) target = (pos_x + sec_x, pos_y + sec_y) pygame.draw.line(screen, yellow, (pos_x, pos_y), target, 6) # cover the center pygame.draw.circle(screen, white, (pos_x, pos_y), 20) print_text(font, 0, 0, str(hours) + ":" + str(minutes) + ":" + str(seconds)) pygame.display.update()
{ "repo_name": "Great-Li-Xin/PythonDev", "path": "Games/resources/code/chap05/AnalogClock.py", "copies": "1", "size": "2625", "license": "mit", "hash": 3436825750612795400, "line_mean": 27.4943820225, "line_max": 80, "alpha_frac": 0.5843809524, "autogenerated": false, "ratio": 3.0034324942791764, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4087813446679176, "avg_score": null, "num_lines": null }
# AnalogClock's base classes # E. A. Tacao <e.a.tacao |at| estadao.com.br> # http://j.domaindlx.com/elements28/wxpython/ # 15 Fev 2006, 22:00 GMT-03:00 # Distributed under the wxWidgets license. from time import strftime, localtime from datetime import datetime import math import wx from styles import * #---------------------------------------------------------------------- _targets = [HOUR, MINUTE, SECOND, SWEEP] #---------------------------------------------------------------------- class Element: """Base class for face, hands and tick marks.""" def __init__(self, idx=0, pos=None, size=None, offset=0, clocksize=None, scale=1, rotate=False, kind=""): self.idx = idx self.pos = pos self.size = size self.offset = offset self.clocksize = clocksize self.scale = scale self.rotate = rotate self.kind = kind self.text = None self.angfac = [6, 30][self.kind == "hours"] def _pol2rect(self, m, t): return m * math.cos(math.radians(t)), m * math.sin(math.radians(t)) def _rect2pol(self, x, y): return math.hypot(x, y), math.degrees(math.atan2(y, x)) def DrawRotated(self, dc, offset=0): pass def DrawStraight(self, dc, offset=0): pass def Draw(self, dc, offset=0): if self.rotate: self.DrawRotated(dc, offset) else: self.DrawStraight(dc, offset) def RecalcCoords(self, clocksize, centre, scale): pass def GetSize(self): return self.size def GetOffset(self): return self.offset def GetIsRotated(self, rotate): return self.rotate def GetMaxSize(self, scale=1): return self.size * scale def GetScale(self): return self.scale def SetIsRotated(self, rotate): self.rotate = rotate def GetMaxSize(self, scale=1): return self.size * scale def GetPolygon(self): return self.polygon def SetPosition(self, pos): self.pos = pos def SetSize(self, size): self.size = size def SetOffset(self, offset): self.offset = offset def SetClockSize(self, clocksize): self.clocksize = clocksize def SetScale(self, scale): self.scale = scale def SetIsRotated(self, rotate): self.rotate = rotate def SetPolygon(self, 
polygon): self.polygon = polygon #---------------------------------------------------------------------- class ElementWithDyer(Element): """Base class for clock face and hands.""" def __init__(self, **kwargs): self.dyer = kwargs.pop("dyer", Dyer()) Element.__init__(self, **kwargs) def GetFillColour(self): return self.dyer.GetFillColour() def GetBorderColour(self): return self.dyer.GetBorderColour() def GetBorderWidth(self): return self.dyer.GetBorderWidth() def GetShadowColour(self): return self.dyer.GetShadowColour() def SetFillColour(self, colour): self.dyer.SetFillColour(colour) def SetBorderColour(self, colour): self.dyer.SetBorderColour(colour) def SetBorderWidth(self, width): self.dyer.SetBorderWidth(width) def SetShadowColour(self, colour): self.dyer.SetShadowColour(colour) #---------------------------------------------------------------------- class Face(ElementWithDyer): """Holds info about the clock face.""" def __init__(self, **kwargs): ElementWithDyer.__init__(self, **kwargs) def Draw(self, dc): self.dyer.Select(dc) dc.DrawCircle(self.pos.x, self.pos.y, self.radius) # print (self.pos.x, self.pos.y, self.pos.x, self.pos.y + self.radius) width = self.radius/20 dc.SetPen(wx.Pen("blue", width, wx.SOLID)) # dc.DrawLine(self.pos.x-width/2, self.pos.y, self.pos.x-width/2, self.pos.y - self.radius) dc.DrawLine(self.pos.x, self.pos.y, self.pos.x, self.pos.y - self.radius) def RecalcCoords(self, clocksize, centre, scale): self.radius = min(clocksize.Get()) / 2. - self.dyer.width / 2. 
self.pos = centre #---------------------------------------------------------------------- class Hand(ElementWithDyer): """Holds info about a clock hand.""" def __init__(self, **kwargs): self.lenfac = kwargs.pop("lenfac") ElementWithDyer.__init__(self, **kwargs) self.SetPolygon([[-1, 0], [0, -1], [1, 0], [0, 4]]) def Draw(self, dc, end, offset=0): radius, centre, r = end angle = math.degrees(r) polygon = self.polygon[:] vscale = radius / max([y for x, y in polygon]) for i, (x, y) in enumerate(polygon): x *= self.scale * self.size y *= vscale * self.lenfac m, t = self._rect2pol(x, y) polygon[i] = self._pol2rect(m, t - angle) dc.DrawPolygon(polygon, centre.x + offset, centre.y + offset) def RecalcCoords(self, clocksize, centre, scale): self.pos = centre self.scale = scale class SweepHand(Hand): def __init__(self, sweep_frequency=10, **kwargs): Hand.__init__(self, **kwargs) self.sweep_frequency = sweep_frequency self.SetPolygon([[-1, -1], [1, -1], [1, 10], [-1, 10]]) #---------------------------------------------------------------------- class TickSquare(Element): """Holds info about a tick mark.""" def __init__(self, **kwargs): Element.__init__(self, **kwargs) def Draw(self, dc, offset=0): width = height = self.size * self.scale x = self.pos.x - width / 2. y = self.pos.y - height / 2. dc.DrawRectangle(x + offset, y + offset, width, height) #---------------------------------------------------------------------- class TickCircle(Element): """Holds info about a tick mark.""" def __init__(self, **kwargs): Element.__init__(self, **kwargs) def Draw(self, dc, offset=0): radius = self.size * self.scale / 2. 
x = self.pos.x y = self.pos.y dc.DrawCircle(x + offset, y + offset, radius) #---------------------------------------------------------------------- class TickPoly(Element): """Holds info about a tick mark.""" def __init__(self, **kwargs): Element.__init__(self, **kwargs) self.SetPolygon([[0, 1], [1, 0], [2, 1], [1, 5]]) def _calcPolygon(self): width = max([x for x, y in self.polygon]) height = max([y for x, y in self.polygon]) tscale = self.size / max(width, height) * self.scale polygon = [(x * tscale, y * tscale) for x, y in self.polygon] width = max([x for x, y in polygon]) height = max([y for x, y in polygon]) return polygon, width, height def DrawStraight(self, dc, offset=0): polygon, width, height = self._calcPolygon() x = self.pos.x - width / 2. y = self.pos.y - height / 2. dc.DrawPolygon(polygon, x + offset, y + offset) def DrawRotated(self, dc, offset=0): polygon, width, height = self._calcPolygon() angle = 360 - self.angfac * (self.idx + 1) r = math.radians(angle) for i in range(len(polygon)): m, t = self._rect2pol(*polygon[i]) t -= angle polygon[i] = self._pol2rect(m, t) x = self.pos.x - math.cos(r) * width / 2. - math.sin(r) * height / 2. y = self.pos.y - math.cos(r) * height / 2. + math.sin(r) * width / 2. dc.DrawPolygon(polygon, x + offset, y + offset) #---------------------------------------------------------------------- class TickDecimal(Element): """Holds info about a tick mark.""" def __init__(self, **kwargs): Element.__init__(self, **kwargs) self.text = "%s" % (self.idx + 1) def DrawStraight(self, dc, offset=0): width, height = dc.GetTextExtent(self.text) x = self.pos.x - width / 2. y = self.pos.y - height / 2. dc.DrawText(self.text, x + offset, y + offset) def DrawRotated(self, dc, offset=0): width, height = dc.GetTextExtent(self.text) angle = 360 - self.angfac * (self.idx + 1) r = math.radians(angle) x = self.pos.x - math.cos(r) * width / 2. - math.sin(r) * height / 2. y = self.pos.y - math.cos(r) * height / 2. + math.sin(r) * width / 2. 
dc.DrawRotatedText(self.text, x + offset, y + offset, angle) #---------------------------------------------------------------------- class TickRoman(TickDecimal): """Holds info about a tick mark.""" def __init__(self, **kwargs): TickDecimal.__init__(self, **kwargs) self.text = ["I","II","III","IV","V", \ "VI","VII","VIII","IX","X", \ "XI","XII","XIII","XIV","XV", \ "XVI","XVII","XVIII","XIX","XX", \ "XXI","XXII","XXIII","XXIV","XXV", \ "XXVI","XXVII","XXVIII","XXIX","XXX", \ "XXXI","XXXII","XXXIII","XXXIV","XXXV", \ "XXXVI","XXXVII","XXXVIII","XXXIX","XL", \ "XLI","XLII","XLIII","XLIV","XLV", \ "XLVI","XLVII","XLVIII","XLIX","L", \ "LI","LII","LIII","LIV","LV", \ "LVI","LVII","LVIII","LIX","LX"][self.idx] #---------------------------------------------------------------------- class TickBinary(TickDecimal): """Holds info about a tick mark.""" def __init__(self, **kwargs): TickDecimal.__init__(self, **kwargs) def d2b(n, b=""): while n > 0: b = str(n % 2) + b; n = n >> 1 return b.zfill(4) self.text = d2b(self.idx + 1) #---------------------------------------------------------------------- class TickHex(TickDecimal): """Holds info about a tick mark.""" def __init__(self, **kwargs): TickDecimal.__init__(self, **kwargs) self.text = hex(self.idx + 1)[2:].upper() #---------------------------------------------------------------------- class TickNone(Element): """Holds info about a tick mark.""" def __init__(self, **kwargs): Element.__init__(self, **kwargs) def Draw(self, dc, offset=0): pass #---------------------------------------------------------------------- class Dyer: """Stores info about colours and borders of clock Elements.""" def __init__(self, border=None, width=0, fill=None, shadow=None): """ self.border (wx.Colour) border colour self.width (int) border width self.fill (wx.Colour) fill colour self.shadow (wx.Colour) shadow colour """ self.border = border or \ wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT) self.fill = fill or \ 
wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT) self.shadow = shadow or \ wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DSHADOW) self.width = width def Select(self, dc, shadow=False): """Selects the current settings into the dc.""" if not shadow: dc.SetPen(wx.Pen(self.border, self.width, wx.SOLID)) dc.SetBrush(wx.Brush(self.fill, wx.SOLID)) dc.SetTextForeground(self.fill) else: dc.SetPen(wx.Pen(self.shadow, self.width, wx.SOLID)) dc.SetBrush(wx.Brush(self.shadow, wx.SOLID)) dc.SetTextForeground(self.shadow) def GetFillColour(self): return self.fill def GetBorderColour(self): return self.border def GetBorderWidth(self): return self.width def GetShadowColour(self): return self.shadow def SetFillColour(self, colour): self.fill = colour def SetBorderColour(self, colour): self.border = colour def SetBorderWidth(self, width): self.width = width def SetShadowColour(self, colour): self.shadow = colour #---------------------------------------------------------------------- class HandSet: """Manages the set of hands.""" def __init__(self, parent, h, m, s, w): self.parent = parent self.hands = [h, m, s, w] self.radius = 1 self.centre = wx.Point(1, 1) self.sweep_frequency = w.sweep_frequency self.last_second=0 self.flash_state = False def _draw(self, dc, shadow=False): ends = [int(x) for x in strftime("%I %M %S", localtime()).split()] ends.append(datetime.now().microsecond) second = datetime.now().microsecond if second < self.last_second: self.flash_state = not self.flash_state if self.flash_state: bg = (128,0,0) else: bg = (0,0,0) self.parent.Box.Face.SetFillColour(bg) self.parent.DrawBox() self.last_second = second flags = [self.parent.clockStyle & flag \ for flag in self.parent.allHandStyles] a_hand = self.hands[0] if shadow: offset = self.parent.shadowOffset * a_hand.GetScale() else: offset = 0 for i, hand in enumerate(self.hands): # Is this hand supposed to be drawn? if flags[i]: idx = ends[i] # Is this the hours hand? 
if i == 0: idx = idx * 5 + ends[1] / 12 - 1 elif i == 3: # sweep hand angle = math.radians( 180 - 6 * self.sweep_frequency * (ends[2] + (idx / 1000000.0))) # else prevent exceptions on leap seconds elif idx <= 0 or idx > 60: idx = 59 # and adjust idx offset for minutes and non-leap seconds else: idx = idx - 1 if i != 3: angle = math.radians(180 - 6 * (idx + 1)) hand.dyer.Select(dc, shadow) hand.Draw(dc, (self.radius, self.centre, angle), offset) def Draw(self, dc): if self.parent.clockStyle & SHOW_SHADOWS: self._draw(dc, True) self._draw(dc) def RecalcCoords(self, clocksize, centre, scale): self.centre = centre [hand.RecalcCoords(clocksize, centre, scale) for hand in self.hands] def SetMaxRadius(self, radius): self.radius = radius def GetSize(self, target): r = [] for i, hand in enumerate(self.hands): if _targets[i] & target: r.append(hand.GetSize()) return tuple(r) def GetFillColour(self, target): r = [] for i, hand in enumerate(self.hands): if _targets[i] & target: r.append(hand.GetFillColour()) return tuple(r) def GetBorderColour(self, target): r = [] for i, hand in enumerate(self.hands): if _targets[i] & target: r.append(hand.GetBorderColour()) return tuple(r) def GetBorderWidth(self, target): r = [] for i, hand in enumerate(self.hands): if _targets[i] & target: r.append(hand.GetBorderWidth()) return tuple(r) def GetShadowColour(self): r = [] for i, hand in enumerate(self.hands): if _targets[i] & target: r.append(hand.GetShadowColour()) return tuple(r) def SetSize(self, size, target): for i, hand in enumerate(self.hands): if _targets[i] & target: hand.SetSize(size) def SetFillColour(self, colour, target): for i, hand in enumerate(self.hands): if _targets[i] & target: hand.SetFillColour(colour) def SetBorderColour(self, colour, target): for i, hand in enumerate(self.hands): if _targets[i] & target: hand.SetBorderColour(colour) def SetBorderWidth(self, width, target): for i, hand in enumerate(self.hands): if _targets[i] & target: hand.SetBorderWidth(width) def 
SetShadowColour(self, colour): for i, hand in enumerate(self.hands): hand.SetShadowColour(colour) #---------------------------------------------------------------------- class TickSet: """Manages a set of tick marks.""" def __init__(self, parent, **kwargs): self.parent = parent self.dyer = Dyer() self.noe = {"minutes": 60, "hours": 12}[kwargs["kind"]] self.font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT) style = kwargs.pop("style") self.kwargs = kwargs self.SetStyle(style) def _draw(self, dc, shadow=False): dc.SetFont(self.font) a_tick = self.ticks[0] if shadow: offset = self.parent.shadowOffset * a_tick.GetScale() else: offset = 0 clockStyle = self.parent.clockStyle for idx, tick in self.ticks.items(): draw = False # Are we a set of hours? if self.noe == 12: # Should we show all hours ticks? if clockStyle & SHOW_HOURS_TICKS: draw = True # Or is this tick a quarter and should we show only quarters? elif clockStyle & SHOW_QUARTERS_TICKS and not (idx + 1) % 3.: draw = True # Are we a set of minutes and minutes should be shown? elif self.noe == 60 and clockStyle & SHOW_MINUTES_TICKS: # If this tick occupies the same position of an hour/quarter # tick, should we still draw it anyway? if clockStyle & OVERLAP_TICKS: draw = True # Right, sir. I promise I won't overlap any tick. else: # Ensure that this tick won't overlap an hour tick. if clockStyle & SHOW_HOURS_TICKS: if (idx + 1) % 5.: draw = True # Ensure that this tick won't overlap a quarter tick. elif clockStyle & SHOW_QUARTERS_TICKS: if (idx + 1) % 15.: draw = True # We're not drawing quarters nor hours, so we can draw all # minutes ticks. else: draw = True if draw: tick.Draw(dc, offset) def Draw(self, dc): if self.parent.clockStyle & SHOW_SHADOWS: self.dyer.Select(dc, True) self._draw(dc, True) self.dyer.Select(dc) self._draw(dc) def RecalcCoords(self, clocksize, centre, scale): a_tick = self.ticks[0] size = a_tick.GetMaxSize(scale) maxsize = size # Try to find a 'good' max size for text-based ticks. 
if a_tick.text is not None: self.font.SetPointSize(size) dc = wx.MemoryDC() dc.SelectObject(wx.EmptyBitmap(*clocksize.Get())) dc.SetFont(self.font) maxsize = size for tick in self.ticks.values(): maxsize = max(*(dc.GetTextExtent(tick.text) + (maxsize,))) radius = self.radius = min(clocksize.Get()) / 2. - \ self.dyer.width / 2. - \ maxsize / 2. - \ a_tick.GetOffset() * scale - \ self.parent.shadowOffset * scale # If we are a set of hours, the number of elements of this tickset is # 12 and ticks are separated by a distance of 30 degrees; # if we are a set of minutes, the number of elements of this tickset is # 60 and ticks are separated by a distance of 6 degrees. angfac = [6, 30][self.noe == 12] for i, tick in self.ticks.items(): tick.SetClockSize(clocksize) tick.SetScale(scale) deg = 180 - angfac * (i + 1) angle = math.radians(deg) x = centre.x + radius * math.sin(angle) y = centre.y + radius * math.cos(angle) tick.SetPosition(wx.Point(x, y)) def GetSize(self): return self.kwargs["size"] def GetFillColour(self): return self.dyer.GetFillColour() def GetBorderColour(self): return self.dyer.GetBorderColour() def GetBorderWidth(self): return self.dyer.GetBorderWidth() def GetPolygon(self): a_tick = self.ticks.values()[0] return a_tick.GetPolygon() def GetFont(self): return self.font def GetOffset(self): a_tick = self.ticks[0] return a_tick.GetOffset() def GetShadowColour(self): return self.dyer.GetShadowColour() def GetIsRotated(self): a_tick = self.ticks[0] return a_tick.GetIsRotated() def GetStyle(self): return self.style def SetSize(self, size): self.kwargs["size"] = size [tick.SetSize(size) for tick in self.ticks.values()] def SetFillColour(self, colour): self.dyer.SetFillColour(colour) def SetBorderColour(self, colour): self.dyer.SetBorderColour(colour) def SetBorderWidth(self, width): self.dyer.SetBorderWidth(width) def SetPolygon(self, polygon): [tick.SetPolygon(polygon) for tick in self.ticks.values()] def SetFont(self, font): self.font = font def 
SetOffset(self, offset): self.kwargs["offset"] = offset [tick.SetOffset(offset) for tick in self.ticks.values()] def SetShadowColour(self, colour): self.dyer.SetShadowColour(colour) def SetIsRotated(self, rotate): self.kwargs["rotate"] = rotate [tick.SetIsRotated(rotate) for tick in self.ticks.values()] def SetStyle(self, style): self.style = style tickclass = allTickStyles[style] self.kwargs["rotate"] = self.parent.clockStyle & ROTATE_TICKS self.ticks = {} for i in range(self.noe): self.kwargs["idx"] = i self.ticks[i] = tickclass(**self.kwargs) #---------------------------------------------------------------------- class Box: """Gathers info about the clock face and tick sets.""" def __init__(self, parent, Face, TicksM, TicksH): self.parent = parent self.Face = Face self.TicksH = TicksH self.TicksM = TicksM def GetNiceRadiusForHands(self, centre): a_tick = self.TicksM.ticks[0] scale = a_tick.GetScale() bw = max(self.TicksH.dyer.width / 2. * scale, self.TicksM.dyer.width / 2. * scale) mgt = self.TicksM.ticks[59] my = mgt.pos.y + mgt.GetMaxSize(scale) + bw hgt = self.TicksH.ticks[11] hy = hgt.pos.y + hgt.GetMaxSize(scale) + bw niceradius = centre.y - max(my, hy) return niceradius def Draw(self, dc): [getattr(self, attr).Draw(dc) \ for attr in ["Face", "TicksM", "TicksH"]] def RecalcCoords(self, size, centre, scale): [getattr(self, attr).RecalcCoords(size, centre, scale) \ for attr in ["Face", "TicksH", "TicksM"]] def GetTickSize(self, target): r = [] for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) r.append(tick.GetSize()) return tuple(r) def GetTickFillColour(self, target): r = [] for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) r.append(tick.GetFillColour()) return tuple(r) def GetTickBorderColour(self, target): r = [] for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) r.append(tick.GetBorderColour()) return 
tuple(r) def GetTickBorderWidth(self, target): r = [] for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) r.append(tick.GetBorderWidth()) return tuple(r) def GetTickPolygon(self, target): r = [] for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) r.append(tick.GetPolygon()) return tuple(r) def GetTickFont(self, target): r = [] for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) r.append(tick.GetFont()) return tuple(r) def GetIsRotated(self): a_tickset = self.TicksH return a_tickset.GetIsRotated() def GetTickOffset(self, target): r = [] for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) r.append(tick.GetOffset()) return tuple(r) def GetShadowColour(self): a_tickset = self.TicksH return a_tickset.GetShadowColour() def GetTickStyle(self, target): r = [] for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) r.append(tick.GetStyle()) return tuple(r) def SetTickSize(self, size, target): for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) tick.SetSize(size) def SetTickFillColour(self, colour, target): for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) tick.SetFillColour(colour) def SetTickBorderColour(self, colour, target): for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) tick.SetBorderColour(colour) def SetTickBorderWidth(self, width, target): for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) tick.SetBorderWidth(width) def SetTickPolygon(self, polygon, target): for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) tick.SetPolygon(polygon) def SetTickFont(self, font, target): fs = 
font.GetNativeFontInfoDesc() for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) tick.SetFont(wx.FontFromNativeInfoString(fs)) def SetIsRotated(self, rotate): [getattr(self, attr).SetIsRotated(rotate) \ for attr in ["TicksH", "TicksM"]] def SetTickOffset(self, offset, target): for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) tick.SetOffset(offset) def SetShadowColour(self, colour): for attr in ["TicksH", "TicksM"]: tick = getattr(self, attr) tick.SetShadowColour(colour) def SetTickStyle(self, style, target): for i, attr in enumerate(["TicksH", "TicksM"]): if _targets[i] & target: tick = getattr(self, attr) tick.SetStyle(style) #---------------------------------------------------------------------- # Relationship between styles and ticks class names. allTickStyles = {TICKS_BINARY: TickBinary, TICKS_CIRCLE: TickCircle, TICKS_DECIMAL: TickDecimal, TICKS_HEX: TickHex, TICKS_NONE: TickNone, TICKS_POLY: TickPoly, TICKS_ROMAN: TickRoman, TICKS_SQUARE: TickSquare} # ## ### eof
{ "repo_name": "CarlFK/clocky", "path": "analogclock/helpers.py", "copies": "1", "size": "29716", "license": "mit", "hash": -4782367176190227000, "line_mean": 27.1333333333, "line_max": 99, "alpha_frac": 0.4944474357, "autogenerated": false, "ratio": 3.9500199388541803, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.494446737455418, "avg_score": null, "num_lines": null }
# AnalogClock's colour selector for setup dialog # E. A. Tacao <e.a.tacao |at| estadao.com.br> # http://j.domaindlx.com/elements28/wxpython/ # 15 Fev 2006, 22:00 GMT-03:00 # Distributed under the wxWidgets license. import wx from wx.lib.newevent import NewEvent from wx.lib.buttons import GenBitmapButton #---------------------------------------------------------------------------- (ColourSelectEvent, EVT_COLOURSELECT) = NewEvent() #---------------------------------------------------------------------------- class ColourSelect(GenBitmapButton): def __init__(self, parent, size=(21, 21), value=wx.BLACK): w, h = size[0] - 5, size[1] - 5 GenBitmapButton.__init__(self, parent, wx.ID_ANY, wx.EmptyBitmap(w, h), size=size) self.SetBezelWidth(1) self.parent = parent self.SetValue(value) self.parent.Bind(wx.EVT_BUTTON, self.OnClick, self) def _makeBitmap(self): bdr = 8; w, h = self.GetSize() bmp = wx.EmptyBitmap(w - bdr, h - bdr) dc = wx.MemoryDC() dc.SelectObject(bmp) dc.SetBackground(wx.Brush(self.value, wx.SOLID)) dc.Clear() dc.SelectObject(wx.NullBitmap) self.SetBitmapLabel(bmp) self.Refresh() def GetValue(self): return self.value def SetValue(self, value): self.value = value self._makeBitmap() def OnClick(self, event): win = wx.GetTopLevelParent(self) data = wx.ColourData() data.SetChooseFull(True) data.SetColour(self.value) [data.SetCustomColour(colour_index, win.customcolours[colour_index]) for colour_index in range(0, 16)] dlg = wx.ColourDialog(win, data) dlg.SetTitle("Select Colour") changed = dlg.ShowModal() == wx.ID_OK if changed: data = dlg.GetColourData() self.SetValue(data.GetColour()) win.customcolours = [data.GetCustomColour(colour_index) \ for colour_index in range(0, 16)] dlg.Destroy() if changed: nevt = ColourSelectEvent(id=self.GetId(), obj=self, val=self.value) wx.PostEvent(self.parent, nevt) # ## ### eof
{ "repo_name": "ktan2020/legacy-automation", "path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/analogclock/lib_setup/colourselect.py", "copies": "2", "size": "2342", "license": "mit", "hash": 1193119777742407000, "line_mean": 27.275, "line_max": 79, "alpha_frac": 0.535439795, "autogenerated": false, "ratio": 3.6651017214397497, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.004263127669420825, "num_lines": 80 }
# AnalogClock's font selector for setup dialog # E. A. Tacao <e.a.tacao |at| estadao.com.br> # http://j.domaindlx.com/elements28/wxpython/ # 15 Fev 2006, 22:00 GMT-03:00 # Distributed under the wxWidgets license. import wx from wx.lib.newevent import NewEvent from wx.lib.buttons import GenButton #---------------------------------------------------------------------------- (FontSelectEvent, EVT_FONTSELECT) = NewEvent() #---------------------------------------------------------------------------- class FontSelect(GenButton): def __init__(self, parent, size=(75, 21), value=None): GenButton.__init__(self, parent, wx.ID_ANY, label="Select...", size=size) self.SetBezelWidth(1) self.parent = parent self.SetValue(value) self.parent.Bind(wx.EVT_BUTTON, self.OnClick, self) def GetValue(self): return self.value def SetValue(self, value): if value is None: value = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT) self.value = value def OnClick(self, event): data = wx.FontData() data.EnableEffects(False) font = self.value; font.SetPointSize(10) data.SetInitialFont(font) dlg = wx.FontDialog(self, data) changed = dlg.ShowModal() == wx.ID_OK if changed: data = dlg.GetFontData() self.value = data.GetChosenFont() self.Refresh() dlg.Destroy() if changed: nevt = FontSelectEvent(id=self.GetId(), obj=self, val=self.value) wx.PostEvent(self.parent, nevt) # ## ### eof
{ "repo_name": "ktan2020/legacy-automation", "path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/analogclock/lib_setup/fontselect.py", "copies": "2", "size": "1713", "license": "mit", "hash": 3042094908907154000, "line_mean": 26.0819672131, "line_max": 77, "alpha_frac": 0.5253940455, "autogenerated": false, "ratio": 3.764835164835165, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5290229210335164, "avg_score": null, "num_lines": null }
# AnalogClock's main class # E. A. Tacao <e.a.tacao |at| estadao.com.br> # http://j.domaindlx.com/elements28/wxpython/ # 15 Fev 2006, 22:00 GMT-03:00 # Distributed under the wxWidgets license. # # For more info please see the __init__.py file. import wx from styles import * from helpers import Dyer, Face, Hand, HandSet, TickSet, Box from setup import Setup #---------------------------------------------------------------------- class AnalogClock(wx.PyWindow): """An analog clock.""" def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.NO_BORDER, name="AnalogClock", clockStyle=DEFAULT_CLOCK_STYLE, minutesStyle=TICKS_CIRCLE, hoursStyle=TICKS_POLY): wx.PyWindow.__init__(self, parent, id, pos, size, style, name) # Base size for scale calc purposes. self.basesize = wx.Size(348, 348) # Store some references. self.clockStyle = clockStyle self.minutesStyle = minutesStyle self.hoursStyle = hoursStyle self.DrawHands = self._drawHands self.DrawBox = self._drawBox self.RecalcCoords = self._recalcCoords self.shadowOffset = 3 self.allHandStyles = [SHOW_HOURS_HAND, SHOW_MINUTES_HAND, SHOW_SECONDS_HAND] # Initialize clock face. # # By default we don't use colours or borders on the clock face. bg = self.GetBackgroundColour() face = Face(dyer=Dyer(bg, 0, bg)) # Initialize tick marks. # # TickSet is a set of tick marks; there's always two TickSets defined # regardless whether they're being shown or not. ticksM = TickSet(self, style=minutesStyle, size=5, kind="minutes") ticksH = TickSet(self, style=hoursStyle, size=25, kind="hours", rotate=clockStyle&ROTATE_TICKS) # Box holds the clock face and tick marks. self.Box = Box(self, face, ticksM, ticksH) # Initialize hands. # # HandSet is the set of hands; there's always one HandSet defined # regardless whether hands are being shown or not. # # A 'lenfac = 0.95', e.g., means that the lenght of that hand will # be 95% of the maximum allowed hand lenght ('nice' maximum lenght). 
handH = Hand(size=7, lenfac=0.7) handM = Hand(size=5, lenfac=0.95) handS = Hand(size=1, lenfac=0.95) self.Hands = HandSet(self, handH, handM, handS) # Create the customization dialog. self.Setup = None # Make a context menu. popup1 = wx.NewId() popup2 = wx.NewId() cm = self.cm = wx.Menu() cm.Append(popup1, "Customize...") cm.Append(popup2, "About...") # Set event handlers. self.Bind(wx.EVT_SIZE, self._OnSize) self.Bind(wx.EVT_PAINT, self._OnPaint) self.Bind(wx.EVT_ERASE_BACKGROUND, lambda evt: None) self.Bind(wx.EVT_TIMER, self._OnTimer) self.Bind(wx.EVT_WINDOW_DESTROY, self._OnDestroyWindow) self.Bind(wx.EVT_CONTEXT_MENU, self._OnContextMenu) self.Bind(wx.EVT_MENU, self._OnShowSetup, id=popup1) self.Bind(wx.EVT_MENU, self._OnShowAbout, id=popup2) # Set initial size based on given size, or best size self.SetInitialSize(size) # Do initial drawing (in case there is not an initial size event) self.RecalcCoords(self.GetSize()) self.DrawBox() # Initialize the timer that drives the update of the clock face. # Update every half second to ensure that there is at least one true # update during each realtime second. self.timer = wx.Timer(self) self.timer.Start(500) def DoGetBestSize(self): # Just pull a number out of the air. If there is a way to # calculate this then it should be done... size = wx.Size(50,50) self.CacheBestSize(size) return size def _OnSize(self, evt): size = self.GetClientSize() if size.x < 1 or size.y < 1: return self.RecalcCoords(size) self.DrawBox() def _OnPaint(self, evt): dc = wx.BufferedPaintDC(self) self.DrawHands(dc) def _OnTimer(self, evt): self.Refresh(False) self.Update() def _OnDestroyWindow(self, evt): self.timer.Stop() del self.timer def _OnContextMenu(self, evt): self.PopupMenu(self.cm) def _OnShowSetup(self, evt): if self.Setup is None: self.Setup = Setup(self) self.Setup.Show() self.Setup.Raise() def _OnShowAbout(self, evt): msg = "AnalogClock\n\n" \ "by Several folks on wxPython-users\n" \ "with enhancements from E. A. Tacao." 
title = "About..." style = wx.OK|wx.ICON_INFORMATION dlg = wx.MessageDialog(self, msg, title, style) dlg.ShowModal() dlg.Destroy() def _recalcCoords(self, size): """ Recalculates all coordinates/geometry and inits the faceBitmap to make sure the buffer is always the same size as the window. """ self.faceBitmap = wx.EmptyBitmap(*size.Get()) # Recalc all coords. scale = min([float(size.width) / self.basesize.width, float(size.height) / self.basesize.height]) centre = wx.Point(size.width / 2., size.height / 2.) self.Box.RecalcCoords(size, centre, scale) self.Hands.RecalcCoords(size, centre, scale) # Try to find a 'nice' maximum length for the hands so that they won't # overlap the tick marks. OTOH, if you do want to allow overlapping the # lenfac value (defined on __init__ above) has to be set to # something > 1. niceradius = self.Box.GetNiceRadiusForHands(centre) self.Hands.SetMaxRadius(niceradius) def _drawBox(self): """Draws clock face and tick marks onto the faceBitmap.""" dc = wx.BufferedDC(None, self.faceBitmap) dc.SetBackground(wx.Brush(self.GetBackgroundColour(), wx.SOLID)) dc.Clear() self.Box.Draw(dc) def _drawHands(self, dc): """ Draws the face bitmap, created on the last DrawBox call, and clock hands. 
""" dc.DrawBitmap(self.faceBitmap, 0, 0) self.Hands.Draw(dc) # Public methods -------------------------------------------------- def GetHandSize(self, target=ALL): """Gets thickness of hands.""" return self.Hands.GetSize(target) def GetHandFillColour(self, target=ALL): """Gets fill colours of hands.""" return self.Hands.GetFillColour(target) def GetHandBorderColour(self, target=ALL): """Gets border colours of hands.""" return self.Hands.GetBorderColour(target) def GetHandBorderWidth(self, target=ALL): """Gets border widths of hands.""" return self.Hands.GetBorderWidth(target) def GetTickSize(self, target=ALL): """Gets sizes of ticks.""" return self.Box.GetTickSize(target) def GetTickFillColour(self, target=ALL): """Gets fill colours of ticks.""" return self.Box.GetTickFillColour(target) def GetTickBorderColour(self, target=ALL): """Gets border colours of ticks.""" return self.Box.GetTickBorderColour(target) def GetTickBorderWidth(self, target=ALL): """Gets border widths of ticks.""" return self.Box.GetTickBorderWidth(target) def GetTickPolygon(self, target=ALL): """ Gets lists of points to be used as polygon shapes when using the TICKS_POLY style. """ return self.Box.GetTickPolygon(target) def GetTickFont(self, target=ALL): """ Gets fonts for tick marks when using TICKS_DECIMAL or TICKS_ROMAN style. 
""" return self.Box.GetTickFont(target) def GetTickOffset(self, target=ALL): """Gets the distance of tick marks for hours from border.""" return self.Box.GetTickOffset(target) def GetFaceFillColour(self): """Gets fill colours of watch.""" return self.Box.Face.GetFillColour() def GetFaceBorderColour(self): """Gets border colours of watch.""" return self.Box.Face.GetBorderColour() def GetFaceBorderWidth(self): """Gets border width of watch.""" return self.Box.Face.GetBorderWidth() def GetShadowColour(self): """Gets the colour to be used to draw shadows.""" a_clock_part = self.Box return a_clock_part.GetShadowColour() def GetClockStyle(self): """Returns the current clock style.""" return self.clockStyle def GetTickStyle(self, target=ALL): """Gets the tick style(s).""" return self.Box.GetTickStyle(target) def Reset(self): """ Forces an immediate recalculation and redraw of all clock elements. """ size = self.GetClientSize() if size.x < 1 or size.y < 1: return self.RecalcCoords(size) self.DrawBox() self.Refresh(False) def SetHandSize(self, size, target=ALL): """Sets thickness of hands.""" self.Hands.SetSize(size, target) def SetHandFillColour(self, colour, target=ALL): """Sets fill colours of hands.""" self.Hands.SetFillColour(colour, target) def SetHandBorderColour(self, colour, target=ALL): """Sets border colours of hands.""" self.Hands.SetBorderColour(colour, target) def SetHandBorderWidth(self, width, target=ALL): """Sets border widths of hands.""" self.Hands.SetBorderWidth(width, target) def SetTickSize(self, size, target=ALL): """Sets sizes of ticks.""" self.Box.SetTickSize(size, target) self.Reset() def SetTickFillColour(self, colour, target=ALL): """Sets fill colours of ticks.""" self.Box.SetTickFillColour(colour, target) self.Reset() def SetTickBorderColour(self, colour, target=ALL): """Sets border colours of ticks.""" self.Box.SetTickBorderColour(colour, target) self.Reset() def SetTickBorderWidth(self, width, target=ALL): """Sets border widths of ticks.""" 
self.Box.SetTickBorderWidth(width, target) self.Reset() def SetTickPolygon(self, polygon, target=ALL): """ Sets lists of points to be used as polygon shapes when using the TICKS_POLY style. """ self.Box.SetTickPolygon(polygon, target) self.Reset() def SetTickFont(self, font, target=ALL): """ Sets fonts for tick marks when using text-based tick styles such as TICKS_DECIMAL or TICKS_ROMAN. """ self.Box.SetTickFont(font, target) self.Reset() def SetTickOffset(self, offset, target=ALL): """Sets the distance of tick marks for hours from border.""" self.Box.SetTickOffset(offset, target) self.Reset() def SetFaceFillColour(self, colour): """Sets fill colours of watch.""" self.Box.Face.SetFillColour(colour) self.Reset() def SetFaceBorderColour(self, colour): """Sets border colours of watch.""" self.Box.Face.SetBorderColour(colour) self.Reset() def SetFaceBorderWidth(self, width): """Sets border width of watch.""" self.Box.Face.SetBorderWidth(width) self.Reset() def SetShadowColour(self, colour): """Sets the colour to be used to draw shadows.""" self.Hands.SetShadowColour(colour) self.Box.SetShadowColour(colour) self.Reset() def SetClockStyle(self, style): """ Set the clock style, according to the options below. ==================== ================================ SHOW_QUARTERS_TICKS Show marks for hours 3, 6, 9, 12 SHOW_HOURS_TICKS Show marks for all hours SHOW_MINUTES_TICKS Show marks for minutes SHOW_HOURS_HAND Show hours hand SHOW_MINUTES_HAND Show minutes hand SHOW_SECONDS_HAND Show seconds hand SHOW_SHADOWS Show hands and marks shadows ROTATE_TICKS Align tick marks to watch OVERLAP_TICKS Draw tick marks for minutes even when they match the hours marks. ==================== ================================ """ self.clockStyle = style self.Box.SetIsRotated(style & ROTATE_TICKS) self.Reset() def SetTickStyle(self, style, target=ALL): """ Set the tick style, according to the options below. 
================= ====================================== TICKS_NONE Don't show tick marks. TICKS_SQUARE Use squares as tick marks. TICKS_CIRCLE Use circles as tick marks. TICKS_POLY Use a polygon as tick marks. A polygon can be passed using SetTickPolygon, otherwise the default polygon will be used. TICKS_DECIMAL Use decimal numbers as tick marks. TICKS_ROMAN Use Roman numbers as tick marks. TICKS_BINARY Use binary numbers as tick marks. TICKS_HEX Use hexadecimal numbers as tick marks. ================= ====================================== """ self.Box.SetTickStyle(style, target) self.Reset() def SetBackgroundColour(self, colour): """Overriden base wx.Window method.""" wx.Window.SetBackgroundColour(self, colour) self.Reset() def SetForegroundColour(self, colour): """ Overriden base wx.Window method. This method sets a colour for all hands and ticks at once. """ wx.Window.SetForegroundColour(self, colour) self.SetHandFillColour(colour) self.SetHandBorderColour(colour) self.SetTickFillColour(colour) self.SetTickBorderColour(colour) self.Reset() def SetWindowStyle(self, *args, **kwargs): """Overriden base wx.Window method.""" size = self.GetSize() self.Freeze() wx.Window.SetWindowStyle(self, *args, **kwargs) self.SetSize((10, 10)) self.SetSize(size) self.Thaw() def SetWindowStyleFlag(self, *args, **kwargs): """Overriden base wx.Window method.""" self.SetWindowStyle(*args, **kwargs) # For backwards compatibility ----------------------------------------- class AnalogClockWindow(AnalogClock): """ A simple derived class that provides some backwards compatibility with the old analogclock module. 
""" def SetTickShapes(self, tsh, tsm=None): self.SetTickPolygon(tsh) def SetHandWeights(self, h=None, m=None, s=None): if h: self.SetHandSize(h, HOUR) if m: self.SetHandSize(m, MINUTE) if s: self.SetHandSize(s, SECOND) def SetHandColours(self, h=None, m=None, s=None): if h and not m and not s: m=h s=h if h: self.SetHandBorderColour(h, HOUR) self.SetHandFillColour(h, HOUR) if m: self.SetHandBorderColour(m, MINUTE) self.SetHandFillColour(m, MINUTE) if s: self.SetHandBorderColour(s, SECOND) self.SetHandFillColour(s, SECOND) def SetTickColours(self, h=None, m=None): if not m: m=h if h: self.SetTickBorderColour(h, HOUR) self.SetTickFillColour(h, HOUR) if m: self.SetTickBorderColour(m, MINUTE) self.SetTickFillColour(m, MINUTE) def SetTickSizes(self, h=None, m=None): if h: self.SetTickSize(h, HOUR) if m: self.SetTickSize(m, MINUTE) def SetTickFontss(self, h=None, m=None): if h: self.SetTickFont(h, HOUR) if m: self.SetTickFont(m, MINUTE) def SetMinutesOffset(self, o): pass def SetShadowColour(self, s): pass def SetWatchPenBrush(self, p=None, b=None): if p: self.SetFaceBorderColour(p.GetColour()) self.SetFaceBorderWidth(p.GetWidth()) if b: self.SetFaceFillColour(b.GetColour()) def SetClockStyle(self, style): style |= SHOW_HOURS_HAND|SHOW_MINUTES_HAND|SHOW_SECONDS_HAND AnalogClock.SetClockStyle(self, style) def SetTickStyles(self, h=None, m=None): if h: self.SetTickStyle(h, HOUR) if m: self.SetTickStyle(h, MINUTE) # Test stuff ---------------------------------------------------------- if __name__ == "__main__": print wx.VERSION_STRING class AcDemoApp(wx.App): def OnInit(self): frame = wx.Frame(None, -1, "AnalogClock", size=(375, 375)) clock = AnalogClock(frame) frame.CentreOnScreen() frame.Show() return True acApp = AcDemoApp(0) acApp.MainLoop() # ## ### eof
{ "repo_name": "ktan2020/legacy-automation", "path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/analogclock/analogclock.py", "copies": "1", "size": "18239", "license": "mit", "hash": -1111179114490905500, "line_mean": 26.8135860979, "line_max": 79, "alpha_frac": 0.5543067054, "autogenerated": false, "ratio": 4.008571428571429, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.004162301028772856, "num_lines": 633 }
# ANALOG COLORS # Import the library try: # This is the statement you normally use. colors = ximport("colors") except ImportError: # But since these examples are "inside" the library # we may need to try something different when # the library is not located in /Application Support colors = ximport("__init__") reload(colors) size(600, 600) nofill() stroke(0.4, 0.5, 0) strokewidth(0.1) autoclosepath(False) clr = colors.color(0.6, 0.4, 0) # Get a very dark variation of the color for the background. background(colors.dark(clr).darken(0.1)) clr.alpha = 0.5 # Each curve has a shadow and there are a lot of them, # so we have to use a very subtle shadow: # very transparent and thin (little blur). colors.shadow(alpha=0.05, blur=0.2) for i in range(50): # Each strand of curves has an analogous color # (i.e. hues that are next to each other on the color wheel). # This yields a very natural effect. stroke(clr.analog(angle=10, d=0.3)) # Start drawing strands of curves from the center. x0 = WIDTH/2 y0 = HEIGHT/2 # Each strand of curves bends in a certain way. vx0 = random(-200, 200) vy0 = random(-200, 200) vx1 = random(-200, 200) vy1 = random(-200, 200) # A strand ends up either left or right outside the screen. # Each curve in a strand ends up at the same place # (identical x1 and y1). x1 = choice((-10, WIDTH)) y1 = random(HEIGHT) # This code gives interesting effects as well: #from math import radians, sin, cos #angle = random(360) #x1 = x0 + cos(radians(angle)) * 100 #y1 = y0 + sin(radians(angle)) * 100 for j in range(100): beginpath(x0, y0) curveto( # The bend of each curve in a strand differs slightly # at the start, so the strand looks thicker at the start # and then all the curves come together at x1 and y1. x0+vx0+random(80), y0+vy0+random(80), x1+vx1, y1+vy1, x1, y1 ) endpath() """ # Some type, with a heart symbol! 
heart = u"\u2665" s1 = "strands of analogous curves "+heart s2 = "gratuitous type always looks cool on these things" fill(1, 1, 1, 0.85) fontsize(18) text(s1, 65, HEIGHT/2) fontsize(9) text(s2.upper(), 65, HEIGHT/2+12) stroke(1) strokewidth(1) line(0, HEIGHT/2, 60, HEIGHT/2) """
{ "repo_name": "est/nodebox-gl", "path": "libraries/colors/colors_example1.py", "copies": "2", "size": "2409", "license": "bsd-3-clause", "hash": 4561374311716880000, "line_mean": 27.3529411765, "line_max": 68, "alpha_frac": 0.6272312163, "autogenerated": false, "ratio": 3.1204663212435233, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.966146134064553, "avg_score": 0.017247239379598867, "num_lines": 85 }
"""Analog console entry point.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import argparse import sys import textwrap import analog from analog.analyzer import DEFAULT_VERBS, DEFAULT_STATUS_CODES, DEFAULT_PATHS from analog.utils import AnalogArgumentParser def main(argv=None): """ analog - Log Analysis Utility. Name the logfile to analyze (positional argument) or leave it out to read from ``stdin``. This can be handy for piping in filtered logfiles (e.g. with ``grep``). Select the logfile format subcommand that suits your needs or define a custom log format using ``analog custom --pattern-regex <...> --time-format <...>``. To analyze for the logfile for specified paths, provide them via ``--path`` arguments (mutliple times). Also, monitoring specifig HTTP verbs (request methods) via ``--verb`` and specific response status codes via ``--status`` argument(s) is possible. Paths and status codes all match the start of the actual log entry values. Thus, specifying a path ``/foo`` will group all paths beginning with that value. Arguments can be listed in a file by specifying ``@argument_file.txt`` as parameter. 
""" parser = AnalogArgumentParser( description=textwrap.dedent(main.__doc__.replace('``', "'")), formatter_class=argparse.RawDescriptionHelpFormatter, fromfile_prefix_chars='@') format_choices = analog.LogFormat.all_formats() output_choices = sorted(analog.Renderer.all_renderers().keys()) # --version parser.add_argument('--version', action='version', version="analog {v}".format(v=analog.__version__)) # common arguments common = argparse.ArgumentParser(add_help=False) # -o / --output_format common.add_argument('-o', '--output-format', action='store', dest='output_format', default='plain', choices=output_choices, help="output format") # -p / --path common.add_argument('-p', '--path', action='append', dest='paths', default=DEFAULT_PATHS, help="paths to monitor (repeat for multiple)") # -v / --verb common.add_argument('-v', '--verb', action='append', dest='verbs', default=DEFAULT_VERBS, help="verbs to monitor (repeat for multiple)") # -s / --status common.add_argument('-s', '--status', action='append', dest='status_codes', default=DEFAULT_STATUS_CODES, help="status codes to monitor (repeat for multiple)") # -a / --max_age common.add_argument('-a', '--max-age', action='store', type=int, default=None, help="analyze logs until n minutes age") # -ps / --path_stats common.add_argument('-ps', '--path-stats', action='store_true', dest='path_stats', help="include statistics per path") # -t / --timing common.add_argument('-t', '--timing', action='store_true', help="print timing") # logfile, defaults to stdin common.add_argument('log', action='store', nargs='?', type=argparse.FileType('r'), default='-', help="logfile to analyze." 
"Defaults to stdin for piping.") # subcommands for predefined log formats format_parsers = parser.add_subparsers( title="log format", description="analyze logfiles of a certain format", metavar='FORMAT', dest='format') for format in format_choices: format_parsers.add_parser(format, parents=[common], help="{} log format".format(format)) # subcommand for custom log format custom_format = format_parsers.add_parser('custom', parents=[common], help="custom log format") # -pr / --pattern-regex custom_format.add_argument('-pr', '--pattern-regex', action='store', dest='pattern', required=True, help='regex format pattern with named groups.') # -tf / --time-format custom_format.add_argument('-tf', '--time-format', action='store', dest='time_format', required=True, help='timestamp format (strftime compatible)') try: if argv is None: # pragma: no cover argv = sys.argv args = parser.parse_args(argv[1:]) format_kwargs = {'format': args.format} if args.format == 'custom': format_kwargs.update({ 'pattern': args.pattern, 'time_format': args.time_format, }) # analyze logfile and generate report analog.analyze(log=args.log, paths=args.paths, verbs=args.verbs, status_codes=args.status_codes, max_age=args.max_age, path_stats=args.path_stats, timing=args.timing, output_format=args.output_format, **format_kwargs) parser.exit(0) except analog.AnalogError as exc: parser.error(str(exc)) except KeyboardInterrupt: parser.exit(1, "\nExecution cancelled.")
{ "repo_name": "fabianbuechler/analog", "path": "analog/main.py", "copies": "1", "size": "6057", "license": "mit", "hash": -1268754877132283400, "line_mean": 37.335443038, "line_max": 79, "alpha_frac": 0.5159319795, "autogenerated": false, "ratio": 4.900485436893204, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5916417416393205, "avg_score": null, "num_lines": null }
from machine import ADC from uiot.device import Device class Analog(Device): # Handle devices connected to the analog port # offers a digital mode through using threshold def __init__(self, name, precision=1, threshold=None, on_change=None, report_change=True, filter=None): self.precision = precision self.threshold = None if threshold is not None: self.threshold = max(1, min(threshold, 1023)) self.last_value = None Device.__init__(self, name, ADC(0), on_change=on_change, report_change=report_change, filter=filter) def measure(self): value = self.port.read() if self.last_value is None \ or abs(value - self.last_value) >= self.precision: self.last_value = value if self.threshold is None: return self.last_value # just return value else: if self.last_value > self.threshold - self.precision: # behave like a digital sensor return 1 else: return 0 return self.last_value
{ "repo_name": "ulno/micropython-extra-ulno", "path": "lib/node_types/esp8266/freeze/uiot/analog.py", "copies": "1", "size": "1208", "license": "mit", "hash": -4433291726577275400, "line_mean": 33.5142857143, "line_max": 100, "alpha_frac": 0.5811258278, "autogenerated": false, "ratio": 4.253521126760563, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0005657708628005657, "num_lines": 35 }
"""Analog - Log Analysis Utitliy.""" from __future__ import (absolute_import, division, print_function, unicode_literals) from setuptools import setup, find_packages VERSION = '1.0.1+dev' def read(path, strip=False): """Read file at ``path`` and return content. Opt., ``strip`` whitespace.""" content = '' with open(path) as fp: content = fp.read() if strip: content = content.strip() return content classifiers = [ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', 'Topic :: Internet :: Log Analysis', 'Topic :: System :: Logging', 'Topic :: System :: Monitoring', 'Topic :: System :: Systems Administration', 'Topic :: Utilities', 'Programming Language :: Python :: Implementation :: PyPy'] + [ 'Programming Language :: Python :: {0}'.format(pyv) for pyv in '2.7 3.2 3.3 3.4'.split() ] requirements = ['tabulate'] # unittest.mock (3.3+) or mock try: import unittest.mock del unittest.mock except ImportError: requirements.append('mock') setup( name='analog', description='analog - Log Analysis Utility', long_description=read('README.rst') + '\n\n' + read('CHANGELOG.rst'), version=VERSION, url='https://github.com/fabianbuechler/analog', license='MIT license', author='Fabian B\xfcchler', author_email='fabian.buechler@gmail.com', entry_points={'console_scripts': ['analog=analog:main']}, classifiers=classifiers, install_requires=requirements, packages=find_packages(), py_modules=['analog'], zip_safe=False, )
{ "repo_name": "fabianbuechler/analog", "path": "setup.py", "copies": "1", "size": "1844", "license": "mit", "hash": 4911590560373688000, "line_mean": 28.2698412698, "line_max": 79, "alpha_frac": 0.6339479393, "autogenerated": false, "ratio": 3.794238683127572, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9928186622427573, "avg_score": 0, "num_lines": 63 }
"""Analog - Log Analysis Utitliy.""" # Analog documentation build configuration file, created by # sphinx-quickstart on Sat Jan 18 20:05:36 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. from __future__ import (absolute_import, division, print_function, unicode_literals) import os import analog # on_rtd is whether we are on readthedocs.org, # this line of code grabbed from docs.readthedocs.org on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # otherwise, readthedocs.org uses their theme by default, so no need to # specify it # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. 
project = 'Analog' copyright = '2014, Fabian Büchler' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '.'.join(analog.__version__.split('.')[:2]) # The full version, including alpha/beta/rc tags. release = analog.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "Analog - Log Analysis Utility" # A shorter title for the navigation bar. Default is the same as html_title. 
html_short_title = "Analog" # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_domain_indices = True # If false, no index is generated. html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. 
The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Analogdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'Analog.tex', 'Analog Documentation', 'Fabian Büchler', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'analog', 'Analog Documentation', ['Fabian Büchler'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Analog', 'Analog Documentation', 'Fabian Büchler', 'Analog', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
{ "repo_name": "fabianbuechler/analog", "path": "docs/conf.py", "copies": "1", "size": "8483", "license": "mit", "hash": -342896157558164400, "line_mean": 30.9962264151, "line_max": 79, "alpha_frac": 0.7012619413, "autogenerated": false, "ratio": 3.7335975341259355, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9904884129313007, "avg_score": 0.005995069222585398, "num_lines": 265 }
"""Analog log format definitions.""" from __future__ import (absolute_import, division, print_function, unicode_literals) from collections import namedtuple import re import weakref from analog.exceptions import InvalidFormatExpressionError class LogFormat: """Log format definition. Represents log format recognition patterns by name. A name:format mapping of all defined log format patterns can be retrieved using :py:meth:`analog.formats.LogFormat.all_formats`. Each log format should at least define the following match groups: * ``timestamp``: Local time. * ``verb``: HTTP verb (GET, POST, PUT, ...). * ``path``: Request path. * ``status``: Response status code. * ``body_bytes_sent``: Body size in bytes. * ``request_time``: Request time. * ``upstream_response_time``: Upstream response time. """ #: pool of all predefined log formats __formats_ = {} #: required pattern groups _required_attributes = ('timestamp', 'verb', 'path', 'status', 'body_bytes_sent', 'request_time', 'upstream_response_time') def __init__(self, name, pattern, time_format): """Describe log format. The format ``pattern`` is a (verbose) regex pattern string specifying the log entry attributes as named groups that is compiled into a :py:class:`re.Pattern` object. All pattern group names are be available as attributes of log entries when using a :py:meth:`analog.formats.LogEntry.entry`. :param name: log format name. :type name: ``str`` :param pattern: regular expression pattern string. :type pattern: raw ``str`` :param time_format: timestamp parsing pattern. :type time_format: ``str`` :raises: :py:class:`analog.exceptions.InvalidFormatExpressionError` if missing required format pattern groups or the pattern is not a valid regular expression. 
""" self.__formats_[name] = weakref.ref(self) self.name = name try: self.pattern = re.compile(pattern, re.UNICODE | re.VERBOSE) except re.error: raise InvalidFormatExpressionError("Invalid regex in format.") attributes = self.pattern.groupindex.keys() for attr in self._required_attributes: if attr not in attributes: raise InvalidFormatExpressionError( "Format pattern must at least define the groups: " "{0}.".format(", ".join(self._required_attributes))) self.time_format = time_format self._entry = namedtuple( 'LogEntry{0}'.format(name.title()), sorted(self.pattern.groupindex, key=self.pattern.groupindex.get)) def entry(self, match): """Convert regex match object to log entry object. :param match: regex match object from ``pattern`` match. :type match: :py:class:`re.MatchObject` :returns: log entry object with all pattern keys as attributes. :rtype: :py:class:`collections.namedtuple` """ return self._entry(**match.groupdict()) @classmethod def all_formats(cls): """Mapping of all defined log format patterns. :returns: dictionary of name:``LogFormat`` instances. :rtype: ``dict`` """ formats = {} for name, ref in cls.__formats_.items(): instance = ref() if instance is not None: formats[name] = instance return formats NGINX = LogFormat('nginx', r''' ^(?P<remote_addr>\S+)\s-\s # Remote address (?P<remote_user>\S+)\s # Remote user \[(?P<timestamp>.*?)\]\s # Local time " # Request (?P<verb>[A-Z]+)\s # HTTP verb (GET, POST, PUT, ...) (?P<path>[^?]+) # Request path (?:\?.+)? # Query string \sHTTP/(?:[\d.]+) # HTTP/x.x protocol "\s # /Request (?P<status>\d+?)\s # Response status code (?P<body_bytes_sent>\d+?)\s # Body size in bytes "(?P<http_referer>[^"]+?)"\s # Referer header "(?P<http_user_agent>[^"]+?)"\s # User-Agent header "(?P<http_x_forwarded_for>[^"]+?)"\s # X-Forwarded-For header (?P<request_time>[\d\.]+)\s # Request time (?P<upstream_response_time>[\d\.]+)\s? 
# Upstream response time (?P<pipe>\S+)?$ # Pipelined request ''', time_format='%d/%b/%Y:%H:%M:%S +0000') """Nginx ``combinded_timed`` format:: '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for" ' '$request_time $upstream_response_time $pipe'; """
{ "repo_name": "fabianbuechler/analog", "path": "analog/formats.py", "copies": "1", "size": "5018", "license": "mit", "hash": -4405383301565062000, "line_mean": 37.3053435115, "line_max": 80, "alpha_frac": 0.5611797529, "autogenerated": false, "ratio": 4.227464195450716, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5288643948350716, "avg_score": null, "num_lines": null }
"""Analog log report object.""" from __future__ import (absolute_import, division, print_function, unicode_literals) from collections import Counter, defaultdict, OrderedDict import time from analog.renderers import Renderer from analog.utils import PrefixMatchingCounter try: from statistics import mean, median except ImportError: from analog.statistics import mean, median from analog import LOG class ListStats(object): """Statistic analysis of a list of values. Provides the mean, median and 90th, 75th and 25th percentiles. """ def __init__(self, elements): """Calculate some stats from list of values. :param elements: list of values. :type elements: ``list`` """ self.mean = mean(elements) if elements else None self.median = median(elements) if elements else None class Report(object): """Log analysis report object. Provides these statistical metrics: * Number for requests. * Response request method (HTTP verb) distribution. * Response status code distribution. * Requests per path. * Response time statistics (mean, median). * Response upstream time statistics (mean, median). * Response body size in bytes statistics (mean, median). * Per path request method (HTTP verb) distribution. * Per path response status code distribution. * Per path response time statistics (mean, median). * Per path response upstream time statistics (mean, median). * Per path response body size in bytes statistics (mean, median). """ def __init__(self, verbs, status_codes): """Create new log report object. Use ``add()`` method to add log entries to be analyzed. :param verbs: HTTP verbs to be tracked. :type verbs: ``list`` :param status_codes: status_codes to be tracked. May be prefixes, e.g. 
["100", "2", "3", "4", "404" ] :type status_codes: ``list`` :returns: Report analysis object :rtype: :py:class:`analog.report.Report` """ def verb_counter(): return Counter({verb: 0 for verb in verbs}) def status_counter(): return PrefixMatchingCounter( {str(code): 0 for code in status_codes}) self._start_time = time.clock() self.execution_time = None self.requests = 0 self._verbs = verb_counter() self._status = status_counter() self._times = [] self._upstream_times = [] self._body_bytes = [] self._path_requests = Counter() self._path_verbs = defaultdict(verb_counter) self._path_status = defaultdict(status_counter) self._path_times = defaultdict(list) self._path_upstream_times = defaultdict(list) self._path_body_bytes = defaultdict(list) def finish(self): """Stop execution timer.""" end_time = time.clock() self.execution_time = end_time - self._start_time def add(self, path, verb, status, time, upstream_time, body_bytes): """Add a log entry to the report. Any request with ``verb`` not matching any of ``self._verbs`` or ``status`` not matching any of ``self._status`` is ignored. :param path: monitored request path. :type path: ``str`` :param verb: HTTP method (GET, POST, ...) :type verb: ``str`` :param status: response status code. :type status: ``int`` :param time: response time in seconds. :type time: ``float`` :param upstream_time: upstream response time in seconds. :type upstream_time: ``float`` :param body_bytes: response body size in bytes. 
:type body_bytes: ``float`` """ # Only keep entries with verbs/status codes that are being tracked if verb not in self._verbs or self._status.match(status) is None: LOG.debug("Ignoring log entry for non-tracked verb ({verb}) or " "status code ({status!s}).".format(verb=verb, status=status)) return self.requests += 1 self._verbs[verb] += 1 self._status.inc(str(status)) self._times.append(time) self._upstream_times.append(upstream_time) self._body_bytes.append(body_bytes) self._path_requests[path] += 1 self._path_verbs[path][verb] += 1 self._path_status[path].inc(status) self._path_times[path].append(time) self._path_upstream_times[path].append(upstream_time) self._path_body_bytes[path].append(body_bytes) @property def verbs(self): """List request methods of all matched requests, ordered by frequency. :returns: tuples of HTTP verb and occurrency count. :rtype: ``list`` of ``tuple`` """ return self._verbs.most_common() @property def status(self): """List status codes of all matched requests, ordered by frequency. :returns: tuples of status code and occurrency count. :rtype: ``list`` of ``tuple`` """ return self._status.most_common() @property def times(self): """Response time statistics of all matched requests. :returns: response time statistics. :rtype: :py:class:`analog.report.ListStats` """ return ListStats(self._times) @property def upstream_times(self): """Response upstream time statistics of all matched requests. :returns: response upstream time statistics. :rtype: :py:class:`analog.report.ListStats` """ return ListStats(self._upstream_times) @property def body_bytes(self): """Response body size in bytes of all matched requests. :returns: response body size statistics. :rtype: :py:class:`analog.report.ListStats` """ return ListStats(self._body_bytes) @property def path_requests(self): """List paths of all matched requests, ordered by frequency. :returns: tuples of path and occurrency count. 
:rtype: ``list`` of ``tuple`` """ return self._path_requests.most_common() @property def path_verbs(self): """List request methods (HTTP verbs) of all matched requests per path. Verbs are grouped by path and ordered by frequency. :returns: path mapping of tuples of verb and occurrency count. :rtype: ``dict`` of ``list`` of ``tuple`` """ return OrderedDict( sorted(((path, counter.most_common()) for path, counter in self._path_verbs.items()), key=lambda item: item[0])) @property def path_status(self): """List status codes of all matched requests per path. Status codes are grouped by path and ordered by frequency. :returns: path mapping of tuples of status code and occurrency count. :rtype: ``dict`` of ``list`` of ``tuple`` """ return OrderedDict( sorted(((path, counter.most_common()) for path, counter in self._path_status.items()), key=lambda item: item[0])) @property def path_times(self): """Response time statistics of all matched requests per path. :returns: path mapping of response time statistics. :rtype: ``dict`` of :py:class:`analog.report.ListStats` """ return OrderedDict( sorted(((path, ListStats(values)) for path, values in self._path_times.items()), key=lambda item: item[0])) @property def path_upstream_times(self): """Response upstream time statistics of all matched requests per path. :returns: path mapping of response upstream time statistics. :rtype: ``dict`` of :py:class:`analog.report.ListStats` """ return OrderedDict( sorted(((path, ListStats(values)) for path, values in self._path_upstream_times.items()), key=lambda item: item[0])) @property def path_body_bytes(self): """Response body size in bytes of all matched requests per path. :returns: path mapping of body size statistics. 
:rtype: ``dict`` of :py:class:`analog.report.ListStats` """ return OrderedDict( sorted(((path, ListStats(values)) for path, values in self._path_body_bytes.items()), key=lambda item: item[0])) def render(self, path_stats, output_format): """Render report data into ``output_format``. :param path_stats: include per path statistics in output. :type path_stats: ``bool`` :param output_format: name of report renderer. :type output_format: ``str`` :raises: :py:class:`analog.exceptions.UnknownRendererError` or unknown ``output_format`` identifiers. :returns: rendered report data. :rtype: ``str`` """ renderer = Renderer.by_name(name=output_format) return renderer.render(self, path_stats=path_stats)
{ "repo_name": "fabianbuechler/analog", "path": "analog/report.py", "copies": "1", "size": "9266", "license": "mit", "hash": -2104843089820055800, "line_mean": 31.975088968, "line_max": 78, "alpha_frac": 0.5995035614, "autogenerated": false, "ratio": 4.344116268166901, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5443619829566901, "avg_score": null, "num_lines": null }
"""Analog log report renderers.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import abc import csv try: from cStringIO import StringIO except ImportError: from io import StringIO import textwrap from tabulate import tabulate from analog.exceptions import UnknownRendererError from analog.utils import PrefixMatchingCounter def find_subclasses(cls, _seen=None): """Find all subclasses (recursively) of ``cls``. :param cls: class object. :param _seen: set of already found classes if called recursively. :returns: generator of ``cls`` subclasses. :rtype: ``generator`` """ if _seen is None: _seen = set() for subclass in cls.__subclasses__(): if subclass not in _seen: _seen.add(subclass) yield subclass for subclass in find_subclasses(subclass, _seen): yield subclass def add_metaclass(metaclass): """From six: Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper @add_metaclass(abc.ABCMeta) class Renderer(object): """Base report renderer interface.""" name = None @abc.abstractmethod def render(self, report, path_stats=False): """Render report statistics. :param report: log analysis report object. :type report: :py:class:`analog.report.Report` :param path_stats: include per path statistics in output. :type path_stats: ``bool`` :returns: output string :rtype: `str` """ @classmethod def all_renderers(cls): """Get a mapping of all defined report renderer names. :returns: dictionary of name to renderer class. :rtype: ``dict`` """ return {subclass.name: subclass for subclass in find_subclasses(cls) if subclass.name is not None} @classmethod def by_name(cls, name): """Select specific ``Renderer`` subclass by name. :param name: name of subclass. :type name: ``str`` :returns: ``Renderer`` subclass instance. 
:rtype: :py:class:`analog.renderers.Renderer` :raises: :py:class:`analog.exceptions.UnknownRendererError` for unknown subclass names. """ renderers = cls.all_renderers() if name in renderers: return renderers[name]() raise UnknownRendererError(name) class PlainTextRenderer(Renderer): """Default renderer for plain text output in list format.""" name = "plain" def render(self, report, path_stats=False): """ Render overall analysis summary report. :returns: output string :rtype: `str` """ output = textwrap.dedent("""\ Requests: {self.requests} HTTP Verbs: {verbs} Status Codes: {status} Path Requests: {paths} Times [s]: {times} Upstream Times [s]: {upstream_times} Body Bytes Sent [B]: {body_bytes} """).format( self=report, verbs=self._indent(self._str_path_counts(report.verbs)), status=self._indent(self._str_path_counts(report.status)), paths=self._indent(self._str_path_counts(report.path_requests)), times=self._indent(self._render_list_stats(report.times)), upstream_times=self._indent( self._render_list_stats(report.upstream_times)), body_bytes=self._indent( self._render_list_stats(report.body_bytes))) if path_stats: output += "\n" + self._render_path_stats(report) return output def _render_path_stats(self, report): """ Render per path analysis summary report. 
:returns: output string :rtype: `str` """ output = [] for path, verbs, status, times, upstream_times, body_bytes in zip( report.path_verbs.keys(), report.path_verbs.values(), report.path_status.values(), report.path_times.values(), report.path_upstream_times.values(), report.path_body_bytes.values()): output.append(textwrap.dedent("""\ {path} HTTP Verbs: {verbs} Status Codes: {status} Times [s]: {times} Upstream Times [s]: {upstream_times} Body Bytes Sent [B]: {body_bytes} """).format( path=path, verbs=self._indent(self._str_path_counts(verbs), 8), status=self._indent(self._str_path_counts(status), 8), times=self._indent(self._render_list_stats(times), 8), upstream_times=self._indent( self._render_list_stats(upstream_times), 8), body_bytes=self._indent( self._render_list_stats(body_bytes), 8))) return "\n".join(output) def _render_list_stats(self, list_stats): """ Generate pretty representation of list statistics object. :param list_stats: ``ListStats`` instance. :returns: statistic report. :rtype: ``str`` """ return textwrap.dedent("""\ {stats.mean:>10.3f} mean {stats.median:>10.3f} median """).format(stats=list_stats) def _str_path_counts(self, path_counts): """ Render path count. :returns: output string :rtype: `str` """ return "\n".join("{count:>10,} {key}".format( key=key, count=count) for key, count in path_counts) def _indent(self, text, indent=4): """ Render every line after the first line indented. 
Example:: line1 line2 line3 :returns: output string :rtype: `str` """ lines = [] for idx, line in enumerate(text.splitlines()): space = " " * indent if idx > 0 else "" lines.append(space + line) return "\n".join(lines) @add_metaclass(abc.ABCMeta) class TabularDataRenderer(Renderer): """Base renderer for report output in any tabular form.""" #: field names for ``ListStats`` attributes _stats_fields = ('times', 'upstream_times', 'body_bytes') #: attribute names of ``ListStats`` attributes _list_stats_keys = ("mean", "median") def _list_stats(self, list_stats): """Get list of (key,value) tuples for each attribute of ``list_stats``. :param list_stats: list statistics object. :type list_stats: :py:class:`analog.report.ListStats` :returns: (key, value) tuples for each ``ListStats`` attribute. :rtype: ``list`` of ``tuple`` """ return zip(self._list_stats_keys, [list_stats.mean, list_stats.median]) def _tabular_data(self, report, path_stats): """Prepare tabular data for output. Generate a list of header fields, a list of total values for each field and a list of the same values per path. :param report: log analysis report object. :type report: :py:class:`analog.report.Report` :param path_stats: include per path statistics in output. :type path_stats: ``bool`` :returns: tuple of table (headers, rows). 
:rtype: ``tuple`` """ # sorted list of all HTTP verbs in this report and their counts verb_names, verb_counts = zip(*sorted( (verb, count) for (verb, count) in report.verbs)) # sorted list of all status codes in this report and their counts status_names, status_counts = zip(*sorted( (str(status), count) for (status, count) in report.status)) # all statistical attributes of the report stats = [(stats_field, self._list_stats(getattr(report, stats_field))) for stats_field in self._stats_fields] stats_names, stats_values = zip(*( ('{0}_{1}'.format(field, analysis), value) for (field, list_stats) in stats for (analysis, value) in list_stats)) status_headers = tuple("status_{code:x<3}".format(code=code) for code in status_names) headers = (("path", "requests") + verb_names + status_headers + stats_names) total = (("total", report.requests) + verb_counts + status_counts + stats_values) rows = [] # include path statistics? if path_stats: # get per path values from report, ordered by path for (path, verbs, status, times, utimes, body_bytes) in zip( report.path_verbs.keys(), report.path_verbs.values(), report.path_status.values(), report.path_times.values(), report.path_upstream_times.values(), report.path_body_bytes.values()): requests = report._path_requests[path] verbs = dict(verbs) status = PrefixMatchingCounter(dict(status)) row = [path, requests] row += [verbs.get(name, 0) for name in verb_names] row += [status.get(name, 0) for name in status_names] row += [time[1] for time in self._list_stats(times)] row += [utime[1] for utime in self._list_stats(utimes)] row += [bbytes[1] for bbytes in self._list_stats(body_bytes)] rows.append(row) rows.append(total) return (list(headers), rows) @add_metaclass(abc.ABCMeta) class ASCIITableRenderer(TabularDataRenderer): """Base renderer for report output in ascii-table format.""" tabulate_format = None def render(self, report, path_stats=False): """Render report statistics using ``tabulate``. :param report: log analysis report object. 
:type report: :py:class:`analog.report.Report` :param path_stats: include per path statistics in output. :type path_stats: ``bool`` :returns: output string :rtype: `str` """ headers, rows = self._tabular_data(report, path_stats) return tabulate(rows, headers=headers, tablefmt=self.tabulate_format, floatfmt='.3f') class SimpleTableRenderer(ASCIITableRenderer): """Renderer for tabular report output in simple reSt table format.""" name = "table" tabulate_format = 'rst' class GridTableRenderer(ASCIITableRenderer): """Renderer for tabular report output in grid table format.""" name = "grid" tabulate_format = 'grid' @add_metaclass(abc.ABCMeta) class SeparatedValuesRenderer(TabularDataRenderer): """Base renderer for report output in delimiter-separated values format.""" #: value delimter. E.g. comma or tab. delimiter = None def render(self, report, path_stats): """Render report statistics using a CSV writer. :param report: log analysis report object. :type report: :py:class:`analog.report.Report` :param path_stats: include per path statistics in output. :type path_stats: ``bool`` :returns: output string :rtype: `str` """ headers, rows = self._tabular_data(report, path_stats) try: stream = StringIO(newline='') except TypeError: stream = StringIO() # Python 2.7 does not support newline arg writer = csv.writer(stream, delimiter=str(self.delimiter), lineterminator='\n') writer.writerow(headers) writer.writerows(rows) return stream.getvalue()[:-1] # Do not return last newline class CSVRenderer(SeparatedValuesRenderer): """Renderer for report output in comma separated values format.""" name = 'csv' delimiter = ',' class TSVRenderer(SeparatedValuesRenderer): """Renderer for report output in tab separated values format.""" name = 'tsv' delimiter = '\t'
{ "repo_name": "fabianbuechler/analog", "path": "analog/renderers.py", "copies": "1", "size": "12696", "license": "mit", "hash": 8133220540198397000, "line_mean": 29.9658536585, "line_max": 79, "alpha_frac": 0.5642722117, "autogenerated": false, "ratio": 4.29790115098172, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.536217336268172, "avg_score": null, "num_lines": null }
"""Module-level shared state and constants for the controller UI.

Mutable names (lowercase) are read and written by several modules;
uppercase names are constants.  Comments below document the intended
consumers where the original code stated them.
"""

# Maximum raw value for an analog pin.
ANALOG_MAX = 255

# COMMANDS SENT TO ARDUINO
# (trailing semicolons removed - they are a C-ism, not needed in Python)
analog_0_sent = 0
analog_1_sent = 0
digital_0_sent = False
digital_1_sent = False

# RECORD MODE
record = False
record_file_number = None              # None if not recording
record_array = []      # List that stores quads (PIN TYPE, PIN INDEX, value, time step)
record_start_time = None

playback = False
playback_file_number = None
playback_array = []    # Populated from a recording file, for playback
playback_paused = False
playback_cancel = False

# BUTTON TYPES
TYPE_BUTTON = 'b'    # VALUE WILL BE 0 OR 1
TYPE_AXIS = 'a'      # VALUE WILL BE BETWEEN 0 AND 1
TYPE_DPAD = 'd'      # VALUE WILL BE PAIR OF -1, 0, OR 1

# BUTTON NAMES
BUTTON_A = 0
BUTTON_B = 1
BUTTON_X = 2
BUTTON_Y = 3
BUTTON_LEFT_BUMPER = 4
BUTTON_RIGHT_BUMPER = 5
BUTTON_SELECT = 6
BUTTON_START = 7
BUTTON_LEFT_STICK = 8
BUTTON_RIGHT_STICK = 9

AXIS_X_LEFT_STICK = 0
AXIS_Y_LEFT_STICK = 1
AXIS_LEFT_TRIGGER = 2
AXIS_X_RIGHT_STICK = 3
AXIS_Y_RIGHT_STICK = 4
AXIS_RIGHT_TRIGGER = 5

D_PAD = 0

# INDEX TO THE CURRENT CONTROL MAP
# ARRAY DEFINED IN ui_map.py
map_index = 0

# ONLY USED BY ui_map:
axis_0_locked = False
axis_1_locked = False
{ "repo_name": "DaveBuckingham/robosoft", "path": "global_data.py", "copies": "1", "size": "1292", "license": "mit", "hash": -4061747097755971000, "line_mean": 20.1803278689, "line_max": 92, "alpha_frac": 0.6122291022, "autogenerated": false, "ratio": 2.772532188841202, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.3884761291041202, "avg_score": null, "num_lines": null }
'''Analog Port
=================

Instances of :mod:`~moa.device.port` that represent analog input/output
devices.
'''

from kivy.properties import (
    NumericProperty, ObjectProperty, StringProperty, DictProperty)
from moa.device.port import Channel, Port
from functools import partial
from time import clock

__all__ = ('AnalogChannel', 'AnalogPort', 'NumericPropertyChannel',
           'NumericPropertyPort')


class AnalogChannel(Channel):
    '''A abstract single channel analog device.
    '''

    state = NumericProperty(None, allownone=True)
    '''The state of the channel.

    :attr:`state` is a :class:`~kivy.properties.NumericProperty` and
    defaults to None.
    '''

    def set_state(self, state, **kwargs):
        '''A stub method defining the prototype for :meth:`set_state` of
        derived classes.

        :Parameters:

            `state`: float, int
                The value to set the state to.

        .. note::
            When called, it raises a `NotImplementedError` if not overwritten.
        '''
        raise NotImplementedError()


class AnalogPort(Port):
    '''A abstract multi-channel analog device.
    '''

    def set_state(self, **kwargs):
        '''A stub method defining the prototype for :meth:`set_state` of
        derived classes.

        For devices that support it, the properties passed in one call can
        be set to the requested state simultaneously.

        Method accepts property names and their values as keyword arguments,
        where each of the properties will be set to those values.

        E.g.::

            >>> port.set_state(voltage=1.6, amp=3.7)

        .. note::
            When called, it raises a `NotImplementedError` if not overwritten.
        '''
        raise NotImplementedError()


class NumericPropertyChannel(AnalogChannel):
    '''A class that represents a analog channel by the
    :class:`~kivy.properties.NumericProperty` of a widget.

    For example::

        >>> from kivy.uix.widget import Widget
        >>> from kivy.properties import NumericProperty
        >>> class MyWidget(Widget):
        ...     value = NumericProperty(0)
        ...     def on_value(self, *largs):
        ...         print('Value changed to "{}"'.format(self.value))

        >>> widget = MyWidget()
        >>> chan = NumericPropertyChannel(channel_widget=widget, \
prop_name='value')
        >>> chan.activate(chan)
        >>> print(widget.value, chan.state)
        (0, 0)
        >>> widget.value = 5.15
        Value changed to "5.15"
        >>> print(widget.value, chan.state)
        (5.15, 5.15)
        >>> chan.set_state(3.3)
        Value changed to "3.3"
        >>> print(widget.value, chan.state)
        (3.3, 3.3)
        >>> chan.deactivate(chan)
    '''

    channel_widget = ObjectProperty(None)
    '''The widget whose :class:`~kivy.properties.NumericProperty` the
    channels is bound to.
    '''

    prop_name = StringProperty('')
    '''The name of the :class:`~kivy.properties.NumericProperty` in widget
    :attr:`channel_widget` that represents the analog channel.
    '''

    def _update_state(self, instance, value):
        # Property-changed callback: mirror the widget property into
        # :attr:`state` and notify listeners.
        self.timestamp = clock()
        self.state = value
        self.dispatch('on_data_update', self)

    def activate(self, *largs, **kwargs):
        if super(NumericPropertyChannel, self).activate(*largs, **kwargs):
            widget = self.channel_widget
            prop = self.prop_name
            widget.bind(**{prop: self._update_state})
            # Seed state with the widget's current value on activation.
            self.state = getattr(widget, prop)
            return True
        return False

    def deactivate(self, *largs, **kwargs):
        if super(NumericPropertyChannel, self).deactivate(*largs, **kwargs):
            self.channel_widget.unbind(**{self.prop_name: self._update_state})
            return True
        return False

    def set_state(self, state, **kwargs):
        setattr(self.channel_widget, self.prop_name, state)


class NumericPropertyViewChannel(AnalogChannel):
    '''Device that the :class:`~kivy.properties.NumericProperty` of a widget
    to control or reflect the state of an actual analog hardware device.

    :class:`NumericPropertyViewChannel` is very similar to
    :class:`NumericPropertyChannel`, except that for
    :class:`NumericPropertyChannel` its only purpose is to have
    :attr:`~AnalogChannel.state` reflect the state of the
    :class:`~kivy.properties.NumericProperty` of the button while for
    :class:`NumericPropertyViewChannel` the purpose is for the
    :class:`~kivy.properties.NumericProperty` to visualize and control the
    state of an external device reflected in :attr:`~AnalogChannel.state`.

    That is for :class:`NumericPropertyViewChannel`,
    :meth:`AnalogChannel.set_state` needs to be overwritten by the derived
    class to update the hardware and when the hardware changes
    :attr:`~AnalogChannel.state` should be updated. However, in addition, the
    :class:`~kivy.properties.NumericProperty` will automatically be updated
    to reflect that state and when the widget's
    :class:`~kivy.properties.NumericProperty` changes it'll trigger a call to
    :meth:`AnalogChannel.set_state`.
    '''

    channel_widget = ObjectProperty(None)
    '''The widget whose :class:`~kivy.properties.NumericProperty` the
    channels is bound to.
    '''

    prop_name = StringProperty('')
    '''The name of the :class:`~kivy.properties.NumericProperty` in widget
    :attr:`channel_widget` that represents the analog channel.
    '''

    def _update_from_device(self, instance, value):
        # Hardware state changed: mirror it into the widget property.
        setattr(self.channel_widget, self.prop_name, value)

    def _update_from_channel_widget(self, instance, value):
        # Widget property changed: push the new value to the hardware,
        # but avoid a feedback loop when the change came from the device.
        if self.state != value:
            self.set_state(value)

    def activate(self, *largs, **kwargs):
        if super(NumericPropertyViewChannel, self).activate(*largs, **kwargs):
            # Widget -> device binding only applies to output channels.
            if 'o' in self.direction:
                widget = self.channel_widget
                widget.fbind(
                    self.prop_name, self._update_from_channel_widget)
            self.fbind('state', self._update_from_device)
            return True
        return False

    def deactivate(self, *largs, **kwargs):
        if super(NumericPropertyViewChannel, self).deactivate(*largs,
                                                              **kwargs):
            if 'o' in self.direction:
                widget = self.channel_widget
                if widget is not None:
                    widget.funbind(
                        self.prop_name, self._update_from_channel_widget)
            self.funbind('state', self._update_from_device)
            return True
        return False


class NumericPropertyPort(AnalogPort):
    '''A class that represents multiple analog channels with multiple
    :class:`~kivy.properties.NumericProperty` instances of a widget.

    .. note::
        For this class the values in :attr:`~moa.device.port.Port.attr_map`
        should be set to the names of the
        :class:`~kivy.properties.NumericProperty` instances underlying the
        channels. Similar to the single channel name
        :attr:`NumericPropertyChannel.prop_name`.

    For example::

        >>> from kivy.uix.widget import Widget
        >>> from kivy.properties import NumericProperty
        >>> class MyWidget(Widget):
        ...     voltage = NumericProperty(0)
        ...     amps = NumericProperty(0)
        ...
        ...     def on_voltage(self, *largs):
        ...         print('Voltage changed to "{}"'.format(self.voltage))
        ...
        ...     def on_amps(self, *largs):
        ...         print('Amps changed to "{}"'.format(self.amps))

        >>> class Devs(NumericPropertyPort):
        ...     voltage = NumericProperty(None, allownone=True)
        ...     amps = NumericProperty(None, allownone=True)

        >>> widget = MyWidget()
        >>> chan = Devs(channel_widget=widget, attr_map={'voltage': 'voltage',\
 'amps': 'amps'})
        >>> chan.activate(chan)
        >>> print(widget.voltage, widget.amps, chan.voltage, chan.amps)
        (0, 0, 0, 0)
        >>> widget.voltage = 5.15
        Voltage changed to "5.15"
        >>> print(widget.voltage, widget.amps, chan.voltage, chan.amps)
        (5.15, 0, 5.15, 0)
        >>> chan.set_state(voltage=3.3, amps=2.0)
        Amps changed to "2.0"
        Voltage changed to "3.3"
        >>> print(widget.voltage, widget.amps, chan.voltage, chan.amps)
        (3.3, 2.0, 3.3, 2.0)
        >>> chan.deactivate(chan)
    '''

    channel_widget = ObjectProperty(None)
    '''Similar to :attr:`NumericPropertyChannel.channel_widget`, the widget
    that contains the properties simulating the analog channels.
    '''

    # NOTE(review): class-level mutable default; activate() rebinds it per
    # instance, so sharing only matters if deactivate() runs first.
    _widget_callbacks = []
    '''Stores the property callbacks bound when activating the channels.
    '''

    def _update_state(self, attr, instance, value):
        # ``attr`` is pre-bound via functools.partial in activate().
        self.timestamp = clock()
        setattr(self, attr, value)
        self.dispatch('on_data_update', self)

    def activate(self, *largs, **kwargs):
        if super(NumericPropertyPort, self).activate(*largs, **kwargs):
            widget = self.channel_widget
            callbacks = self._widget_callbacks = []
            f = self._update_state
            for attr, wid_attr in self.attr_map.items():
                callbacks.append({wid_attr: partial(f, attr)})
                widget.bind(**callbacks[-1])
                # Seed each channel with the widget's current value.
                setattr(self, attr, getattr(widget, wid_attr))
            return True
        return False

    def deactivate(self, *largs, **kwargs):
        if super(NumericPropertyPort, self).deactivate(*largs, **kwargs):
            widget = self.channel_widget
            for d in self._widget_callbacks:
                widget.unbind(**d)
            self._widget_callbacks = []
            return True
        return False

    def set_state(self, **kwargs):
        attr_map = self.attr_map
        widget = self.channel_widget
        for attr, value in kwargs.items():
            setattr(widget, attr_map[attr], value)


class NumericPropertyViewPort(AnalogPort):
    '''A class that represents multiple analog channels with multiple
    :class:`~kivy.properties.NumericProperty` instances of a widget.

    :class:`NumericPropertyViewPort` is very similar to
    :class:`NumericPropertyPort`, except that for
    :class:`NumericPropertyPort` its only purpose is to have the port states
    reflect the states of the :class:`~kivy.properties.NumericProperty`
    instances while for :class:`NumericPropertyViewPort` the purpose is for
    the :class:`~kivy.properties.NumericProperty` instances to visualize and
    control the states of the external device reflected in the properties.

    That is for :class:`NumericPropertyViewPort`,
    :meth:`AnalogPort.set_state` needs to be overwritten by the derived class
    to update the hardware and when the hardware changes the properties
    should be updated. However, in addition, the widget's
    :class:`~kivy.properties.NumericProperty` instances will automatically be
    updated to reflect that state and when the
    :class:`~kivy.properties.NumericProperty` instances change it'll trigger
    a call to :meth:`AnalogPort.set_state`.

    .. note::
        For this class the values in :attr:`~moa.device.port.Port.attr_map`
        should be set to the names of the
        :class:`~kivy.properties.NumericProperty` instances underlying the
        channels. Similar to the single channel name
        :attr:`NumericPropertyChannel.prop_name`. However, since this channel
        also controls hardware that would require
        :attr:`~moa.device.port.Port.attr_map` for mapping the property names
        to the hardware channel ports or similar, :attr:`dev_map` and
        :attr:`chan_dev_map` has been added as a secondary mapping for this
        purpose.
    '''

    channel_widget = ObjectProperty(None)
    '''Similar to :attr:`NumericPropertyChannel.channel_widget`, the widget
    that contains the properties simulating the analog channels.
    '''

    dev_map = DictProperty({})
    '''A secondary mapping of property names to channel numbers etc to be
    used by the derived classes instead of
    :attr:`~moa.device.port.Port.attr_map` because
    :attr:`~moa.device.port.Port.attr_map` is used to map the widget's
    properties to property names.
    '''

    chan_dev_map = DictProperty({})
    '''The inverse mapping of :attr:`dev_map`. It is automatically generated
    and is read only.
    '''

    def __init__(self, **kwargs):
        super(NumericPropertyViewPort, self).__init__(**kwargs)
        self.bind(dev_map=self._reverse_dev_mapping)
        self._reverse_dev_mapping()

    def _reverse_dev_mapping(self, *largs):
        # Validate dev_map keys and regenerate the inverse mapping.
        for k in self.dev_map:
            if not hasattr(self, k):
                raise AttributeError('{} is not an attribute of {}'
                                     .format(k, self))
        self.chan_dev_map = {v: k for k, v in self.dev_map.items()}

    def _update_from_device(self, attr, instance, value):
        # ``attr`` (the widget property name) is pre-bound via fbind largs.
        setattr(self.channel_widget, attr, value)

    def _update_from_channel_widget(self, attr, instance, value):
        # Guard against feedback: only push widget edits that differ from
        # the current device state.
        if getattr(self, attr) != value:
            self.set_state(**{attr: value})

    def activate(self, *largs, **kwargs):
        if super(NumericPropertyViewPort, self).activate(*largs, **kwargs):
            # Widget -> device bindings only for output ports.
            if 'o' in self.direction:
                wid = self.channel_widget
                for attr, wid_attr in self.attr_map.items():
                    wid.fbind(
                        wid_attr, self._update_from_channel_widget, attr)
            for attr, wid_attr in self.attr_map.items():
                self.fbind(attr, self._update_from_device, wid_attr)
            return True
        return False

    def deactivate(self, *largs, **kwargs):
        if super(NumericPropertyViewPort, self).deactivate(*largs, **kwargs):
            if 'o' in self.direction:
                wid = self.channel_widget
                if wid is not None:
                    for attr, wid_attr in self.attr_map.items():
                        wid.funbind(
                            wid_attr, self._update_from_channel_widget, attr)
            for attr, wid_attr in self.attr_map.items():
                self.funbind(attr, self._update_from_device, wid_attr)
            return True
        return False
{ "repo_name": "matham/moa", "path": "moa/device/analog.py", "copies": "1", "size": "14381", "license": "mit", "hash": -8804389085658274000, "line_mean": 36.4505208333, "line_max": 79, "alpha_frac": 0.6216535707, "autogenerated": false, "ratio": 4.176880627359861, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5298534198059861, "avg_score": null, "num_lines": null }