text
stringlengths
0
1.05M
meta
dict
# Absolute Optical Flow Rotation/Scale
#
# This example shows off using your OpenMV Cam to measure
# rotation/scale by comparing the current and a previous
# image against each other. Note that only rotation/scale is
# handled - not X and Y translation in this mode.
#
# To run this demo effectively please mount your OpenMV Cam on a steady
# base and SLOWLY rotate the camera around the lens and move the camera
# forward/backwards to see the numbers change.
# I.e. Z direction changes only.

import sensor, image, time, math

# NOTE!!! You have to use a small power of 2 resolution when using
# find_displacement(). This is because the algorithm is powered by
# something called phase correlation which does the image comparison
# using FFTs. A non-power of 2 resolution requires padding to a power
# of 2 which reduces the usefulness of the algorithm results. Please
# use a resolution like B64X64 or B64X32 (2x faster).
#
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.

sensor.reset()                             # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)     # Grayscale keeps the FFT work cheap.
sensor.set_framesize(sensor.B64X64)        # Power-of-2 frame size (or B64X32).
sensor.skip_frames(time = 2000)            # Wait for settings take effect.
clock = time.clock()                       # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer
# that holds the reference image. There's a lot more RAM in the frame buffer
# than in the MicroPython heap, but after doing this you have a lot less RAM
# for some algorithms - so be aware that it's easier to run out of RAM now.
ref_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
ref_fb.replace(sensor.snapshot())

while True:
    clock.tick()                           # Track elapsed milliseconds between snapshots().
    frame = sensor.snapshot()              # Take a picture and return the image.

    # This algorithm is hard to test without a perfect jig... So, here's a
    # cheat: enable one of these and the printed r/z output should match the
    # injected value.
    if 0:
        expected_rotation = 20.0
        frame.rotation_corr(z_rotation=expected_rotation)
    if 0:
        expected_zoom = 0.8
        frame.rotation_corr(zoom=expected_zoom)

    # For this example we never update the old image, so we measure absolute
    # change. logpolar=True makes phase correlation report rotation/scale
    # instead of x/y translation.
    disp = ref_fb.find_displacement(frame, logpolar=True)

    # Offset results are noisy without filtering so we drop some accuracy
    # by quantizing to 0.2 degree steps.
    r_change = int(math.degrees(disp.rotation()) * 5) / 5.0
    z_amount = disp.scale()

    if disp.response() > 0.1:  # Below 0.1 or so (YMMV) the results are just noise.
        print("{0:+f}r {1:+f}z {2} {3} FPS".format(r_change, z_amount, \
            disp.response(), clock.fps()))
    else:
        print(clock.fps())
{ "repo_name": "kwagyeman/openmv", "path": "scripts/examples/Arduino/Portenta-H7/22-Optical-Flow/absolute-rotation-scale.py", "copies": "2", "size": "3230", "license": "mit", "hash": 1578007098796938200, "line_mean": 47.2089552239, "line_max": 97, "alpha_frac": 0.7086687307, "autogenerated": false, "ratio": 3.6914285714285713, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.540009730212857, "avg_score": null, "num_lines": null }
# Absolute Optical Flow Translation
#
# This example shows off using your OpenMV Cam to measure translation
# in the X and Y direction by comparing the current and a previous
# image against each other. Note that only X and Y translation is
# handled - not rotation/scale in this mode.
#
# To run this demo effectively please mount your OpenMV Cam on a steady
# base and SLOWLY translate it to the left, right, up, and down and
# watch the numbers change. Note that you can see displacement numbers
# up +- half of the hoizontal and vertical resolution.

import sensor, image, time

# NOTE!!! You have to use a small power of 2 resolution when using
# find_displacement(). This is because the algorithm is powered by
# something called phase correlation which does the image comparison
# using FFTs. A non-power of 2 resolution requires padding to a power
# of 2 which reduces the usefulness of the algorithm results. Please
# use a resolution like B64X64 or B64X32 (2x faster).
#
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.

sensor.reset()                             # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)        # Set pixel format to RGB565 (or GRAYSCALE).
sensor.set_framesize(sensor.B64X64)        # Power-of-2 frame size (or B64X32).
sensor.skip_frames(time = 2000)            # Wait for settings take effect.
clock = time.clock()                       # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer
# that holds the reference image. There's a lot more RAM in the frame buffer
# than in the MicroPython heap, but after doing this you have a lot less RAM
# for some algorithms - so be aware that it's easier to run out of RAM now.
ref_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
ref_fb.replace(sensor.snapshot())

while True:
    clock.tick()                           # Track elapsed milliseconds between snapshots().
    frame = sensor.snapshot()              # Take a picture and return the image.

    # For this example we never update the old image, so we measure absolute
    # change from the startup reference frame.
    disp = ref_fb.find_displacement(frame)

    # Offset results are noisy without filtering so we drop some accuracy
    # by quantizing to 0.2 pixel steps.
    dx = int(disp.x_translation() * 5) / 5.0
    dy = int(disp.y_translation() * 5) / 5.0

    if disp.response() > 0.1:  # Below 0.1 or so (YMMV) the results are just noise.
        print("{0:+f}x {1:+f}y {2} {3} FPS".format(dx, dy,
                                                   disp.response(), clock.fps()))
    else:
        print(clock.fps())
{ "repo_name": "iabdalkader/openmv", "path": "scripts/examples/22-Optical-Flow/absolute-translation.py", "copies": "3", "size": "2715", "license": "mit", "hash": 6572079657225470000, "line_mean": 48.3636363636, "line_max": 95, "alpha_frac": 0.7145488029, "autogenerated": false, "ratio": 3.6345381526104417, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.003028294722876301, "num_lines": 55 }
# Absorb linear operators into proximal operators.
#
# A prox function wraps a lin op expression; when that expression's top-level
# node is a simple diagonal operation (elementwise multiply or scalar scale),
# it can be folded into the prox function's own parameters instead of being
# evaluated separately each iteration.
from proximal.lin_ops import Variable, scale, mul_elemwise, Constant
from proximal.prox_fns import (nonneg, weighted_nonneg, norm1, weighted_norm1,
                               poisson_norm, weighted_poisson_norm,
                               sum_squares, weighted_sum_squares,
                               group_norm1, weighted_group_norm1, zero_prox)
import numpy as np
import copy as cp

# Maps each unweighted prox function type to its elementwise-weighted
# counterpart, used when folding a mul_elemwise lin op into the prox.
WEIGHTED = {nonneg: weighted_nonneg, norm1: weighted_norm1,
            sum_squares: weighted_sum_squares,
            poisson_norm: weighted_poisson_norm,
            group_norm1: weighted_group_norm1}


def absorb_all_lin_ops(prox_funcs):
    """Repeatedly absorb lin ops until no further absorption is possible.

    A prox function is re-queued whenever :func:`absorb_lin_op` changed it,
    so chains of absorbable lin ops (e.g. a scale of a mul_elemwise) are
    fully collapsed.
    """
    new_proxes = []
    ready = prox_funcs[:]
    while len(ready) > 0:
        curr = ready.pop(0)
        absorbed = absorb_lin_op(curr)
        # absorb_lin_op returns [curr] unchanged when nothing was absorbed;
        # only then is the prox considered finished.
        if len(absorbed) == 1 and absorbed[0] == curr:
            new_proxes.append(absorbed[0])
        else:
            ready += absorbed
    return new_proxes


def absorb_lin_op(prox_fn):
    """If possible moves the top level lin op argument
       into the prox operator.

       For example, elementwise multiplication can be folded into
       a separable function's prox.

       Returns a list of prox functions: either ``[prox_fn]`` unchanged, or
       one or more replacement prox functions with the lin op absorbed.
    """
    # Never eliminate variables.
    if isinstance(prox_fn.lin_op, Variable):
        return [prox_fn]
    # Absorb a lin op into sum_entries/zero.
    # A zero prox (with gamma == 0) places no cost on its argument, so the
    # lin op can be pushed through by applying its adjoint to the c term and
    # emitting one zero prox per input node.
    if type(prox_fn) == zero_prox and prox_fn.gamma == 0:
        outputs = []
        inputs = [prox_fn.c]
        for arg in prox_fn.lin_op.input_nodes:
            outputs.append(np.zeros(arg.shape))
        prox_fn.lin_op.adjoint(inputs, outputs)
        new_proxes = []
        for output, arg in zip(outputs, prox_fn.lin_op.input_nodes):
            new_proxes.append(prox_fn.copy(arg, c=output))
        return new_proxes
    # Fold scaling into the function.
    if isinstance(prox_fn.lin_op, mul_elemwise):
        op_weight = prox_fn.lin_op.weight

        def get_new_prox(prox_type, args):
            # Build the weighted prox, carry over the optional parameters,
            # then rescale b and c to account for the elementwise weight.
            new_prox = prox_type(*args)
            copy_prox_fn(new_prox, prox_fn)
            # Only divide b where the weight is nonzero to avoid div-by-zero.
            idxs = op_weight != 0
            new_prox.b[idxs] = prox_fn.b[idxs] / op_weight[idxs]
            new_prox.c = prox_fn.c * op_weight
            return [new_prox]

        if type(prox_fn) in WEIGHTED.keys() and prox_fn.gamma == 0:
            # Unweighted prox: switch to the weighted variant, appending the
            # elementwise weight as its extra data argument.
            args = [prox_fn.lin_op.input_nodes[0]] + prox_fn.get_data() + \
                [op_weight]
            return get_new_prox(WEIGHTED[type(prox_fn)], args)
        elif type(prox_fn) in WEIGHTED.values() and prox_fn.gamma == 0:
            # Already weighted: multiply the new weight into the existing one
            # (assumed to be the last data argument).
            args = [prox_fn.lin_op.input_nodes[0]] + prox_fn.get_data()
            args[-1] = args[-1] * op_weight
            return get_new_prox(type(prox_fn), args)
    # Fold scalar into the function.
    # For f(a*x), the prox parameters transform as beta *= a, b /= a,
    # c *= a, gamma *= a**2.
    if isinstance(prox_fn.lin_op, scale):
        scalar = prox_fn.lin_op.scalar
        new_prox = prox_fn.copy(prox_fn.lin_op.input_nodes[0],
                                beta=prox_fn.beta * scalar,
                                b=prox_fn.b / scalar,
                                c=prox_fn.c * scalar,
                                gamma=prox_fn.gamma * scalar**2)
        return [new_prox]
    # No change.
    return [prox_fn]


def copy_prox_fn(dst_prox, src_prox):
    """Copy the optional parameters from src_prox to dst_prox.
    """
    dst_prox.alpha = src_prox.alpha
    dst_prox.beta = src_prox.beta
    dst_prox.gamma = src_prox.gamma
    dst_prox.b = src_prox.b
    dst_prox.c = src_prox.c
    dst_prox.d = src_prox.d


def copy_non_var(lin_op):
    """If not a variable, returns a shallow copy.

    Variables are shared by reference so identity is preserved across the
    rewritten lin op tree.
    """
    if isinstance(lin_op, Variable):
        return lin_op
    else:
        return cp.copy(lin_op)


def absorb_offset(prox_fn):
    """Absorb the constant offset into the b term and zero out constants
       in lin op.

       Returns a new prox function over a copied lin op tree in which every
       Constant node has been replaced by zeros; the original tree is left
       untouched.
    """
    # Short circuit if no constant leaves.
    if len(prox_fn.lin_op.constants()) == 0:
        return prox_fn
    new_b = -prox_fn.lin_op.get_offset()
    # Zero out constants.
    # Breadth-first copy of the tree: constants become zero Constants,
    # everything else (except shared Variables) is shallow-copied.
    new_lin_op = copy_non_var(prox_fn.lin_op)
    ready = [new_lin_op]
    while len(ready) > 0:
        curr = ready.pop(0)
        for idx, arg in enumerate(curr.input_nodes):
            if isinstance(arg, Constant):
                curr.input_nodes[idx] = Constant(np.zeros(arg.shape))
            # Don't copy variables.
            else:
                curr.input_nodes[idx] = copy_non_var(arg)
                ready.append(curr.input_nodes[idx])
    return prox_fn.copy(new_lin_op, b=new_b + prox_fn.b)
{ "repo_name": "comp-imaging/ProxImaL", "path": "proximal/algorithms/absorb.py", "copies": "2", "size": "4472", "license": "mit", "hash": 3865862143409765000, "line_mean": 35.064516129, "line_max": 92, "alpha_frac": 0.5811717352, "autogenerated": false, "ratio": 3.3028064992614476, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4883978234461448, "avg_score": null, "num_lines": null }
"""Abstactions for working with excel files.""" from win32 import run import constants as xlcharts runexcel = lambda: run('Excel.Application') def open_xlsx(app, path): return app.Workbooks.Open(path) def sheet(xlsx, index): return xlsx.Sheets.Item(index) def chart(sheet, reference): return sheet.ChartObjects(reference) class RangeAccessor(object): def __init__(self, sheet): self.sheet = sheet def __setitem__(self, key, value): self.sheet.Range(key).Value = value def __getitem__(self, key): rng = self.sheet.Range(key) add_cell_iteration(rng) return rng def add_cell_iteration(cells): def iteration(self): return iter(self.Value) setattr(cells.__class__, '__iter__', iteration) class Chart(object): def __init__(self, excel, sheet, chart_type): self.excel = excel self.sheet = sheet self.chart_type = chart_type def __call__(self, cell_range): self.sheet.Shapes.AddChart2(-1, xlcharts.XL_XY_SCATTER).Select() self.sheet.ChartObjects(self.sheet.Shapes[0].name).Activate() self.excel.ActiveChart.SetSourceData(cell_range)
{ "repo_name": "Intelimetrica/coati", "path": "coati/excel.py", "copies": "1", "size": "1180", "license": "mit", "hash": 2166662391699872800, "line_mean": 22.137254902, "line_max": 72, "alpha_frac": 0.6483050847, "autogenerated": false, "ratio": 3.4911242603550297, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46394293450550295, "avg_score": null, "num_lines": null }
"""Abstact Wrapper base-class for the FieldWrapper and DataclassWrapper.""" from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Generic, Optional, List, Type from ..utils import T class Wrapper(Generic[T], ABC): def __init__(self, wrapped: T, name: str): self.wrapped = wrapped self._dest: Optional[str] = None @property def dest(self) -> str: """ Where the attribute will be stored in the Namespace. """ lineage_names: List[str] = [w.name for w in self.lineage()] self._dest = ".".join(reversed([self.name] + lineage_names)) assert self._dest is not None return self._dest def lineage(self) -> List["Wrapper"]: lineage: List[Wrapper] = [] parent = self.parent while parent is not None: lineage.append(parent) parent = parent.parent return lineage @property def nesting_level(self) -> int: return len(self.lineage()) level = 0 parent = self.parent while parent is not None: parent = parent.parent level += 1 return level @abstractmethod def equivalent_argparse_code(self) -> str: pass @property @abstractmethod def name(self) -> str: pass @property @abstractmethod def parent(self) -> Optional["Wrapper"]: pass
{ "repo_name": "lebrice/SimpleParsing", "path": "simple_parsing/wrappers/wrapper.py", "copies": "1", "size": "1432", "license": "mit", "hash": -4938929028845493000, "line_mean": 26.0377358491, "line_max": 75, "alpha_frac": 0.5921787709, "autogenerated": false, "ratio": 4.249258160237389, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5341436931137389, "avg_score": null, "num_lines": null }
"""Abstract adapter class.""" from __future__ import absolute_import from pysnmp import error as pysnmp_error from pysnmp.entity.rfc3413.oneliner.cmdgen import CommunityData, UsmUserData, \ UdpTransportTarget, CommandGenerator from snmp_orm.utils import str_to_oid from snmp_orm.adapters.base import AbstractAdapter, AbstractException class PySNMPError(AbstractException): pass class AbstractSession(object): def __init__(self, host, port=None): self.transportTarget = UdpTransportTarget((host, port)) self.authData = None self.generator = CommandGenerator() def format_varBinds(self, varBinds): return [(str_to_oid(oid), value) for oid, value in varBinds] def format_varBindTable(self, varBindTable): result = [] for varBinds in varBindTable: result.extend(self.format_varBinds(varBinds)) return result def handle_error(self, errorIndication, errorStatus, errorIndex, varBinds=None, varBindTable=None): if errorIndication: raise PySNMPError(errorIndication) elif errorStatus: variables = varBinds or varBindTable[-1] text = errorStatus.prettyPrint() position = errorIndex and variables[int(errorIndex) - 1] or '?' raise PySNMPError("%s at %s" % (text, position)) def get(self, *args): try: errorIndication, errorStatus, \ errorIndex, varBinds = self.generator.getCmd( self.authData, self.transportTarget, *args) except pysnmp_error.PySnmpError as e: # handle origin PySNMPError from pysnmp module. 
errorIndication = e errorStatus, errorIndex, varBinds = None, None, [] self.handle_error(errorIndication, errorStatus, errorIndex, varBinds) return self.format_varBinds(varBinds) def set(self, *args): errorIndication, errorStatus, \ errorIndex, varBinds = self.generator.setCmd( self.authData, self.transportTarget, *args) self.handle_error(errorIndication, errorStatus, errorIndex, varBinds) return self.format_varBinds(varBinds) def getnext(self, *args): errorIndication, errorStatus, errorIndex, \ varBindTable = self.generator.nextCmd( self.authData, self.transportTarget, *args) self.handle_error( errorIndication, errorStatus, errorIndex, None, varBindTable) return self.format_varBindTable(varBindTable) def getbulk(self, rows, *args): errorIndication, errorStatus, errorIndex, \ varBindTable = self.generator.bulkCmd( self.authData, self.transportTarget, 0, rows, *args) self.handle_error( errorIndication, errorStatus, errorIndex, None, varBindTable) return self.format_varBindTable(varBindTable) class Session(AbstractSession): def __init__(self, host, port, version, community): super(Session, self).__init__(host, port) self.authData = CommunityData( 'agent', community, None if version == 2 else 0) class UsmSession(AbstractSession): def __init__(self, host, port=None, sec_name=None, sec_level=None, auth_protocol=None, auth_passphrase=None, priv_protocol=None, priv_passphrase=None): super(UsmSession, self).__init__(host, port) self.authData = UsmUserData(sec_name, auth_passphrase, priv_passphrase) class Adapter(AbstractAdapter): def get_snmp_v2_session(self, host, port, version, community, **kwargs): if community is None: raise TypeError("community can`t be None") return Session(host, port, version, community) def get_snmp_v3_session(self, host, port, version, sec_name, sec_level, auth_protocol, auth_passphrase, priv_protocol, priv_passphrase, **kwargs): if sec_name is None: raise TypeError("sec_name can`t be None") if auth_passphrase is None: raise 
TypeError("auth_passphrase can`t be None") if priv_passphrase is None: raise TypeError("priv_passphrase can`t be None") return UsmSession(host, port, sec_name, sec_level, auth_protocol, auth_passphrase, priv_protocol, priv_passphrase)
{ "repo_name": "blackwithwhite666/snmp_orm", "path": "snmp_orm/adapters/pysnmp.py", "copies": "1", "size": "4445", "license": "mit", "hash": -8753919701852876000, "line_mean": 38.6875, "line_max": 79, "alpha_frac": 0.6380202475, "autogenerated": false, "ratio": 3.990125673249551, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5128145920749551, "avg_score": null, "num_lines": null }
"""Abstract adapter class.""" from __future__ import absolute_import import logging from pprint import pformat from functools import wraps from six import Iterator from pyasn1.type.univ import Null from snmp_orm.config import DEBUG from snmp_orm.settings import SnmpV2Settings, SnmpV3Settings from snmp_orm.utils import str_to_oid logger = logging.getLogger(__name__) class AbstractException(Exception): pass def log(f): @wraps(f) def inner_wrapper(self, *args): logger.debug("[%s] Call %s%s" % (self.host, f.__name__, pformat(args))) result = f(self, *args) logger.debug("[%s] %s return %s" % (self.host, f.__name__, pformat(result))) return result if DEBUG: return inner_wrapper else: return f class Walker(Iterator): """SNMP walker class""" def __init__(self, agent, baseoid, use_bulk=True, bulk_rows=None): self.baseoid = baseoid self.baseoid_len = len(baseoid) self.lastoid = baseoid self.agent = agent self.use_bulk = use_bulk self.bulk_rows = bulk_rows self.raise_stop = False def __iter__(self): return self def __next__(self): if self.raise_stop: raise StopIteration() if self.use_bulk: rows = self.agent.getbulk(self.bulk_rows, self.lastoid) else: rows = self.agent.getnext(self.lastoid) if not rows: raise StopIteration() if self.use_bulk: slice = 0 for oid, _ in reversed(rows): diff = self.baseoid_len - len(oid) if (diff == 0 and oid[:-1] == self.baseoid[:-1]) or \ (diff != 0 and oid[:diff] == self.baseoid): break else: slice += 1 if slice > 0: rows = rows[:0 - slice] self.raise_stop = True if not rows: raise StopIteration() self.lastoid = rows[-1][0] return rows class AbstractAdapter(object): def __init__(self, settings_read, settings_write=None): settings_write = settings_write or settings_read.__class__() assert settings_write.__class__ == settings_read.__class__ _settings_write = settings_write.__class__() _settings_write.update(settings_read) _settings_write.update(settings_write) # Create session for host if isinstance(settings_read, SnmpV3Settings): session_getter = 
self.get_snmp_v3_session elif isinstance(settings_read, SnmpV2Settings): session_getter = self.get_snmp_v2_session else: raise TypeError self.host = settings_read["host"] self.settings_read = settings_read self.settings_write = _settings_write self.session_read = session_getter(**settings_read.prepare_kwargs()) if settings_read == _settings_write: self.session_write = self.session_read else: self.session_write = session_getter(**_settings_write.prepare_kwargs()) def get_snmp_v2_session(self, host, port, version, community, **kwargs): raise NotImplementedError() def get_snmp_v3_session(self, host, port, version, sec_name=None, sec_level=None, auth_protocol=None, auth_passphrase=None, priv_protocol=None, priv_passphrase=None, **kwargs): raise NotImplementedError() @log def get(self, *args): """Return tuple of pairs: .. code-block:: python ((1, 3, 6, 1, 2, 1, 1, 1, 0), OctetString('DGS-3100-24 Gigabit stackable L2 Managed Switch')) """ return self.session_read.get(*map(str_to_oid, args)) def get_one(self, oid): """Return oid value.""" variables = self.get(oid) if variables: result = variables[0][1] if not isinstance(result, Null): return result return None @log def getnext(self, *args): """Return table: .. 
code-block:: python [((1, 3, 6, 1, 2, 1, 1, 1, 0), OctetString('DGS-3100-24 Gigabit stackable L2 Managed Switch')), ((1, 3, 6, 1, 2, 1, 1, 2, 0), ObjectIdentifier('1.3.6.1.4.1.171.10.94.1')), ((1, 3, 6, 1, 2, 1, 1, 3, 0), TimeTicks('512281800')), ((1, 3, 6, 1, 2, 1, 1, 4, 0), OctetString(''))] """ return self.session_read.getnext(*map(str_to_oid, args)) @log def getbulk(self, rows=None, *args): """Return same as getnext method, but use rows number.""" if rows is None: rows = self.settings_read["bulk_rows"] return self.session_read.getbulk(rows, *map(str_to_oid, args)) @log def set(self, *args): #TODO: set more than one values return self.session_write.set(args) def walk(self, oid): """Collect all rows in given OID.""" oid = str_to_oid(oid) result = [] walker = Walker(self, oid, use_bulk=self.settings_read["use_bulk"], bulk_rows=self.settings_read["bulk_rows"]) for rows in walker: result.extend(rows) return result
{ "repo_name": "blackwithwhite666/snmp_orm", "path": "snmp_orm/adapters/base.py", "copies": "1", "size": "5402", "license": "mit", "hash": 3936374002376128500, "line_mean": 29.6931818182, "line_max": 87, "alpha_frac": 0.5497963717, "autogenerated": false, "ratio": 3.7988748241912798, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4848671195891279, "avg_score": null, "num_lines": null }
"""Abstract API classes.""" import json JSON_RPC_VERSION = '2.0' class AbstractAPI: """Abstract interface of all children APIs. This is repsonsible for setting the right attributes that ``Request`` uses to build the request dict.""" def __init__(self): with open('params.json') as f: self._methods = json.load(f) def __getattr__(self, name): print('METHODS:', self._methods) if name in self._methods: return self._make_method(name) else: msg = '{} is not an available method on the {} API.' api_name = self.__class__.__name__ raise AttributeError(msg.format(name, api_name)) def _make_method(self, name): cls_name = self.__class__.__name__.lower() method_name = cls_name + '.' + name def method(**kwargs): self.params = kwargs self.method = method_name return self() # interactive/debugging method.__qualname__ = method_name setattr(self, name, method) return method class Request: """Object representing the request dict. This does not provide any API related functionality, it must be used together with ``AbstractAPI``.""" jsonrpc = JSON_RPC_VERSION method = None params = None id = 0 auth = None def __new__(cls, *args, **kwargs): req = super().__new__(cls) # each request has a unique id req.id = Request.id = Request.id + 1 return req def __call__(self): return { 'jsonrpc': self.jsonrpc, 'method': self.method, 'params': self.params, 'id': self.id, 'auth': self.auth} class API(AbstractAPI, Request): """Base class of all Zabbix APIs."""
{ "repo_name": "modulus-sa/pybbix", "path": "pybbix/api/__init__.py", "copies": "1", "size": "1817", "license": "mit", "hash": -578529627717113600, "line_mean": 24.2361111111, "line_max": 64, "alpha_frac": 0.557512383, "autogenerated": false, "ratio": 4.215777262180975, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5273289645180974, "avg_score": null, "num_lines": null }
"""Abstract API classes.""" JSON_RPC_VERSION = '2.0' class AbstractAPI: """Abstract interface of all children APIs. This is repsonsible for setting the right attributes that ``Request`` uses to build the request dict.""" methods = () def __init__(self): self._method_cache = {} def __getattr__(self, name): if name in self.methods: return self._method_cache.get(name) or self._make_method(name) else: msg = '{} is not an available method on the {} API.' api_name = self.__class__.__name__ raise AttributeError(msg.format(name, api_name)) def _make_method(self, name): cls_name = self.__class__.__name__.lower() method_name = cls_name + '.' + name def method(**kwargs): self.params = kwargs self.method = method_name return self() self._method_cache[name] = method # interactive/debugging method.__qualname__ = method_name return method class Request: """Object representing the request dict. This does not provide any API related functionality, it must be used together with ``AbstractAPI``.""" jsonrpc = JSON_RPC_VERSION method = None params = None id = 0 auth = None def __new__(cls, *args, **kwargs): req = super().__new__(cls) # each request has a unique id req.id = Request.id = Request.id + 1 return req def __call__(self): return { 'jsonrpc': self.jsonrpc, 'method': self.method, 'params': self.params, 'id': self.id, 'auth': self.auth } class API(AbstractAPI, Request): """Base class of all Zabbix APIs."""
{ "repo_name": "laerus/pybbix", "path": "pybbix/api/__init__.py", "copies": "1", "size": "1784", "license": "mit", "hash": -3548922086721326000, "line_mean": 23.7777777778, "line_max": 74, "alpha_frac": 0.5543721973, "autogenerated": false, "ratio": 4.217494089834515, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5271866287134516, "avg_score": null, "num_lines": null }
""" Abstract backends that are backed by simple JSON """ from .base import IAccessBackend, IMutableAccessBackend class IJsonAccessBackend(IAccessBackend): """ This backend reads the permissions from anything that can provide JSON data Notes ----- JSON should look like this:: { "users": { "user1": "hashed_password1", "user2": "hashed_password2", "user3": "hashed_password3", "user4": "hashed_password4", "user5": "hashed_password5", }, "groups": { "admins": [ "user1", "user2" ], "group1": [ "user3" ] }, "admins": [ "user1" ] "packages": { "mypackage": { "groups": { "group1": ["read', "write"], "group2": ["read"], "group3": [], }, "users": { "user1": ["read", "write"], "user2": ["read"], "user3": [], "user5": ["read"], } } } } """ _db = None @property def db(self): """Fetch JSON and cache it for future calls""" if self._db is None: self._db = self._get_db() for key in ["users", "groups", "packages", "pending_users"]: self._db.setdefault(key, {}) self._db.setdefault("admins", []) return self._db def _get_db(self): """ Actually fetch the remote json. This method should return an instance of a child class of IMutableJsonAccessDB. 
""" raise NotImplementedError def _get_password_hash(self, username): return self.db["users"].get(username) def groups(self, username=None): if not username: return list(self.db["groups"].keys()) ret_groups = [] groups = self.db["groups"] for group_name, users in groups.items(): if username in users: ret_groups.append(group_name) return ret_groups def group_members(self, group): return list(self.db["groups"].get(group, [])) def is_admin(self, username): return username in self.db["admins"] def group_permissions(self, package): result = {} package_data = self.db["packages"].get(package, {}) package_groups = package_data.get("groups", {}) for group, permissions in package_groups.items(): result[group] = permissions return result def user_permissions(self, package): result = {} package_data = self.db["packages"].get(package, {}) package_users = package_data.get("users", {}) for user, permissions in package_users.items(): result[user] = permissions return result def user_package_permissions(self, username): packages = [] for package_name, value in self.db["packages"].items(): package_users = value.get("users", {}) has_perms = username in package_users if has_perms: packages.append( {"package": package_name, "permissions": package_users[username]} ) return packages def group_package_permissions(self, group): packages = [] for package_name, value in self.db["packages"].items(): package_groups = value.get("groups", {}) has_perms = group in package_groups.keys() if has_perms: packages.append( { "package": package_name, "permissions": package_groups.get(group, []), } ) return packages def user_data(self, username=None): admins = self.db["admins"] if username: if username not in self.db["users"]: return None return { "username": username, "admin": username in admins, "groups": self.groups(username), } return [ {"username": username, "admin": username in admins} for username in self.db["users"] ] class IMutableJsonAccessBackend(IJsonAccessBackend, IMutableAccessBackend): """ 
This backend allows you to store all user and package permissions in a backend that is able to store a json file Notes ----- The format is the same as :class:`~pypicloud.access.base_json.IJsonAccessBackend`, but with the additional fields:: { "pending_users": { "user1": "hashed_password1", "user2": "hashed_password2" }, "allow_registration": true } """ mutable = True def _save(self): """Save the JSON to the backend""" raise NotImplementedError def _set_password_hash(self, username, password_hash): self.db["users"][username] = password_hash self._save() def allow_register(self): return self.db.get("allow_registration", False) def _register(self, username, password): self.db["pending_users"][username] = password self._save() def approve_user(self, username): password = self.db["pending_users"].pop(username, None) if password is not None: self.db["users"][username] = password self._save() def delete_user(self, username): self.db["pending_users"].pop(username, None) self.db["users"].pop(username, None) for package_name, value in self.db["packages"].items(): if "users" in value: value["users"].pop(username, None) for group_name, users in self.db["groups"].items(): try: users.remove(username) except ValueError: pass self._save() def pending_users(self): return list(self.db["pending_users"].keys()) def create_group(self, group): self.db["groups"][group] = [] self._save() def delete_group(self, group): self.db["groups"].pop(group, None) self._save() def edit_user_group(self, username, group, add): if add: self.db["groups"][group].append(username) else: self.db["groups"][group].remove(username) self._save() def _init_package(self, package): """ Make sure the root requested package and its child nodes exist in the database. 
""" self.db["packages"].setdefault(package, {}) self.db["packages"][package].setdefault("groups", {}) self.db["packages"][package].setdefault("users", {}) def edit_group_permission(self, package_name, group, perm, add): if perm != "read" and perm != "write": raise ValueError("Unrecognized permission '%s'" % perm) self._init_package(package_name) package = self.db["packages"][package_name] if group not in package["groups"]: package["groups"][group] = [] if add: group_perms = package["groups"][group] if perm not in group_perms: group_perms.append(perm) package["groups"][group] = group_perms else: package["groups"][group].remove(perm) if package["groups"][group] == []: package["groups"].pop(group) self._save() def edit_user_permission(self, package_name, username, perm, add): if perm != "read" and perm != "write": raise ValueError("Unrecognized permission '%s'" % perm) self._init_package(package_name) package = self.db["packages"][package_name] if username not in package["users"]: package["users"][username] = [] if add: user_perms = package["users"][username] if perm not in user_perms: user_perms.append(perm) package["users"][username] = user_perms else: try: package["users"][username].remove(perm) except ValueError: pass user_perms = package["users"][username] if user_perms == []: package["users"].pop(username) self._save() def set_user_admin(self, username, admin): if admin: self.db["admins"].append(username) else: self.db["admins"].remove(username) self._save() def set_allow_register(self, allow): self.db["allow_registration"] = allow self._save()
{ "repo_name": "stevearc/pypicloud", "path": "pypicloud/access/base_json.py", "copies": "1", "size": "8953", "license": "mit", "hash": -6314642195883206000, "line_mean": 30.8612099644, "line_max": 85, "alpha_frac": 0.5132357869, "autogenerated": false, "ratio": 4.501256913021619, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00004138045187453447, "num_lines": 281 }
"""Abstract base class and null boundary condition class for conveniently making compliant boundary condition classes for use in wepy. """ import sys import logging from copy import deepcopy from collections import defaultdict import random import numpy as np from wepy.walker import Walker class BoundaryConditions(object): """Abstract base class for conveniently making compliant boundary condition classes. Includes empty record group definitions and useful getters for those. """ # records of boundary condition changes (sporadic) BC_FIELDS = () """String names of fields produced in this record group. Boundary condition (BC) records are typically used to report on changes to the state of the BC object. Notes ----- These fields are not critical to the proper functioning of the rest of the wepy framework and can be modified freely. However, reporters specific to this boundary condition probably will make use of these records. """ BC_SHAPES = () """Numpy-style shapes of all fields produced in records. There should be the same number of elements as there are in the corresponding 'FIELDS' class constant. Each entry should either be: A. A tuple of ints that specify the shape of the field element array. B. Ellipsis, indicating that the field is variable length and limited to being a rank one array (e.g. (3,) or (1,)). C. None, indicating that the first instance of this field will not be known until runtime. Any field that is returned by a record producing method will automatically interpreted as None if not specified here. Note that the shapes must be tuple and not simple integers for rank-1 arrays. Option B will result in the special h5py datatype 'vlen' and should not be used for large datasets for efficiency reasons. """ BC_DTYPES = () """Specifies the numpy dtypes to be used for records. There should be the same number of elements as there are in the corresponding 'FIELDS' class constant. Each entry should either be: A. A `numpy.dtype` object. D. 
None, indicating that the first instance of this field will not be known until runtime. Any field that is returned by a record producing method will automatically interpreted as None if not specified here. """ BC_RECORD_FIELDS = () """Optional, names of fields to be selected for truncated representation of the record group. These entries should be strings that are previously contained in the 'FIELDS' class constant. While strictly no constraints on to which fields can be added here you should only choose those fields whose features could fit into a plaintext csv or similar format. """ # warping (sporadic) WARPING_FIELDS = ('walker_idx', 'target_idx', 'weight') """String names of fields produced in this record group. Warping records are typically used to report whenever a walker satisfied the boundary conditions and was warped and had its state changed. Warnings -------- Be careful when modifying these fields as they may be integrated with other wepy framework features. Namely recognition of discontinuous warping events for making contiguous trajectories from cloning and merging lineages. The behavior of whether or not a warping event is discontinuous is given by a `BoundaryCondition` class's `warping_discontinuity` which likely depends on the existence of particular fields. """ WARPING_SHAPES = ((1,), (1,), (1,)) """Numpy-style shapes of all fields produced in records. There should be the same number of elements as there are in the corresponding 'FIELDS' class constant. Each entry should either be: A. A tuple of ints that specify the shape of the field element array. B. Ellipsis, indicating that the field is variable length and limited to being a rank one array (e.g. (3,) or (1,)). C. None, indicating that the first instance of this field will not be known until runtime. Any field that is returned by a record producing method will automatically interpreted as None if not specified here. Note that the shapes must be tuple and not simple integers for rank-1 arrays. 
Option B will result in the special h5py datatype 'vlen' and should not be used for large datasets for efficiency reasons. """ WARPING_DTYPES = (np.int, np.int, np.float) """Specifies the numpy dtypes to be used for records. There should be the same number of elements as there are in the corresponding 'FIELDS' class constant. Each entry should either be: A. A `numpy.dtype` object. D. None, indicating that the first instance of this field will not be known until runtime. Any field that is returned by a record producing method will automatically interpreted as None if not specified here. """ WARPING_RECORD_FIELDS = ('walker_idx', 'target_idx', 'weight') """Optional, names of fields to be selected for truncated representation of the record group. These entries should be strings that are previously contained in the 'FIELDS' class constant. While strictly no constraints on to which fields can be added here you should only choose those fields whose features could fit into a plaintext csv or similar format. """ # progress towards the boundary conditions (continual) PROGRESS_FIELDS = () """String names of fields produced in this record group. Progress records are typically used to report on measures of walkers at each cycle. Notes ----- These fields are not critical to the proper functioning of the rest of the wepy framework and can be modified freely. However, reporters specific to this boundary condition probably will make use of these records. """ PROGRESS_SHAPES = () """Numpy-style shapes of all fields produced in records. There should be the same number of elements as there are in the corresponding 'FIELDS' class constant. Each entry should either be: A. A tuple of ints that specify the shape of the field element array. B. Ellipsis, indicating that the field is variable length and limited to being a rank one array (e.g. (3,) or (1,)). C. None, indicating that the first instance of this field will not be known until runtime. 
Any field that is returned by a record producing method will automatically interpreted as None if not specified here. Note that the shapes must be tuple and not simple integers for rank-1 arrays. Option B will result in the special h5py datatype 'vlen' and should not be used for large datasets for efficiency reasons. """ PROGRESS_DTYPES = () """Specifies the numpy dtypes to be used for records. There should be the same number of elements as there are in the corresponding 'FIELDS' class constant. Each entry should either be: A. A `numpy.dtype` object. D. None, indicating that the first instance of this field will not be known until runtime. Any field that is returned by a record producing method will automatically interpreted as None if not specified here. """ PROGRESS_RECORD_FIELDS = () """Optional, names of fields to be selected for truncated representation of the record group. These entries should be strings that are previously contained in the 'FIELDS' class constant. While strictly no constraints on to which fields can be added here you should only choose those fields whose features could fit into a plaintext csv or similar format. """ def __init__(self, **kwargs): """Null constructor accepts and ignores any key word arguments. """ pass def bc_field_names(self): """Access the class level FIELDS constant for this record group.""" return self.BC_FIELDS def bc_field_shapes(self): """Access the class level SHAPES constant for this record group.""" return self.BC_SHAPES def bc_field_dtypes(self): """Access the class level DTYPES constant for this record group.""" return self.BC_DTYPES def bc_fields(self): """Returns a list of zipped field specs. 
Returns ------- record_specs : list of tuple A list of the specs for each field, a spec is a tuple of type (field_name, shape_spec, dtype_spec) """ return list(zip(self.bc_field_names(), self.bc_field_shapes(), self.bc_field_dtypes())) def bc_record_field_names(self): """Access the class level RECORD_FIELDS constant for this record group.""" return self.BC_RECORD_FIELDS def warping_field_names(self): """Access the class level FIELDS constant for this record group.""" return self.WARPING_FIELDS def warping_field_shapes(self): """Access the class level SHAPES constant for this record group.""" return self.WARPING_SHAPES def warping_field_dtypes(self): """Access the class level DTYPES constant for this record group.""" return self.WARPING_DTYPES def warping_fields(self): """Returns a list of zipped field specs. Returns ------- record_specs : list of tuple A list of the specs for each field, a spec is a tuple of type (field_name, shape_spec, dtype_spec) """ return list(zip(self.warping_field_names(), self.warping_field_shapes(), self.warping_field_dtypes())) def warping_record_field_names(self): """Access the class level RECORD_FIELDS constant for this record group.""" return self.WARPING_RECORD_FIELDS def progress_field_names(self): """Access the class level FIELDS constant for this record group.""" return self.PROGRESS_FIELDS def progress_field_shapes(self): """Access the class level SHAPES constant for this record group.""" return self.PROGRESS_SHAPES def progress_field_dtypes(self): """Access the class level DTYPES constant for this record group.""" return self.PROGRESS_DTYPES def progress_fields(self): """Returns a list of zipped field specs. 
Returns ------- record_specs : list of tuple A list of the specs for each field, a spec is a tuple of type (field_name, shape_spec, dtype_spec) """ return list(zip(self.progress_field_names(), self.progress_field_shapes(), self.progress_field_dtypes())) def progress_record_field_names(self): """Access the class level RECORD_FIELDS constant for this record group.""" return self.PROGRESS_RECORD_FIELDS def warp_walkers(self, walkers, cycle): """Apply boundary condition logic to walkers. If walkers satisfy the boundary conditions then they will be 'warped' and have a corresponding state change take place. Each event recorded is returned as a single dictionary-style record in 'warp_data' list. These records correspond to the 'WARPING' record group. Additional data calculated on walkers may be returned in the single 'progress_data' dictionary-style record, which corresponds to the 'PROGRESS' record group. Any changes to the internal state of the boundary condition object (e.g. modification of parameters) should be recorded in at least one dictionary-style record in the 'bc_data' list. This corresponds to the 'BC' record group. Parameters ---------- walkers : list of walkers A list of objects implementing the Walker interface cycle : int The index of the cycle this is for. Used to generate proper records. Returns ------- new_walkers : list of walkers A list of objects implementing the Walker interface, that have had boundary condition logic applied. warp_data : list of dict of str : value A list of dictionary style records for each warping event that occured. bc_data : list of dict of str : value A list of dictionary style records for each boundary condition state change event record that occured. progress_data : dict of str : list of value Dictionary style progress records. The values should be lists corresponding to each walker. 
""" raise NotImplementedError @classmethod def warping_discontinuity(cls, warping_record): """Given a warping record returns either True for a discontiuity occured or False if a discontinuity did not occur. Parameters ---------- warping_record : tuple A tuple record of type 'WARPING' Returns ------- is_discontinuous : bool True if discontinuous warping record False if continuous. """ raise NotImplementedError class NoBC(BoundaryConditions): """Boundary conditions class that does nothing. You may use this class as a stub in order to have an boundary condition class. However, this is not necessary since boundary conditions are optional in the sim_manager anyhow. """ def warp_walkers(self, walkers, cycle): """Apply boundary condition logic to walkers, of which there is none. Simply returns all walkers provided with empty records data since there is nothing to do. Parameters ---------- walkers : list of walkers A list of objects implementing the Walker interface cycle : int The index of the cycle this is for. Used to generate proper records. Returns ------- new_walkers : list of walkers A list of objects implementing the Walker interface, that have had boundary condition logic applied. warp_data : list of dict of str : value A list of dictionary style records for each warping event that occured. bc_data : list of dict of str : value A list of dictionary style records for each boundary condition state change event record that occured. progress_data : dict of str : list of value Dictionary style progress records. The values should be lists corresponding to each walker. """ warp_data = [] bc_data = [] progress_data = {} return walkers, warp_data, bc_data, progress_data @classmethod def warping_discontinuity(cls, warping_record): # documented in superclass # always return false return False class RandomBC(BoundaryConditions): """Boundary conditions that randomly warps both continuously and discontinuously. Can be used with any system as it won't actually mutate states. 
""" # records of boundary condition changes (sporadic) BC_FIELDS = ('ping',) BC_SHAPES = ((1,),) BC_DTYPES = (np.int,) BC_RECORD_FIELDS = ('ping',) # warping fields are directly inherited # progress towards the boundary conditions (continual) PROGRESS_FIELDS = ('weight',) PROGRESS_SHAPES = (Ellipsis,) PROGRESS_DTYPES = (np.float,) PROGRESS_RECORD_FIELDS = ('weight',) DISCONTINUITY_TARGET_IDXS = (0,) def warp_walkers(self, walkers, cycle): ## warping walkers # just return the same walkers new_walkers = deepcopy(walkers) ## warping data warp_data = [] # generate warping data: 50% of the time generate a warping # event, 25% is discontinuous (target 0), and 25% is # continuous (target 1) for walker_idx, walker in enumerate(walkers): # warping event? if random.random() >= 0.5: # discontinuous? if random.random() >= 0.5: warp_record = { 'walker_idx' : np.array([walker_idx]), 'target_idx' : np.array([0]), 'weight' : np.array([walker.weight]), } warp_data.append(warp_record) # continuous else: warp_record = { 'walker_idx' : np.array([walker_idx]), 'target_idx' : np.array([1]), 'weight' : np.array([walker.weight]), } warp_data.append(warp_record) ## BC data bc_data = [] # choose whether to generate a bc record if random.random() >= 0.5: bc_data.append({'ping' : np.array([1])}) ## Progress data # just set the walker progress to be its weight so there is a # number there progress_data = {'weight' : [walker.weight for walker in walkers] } return new_walkers, warp_data, bc_data, progress_data
{ "repo_name": "ADicksonLab/wepy", "path": "src/wepy/boundary_conditions/boundary.py", "copies": "1", "size": "17437", "license": "mit", "hash": 2876335045126917600, "line_mean": 30.5316455696, "line_max": 88, "alpha_frac": 0.646441475, "autogenerated": false, "ratio": 4.671042057326547, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.003215945725243327, "num_lines": 553 }
"""Abstract base classe for vCard Properties. """ import warnings from abc import abstractmethod, ABCMeta from collections.abc import Sequence from ..param import from_string as param_from_string from ..tools.base import CRLF from ..tools.lexer import escape_value, split_list __all__ = ["Prop", "StringProp", "TupleProp"] props = dict() class PropRegisterType(ABCMeta, type): """Customized metaclass for Properties. New properties are added to the props dict to allows for directory access to these object. """ def __init__(cls, name, bases, nmspc): super().__init__(name, bases, nmspc) if cls.name is not None: props[cls.name] = cls class Prop(object, metaclass=PropRegisterType): """Base class for Vcard properties. A property has a unique name, a list of 0 or more properties, and a value. URL: http://www.imc.org/pdi/vcard-21.txt """ def __and__(p1, p2): """Compare types.""" if isinstance(p2, Prop): return p1.name == p2.name elif isinstance(p2, Sequence ): return p1.name == p2[1] else: raise TypeError("Cannot compare with object.") def __eq__(p1, p2): return (p1 & p2 and p1.groups == p2.groups and p1.params == p2.params and p1.value == p2.value) def __init__(self, value=None, params=None, groups=None): groups = list() if groups is None else groups params = list() if params is None else params self.groups = groups self._check_params(params) self.params = params self.value = value authorized_params = ["ENCODING"] @classmethod def _check_params(cls, params): for param in params: if param.name not in cls.authorized_params: raise ValueError("unauthorized param: {!s}".format(param)) def format(self): """Format property as vCard specification.""" groups = [g + "." 
for g in self.groups] params = [";" + p.format() for p in self.params] groups_name_params = "".join(groups) + self.name + "".join(params) return groups_name_params + ":" + self.format_value() + CRLF @abstractmethod def format_value(self): pass @classmethod def from_tuple(cls, tpl): groups, name, params, value = tpl params = [param_from_string(p) for p in params] value = cls.from_tuple_value(value) return cls(value, params, groups) @classmethod @abstractmethod def from_tuple_value(cls, value): pass name = None @property def value(self): return self._value @value.setter @abstractmethod def value(self, value): pass class ListProp(Prop): @Prop.value.setter def value(self, value): self._value = list(value) def __init__(self, value=None, params=None, groups=None): value = list if value is None else value super().__init__(value, params, groups) def format_value(self): value = [escape_value(p) for p in self.value] return self.sep.join(value) @classmethod def from_tuple_value(cls, value): return split_list(value, cls.sep) sep = None class StringProp(Prop): def __init__(self, value="", params=None, groups=None): super().__init__(value, params, groups) def format_value(self): return escape_value(self.value) @classmethod def from_tuple_value(cls, value): return value @Prop.value.setter def value(self, value): self._value = str(value) class TupleProp(Prop): @Prop.value.setter def value(self, value): self._value = tuple(value) def __init__(self, value=None, params=None, groups=None): value = list if value is None else value super().__init__(value, params, groups) def format_value(self): value = [escape_value(p) for p in self.value] return self.sep.join(value) @classmethod def from_tuple_value(cls, value): return split_list(value, cls.sep) sep = None
{ "repo_name": "Jorispilot/pycard", "path": "pycard/prop/base.py", "copies": "1", "size": "4158", "license": "mit", "hash": 581164346361991600, "line_mean": 24.6666666667, "line_max": 74, "alpha_frac": 0.6017316017, "autogenerated": false, "ratio": 3.8357933579335795, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9935038677054704, "avg_score": 0.0004972565157750343, "num_lines": 162 }
"""Abstract Base Classes (ABCs) for OpenAPI renderers.""" import abc from docutils import nodes from docutils.statemachine import ViewList from sphinx.util.nodes import nested_parse_with_titles class Renderer(metaclass=abc.ABCMeta): """Base class for OpenAPI renderers.""" def __init__(self, state, options): self._state = state self._options = options @property @abc.abstractmethod def option_spec(self): """Renderer options and their converting functions.""" @abc.abstractmethod def render(self, spec): """Render a given OpenAPI spec.""" class RestructuredTextRenderer(Renderer): """Base class for reStructuredText OpenAPI renderers. Docutils DOM manipulation is quite a tricky task that requires passing dozen arguments around. Because of that a lot of Sphinx extensions instead of constructing DOM nodes directly produce and parse reStructuredText. This Sphinx extension is not an exception, and that's why this class exists. It's a convenient extension of :class:`Renderer` that converts produced markup text into docutils DOM elements. """ def render(self, spec): viewlist = ViewList() for line in self.render_restructuredtext_markup(spec): viewlist.append(line, "<openapi>") node = nodes.section() node.document = self._state.document nested_parse_with_titles(self._state, viewlist, node) return node.children
{ "repo_name": "ikalnytskyi/sphinxcontrib-openapi", "path": "sphinxcontrib/openapi/renderers/abc.py", "copies": "1", "size": "1485", "license": "bsd-2-clause", "hash": 8178030508618738000, "line_mean": 31.2826086957, "line_max": 78, "alpha_frac": 0.698989899, "autogenerated": false, "ratio": 4.527439024390244, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 46 }
"""Abstract base classes for common Model functionality""" from datetime import datetime from django.db import models class LicensedModel(models.Model): """Abstract base class for models with a license""" # Class "constants" # For now, we don't set a default value on the field so we # can differentiate between the state where the user has # selected the license and when they haven't. If we did set a # default, this would be it DEFAULT_LICENSE = 'CC BY-NC-SA' LICENSES = ( ('CC BY-NC-SA', u'Attribution-NonCommercial-ShareAlike Creative Commons'), ('CC BY-NC', u'Attribution-NonCommercial Creative Commons'), ('CC BY-NC-ND', u'Attribution-NonCommercial-NoDerivs Creative Commons'), ('CC BY', u'Attribution Creative Commons'), ('CC BY-SA', u'Attribution-ShareAlike Creative Commons'), ('CC BY-ND', u'Attribution-NoDerivs Creative Commons'), ('none', u'None (All rights reserved)') ) # Fields license = models.CharField(max_length=25, choices=LICENSES, blank=True) class Meta: """Model metadata options""" abstract = True def get_license_name(self, code): """Convert a license's code to its full name Arguments: code -- String representing the first element of a tuple in LICENSES. This is what is stored in the database for a LicensedModel. 
""" licenses = dict(self.LICENSES) return licenses[code] def license_name(self): """ Convert the license code to a more human-readable version """ return self.get_license_name(self.license) class PublishedModel(models.Model): """Abstract base class for models with publication information""" # Class-level "constants" STATUS = ( (u'pending', u'pending'), (u'draft', u'draft'), (u'staged', u'staged'), (u'published', u'published'), ) DEFAULT_STATUS = u'draft' # Fields status = models.CharField(max_length=10, choices=STATUS, default=DEFAULT_STATUS) published = models.DateTimeField(blank=True, null=True) class Meta: """Model metadata options""" abstract = True @property def never_published(self): """Check if the model has ever been published""" return self.published == None # Signal handlers def set_date_on_published(sender, instance, **kwargs): """Set the published date of a story on status change For models inheriting from PublishedModel. Should be connected to the pre_save signal. """ try: old_instance = sender.objects.get(pk=instance.pk) except sender.DoesNotExist: # Object is new, so field won't have changed. # Just check status. 
if instance.status == 'published': instance.published = datetime.now() else: if (instance.status == 'published' and old_instance.status != 'published'): instance.published = datetime.now() class TimestampedModel(models.Model): """ Abstract base class that provides created and last edited fields """ created = models.DateTimeField(auto_now_add=True) last_edited = models.DateTimeField(auto_now=True) class Meta: """Model metadata options""" abstract = True class WeightedModel(models.Model): """ Abstract base class for models with a field that defines the relative "weight" of instances when sorting """ # Fields weight = models.IntegerField(default=0) class Meta: """Model metadata options""" abstract = True def get_weight(self): """ Calculate a new value for the weight fieldi This should be implemented in subclasses that inherit from weighted model. """ raise NotImplementedError
{ "repo_name": "denverfoundation/storybase", "path": "apps/storybase/models/base.py", "copies": "1", "size": "3931", "license": "mit", "hash": -4746978225511716000, "line_mean": 29.7109375, "line_max": 81, "alpha_frac": 0.6291020097, "autogenerated": false, "ratio": 4.300875273522976, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5429977283222975, "avg_score": null, "num_lines": null }
"""Abstract base classes for Kalman filtering / smoothing.""" import abc import collections import re import attrdict import numpy as np import numpy.ma as ma import numpy.linalg from .. import utils class DTFilter(metaclass=abc.ABCMeta): """Discrete-time Kalman filter/smoother abstract base class.""" def __init__(self, model, x=None, Px=None, **options): """Create a discrete-time Kalman filter. Parameters ---------- model : The underlying system model. """ self.model = model """The underlying system model.""" self.x = model.x0() if x is None else np.asarray(x) """State vector mean.""" self.Px = model.Px0() if Px is None else np.asarray(Px) """State vector covariance.""" self.k = options.get('k', 0) """Time index.""" self.L = options.get('L', 0.0) """Measurement log-likelihood.""" nq = getattr(model, 'nq', 0) nx = model.nx base_shape = self.x.shape[:-1] self.base_shape = base_shape """Base shape of broadcasting.""" self.dL_dq = options.get('dL_dq', np.zeros(base_shape + (nq,))) """Measurement log-likelihood derivative.""" self.d2L_dq2 = options.get('dL_dq', np.zeros(base_shape + (nq, nq))) """Measurement log-likelihood derivative.""" self.dx_dq = self._get_initial('dx_dq', options, (nq, nx)) """State vector derivative.""" self.dPx_dq = self._get_initial('dPx_dq', options, (nq, nx, nx)) """State vector covariance derivative.""" self.d2x_dq2 = self._get_initial('d2x_dq2', options, (nq, nq, nx)) """State vector derivative.""" self.d2Px_dq2 = self._get_initial('d2Px_dq2', options, (nq, nq, nx, nx)) """State vector covariance derivative.""" def _get_initial(self, key, options, shape): try: return np.asarray(options[key]) except KeyError: try: attr = key.replace('x', 'x0') return getattr(self.model, attr)() except AttributeError: return np.zeros(self.base_shape + shape) @abc.abstractmethod def predict(self): """Predict the state distribution at a next time sample.""" raise NotImplementedError("Pure abstract method.") @abc.abstractmethod def correct(self, y): """Correct 
the state distribution, given the measurement vector.""" raise NotImplementedError("Pure abstract method.") def smoother_correction(self, xpred, Pxpred, Pxf, xsmooth, Pxsmooth): PxIpred = np.linalg.inv(Pxpred) K = np.einsum('...ij,...jk', Pxf, PxIpred) e = xsmooth - xpred x_inc = np.einsum('...ij,...j', K, e) Px_inc = np.einsum('...ij,...jk,...lk', K, Pxsmooth - Pxpred, K) return x_inc, Px_inc def filter(self, y): y = np.asanyarray(y) N = len(y) x = np.zeros((N,) + self.x.shape) Px = np.zeros((N,) + self.x.shape + (self.model.nx,)) for k in range(N): x[k], Px[k] = self.correct(y[k]) if k < N - 1: self.predict() return x, Px def smooth(self, y): y = np.asanyarray(y) N = len(y) x = np.zeros((N,) + np.shape(self.x)) xpred = np.zeros_like(x) Px = np.zeros((N,) + np.shape(self.x) + (self.model.nx,)) Pxpred = np.zeros_like(Px) Pxfpred = np.zeros((N - 1,) + np.shape(self.x) + (self.model.nx,)) xpred[0] = self.x Pxpred[0] = self.Px for k in range(N): x[k], Px[k] = self.correct(y[k]) if k < N - 1: xpred[k+1], Pxpred[k+1] = self.predict() Pxfpred[k] = self.prediction_crosscov() for k in reversed(range(1, N)): x_inc, Px_inc = self.smoother_correction( xpred[k], Pxpred[k], Pxfpred[k-1], x[k], Px[k] ) x[k - 1] += x_inc Px[k - 1] += Px_inc return x, Px def pem_merit(self, y): y = np.asanyarray(y) N = len(y) for k in range(N): self.correct(y[k]) self.update_likelihood() if k < N - 1: self.predict() return self.L def pem_gradient(self, y): y = np.asanyarray(y) N = len(y) for k in range(N): self.correct(y[k]) self.correction_diff() self.update_likelihood() self.likelihood_diff() if k < N - 1: self.predict() self.prediction_diff() return self.dL_dq def pem_hessian(self, y): y = np.asanyarray(y) N = len(y) for k in range(N): self.correct(y[k]) self.correction_diff() self.correction_diff2() self.update_likelihood() self.likelihood_diff() self.likelihood_diff2() if k < N - 1: self.predict() self.prediction_diff() self.prediction_diff2() return self.d2L_dq2
{ "repo_name": "dimasad/qwfilter", "path": "ceacoest/kalman/base.py", "copies": "2", "size": "5379", "license": "mit", "hash": 3304569071005492000, "line_mean": 29.5625, "line_max": 80, "alpha_frac": 0.4995352296, "autogenerated": false, "ratio": 3.5598941098610193, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9953018003883016, "avg_score": 0.02128226711560045, "num_lines": 176 }
"""Abstract base classes for kernel client channels""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import abc from IPython.utils.py3compat import with_metaclass class ChannelABC(with_metaclass(abc.ABCMeta, object)): """A base class for all channel ABCs.""" @abc.abstractmethod def start(self): pass @abc.abstractmethod def stop(self): pass @abc.abstractmethod def is_alive(self): pass class ShellChannelABC(ChannelABC): """ShellChannel ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.channels.ShellChannel` """ @abc.abstractproperty def allow_stdin(self): pass @abc.abstractmethod def execute(self, code, silent=False, store_history=True, user_expressions=None, allow_stdin=None): pass @abc.abstractmethod def complete(self, text, line, cursor_pos, block=None): pass @abc.abstractmethod def inspect(self, oname, detail_level=0): pass @abc.abstractmethod def history(self, raw=True, output=False, hist_access_type='range', **kwargs): pass @abc.abstractmethod def kernel_info(self): pass @abc.abstractmethod def shutdown(self, restart=False): pass class IOPubChannelABC(ChannelABC): """IOPubChannel ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.channels.IOPubChannel` """ @abc.abstractmethod def flush(self, timeout=1.0): pass class StdInChannelABC(ChannelABC): """StdInChannel ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.channels.StdInChannel` """ @abc.abstractmethod def input(self, string): pass class HBChannelABC(ChannelABC): """HBChannel ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.channels.HBChannel` """ @abc.abstractproperty def time_to_dead(self): pass @abc.abstractmethod def pause(self): pass @abc.abstractmethod def unpause(self): pass @abc.abstractmethod def is_beating(self): pass
{ "repo_name": "mattvonrocketstein/smash", "path": "smashlib/ipy3x/kernel/channelsabc.py", "copies": "1", "size": "2319", "license": "mit", "hash": -752498456167088000, "line_mean": 18.6525423729, "line_max": 82, "alpha_frac": 0.6567485985, "autogenerated": false, "ratio": 4.104424778761062, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5261173377261061, "avg_score": null, "num_lines": null }
"""Abstract base classes for kernel client channels""" #----------------------------------------------------------------------------- # Copyright (C) 2013 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- import abc from IPython.utils.py3compat import with_metaclass class ChannelABC(with_metaclass(abc.ABCMeta, object)): """A base class for all channel ABCs.""" @abc.abstractmethod def start(self): pass @abc.abstractmethod def stop(self): pass @abc.abstractmethod def is_alive(self): pass class ShellChannelABC(ChannelABC): """ShellChannel ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.channels.ShellChannel` """ @abc.abstractproperty def allow_stdin(self): pass @abc.abstractmethod def execute(self, code, silent=False, store_history=True, user_variables=None, user_expressions=None, allow_stdin=None): pass @abc.abstractmethod def complete(self, text, line, cursor_pos, block=None): pass @abc.abstractmethod def object_info(self, oname, detail_level=0): pass @abc.abstractmethod def history(self, raw=True, output=False, hist_access_type='range', **kwargs): pass @abc.abstractmethod def kernel_info(self): pass @abc.abstractmethod def shutdown(self, restart=False): pass class IOPubChannelABC(ChannelABC): """IOPubChannel ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.channels.IOPubChannel` """ @abc.abstractmethod def flush(self, timeout=1.0): pass class StdInChannelABC(ChannelABC): """StdInChannel ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.channels.StdInChannel` """ @abc.abstractmethod def input(self, string): pass class HBChannelABC(ChannelABC): """HBChannel ABC. 
The docstrings for this class can be found in the base implementation: `IPython.kernel.channels.HBChannel` """ @abc.abstractproperty def time_to_dead(self): pass @abc.abstractmethod def pause(self): pass @abc.abstractmethod def unpause(self): pass @abc.abstractmethod def is_beating(self): pass
{ "repo_name": "Lightmatter/django-inlineformfield", "path": ".tox/py27/lib/python2.7/site-packages/IPython/kernel/channelsabc.py", "copies": "7", "size": "2584", "license": "mit", "hash": 8081757743819838000, "line_mean": 21.0854700855, "line_max": 82, "alpha_frac": 0.621130031, "autogenerated": false, "ratio": 4.292358803986711, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00031935596980587065, "num_lines": 117 }
"""Abstract base classes for kernel manager and channels.""" #----------------------------------------------------------------------------- # Copyright (C) 2013 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports. import abc #----------------------------------------------------------------------------- # Channels #----------------------------------------------------------------------------- class ChannelABC(object): """A base class for all channel ABCs.""" __metaclass__ = abc.ABCMeta @abc.abstractmethod def start(self): pass @abc.abstractmethod def stop(self): pass @abc.abstractmethod def is_alive(self): pass class ShellChannelABC(ChannelABC): """ShellChannel ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.kernelmanager.ShellChannel` """ @abc.abstractproperty def allow_stdin(self): pass @abc.abstractmethod def execute(self, code, silent=False, store_history=True, user_variables=None, user_expressions=None, allow_stdin=None): pass @abc.abstractmethod def complete(self, text, line, cursor_pos, block=None): pass @abc.abstractmethod def object_info(self, oname, detail_level=0): pass @abc.abstractmethod def history(self, raw=True, output=False, hist_access_type='range', **kwargs): pass @abc.abstractmethod def kernel_info(self): pass @abc.abstractmethod def shutdown(self, restart=False): pass class IOPubChannelABC(ChannelABC): """IOPubChannel ABC. 
The docstrings for this class can be found in the base implementation: `IPython.kernel.kernelmanager.IOPubChannel` """ @abc.abstractmethod def flush(self, timeout=1.0): pass class StdInChannelABC(ChannelABC): """StdInChannel ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.kernelmanager.StdInChannel` """ @abc.abstractmethod def input(self, string): pass class HBChannelABC(ChannelABC): """HBChannel ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.kernelmanager.HBChannel` """ @abc.abstractproperty def time_to_dead(self): pass @abc.abstractmethod def pause(self): pass @abc.abstractmethod def unpause(self): pass @abc.abstractmethod def is_beating(self): pass #----------------------------------------------------------------------------- # Main kernel manager class #----------------------------------------------------------------------------- class KernelManagerABC(object): """KernelManager ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.kernelmanager.KernelManager` """ __metaclass__ = abc.ABCMeta @abc.abstractproperty def kernel(self): pass @abc.abstractproperty def shell_channel_class(self): pass @abc.abstractproperty def iopub_channel_class(self): pass @abc.abstractproperty def hb_channel_class(self): pass @abc.abstractproperty def stdin_channel_class(self): pass #-------------------------------------------------------------------------- # Channel management methods #-------------------------------------------------------------------------- @abc.abstractmethod def start_channels(self, shell=True, iopub=True, stdin=True, hb=True): pass @abc.abstractmethod def stop_channels(self): pass @abc.abstractproperty def channels_running(self): pass @abc.abstractproperty def shell_channel(self): pass @abc.abstractproperty def iopub_channel(self): pass @abc.abstractproperty def stdin_channel(self): pass @abc.abstractproperty def hb_channel(self): pass 
#-------------------------------------------------------------------------- # Kernel management #-------------------------------------------------------------------------- @abc.abstractmethod def start_kernel(self, **kw): pass @abc.abstractmethod def shutdown_kernel(self, now=False, restart=False): pass @abc.abstractmethod def restart_kernel(self, now=False, **kw): pass @abc.abstractproperty def has_kernel(self): pass @abc.abstractmethod def interrupt_kernel(self): pass @abc.abstractmethod def signal_kernel(self, signum): pass @abc.abstractmethod def is_alive(self): pass
{ "repo_name": "noslenfa/tdjangorest", "path": "uw/lib/python2.7/site-packages/IPython/kernel/managerabc.py", "copies": "2", "size": "5131", "license": "apache-2.0", "hash": 6771036148182945000, "line_mean": 21.8044444444, "line_max": 82, "alpha_frac": 0.5264081076, "autogenerated": false, "ratio": 4.895992366412214, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6422400474012214, "avg_score": null, "num_lines": null }
"""Abstract base classes for laying out a 3-tier software architecture.""" import abc class DataTier(metaclass=abc.ABCMeta): """Data tier - controls storage and retrieval of data.""" @abc.abstractmethod def __init__(self, data_store): """ Make a new DataTier object. `data_store` is the actual storage for the object to use. """ self.data_store = data_store @abc.abstractmethod def store(self, key, data): """Store some data.""" pass @abc.abstractmethod def retrieve(self, key): """Retrieve some data.""" pass class LogicTier(metaclass=abc.ABCMeta): """Business logic tier - where the data processing is.""" def __init__(self, data_tier): """ Make a LogicTier object. `data_tier` is the DataTier object to base the new object on. """ self.data_tier = data_tier @abc.abstractmethod def process_and_load(self, key, func=lambda x: x): """Process the data at a given key.""" return func(self.data_tier.retrieve(key)) @abc.abstractmethod def process_and_store(self, key, data, func=lambda x: x): """Process the `data` parameter with `func`, and store it at `key`.""" self.data_tier.store(key, func(data)) class PresentationTier(metaclass=abc.ABCMeta): """Presentation tier - the user-facing stuff.""" @abc.abstractmethod def __init__(self): """Make a PresentationTier object.""" self.logic_tier = LogicTier(DataTier(None)) @abc.abstractmethod def interact(self): """Interact with the user once.""" string = input('> ') tokens = string.split() if tokens[0] == 'load' and len(tokens) == 2: print('data at {} is {}'.format( repr(tokens[1]), repr(self.logic_tier.process_and_load(tokens[1])))) elif tokens[0] == 'store' and len(tokens) == 3: self.logic_tier.process_and_store(tokens[1], tokens[2]) print('datum {} stored at {}'.format(repr(tokens[2]), repr(tokens[1]))) else: print('invalid cmd line')
{ "repo_name": "jmanuel1/patterns", "path": "threetier.py", "copies": "1", "size": "2223", "license": "mit", "hash": -4652129841094411000, "line_mean": 27.8701298701, "line_max": 78, "alpha_frac": 0.5735492578, "autogenerated": false, "ratio": 3.969642857142857, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5043192114942857, "avg_score": null, "num_lines": null }
"""Abstract Base classes implementing the Runner interface. Runner Interface ---------------- All a runner needs to implement is the 'run_segment' method which should accept a walker and a spec for the length of the segment to run (e.g. number of dynamics steps). Additionally, any number of optional key word arguments should be given. As a matter of convention, classes accessory to a runner (such as State, Walker, Worker, etc.) should also be put in the same module as the runner. See the openmm.py module for an example. """ from eliot import log_call, start_action class Runner(object): """Abstract base class for the Runner interface.""" @log_call(include_args=[], include_result=False) def pre_cycle(self, **kwargs): """Perform pre-cycle behavior. run_segment will be called for each walker so this allows you to perform changes of state on a per-cycle basis. Parameters ---------- kwargs : key-word arguments Key-value pairs to be interpreted by each runner implementation. """ # by default just pass since subclasses need not implement this pass @log_call(include_args=[], include_result=False) def post_cycle(self, **kwargs): """Perform post-cycle behavior. run_segment will be called for each walker so this allows you to perform changes of state on a per-cycle basis. Parameters ---------- kwargs : key-word arguments Key-value pairs to be interpreted by each runner implementation. """ # by default just pass since subclasses need not implement this pass @log_call(include_args=['segment_length'], include_result=False) def run_segment(self, walker, segment_length, **kwargs): """Run dynamics for the walker. Parameters ---------- walker : object implementing the Walker interface The walker for which dynamics will be propagated. segment_length : int or float The numerical value that specifies how much dynamics are to be run. Returns ------- new_walker : object implementing the Walker interface Walker after dynamics was run, only the state should be modified. 
""" raise NotImplementedError class NoRunner(Runner): """Stub Runner that just returns the walkers back with the same state. May be useful for testing. """ def run_segment(self, walker, segment_length, **kwargs): # documented in superclass return walker
{ "repo_name": "ADicksonLab/wepy", "path": "src/wepy/runners/runner.py", "copies": "1", "size": "2645", "license": "mit", "hash": -3784388379808332300, "line_mean": 28.3888888889, "line_max": 79, "alpha_frac": 0.6415879017, "autogenerated": false, "ratio": 4.632224168126094, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.000968013468013468, "num_lines": 90 }
"""Abstract base classes.""" from functools import wraps class BaseModule: """Abstract base class for API modules.""" def __init__(self, pa): """Construct this object.""" self.pa = pa def _preprocess(self, args): """Apply common transformations to API call arguments.""" def preprocess_dates(args): """Combine date and end_date into a range.""" if 'date' in args: if args.get('period') == 'range' and 'end_date' in args: args['date'] = '{},{}'.format(args['date'], args['end_date']) return args def preprocess_bools(args): """Convert all booleans to integers.""" for arg in args: if type(args[arg]) == bool: args[arg] = int(args[arg]) return args for name, value in locals().items(): if name.startswith('preprocess_') and callable(value): args = value(args) return args def api_method(func): """Decorator for all API call methods.""" @wraps(func) def decorator(self, return_request_args=False, *args, **kwargs): request_args = func(self, *args, **kwargs) request_args.update({ 'method': '{module}.{method}'.format( module=self.__class__.__name__, method=func.__name__)}) request_args = self._preprocess(request_args) if return_request_args: return request_args else: return self.pa.request(**request_args) return decorator
{ "repo_name": "antsar/piwik-api", "path": "piwik_api/base.py", "copies": "1", "size": "1646", "license": "mit", "hash": 3787597691753938400, "line_mean": 31.92, "line_max": 72, "alpha_frac": 0.5218712029, "autogenerated": false, "ratio": 4.534435261707989, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.555630646460799, "avg_score": null, "num_lines": null }
"""Abstract base classes. These are necessary to avoid circular imports between core.py and fields.py. .. warning:: This module is treated as private API. Users should not need to use this module directly. """ import typing class FieldABC: """Abstract base class from which all Field classes inherit.""" parent = None name = None root = None def serialize(self, attr, obj, accessor=None): raise NotImplementedError def deserialize(self, value): raise NotImplementedError def _serialize(self, value, attr, obj, **kwargs): raise NotImplementedError def _deserialize(self, value, attr, data, **kwargs): raise NotImplementedError class SchemaABC: """Abstract base class from which all Schemas inherit.""" def dump(self, obj, *, many: typing.Optional[bool] = None): raise NotImplementedError def dumps(self, obj, *, many: typing.Optional[bool] = None): raise NotImplementedError def load( self, data, *, many: typing.Optional[bool] = None, partial=None, unknown=None ): raise NotImplementedError def loads( self, json_data, *, many: typing.Optional[bool] = None, partial=None, unknown=None, **kwargs ): raise NotImplementedError
{ "repo_name": "marshmallow-code/marshmallow", "path": "src/marshmallow/base.py", "copies": "1", "size": "1336", "license": "mit", "hash": 4991475370808876000, "line_mean": 22.8571428571, "line_max": 85, "alpha_frac": 0.6407185629, "autogenerated": false, "ratio": 4.59106529209622, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00020764119601328904, "num_lines": 56 }
"""Abstract base classes related to import.""" from . import _bootstrap from . import _bootstrap_external from . import machinery try: import _frozen_importlib except ImportError as exc: if exc.name != '_frozen_importlib': raise _frozen_importlib = None try: import _frozen_importlib_external except ImportError as exc: _frozen_importlib_external = _bootstrap_external import abc def _register(abstract_cls, *classes): for cls in classes: abstract_cls.register(cls) if _frozen_importlib is not None: try: frozen_cls = getattr(_frozen_importlib, cls.__name__) except AttributeError: frozen_cls = getattr(_frozen_importlib_external, cls.__name__) abstract_cls.register(frozen_cls) class Finder(metaclass=abc.ABCMeta): """Legacy abstract base class for import finders. It may be subclassed for compatibility with legacy third party reimplementations of the import system. Otherwise, finder implementations should derive from the more specific MetaPathFinder or PathEntryFinder ABCs. """ @abc.abstractmethod def find_module(self, fullname, path=None): """An abstract method that should find a module. The fullname is a str and the optional path is a str or None. Returns a Loader object or None. """ class MetaPathFinder(Finder): """Abstract base class for import finders on sys.meta_path.""" # We don't define find_spec() here since that would break # hasattr checks we do to support backward compatibility. def find_module(self, fullname, path): """Return a loader for the module. If no module is found, return None. The fullname is a str and the path is a list of strings or None. This method is deprecated in favor of finder.find_spec(). If find_spec() exists then backwards-compatible functionality is provided for this method. """ if not hasattr(self, 'find_spec'): return None found = self.find_spec(fullname, path) return found.loader if found is not None else None def invalidate_caches(self): """An optional method for clearing the finder's cache, if any. 
This method is used by importlib.invalidate_caches(). """ _register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter, machinery.PathFinder, machinery.WindowsRegistryFinder) class PathEntryFinder(Finder): """Abstract base class for path entry finders used by PathFinder.""" # We don't define find_spec() here since that would break # hasattr checks we do to support backward compatibility. def find_loader(self, fullname): """Return (loader, namespace portion) for the path entry. The fullname is a str. The namespace portion is a sequence of path entries contributing to part of a namespace package. The sequence may be empty. If loader is not None, the portion will be ignored. The portion will be discarded if another path entry finder locates the module as a normal module or package. This method is deprecated in favor of finder.find_spec(). If find_spec() is provided than backwards-compatible functionality is provided. """ if not hasattr(self, 'find_spec'): return None, [] found = self.find_spec(fullname) if found is not None: if not found.submodule_search_locations: portions = [] else: portions = found.submodule_search_locations return found.loader, portions else: return None, [] find_module = _bootstrap_external._find_module_shim def invalidate_caches(self): """An optional method for clearing the finder's cache, if any. This method is used by PathFinder.invalidate_caches(). """ _register(PathEntryFinder, machinery.FileFinder) class Loader(metaclass=abc.ABCMeta): """Abstract base class for import loaders.""" def create_module(self, spec): """Return a module to initialize and into which to load. This method should raise ImportError if anything prevents it from creating a new module. It may return None to indicate that the spec should create the new module. """ # By default, defer to default semantics for the new module. return None # We don't define exec_module() here since that would break # hasattr checks we do to support backward compatibility. 
def load_module(self, fullname): """Return the loaded module. The module must be added to sys.modules and have import-related attributes set properly. The fullname is a str. ImportError is raised on failure. This method is deprecated in favor of loader.exec_module(). If exec_module() exists then it is used to provide a backwards-compatible functionality for this method. """ if not hasattr(self, 'exec_module'): raise ImportError return _bootstrap._load_module_shim(self, fullname) def module_repr(self, module): """Return a module's repr. Used by the module type when the method does not raise NotImplementedError. This method is deprecated. """ # The exception will cause ModuleType.__repr__ to ignore this method. raise NotImplementedError class ResourceLoader(Loader): """Abstract base class for loaders which can return data from their back-end storage. This ABC represents one of the optional protocols specified by PEP 302. """ @abc.abstractmethod def get_data(self, path): """Abstract method which when implemented should return the bytes for the specified path. The path must be a str.""" raise IOError class InspectLoader(Loader): """Abstract base class for loaders which support inspection about the modules they can load. This ABC represents one of the optional protocols specified by PEP 302. """ def is_package(self, fullname): """Optional method which when implemented should return whether the module is a package. The fullname is a str. Returns a bool. Raises ImportError if the module cannot be found. """ raise ImportError def get_code(self, fullname): """Method which returns the code object for the module. The fullname is a str. Returns a types.CodeType if possible, else returns None if a code object does not make sense (e.g. built-in module). Raises ImportError if the module cannot be found. 
""" source = self.get_source(fullname) if source is None: return None return self.source_to_code(source) @abc.abstractmethod def get_source(self, fullname): """Abstract method which should return the source code for the module. The fullname is a str. Returns a str. Raises ImportError if the module cannot be found. """ raise ImportError @staticmethod def source_to_code(data, path='<string>'): """Compile 'data' into a code object. The 'data' argument can be anything that compile() can handle. The'path' argument should be where the data was retrieved (when applicable).""" return compile(data, path, 'exec', dont_inherit=True) exec_module = _bootstrap_external._LoaderBasics.exec_module load_module = _bootstrap_external._LoaderBasics.load_module _register(InspectLoader, machinery.BuiltinImporter, machinery.FrozenImporter) class ExecutionLoader(InspectLoader): """Abstract base class for loaders that wish to support the execution of modules as scripts. This ABC represents one of the optional protocols specified in PEP 302. """ @abc.abstractmethod def get_filename(self, fullname): """Abstract method which should return the value that __file__ is to be set to. Raises ImportError if the module cannot be found. """ raise ImportError def get_code(self, fullname): """Method to return the code object for fullname. Should return None if not applicable (e.g. built-in module). Raise ImportError if the module cannot be found. 
""" source = self.get_source(fullname) if source is None: return None try: path = self.get_filename(fullname) except ImportError: return self.source_to_code(source) else: return self.source_to_code(source, path) _register(ExecutionLoader, machinery.ExtensionFileLoader) class FileLoader(_bootstrap_external.FileLoader, ResourceLoader, ExecutionLoader): """Abstract base class partially implementing the ResourceLoader and ExecutionLoader ABCs.""" _register(FileLoader, machinery.SourceFileLoader, machinery.SourcelessFileLoader) class SourceLoader(_bootstrap_external.SourceLoader, ResourceLoader, ExecutionLoader): """Abstract base class for loading source code (and optionally any corresponding bytecode). To support loading from source code, the abstractmethods inherited from ResourceLoader and ExecutionLoader need to be implemented. To also support loading from bytecode, the optional methods specified directly by this ABC is required. Inherited abstractmethods not implemented in this ABC: * ResourceLoader.get_data * ExecutionLoader.get_filename """ def path_mtime(self, path): """Return the (int) modification time for the path (str).""" if self.path_stats.__func__ is SourceLoader.path_stats: raise IOError return int(self.path_stats(path)['mtime']) def path_stats(self, path): """Return a metadata dict for the source pointed to by the path (str). Possible keys: - 'mtime' (mandatory) is the numeric timestamp of last source code modification; - 'size' (optional) is the size in bytes of the source code. """ if self.path_mtime.__func__ is SourceLoader.path_mtime: raise IOError return {'mtime': self.path_mtime(path)} def set_data(self, path, data): """Write the bytes to the path (if possible). Accepts a str path and data as bytes. Any needed intermediary directories are to be created. If for some reason the file cannot be written because of permissions, fail silently. """ _register(SourceLoader, machinery.SourceFileLoader)
{ "repo_name": "arju88nair/projectCulminate", "path": "venv/lib/python3.5/importlib/abc.py", "copies": "45", "size": "10782", "license": "apache-2.0", "hash": -3831667121738100000, "line_mean": 31.7720364742, "line_max": 86, "alpha_frac": 0.6634205157, "autogenerated": false, "ratio": 4.704188481675392, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": null, "num_lines": null }
"""Abstract base classes related to import.""" from . import _bootstrap from . import machinery from . import util import abc import imp import io import marshal import os.path import sys import tokenize import types import warnings class Loader(metaclass=abc.ABCMeta): """Abstract base class for import loaders.""" @abc.abstractmethod def load_module(self, fullname): """Abstract method which when implemented should load a module. The fullname is a str.""" raise NotImplementedError class Finder(metaclass=abc.ABCMeta): """Abstract base class for import finders.""" @abc.abstractmethod def find_module(self, fullname, path=None): """Abstract method which when implemented should find a module. The fullname is a str and the optional path is a str or None. Returns a Loader object. """ raise NotImplementedError Finder.register(machinery.BuiltinImporter) Finder.register(machinery.FrozenImporter) Finder.register(machinery.PathFinder) class ResourceLoader(Loader): """Abstract base class for loaders which can return data from their back-end storage. This ABC represents one of the optional protocols specified by PEP 302. """ @abc.abstractmethod def get_data(self, path): """Abstract method which when implemented should return the bytes for the specified path. The path must be a str.""" raise NotImplementedError class InspectLoader(Loader): """Abstract base class for loaders which support inspection about the modules they can load. This ABC represents one of the optional protocols specified by PEP 302. """ @abc.abstractmethod def is_package(self, fullname): """Abstract method which when implemented should return whether the module is a package. The fullname is a str. Returns a bool.""" raise NotImplementedError @abc.abstractmethod def get_code(self, fullname): """Abstract method which when implemented should return the code object for the module. The fullname is a str. 
Returns a types.CodeType.""" raise NotImplementedError @abc.abstractmethod def get_source(self, fullname): """Abstract method which should return the source code for the module. The fullname is a str. Returns a str.""" raise NotImplementedError InspectLoader.register(machinery.BuiltinImporter) InspectLoader.register(machinery.FrozenImporter) class ExecutionLoader(InspectLoader): """Abstract base class for loaders that wish to support the execution of modules as scripts. This ABC represents one of the optional protocols specified in PEP 302. """ @abc.abstractmethod def get_filename(self, fullname): """Abstract method which should return the value that __file__ is to be set to.""" raise NotImplementedError class SourceLoader(_bootstrap.SourceLoader, ResourceLoader, ExecutionLoader): """Abstract base class for loading source code (and optionally any corresponding bytecode). To support loading from source code, the abstractmethods inherited from ResourceLoader and ExecutionLoader need to be implemented. To also support loading from bytecode, the optional methods specified directly by this ABC is required. Inherited abstractmethods not implemented in this ABC: * ResourceLoader.get_data * ExecutionLoader.get_filename """ def path_mtime(self, path): """Return the (int) modification time for the path (str).""" raise NotImplementedError def set_data(self, path, data): """Write the bytes to the path (if possible). Accepts a str path and data as bytes. Any needed intermediary directories are to be created. If for some reason the file cannot be written because of permissions, fail silently. """ raise NotImplementedError class PyLoader(SourceLoader): """Implement the deprecated PyLoader ABC in terms of SourceLoader. This class has been deprecated! It is slated for removal in Python 3.4. If compatibility with Python 3.1 is not needed then implement the SourceLoader ABC instead of this class. 
If Python 3.1 compatibility is needed, then use the following idiom to have a single class that is compatible with Python 3.1 onwards:: try: from importlib.abc import SourceLoader except ImportError: from importlib.abc import PyLoader as SourceLoader class CustomLoader(SourceLoader): def get_filename(self, fullname): # Implement ... def source_path(self, fullname): '''Implement source_path in terms of get_filename.''' try: return self.get_filename(fullname) except ImportError: return None def is_package(self, fullname): filename = os.path.basename(self.get_filename(fullname)) return os.path.splitext(filename)[0] == '__init__' """ @abc.abstractmethod def is_package(self, fullname): raise NotImplementedError @abc.abstractmethod def source_path(self, fullname): """Abstract method. Accepts a str module name and returns the path to the source code for the module.""" raise NotImplementedError def get_filename(self, fullname): """Implement get_filename in terms of source_path. As get_filename should only return a source file path there is no chance of the path not existing but loading still being possible, so ImportError should propagate instead of being turned into returning None. """ warnings.warn("importlib.abc.PyLoader is deprecated and is " "slated for removal in Python 3.4; " "use SourceLoader instead. " "See the importlib documentation on how to be " "compatible with Python 3.1 onwards.", PendingDeprecationWarning) path = self.source_path(fullname) if path is None: raise ImportError else: return path class PyPycLoader(PyLoader): """Abstract base class to assist in loading source and bytecode by requiring only back-end storage methods to be implemented. This class has been deprecated! Removal is slated for Python 3.4. Implement the SourceLoader ABC instead. If Python 3.1 compatibility is needed, see PyLoader. The methods get_code, get_source, and load_module are implemented for the user. 
""" def get_filename(self, fullname): """Return the source or bytecode file path.""" path = self.source_path(fullname) if path is not None: return path path = self.bytecode_path(fullname) if path is not None: return path raise ImportError("no source or bytecode path available for " "{0!r}".format(fullname)) def get_code(self, fullname): """Get a code object from source or bytecode.""" warnings.warn("importlib.abc.PyPycLoader is deprecated and slated for " "removal in Python 3.4; use SourceLoader instead. " "If Python 3.1 compatibility is required, see the " "latest documentation for PyLoader.", PendingDeprecationWarning) source_timestamp = self.source_mtime(fullname) # Try to use bytecode if it is available. bytecode_path = self.bytecode_path(fullname) if bytecode_path: data = self.get_data(bytecode_path) try: magic = data[:4] if len(magic) < 4: raise ImportError("bad magic number in {}".format(fullname)) raw_timestamp = data[4:8] if len(raw_timestamp) < 4: raise EOFError("bad timestamp in {}".format(fullname)) pyc_timestamp = marshal._r_long(raw_timestamp) bytecode = data[8:] # Verify that the magic number is valid. if imp.get_magic() != magic: raise ImportError("bad magic number in {}".format(fullname)) # Verify that the bytecode is not stale (only matters when # there is source to fall back on. if source_timestamp: if pyc_timestamp < source_timestamp: raise ImportError("bytecode is stale") except (ImportError, EOFError): # If source is available give it a shot. if source_timestamp is not None: pass else: raise else: # Bytecode seems fine, so try to use it. return marshal.loads(bytecode) elif source_timestamp is None: raise ImportError("no source or bytecode available to create code " "object for {0!r}".format(fullname)) # Use the source. 
source_path = self.source_path(fullname) if source_path is None: message = "a source path must exist to load {0}".format(fullname) raise ImportError(message) source = self.get_data(source_path) code_object = compile(source, source_path, 'exec', dont_inherit=True) # Generate bytecode and write it out. if not sys.dont_write_bytecode: data = bytearray(imp.get_magic()) data.extend(marshal._w_long(source_timestamp)) data.extend(marshal.dumps(code_object)) self.write_bytecode(fullname, data) return code_object @abc.abstractmethod def source_mtime(self, fullname): """Abstract method. Accepts a str filename and returns an int modification time for the source of the module.""" raise NotImplementedError @abc.abstractmethod def bytecode_path(self, fullname): """Abstract method. Accepts a str filename and returns the str pathname to the bytecode for the module.""" raise NotImplementedError @abc.abstractmethod def write_bytecode(self, fullname, bytecode): """Abstract method. Accepts a str filename and bytes object representing the bytecode for the module. Returns a boolean representing whether the bytecode was written or not.""" raise NotImplementedError
{ "repo_name": "zhouzhenghui/python-for-android", "path": "python3-alpha/python3-src/Lib/importlib/abc.py", "copies": "51", "size": "10725", "license": "apache-2.0", "hash": 7020576731029976000, "line_mean": 34.2796052632, "line_max": 80, "alpha_frac": 0.6335664336, "autogenerated": false, "ratio": 4.990693345742206, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0005528248216414863, "num_lines": 304 }
"""Abstract base classes related to import.""" from . import _fileloader2 from . import _bootstrap from . import _bootstrap_external from . import machinery import abc try: from importlib.abc import _register except ImportError: # backporting _register (without caring about builtin or frozen modules) def _register(abstract_cls, *classes): for cls in classes: abstract_cls.register(cls) try: from importlib.abc import Finder except ImportError: class Finder(object): __metaclass__ = abc.ABCMeta """Legacy abstract base class for import finders. It may be subclassed for compatibility with legacy third party reimplementations of the import system. Otherwise, finder implementations should derive from the more specific MetaPathFinder or PathEntryFinder ABCs. """ @abc.abstractmethod def find_module(self, fullname, path=None): """An abstract method that should find a module. The fullname is a str and the optional path is a str or None. Returns a Loader object or None. """ try: from importlib.abc import MetaPathFinder except ImportError: class MetaPathFinder(Finder): """Abstract base class for import finders on sys.meta_path.""" # We don't define find_spec() here since that would break # hasattr checks we do to support backward compatibility. def find_module(self, fullname, path): """Return a loader for the module. If no module is found, return None. The fullname is a str and the path is a list of strings or None. This method is deprecated in favor of finder.find_spec(). If find_spec() exists then backwards-compatible functionality is provided for this method. """ if not hasattr(self, 'find_spec'): return None found = self.find_spec(fullname, path) return found.loader if found is not None else None def invalidate_caches(self): """An optional method for clearing the finder's cache, if any. This method is used by importlib.invalidate_caches(). 
""" _register(MetaPathFinder, machinery.PathFinder) try: from importlib.abc import PathEntryFinder except ImportError: class PathEntryFinder(Finder): """Abstract base class for path entry finders used by PathFinder.""" # We don't define find_spec() here since that would break # hasattr checks we do to support backward compatibility. def find_loader(self, fullname): """Return (loader, namespace portion) for the path entry. The fullname is a str. The namespace portion is a sequence of path entries contributing to part of a namespace package. The sequence may be empty. If loader is not None, the portion will be ignored. The portion will be discarded if another path entry finder locates the module as a normal module or package. This method is deprecated in favor of finder.find_spec(). If find_spec() is provided than backwards-compatible functionality is provided. """ if not hasattr(self, 'find_spec'): return None, [] found = self.find_spec(fullname) if found is not None: if not found.submodule_search_locations: portions = [] else: portions = found.submodule_search_locations return found.loader, portions else: return None, [] find_module = _bootstrap_external._find_module_shim def invalidate_caches(self): """An optional method for clearing the finder's cache, if any. This method is used by PathFinder.invalidate_caches(). """ _register(PathEntryFinder, machinery.FileFinder) try: from importlib.abc import Loader except ImportError: class Loader(object): __metaclass__ = abc.ABCMeta """Abstract base class for import loaders.""" def create_module(self, spec): """Return a module to initialize and into which to load. This method should raise ImportError if anything prevents it from creating a new module. It may return None to indicate that the spec should create the new module. """ # By default, defer to default semantics for the new module. return None # We don't define exec_module() here since that would break # hasattr checks we do to support backward compatibility. 
def load_module(self, fullname): """Return the loaded module. The module must be added to sys.modules and have import-related attributes set properly. The fullname is a str. ImportError is raised on failure. This method is deprecated in favor of loader.exec_module(). If exec_module() exists then it is used to provide a backwards-compatible functionality for this method. """ if not hasattr(self, 'exec_module'): raise ImportError return _bootstrap._load_module_shim(self, fullname) def module_repr(self, module): """Return a module's repr. Used by the module type when the method does not raise NotImplementedError. This method is deprecated. """ # The exception will cause ModuleType.__repr__ to ignore this method. raise NotImplementedError try: from importlib.abc import ResourceLoader except ImportError: class ResourceLoader(Loader): """Abstract base class for loaders which can return data from their back-end storage. This ABC represents one of the optional protocols specified by PEP 302. """ @abc.abstractmethod def get_data(self, path): """Abstract method which when implemented should return the bytes for the specified path. The path must be a str.""" raise IOError try: from importlib.abc import InspectLoader except ImportError: class InspectLoader(Loader): """Abstract base class for loaders which support inspection about the modules they can load. This ABC represents one of the optional protocols specified by PEP 302. """ def is_package(self, fullname): """Optional method which when implemented should return whether the module is a package. The fullname is a str. Returns a bool. Raises ImportError if the module cannot be found. """ raise ImportError def get_code(self, fullname): """Method which returns the code object for the module. The fullname is a str. Returns a types.CodeType if possible, else returns None if a code object does not make sense (e.g. built-in module). Raises ImportError if the module cannot be found. 
""" source = self.get_source(fullname) if source is None: return None return self.source_to_code(source) @abc.abstractmethod def get_source(self, fullname): """Abstract method which should return the source code for the module. The fullname is a str. Returns a str. Raises ImportError if the module cannot be found. """ raise ImportError @staticmethod def source_to_code(data, path='<string>'): """Compile 'data' into a code object. The 'data' argument can be anything that compile() can handle. The'path' argument should be where the data was retrieved (when applicable).""" return compile(data, path, 'exec', dont_inherit=True) exec_module = _bootstrap_external._LoaderBasics.exec_module load_module = _bootstrap_external._LoaderBasics.load_module _register(InspectLoader, machinery.BuiltinImporter, machinery.FrozenImporter) try: from importlib.abc import ExecutionLoader except ImportError: class ExecutionLoader(InspectLoader): """Abstract base class for loaders that wish to support the execution of modules as scripts. This ABC represents one of the optional protocols specified in PEP 302. """ @abc.abstractmethod def get_filename(self, fullname): """Abstract method which should return the value that __file__ is to be set to. Raises ImportError if the module cannot be found. """ raise ImportError def get_code(self, fullname): """Method to return the code object for fullname. Should return None if not applicable (e.g. built-in module). Raise ImportError if the module cannot be found. 
""" source = self.get_source(fullname) if source is None: return None try: path = self.get_filename(fullname) except ImportError: return self.source_to_code(source) else: return self.source_to_code(source, path) _register(ExecutionLoader, machinery.ExtensionFileLoader) try: from importlib.abc import FileLoader except ImportError: class FileLoader(_fileloader2.FileLoader, ResourceLoader, ExecutionLoader): """Abstract base class partially implementing the ResourceLoader and ExecutionLoader ABCs.""" _register(FileLoader, machinery.SourceFileLoader, machinery.SourcelessFileLoader) try: from importlib.abc import SourceLoader except ImportError: class SourceLoader(_fileloader2.SourceLoader, ResourceLoader, ExecutionLoader): """Abstract base class for loading source code (and optionally any corresponding bytecode). To support loading from source code, the abstractmethods inherited from ResourceLoader and ExecutionLoader need to be implemented. To also support loading from bytecode, the optional methods specified directly by this ABC is required. Inherited abstractmethods not implemented in this ABC: * ResourceLoader.get_data * ExecutionLoader.get_filename """ def path_mtime(self, path): """Return the (int) modification time for the path (str).""" if self.path_stats.__func__ is SourceLoader.path_stats: raise IOError return int(self.path_stats(path)['mtime']) def path_stats(self, path): """Return a metadata dict for the source pointed to by the path (str). Possible keys: - 'mtime' (mandatory) is the numeric timestamp of last source code modification; - 'size' (optional) is the size in bytes of the source code. """ if self.path_mtime.__func__ is SourceLoader.path_mtime: raise IOError return {'mtime': self.path_mtime(path)} def set_data(self, path, data): """Write the bytes to the path (if possible). Accepts a str path and data as bytes. Any needed intermediary directories are to be created. If for some reason the file cannot be written because of permissions, fail silently. 
""" _register(SourceLoader, machinery.SourceFileLoader)
{ "repo_name": "asmodehn/filefinder2", "path": "filefinder2/abc.py", "copies": "1", "size": "11794", "license": "mit", "hash": -7280925315490510000, "line_mean": 33.6882352941, "line_max": 84, "alpha_frac": 0.6223503476, "autogenerated": false, "ratio": 5.05746140651801, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6179811754118011, "avg_score": null, "num_lines": null }
"""Abstract base classes. Unfortuntaely, these are mostly useless indirections since TIDAL is now the only supported back-end.""" from abc import ABC, abstractmethod from typing import List, NamedTuple, Optional, Tuple, Type, Union class ManiaException(Exception): """Base exception class for setting an exit code""" exit_code = 0 class ManiaSeriousException(ManiaException): """A serious exception with a non-zero exit code""" exit_code = 1 class UnavailableException(Exception): """For region-locked or otherwise unavailable items""" class Artist(NamedTuple): """A musical artist""" id: str name: str class Album(NamedTuple): """An album with one or more artists""" id: str name: str artists: List[Artist] year: Optional[str] explicit: bool cover_url: Optional[str] best_available_quality: str def format_dict(self): return { "album_id": self.id, "album_name": self.name, "album_artists": ", ".join(artist.name for artist in self.artists), "album_first_artist": self.artists[0].name, "album_year": self.year or "Unknown Year", } class Track(NamedTuple): """A track with an album and one or more artists""" id: str name: str artists: List[Artist] album: Album explicit: bool track_number: int disc_number: int chosen_quality: str best_available_quality: str replay_gain: Optional[float] file_extension: str def format_dict(self, maximum_track_number=0): track_number = str(self.track_number).zfill(len(str(maximum_track_number))) return { "track_id": self.id, "track_name": self.name, "track_artists": ", ".join(artist.name for artist in self.artists), "track_first_artist": self.artists[0].name, "track_number": track_number, **self.album.format_dict(), } Media = Union[Track, Album, Artist] MediaType = Union[Type[Track], Type[Album], Type[Artist]] class Client(ABC): """An abstract streaming service client""" @abstractmethod def search(self, query: str, media_type: MediaType, count: int): pass @abstractmethod def get_album_tracks(self, album: Album) -> List[Track]: pass @abstractmethod def 
get_artist_albums(self, artist: Artist) -> List[Album]: pass @abstractmethod def get_artist_eps_singles(self, artist: Artist) -> List[Album]: pass @abstractmethod def get_media(self, track: Track) -> str: pass @abstractmethod def get_artist_by_id(self, artist_id: str): pass @abstractmethod def get_album_by_id(self, album_id: str): pass @abstractmethod def get_track_by_id(self, track_id: str): pass @abstractmethod def resolve_url(self, url: str) -> Tuple[MediaType, Optional[Media]]: pass
{ "repo_name": "evan-goode/mania", "path": "mania/models.py", "copies": "1", "size": "2963", "license": "unlicense", "hash": -3692694958175431000, "line_mean": 23.6916666667, "line_max": 83, "alpha_frac": 0.6277421532, "autogenerated": false, "ratio": 3.7458912768647283, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9873137398318697, "avg_score": 0.0000992063492063492, "num_lines": 120 }
"""Abstract baseclass for all distributions.""" import logging import numpy import chaospy from .utils import check_dependencies class Distribution(object): """Baseclass for all probability distributions.""" __array_priority__ = 9000 """Numpy override variable.""" interpret_as_integer = False """ Flag indicating that return value from the methods sample, and inv should be interpreted as integers instead of floating point. """ @property def stochastic_dependent(self): """True if distribution contains stochastically dependent components.""" return any(len(deps) > 1 for deps in self._dependencies) def __init__( self, parameters, dependencies, rotation=None, exclusion=None, repr_args=None, ): """ Distribution initializer. In addition to assigning some object variables, also checks for some consistency issues. Args: parameters (Optional[Distribution[str, Union[ndarray, Distribution]]]): Collection of model parameters. dependencies (Optional[Sequence[Set[int]]]): Dependency identifiers. One collection for each dimension. rotation (Optional[Sequence[int]]): The order of which to resolve dependencies. exclusion (Optional[Sequence[int]]): Distributions that has been "taken out of play" and therefore can not be reused other places in the dependency hierarchy. repr_args (Optional[Sequence[str]]): Positional arguments to place in the object string representation. The repr output will then be: `<class name>(<arg1>, <arg2>, ...)`. Raises: StochasticallyDependentError: For dependency structures that can not later be rectified. This include under-defined distributions, and inclusion of distributions that should be exclusion. 
""" assert isinstance(parameters, dict) self._parameters = parameters self._dependencies = list(dependencies) if rotation is None: rotation = sorted(enumerate(self._dependencies), key=lambda x: len(x[1])) rotation = [key for key, _ in rotation] rotation = list(rotation) assert len(set(rotation)) == len(dependencies) assert min(rotation) == 0 assert max(rotation) == len(dependencies)-1 self._rotation = rotation if exclusion is None: exclusion = set() self._exclusion = set(exclusion) if repr_args is None: repr_args = ("{}={}".format(key, self._parameters[key]) for key in sorted(self._parameters)) self._repr_args = list(repr_args) self._mom_cache = {(0,)*len(dependencies): 1.} self._ttr_cache = {} self._indices = {} self._all_dependencies = {dep for deps in self._dependencies for dep in deps} if len(self._all_dependencies) < len(dependencies): raise chaospy.StochasticallyDependentError( "%s is an under-defined probability distribution." % self) for key, param in list(parameters.items()): if isinstance(param, Distribution): if self._all_dependencies.intersection(param._exclusion): raise chaospy.StochasticallyDependentError(( "%s contains dependencies that can not also exist " "other places in the dependency hierarchy") % param) self._exclusion.update(param._exclusion) else: self._parameters[key] = numpy.asarray(param) def get_parameters(self, idx, cache, assert_numerical=True): """Get distribution parameters.""" del assert_numerical out = self._parameters.copy() assert isinstance(cache, dict) if idx is not None: assert not isinstance(idx, dict), idx assert idx == int(idx), idx assert "idx" not in out assert "cache" not in out out["cache"] = cache out["idx"] = idx return out @property def lower(self): """Lower bound for the distribution.""" cache = {} out = numpy.zeros(len(self)) for idx in self._rotation: out[idx] = self._get_lower(idx, cache=cache) return out def _get_lower(self, idx, cache): """In-processes function for getting lower bounds.""" if (idx, self) in 
cache: return cache[idx, self][0] if hasattr(self, "get_lower_parameters"): parameters = self.get_lower_parameters(idx, cache) else: parameters = self.get_parameters(idx, cache, assert_numerical=False) out = self._lower(**parameters) assert not isinstance(out, Distribution), (self, out) out = numpy.atleast_1d(out) assert out.ndim == 1, (self, out, cache) cache[idx, self] = (out, None) return out def _lower(self, **kwargs): # pragma: no cover """Backend lower bound.""" raise chaospy.UnsupportedFeature("lower not supported") @property def upper(self): """Upper bound for the distribution.""" cache = {} out = numpy.zeros(len(self)) for idx in self._rotation: out[idx] = self._get_upper(idx, cache=cache) return out def _get_upper(self, idx, cache): """In-processes function for getting upper bounds.""" if (idx, self) in cache: return cache[idx, self][0] if hasattr(self, "get_upper_parameters"): parameters = self.get_upper_parameters(idx, cache) else: parameters = self.get_parameters(idx, cache, assert_numerical=False) out = self._upper(**parameters) assert not isinstance(out, Distribution), (self, out) out = numpy.atleast_1d(out) assert out.ndim == 1, (self, out, cache) cache[idx, self] = (out, None) size = max([elem[0].size for elem in cache.values()]) assert all([elem[0].size in (1, size) for elem in cache.values()]) return out def _upper(self, **kwargs): # pragma: no cover """Backend upper bound.""" raise chaospy.UnsupportedFeature("lower not supported") def fwd(self, x_data): """ Forward Rosenblatt transformation. Args: x_data (numpy.ndarray): Location for the distribution function. ``x_data.shape`` must be compatible with distribution shape. Returns: (numpy.ndarray): Evaluated distribution function values, where ``out.shape==x_data.shape``. 
""" logger = logging.getLogger(__name__) check_dependencies(self) x_data = numpy.asfarray(x_data) shape = x_data.shape x_data = x_data.reshape(len(self), -1) cache = {} q_data = numpy.zeros(x_data.shape) for idx in self._rotation: q_data[idx] = self._get_fwd(x_data[idx], idx, cache) indices = (q_data > 1) | (q_data < 0) if numpy.any(indices): # pragma: no cover logger.debug("%s.fwd: %d/%d outputs out of bounds", self, numpy.sum(indices), len(indices)) q_data = numpy.clip(q_data, a_min=0, a_max=1) q_data = q_data.reshape(shape) return q_data def _get_fwd(self, x_data, idx, cache): """In-process function for getting cdf-values.""" logger = logging.getLogger(__name__) assert (idx, self) not in cache, "repeated evaluation" lower = numpy.broadcast_to(self._get_lower(idx, cache=cache.copy()), x_data.shape) upper = numpy.broadcast_to(self._get_upper(idx, cache=cache.copy()), x_data.shape) parameters = self.get_parameters(idx, cache, assert_numerical=True) ret_val = self._cdf(x_data, **parameters) assert not isinstance(ret_val, Distribution), (self, ret_val) out = numpy.zeros(x_data.shape) out[:] = ret_val indices = x_data < lower if numpy.any(indices): logger.debug("%s.fwd: %d/%d inputs below bounds", self, numpy.sum(indices), len(indices)) out = numpy.where(indices, 0, out) indices = x_data > upper if numpy.any(indices): logger.debug("%s.fwd: %d/%d inputs above bounds", self, numpy.sum(indices), len(indices)) out = numpy.where(indices, 1, out) assert numpy.all((out >= 0) | (out <= 1)) cache[idx, self] = (x_data, out) assert out.ndim == 1, (self, out, cache) return out def cdf(self, x_data): """ Cumulative distribution function. Note that chaospy only supports cumulative distribution functions for stochastically independent distributions. Args: x_data (numpy.ndarray): Location for the distribution function. Assumes that ``len(x_data) == len(distribution)``. 
Returns: (numpy.ndarray): Evaluated distribution function values, where output has shape ``x_data.shape`` in one dimension and ``x_data.shape[1:]`` in higher dimensions. """ check_dependencies(self) if self.stochastic_dependent: raise chaospy.StochasticallyDependentError( "Cumulative distribution does not support dependencies.") x_data = numpy.asarray(x_data) if self.interpret_as_integer: x_data = x_data+0.5 q_data = self.fwd(x_data) if len(self) > 1: q_data = numpy.prod(q_data, 0) return q_data def inv(self, q_data, max_iterations=100, tollerance=1e-5): """ Inverse Rosenblatt transformation. If possible the transformation is done analytically. If not possible, transformation is approximated using an algorithm that alternates between Newton-Raphson and binary search. Args: q_data (numpy.ndarray): Probabilities to be inverse. If any values are outside ``[0, 1]``, error will be raised. ``q_data.shape`` must be compatible with distribution shape. max_iterations (int): If approximation is used, this sets the maximum number of allowed iterations in the Newton-Raphson algorithm. tollerance (float): If approximation is used, this set the error tolerance level required to define a sample as converged. Returns: (numpy.ndarray): Inverted probability values where ``out.shape == q_data.shape``. """ logger = logging.getLogger(__name__) check_dependencies(self) q_data = numpy.asfarray(q_data) assert numpy.all((q_data >= 0) & (q_data <= 1)), "sanitize your inputs!" 
shape = q_data.shape q_data = q_data.reshape(len(self), -1) cache = {} x_data = numpy.zeros(q_data.shape) for idx in self._rotation: x_data[idx] = self._get_inv(q_data[idx], idx, cache) x_data = x_data.reshape(shape) return x_data def _get_inv(self, q_data, idx, cache): """In-process function for getting ppf-values.""" logger = logging.getLogger(__name__) assert numpy.all(q_data <= 1) and numpy.all(q_data >= 0) assert q_data.ndim == 1 if (idx, self) in cache: return cache[idx, self][0] lower = numpy.broadcast_to(self._get_lower(idx, cache=cache.copy()), q_data.shape) upper = numpy.broadcast_to(self._get_upper(idx, cache=cache.copy()), q_data.shape) try: parameters = self.get_parameters(idx, cache, assert_numerical=True) ret_val = self._ppf(q_data, **parameters) except chaospy.UnsupportedFeature: ret_val = chaospy.approximate_inverse( self, idx, q_data, cache=cache) assert not isinstance(ret_val, Distribution), (self, ret_val) out = numpy.zeros(q_data.shape) out[:] = ret_val indices = out < lower if numpy.any(indices): logger.debug("%s.inv: %d/%d outputs below bounds", self, numpy.sum(indices), len(indices)) out = numpy.where(indices, lower, out) indices = out > upper if numpy.any(indices): logger.debug("%s.inv: %d/%d outputs above bounds", self, numpy.sum(indices), len(indices)) out = numpy.where(indices, upper, out) assert out.ndim == 1 cache[idx, self] = (out, q_data) assert out.ndim == 1, (self, out, cache) return out def _ppf(self, xloc, **kwargs): raise chaospy.UnsupportedFeature( "%s: does not support analytical ppf." % self) def ppf(self, q_data, max_iterations=100, tollerance=1e-5): """ Point percentile function. Also known as the inverse cumulative distribution function. Note that chaospy only supports point percentiles for univariate distributions. Args: q_data (numpy.ndarray): Probabilities to be inverse. If any values are outside ``[0, 1]``, error will be raised. 
max_iterations (int): If approximation is used, this sets the maximum number of allowed iterations in the Newton-Raphson algorithm. tollerance (float): If approximation is used, this set the error tolerance level required to define a sample as converged. Returns: (numpy.ndarray): Inverted probability values where ``out.shape == q_data.shape``. """ if len(self) > 1: raise ValueError( "only one-dimensional distribution supports percentiles.") return self.inv( q_data, max_iterations=max_iterations, tollerance=tollerance, ) def pdf(self, x_data, decompose=False, allow_approx=True, step_size=1e-7): """ Probability density function. If possible the density will be calculated analytically. If not possible, it will be approximated by approximating the one-dimensional derivative of the forward Rosenblatt transformation and multiplying the component parts. Note that even if the distribution is multivariate, each component of the Rosenblatt is one-dimensional. Args: x_data (numpy.ndarray): Location for the density function. If multivariate, `len(x_data) == len(self)` is required. decompose (bool): Decompose multivariate probability density `p(x), p(y|x), ...` instead of multiplying them together into `p(x, y, ...)`. allow_approx (bool): Allow the density to be estimated using numerical derivative of forward mapping if analytical approach fails. Raises error instead if false. step_size (float): The relative step size between two points used to calculate the derivative, assuming approximation is being used. Raises: chaospy.UnsupportedFeature: If analytical calculation is not possible and `allow_approx` is false. Returns: (numpy.ndarray): Evaluated density function evaluated in `x_data`. If decompose, `output.shape == x_data.shape`, else if multivariate the first dimension is multiplied together. 
Example: >>> chaospy.Gamma(2).pdf([1, 2, 3, 4, 5]).round(3) array([0.368, 0.271, 0.149, 0.073, 0.034]) >>> dist = chaospy.Iid(chaospy.Normal(0, 1), 2) >>> grid = numpy.mgrid[-1.5:2, -1.5:2] >>> dist.pdf(grid).round(3) array([[0.017, 0.046, 0.046, 0.017], [0.046, 0.124, 0.124, 0.046], [0.046, 0.124, 0.124, 0.046], [0.017, 0.046, 0.046, 0.017]]) >>> dist.pdf(grid, decompose=True).round(3) array([[[0.13 , 0.13 , 0.13 , 0.13 ], [0.352, 0.352, 0.352, 0.352], [0.352, 0.352, 0.352, 0.352], [0.13 , 0.13 , 0.13 , 0.13 ]], <BLANKLINE> [[0.13 , 0.352, 0.352, 0.13 ], [0.13 , 0.352, 0.352, 0.13 ], [0.13 , 0.352, 0.352, 0.13 ], [0.13 , 0.352, 0.352, 0.13 ]]]) """ logger = logging.getLogger(__name__) check_dependencies(self) x_data = numpy.asfarray(x_data) shape = x_data.shape x_data = x_data.reshape(len(self), -1) f_data = numpy.zeros(x_data.shape) cache = {} for idx in self._rotation: try: cache_ = cache.copy() f_data[idx] = self._get_pdf(x_data[idx], idx, cache) except chaospy.UnsupportedFeature: if allow_approx: logger.info( "%s: has stochastic dependencies; " "Approximating density with numerical derivative.", str(self) ) cache = cache_ f_data[idx] = chaospy.approximate_density( self, idx, x_data[idx], cache=cache, step_size=step_size) else: raise f_data = f_data.reshape(shape) if len(self) > 1 and not decompose: f_data = numpy.prod(f_data, 0) return f_data def _get_pdf(self, x_data, idx, cache): """In-process function for getting pdf-values.""" logger = logging.getLogger(__name__) assert x_data.ndim == 1 if (idx, self) in cache: return cache[idx, self][1] lower = numpy.broadcast_to(self._get_lower(idx, cache=cache.copy()), x_data.shape) upper = numpy.broadcast_to(self._get_upper(idx, cache=cache.copy()), x_data.shape) parameters = self.get_parameters(idx, cache, assert_numerical=True) ret_val = self._pdf(x_data, **parameters) assert not isinstance(ret_val, Distribution), (self, ret_val) out = numpy.zeros(x_data.shape) out[:] = ret_val indices = (x_data < lower) | (x_data > 
upper) if numpy.any(indices): logger.debug("%s.fwd: %d/%d inputs out of bounds", self, numpy.sum(indices), len(indices)) logger.debug("%s[%s]: %s - %s - %s", self, idx, lower, x_data, upper) out = numpy.where(indices, 0, ret_val) if self in cache: out = numpy.where(x_data == cache[self][0], out, 0) cache[idx, self] = (x_data, out) assert out.ndim == 1, (self, out, cache) return out def _pdf(self, xloc, **kwargs): raise chaospy.UnsupportedFeature( "%s: does not support analytical pdf." % self) def sample(self, size=(), rule="random", antithetic=None, include_axis_dim=False, seed=None): """ Create pseudo-random generated samples. By default, the samples are created using standard (pseudo-)random samples. However, if needed, the samples can also be created by either low-discrepancy sequences, and/or variance reduction techniques. Changing the sampling scheme, use the following ``rule`` flag: ---------------------- ------------------------------------------- key description ---------------------- ------------------------------------------- ``additive_recursion`` Modulus of golden ratio samples. ``chebyshev`` Roots of first order Chebyshev polynomials. ``grid`` Regular spaced grid. ``halton`` Halton low-discrepancy sequence. ``hammersley`` Hammersley low-discrepancy sequence. ``korobov`` Korobov lattice. ``latin_hypercube`` Latin hypercube samples. ``nested_chebyshev`` Chebyshev nodes adjusted to ensure nested. ``nested_grid`` Nested regular spaced grid. ``random`` Classical (Pseudo-)Random samples. ``sobol`` Sobol low-discrepancy sequence. ---------------------- ------------------------------------------- All samples are created on the ``[0, 1]``-hypercube, which then is mapped into the domain of the distribution using the inverse Rosenblatt transformation. Args: size (numpy.ndarray): The size of the samples to generate. rule (str): Indicator defining the sampling scheme. antithetic (bool, numpy.ndarray): If provided, will be used to setup antithetic variables. 
If array, defines the axes to mirror. include_axis_dim (bool): By default an extra dimension even if the number of dimensions is 1. seed (Optional[int]): If provided, fixes the random variable's seed, ensuring reproducible results. Returns: (numpy.ndarray): Random samples with ``self.shape``. An extra dimension might be added to the front if either ``len(dist) > 1`` or ``include_axis_dim=True``. """ if seed is not None: state = numpy.random.get_state() numpy.random.seed(seed) out = self.sample(size, rule=rule, antithetic=antithetic, include_axis_dim=include_axis_dim) numpy.random.set_state(state) return out check_dependencies(self) size_ = numpy.prod(size, dtype=int) dim = len(self) shape = ((size,) if isinstance(size, (int, float, numpy.number)) else tuple(size)) shape = (-1,)+shape[1:] shape = shape if dim == 1 and not include_axis_dim else (dim,)+shape from chaospy.distributions import sampler out = sampler.generator.generate_samples( order=size_, domain=self, rule=rule, antithetic=antithetic) for idx, dist in enumerate(self): if dist.interpret_as_integer: out[idx] = numpy.round(out[idx]) if self.interpret_as_integer: out = numpy.round(out).astype(int) out = out.reshape(shape) return out def mom(self, K, allow_approx=True, **kwargs): """ Raw statistical moments. Creates non-centralized raw moments from the random variable. If analytical options can not be utilized, Monte Carlo integration will be used. Args: K (numpy.ndarray): Index of the raw moments. k.shape must be compatible with distribution shape. Sampling scheme when performing Monte Carlo allow_approx (bool): Allow the moments to be calculated using quadrature integration if analytical approach fails. Raises error instead if false. kwargs (Any): Arguments passed to :func:`chaospy.approximate_moment` if approximation is used. Raises: chaospy.UnsupportedFeature: If analytical calculation is not possible and `allow_approx` is false. 
Returns: (numpy.ndarray): Shapes are related through the identity ``k.shape == dist.shape+k.shape``. """ logger = logging.getLogger(__name__) K = numpy.asarray(K, dtype=int) assert numpy.all(K >= 0) shape = K.shape dim = len(self) if dim > 1: assert len(self) == shape[0] shape = shape[1:] size = int(K.size/dim) K = K.reshape(dim, size) try: out = [self._get_mom(kdata) for kdata in K.T] logger.debug("%s: moment calculated successfully", str(self)) except chaospy.UnsupportedFeature: if allow_approx: logger.info( "%s: has stochastic dependencies; " "Approximating moments with quadrature.", str(self)) out = [chaospy.approximate_moment(self, kdata) for kdata in K.T] else: out = [self._get_mom(kdata) for kdata in K.T] out = numpy.array(out) assert out.size == numpy.prod(shape), (out, shape) return out.reshape(shape) def _get_mom(self, kdata): """In-process function for getting moments.""" if tuple(kdata) in self._mom_cache: return self._mom_cache[tuple(kdata)] if hasattr(self, "get_mom_parameters"): parameters = self.get_mom_parameters() else: parameters = self.get_parameters(idx=None, cache={}, assert_numerical=False) assert "idx" not in parameters, (self, parameters) ret_val = float(self._mom(kdata, **parameters)) assert not isinstance(ret_val, Distribution), (self, ret_val) self._mom_cache[tuple(kdata)] = ret_val return ret_val def _mom(self, kloc, **kwargs): raise chaospy.UnsupportedFeature( "moments not supported for this distribution") def ttr(self, kloc): """ Three terms relation's coefficient generator. Args: k (numpy.ndarray, int): The order of the coefficients. Returns: (Recurrence coefficients): Where out[0] is the first (A) and out[1] is the second coefficient With ``out.shape==(2,)+k.shape``. 
""" check_dependencies(self) kloc = numpy.asarray(kloc, dtype=int) shape = kloc.shape kloc = kloc.reshape(len(self), -1) out = numpy.zeros((2,)+kloc.shape) for idy, kloc_ in enumerate(kloc.T): for idx in range(len(self)): out[:, idx, idy] = self._get_ttr(kloc_[idx], idx) return out.reshape((2,)+shape) def _get_ttr(self, kdata, idx): """In-process function for getting TTR-values.""" if (idx, kdata) in self._ttr_cache: return self._ttr_cache[idx, kdata] if hasattr(self, "get_ttr_parameters"): parameters = self.get_ttr_parameters(idx) else: parameters = self.get_parameters(idx, cache={}, assert_numerical=True) alpha, beta = self._ttr(kdata, **parameters) assert not isinstance(alpha, Distribution), (self, alpha) assert not isinstance(beta, Distribution), (self, beta) alpha = numpy.asfarray(alpha).item() beta = numpy.asfarray(beta).item() self._ttr_cache[idx, kdata] = (alpha, beta) return alpha, beta def _ttr(self, kloc, **kwargs): raise chaospy.UnsupportedFeature( "three terms recursion not supported for this distribution") def _get_cache(self, idx, cache, get=None): """ In-process function for getting cached values. Each time a distribution has been processed, the input and output values are stored in the cache. This checks if a distribution has been processed before and return a cache value if it is. The cached values are as follows: ----------- ------------- ------------- Context Get 0 Get 1 ----------- ------------- ------------- pdf Input values Output values cdf/fwd Input values Output values ppf/inv Output values Input values lower/upper Output values N/A ----------- ------------- ------------- Args: idx (int): Which dimension to get cache from. cache (Dict[Distribution, Tuple[numpy.ndarray, numpy.ndarray]]): Collection of cached values. Keys are distributions that has been processed earlier, values consist of up to two cache value. get (int): Which cache to retrieve. Returns: (numpy.ndarray, Distribution): The content of the cache, if any. Else return self. 
""" if (idx, self) in cache: assert get in (0, 1) out = cache[idx, self][get] else: out = self._cache(idx=idx, cache=cache, get=get) return out def _cache(self, idx, cache, get): """Backend function of retrieving cache values.""" return self def __getitem__(self, index): if isinstance(index, numpy.number): assert index.dtype == int index = int(index) if isinstance(index, int): if not -len(self) < index < len(self): raise IndexError("index out of bounds: %s" % index) if index < 0: index += len(self) return chaospy.ItemDistribution(int(index), self) if isinstance(index, slice): start = 0 if index.start is None else index.start stop = len(self) if index.stop is None else index.stop step = 1 if index.step is None else index.step return chaospy.J(*[self[idx] for idx in range(start, stop, step)]) raise IndexError("unrecognized key: %s" % repr(index)) def __iter__(self): for idx in range(len(self)): yield self[idx] def __len__(self): """Distribution length.""" return len(self._dependencies) def __repr__(self): """Distribution repr function.""" args = ", ".join([str(arg) for arg in self._repr_args]) return "{}({})".format(self.__class__.__name__, args) def __str__(self): """Distribution str function.""" return repr(self) def __add__(self, X): """Y.__add__(X) <==> X+Y""" return chaospy.Add(self, X) def __radd__(self, X): """Y.__radd__(X) <==> Y+X""" return chaospy.Add(self, X) def __sub__(self, X): """Y.__sub__(X) <==> X-Y""" return chaospy.Add(self, -X) def __rsub__(self, X): """Y.__rsub__(X) <==> Y-X""" return chaospy.Add(X, -self) def __neg__(self): """X.__neg__() <==> -X""" return chaospy.Negative(self) def __mul__(self, X): """Y.__mul__(X) <==> X*Y""" return chaospy.Multiply(self, X) def __rmul__(self, X): """Y.__rmul__(X) <==> Y*X""" return chaospy.Multiply(X, self) def __div__(self, X): """Y.__div__(X) <==> Y/X""" return chaospy.Multiply(self, X**-1) def __rdiv__(self, X): """Y.__rdiv__(X) <==> X/Y""" return chaospy.Multiply(X, self**-1) def __floordiv__(self, X): 
"""Y.__floordiv__(X) <==> Y/X""" return chaospy.Multiply(self, X**-1) def __rfloordiv__(self, X): """Y.__rfloordiv__(X) <==> X/Y""" return chaospy.Multiply(X, self**-1) def __truediv__(self, X): """Y.__truediv__(X) <==> Y/X""" return chaospy.Multiply(self, X**-1) def __rtruediv__(self, X): """Y.__rtruediv__(X) <==> X/Y""" return chaospy.Multiply(X, self**-1) def __pow__(self, X): """Y.__pow__(X) <==> Y**X""" return chaospy.Power(self, X) def __rpow__(self, X): """Y.__rpow__(X) <==> X**Y""" return chaospy.Power(X, self) def __eq__(self, other): if not isinstance(other, Distribution): return False if len(other) != len(self): return False if len(self) > 1: return all([self == other for self, other in zip(self, other)]) if isinstance(self, chaospy.ItemDistribution) and isinstance(other, chaospy.ItemDistribution): return (self._parameters["index"] == other._parameters["index"] and self._parameters["parent"] is other._parameters["parent"]) return self is other def __hash__(self): return id(self)
{ "repo_name": "jonathf/chaospy", "path": "chaospy/distributions/baseclass/distribution.py", "copies": "1", "size": "32809", "license": "mit", "hash": -3351955622615232500, "line_mean": 37.7355371901, "line_max": 102, "alpha_frac": 0.5473498126, "autogenerated": false, "ratio": 4.1525123402101, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.51998621528101, "avg_score": null, "num_lines": null }
"""Abstract base class for all rendering contexts OpenGL operates with the idea of a current context in which OpenGL calls will operate. This is something a little bit more than a "window", as it includes a number of (optional) off-screen buffers, and a great deal of state which is manipulated by the various OpenGL functions. (OpenGL is basically a huge state machine). The Context in OpenGLContext is your basic interface to the context, and simple operation of OpenGLContext (such as that you'll see in most of the test scripts) can focus almost entirely on the Context object and its various customization points. If you wish to use the scene graph facilities of OpenGLContext, look particularly at the abstract function getSceneGraph, which can be overridden to provide a particular scenegraph object to the renderer. SceneGraph objects provide their own light, background, and render-traversal mechanisms, which allow you to largely ignore the Context objects. The bulk of the actual rendering work is done by the Visitor and RenderVisitor classes (or their shadow- enabled equivalents from shadow.passes), and it is these classes which define the rendering callbacks which are available from the Context class. """ from OpenGL.GL import * from OpenGLContext import visitor, texturecache,plugins from OpenGLContext.passes import renderpass from vrml.vrml97 import nodetypes from vrml import node,cache import weakref, os, time, sys, logging log = logging.getLogger( __name__ ) class LockingError( Exception ): pass try: import threading, Queue contextLock = threading.RLock() contextThread = None except ImportError: threading = 0 class DummyLock: """Dummy locking mechanism when threading is not available I do not know of any situations where it shouldn't be available on a supported platform, but it's possible I suppose. 
""" def acquire( self, *arguments, **named ): return def release( self, *arguments, **named ): return contextLock = DummyLock() contextThread = None def inContextThread(): """Return true if the current thread is the context thread""" if threading: if threading.currentThread() == contextThread: return 1 elif threading.currentThread().getName() == contextThread.getName(): return 1 else: return 0 return 1 class Context(object): """Abstract base class on which all Rendering Contexts are based The Context object represents a single rendering context for use by the application. This base class provides only the most rudimentary of application support, but sub-classes provide such things as navigation, and/or event handling. Attributes: sg -- OpenGLContext.basenodes.sceneGraph; the root of the node-rendering tree. If not NULL, is used to control most aspects of the rendering process. See: getSceneGraph renderPasses -- callable object, normally an instance of OpenGLContext.passes.renderpass.PassSet which implements the rendering algorithm for the Context alreadyDrawn -- flag which is set/checked to determine whether the context needs to be redrawn, see: Context.triggerRedraw( ... ) Context.shouldRedraw( ... ) Context.suppressRedraw( ... ) for the API to use to interact with this attribute. viewportDimensions -- Storage for the current viewport dimensions, see: Context.Viewport( ... ) Context.getViewport( ... ) for the API used to interact with this attribute. drawPollTimeout -- default timeout for the drawPoll method currentContext -- class attribute pointing to the currently rendering Context instance. This allows code called during a Render-pass to access the Context object. Note: wherever possible, use the passed render-pass's "context" attribute, rather than this class attribute. If that isn't possible, use the deprecated getCurrentContext function in this module. 
allContexts -- list of weak references to all instantiated Context objects, mostly of use for code which wants to refresh all contexts when shared resources/states are updated drawing -- flag set to indicate that this Context is currently drawing, mostly used internally frameCounter -- node, normally a framecounter.FrameCounter instance which is used to track frame rates, must have an addFrame method as seen in framecounter.FrameCounter, See setupFrameRateCounter extensions -- extensionmanager.ExtensionManager instance with which to find and initialise extensions for this context. See setupExtensionManager cache -- vrml.cache.Cache instance used for optimising the rendering of scenegraphs. See setupCache redrawRequest -- threading Event for triggering a request scenegraphLock -- threading Lock for blocking rendering from re-entering during a rendering pass pickEvents -- dictionary mapping event type and key to event object where each event requires select-render-pass support contextDefinition -- node describing the options used to create this context, passed in as "definition" argument on init, see OpenGLContext.contextdefinition.ContextDefinition for details. coreProfile -- set if the contextDefinition specifies that this is a core-profile-only context, that is, it does not support compatibility (legacy) entry points. """ currentContext = None allContexts = [] renderPasses = renderpass.defaultRenderPasses frameCounter = None contextDefinition = None ### State flags/values # Set to false to trigger a redraw on the next available iteration alreadyDrawn = None drawing = None viewportDimensions = (0,0) drawPollTimeout = 0.01 coreProfile = False ### Node-like attributes PROTO = "Context" DEF = "#Context" def __init__ (self, definition=None): """Establish the Context working environment definition -- an OpenGLContext.contextdefinition.ContextDefinition instance which controls the context features (size, bit-depth, etc). 
If null, then use self.contextDefinition if it exists, otherwise create a default ContextDefinition instance. Alternately, can be a dictionary of key:value pairs to set on the default ContextDefinition to specify required parameters. Calls the following: setupThreading, setupExtensionManager, initializeEventManagers, setupCallbacks, setupDefaultEventCallbacks, setupCache, setupFontProviders, setupFrameRateCounter, DoInit """ definition = self.setDefinition( definition ) self.setupThreading() self.setupExtensionManager( ) self.initializeEventManagers( ) self.setupCallbacks() self.setupDefaultEventCallbacks() self.allContexts.append( weakref.ref(self)) self.pickEvents = {} self.eventCascadeQueue = Queue.Queue() self.setupCache() self.setupFontProviders() self.setupFrameRateCounter() self.DoInit() def setDefinition( self, definition ): from OpenGLContext import contextdefinition definition = definition or self.contextDefinition if not definition: definition = contextdefinition.ContextDefinition() elif not isinstance( definition, contextdefinition.ContextDefinition ): definition = contextdefinition.ContextDefinition( **definition ) self.contextDefinition = definition self.coreProfile = definition.profile == 'core' return self.contextDefinition def DoInit( self ): """Call the OnInit method at a time when the context is valid This method provides a customization point where contexts which do not completely initialize during their __init__ method can arrange to have the OnInit method processed after their initialization has completed. The default implementation here simply calls OnInit directly w/ appropriate setCurrent and unsetCurrent calls and calls the glutInit() function with an empty argument-list. 
""" self.setCurrent() try: try: from OpenGL import GLUT GLUT.glutInit([]) except Exception, err: pass self.OnInit() finally: self.unsetCurrent() ### Customisation points def setupCallbacks( self ): """Establishes GUI callbacks for asynchronous event GUI systems Subclasses and applications will register events here for those event types in which they are interested. Most minor applications should use interactivecontext's abstract callbacks (which translate the GUI library's native events into a common event framework for all interactivecontexts). The default implementation does nothing. """ def setupCache( self ): """Setup caching strutures for content This includes the general compiled-geometry caches and the texture cache """ self.textureCache = texturecache.TextureCache() self.cache = cache.Cache() def setupExtensionManager( self ): """Create an extension manager for this context""" from OpenGLContext import extensionmanager self.extensions = extensionmanager.ExtensionManager() def setupFontProviders( self ): """Load font providers for the context See the OpenGLContext.scenegraph.text package for the available font providers. """ def setupDefaultEventCallbacks( self ): """Setup common callbacks for the context This will normally be done in the GUI-lib's sub-class of context. You might override it to provide other default callbacks, but you'll normally want to call the base-class implementation somewhere in that overridden method. 
""" self.addEventHandler( 'keyboard', name = '<escape>', function = self.OnQuit ) self.addEventHandler( 'keypress', name = 'f',modifiers = (False,False,True), # ALT function = self.OnFrameRate ) self.addEventHandler( 'keyboard', name = '<pagedown>', function = self.OnNextViewpoint ) self.addEventHandler( 'keypress', name = 's', modifiers = (False,False,True), function = self.OnSaveImage ) def OnQuit( self, event=None ): """Quit the application (forcibly)""" import sys sys.exit( 0 ) def OnFrameRate( self, event=None ): """Print the current frame-rate values""" if self.frameCounter: self.frameCounter.display = not (self.frameCounter.display) print '%sfps'%( self.frameCounter.summary()[1], ) def OnNextViewpoint( self, event=None ): """Go to the next viewpoint for the scenegraph""" sg = self.getSceneGraph() if sg: current = getattr( sg, 'boundViewpoint', None ) if current: current.isBound = False current.set_bound = False self.triggerRedraw( 1 ) def OnSaveImage( self, event=None, template='%(script)s-screen-%(count)04i.png', script=None, date=None, overwrite=False, ): """Save our current screen to disk (if possible)""" try: try: from PIL import Image # get PIL's functionality... except ImportError, err: # old style? 
import Image except ImportError, err: log.error( "Unable to import PIL" ) saved = False return (0,0) else: width, height = self.getViewPort() if not width or not height: return (width,height) glPixelStorei(GL_PACK_ALIGNMENT, 1) data = glReadPixelsub(0, 0, width, height, GL_RGB) if hasattr( data, 'tostring' ): string = data.tostring() else: string = data image = Image.fromstring( 'RGB', (int(width),int(height)), string ) image = image.transpose( Image.FLIP_TOP_BOTTOM) if script is None: import sys script = sys.argv[0] if date is None: import datetime date = datetime.datetime.now().isoformat() count = 0 saved = False while (not saved) and count <= 9999: count += 1 test = template%locals() if overwrite or (not os.path.exists( test )): log.warn( 'Saving to file: %s', test ) image.save( test, 'PNG' ) saved = True return (width,height) else: log.info( 'Existing file: %s', test ) return (0,0) def setupThreading( self ): """Setup primitives (locks, events) for threading """ global contextThread if threading: contextThread = threading.currentThread() contextThread.setName( "GUIThread") self.setupScenegraphLock() self.setupRedrawRequest() def setupFrameRateCounter( self ): """Setup structures for managing frame-rate This sets self.frameCounter to an instance of framecounter.FrameCounter, which is a simple node used to track frame-rate metadata during rendering. Updates to the framecounter are performed by OnDraw iff there is a visible change processed. Note: If you override this method, you need to either use an object which has the same API as a FrameCounter or use None, anything else will cause failures in the core rendering loop! 
""" from OpenGLContext import framecounter self.frameCounter = framecounter.FrameCounter() def initializeEventManagers( self, managerClasses=() ): """Customisation point for initialising event manager objects See: OpenGLContext.eventhandlermixin.EventHandlerMixin """ def setupRedrawRequest( self ): """Setup the redraw-request (threading) event""" if threading: self.redrawRequest = threading.Event() def setupScenegraphLock( self ): """Setup lock to protect scenegraph from updates during rendering """ if threading: self.scenegraphLock = threading.RLock() def lockScenegraph( self, blocking=1 ): """Lock scenegraph locks to prevent other update/rendering actions Potentially this could be called from a thread other than the GUI thread, allowing the other thread to update structures in the scenegraph without mucking up any active rendering pass. """ if threading: self.scenegraphLock.acquire(blocking) def unlockScenegraph( self ): """Unlock scenegraph locks to allow other update/rendering actions Potentially this could be called from a thread other than the GUI thread, allowing the other thread to update structures in the scenegraph without mucking up any active rendering pass. """ if threading: self.scenegraphLock.release() def setCurrent (self, blocking=1): """Set the OpenGL focus to this context """ assert inContextThread(), """setCurrent called from outside of the context/GUI thread! %s"""%( threading.currentThread()) if not contextLock.acquire( blocking ): raise LockingError( """Cannot acquire without blocking""" ) Context.currentContext = self self.lockScenegraph() def unsetCurrent( self ): """Give up the OpenGL focus from this context """ assert inContextThread(), """unsetCurrent called from outside of the context/GUI thread! 
%s"""%( threading.currentThread()) self.unlockScenegraph() Context.currentContext = None contextLock.release() def OnInit( self ): """Customization point for scene set up and initial processing You override this method to do housekeeping chores such as loading images and generating textures, loading pre-established geometry, spawning new threads, etc. This method is called after the completion of the Context.__init__ method for the rendering context. GUI implementers: Wherever possible, this should be the very last function called in the initialization of the context to allow user code to use all the functionality of the context. """ def OnIdle(self,*arguments ): ''' Override to perform actions when the rendering loop is idle ''' return self.drawPoll() def OnDraw (self, force = 1, *arguments): """Callback for the rendering/drawing mechanism force -- if true, force a redraw. If false, then only do a redraw if the event cascade has generated events. return value is whether a visible change occured This implementation does the following: * calls self.lockScenegraph() o calls self.DoEventCascade() * calls self.unlockScenegraph() * calls self.setCurrent() * calls self.renderPasses( self ) See: passes sub-package See: visitor.py, rendervisitor.py, renderpass.py, shadow/passes.py for examples of render-pass-sets which can be triggered. See: flat.py for standard second-generation renderer The RenderPasses define the core of the rendering mechanism. The default rendering passes will defer most rendering options to the scenegraph returned by self.getSceneGraph(). If that value is None (default) then the pass will use the Context's callbacks. You can define new RenderPasses to replace the rendering algorithm, override the Context's various callbacks to write raw OpenGL code, or work by customizing the scene graph library. 
* if there was a visible change (which is the return value from the render-pass-set), calls self.SwapBuffers() * calls self.unsetCurrent() """ assert inContextThread(), """OnDraw called from outside of the context/GUI thread! %s"""%( threading.currentThread()) # could use if self.frameCounter, but that introduces a # potential race condition, so eat the extra call... t = time.clock() self.lockScenegraph() try: changed = self.DoEventCascade() if not force and not changed: return 0 finally: self.unlockScenegraph() self.setCurrent() self.drawing = 1 if threading: self.redrawRequest.clear() try: visibleChange = self.renderPasses( self ) if visibleChange: if self.frameCounter is not None: self.frameCounter.addFrame( time.clock()-t ) return 1 return 0 finally: glFlush() self.drawing = None self.unsetCurrent() def drawPoll( self, timeout=None): """Wait timeout seconds for a redraw request timeout -- timeout in seconds, if None, use self.drawPollTimeout returns 0 if timeout, 1 if true """ if timeout is None: timeout = self.drawPollTimeout if threading: self.redrawRequest.wait( timeout ) if self.redrawRequest.isSet(): self.OnDraw( force = 1) return 1 else: self.OnDraw( force = 0) return 1 return 0 def Render( self, mode = None): """Customization point for geometry rendering This method is called by the default render passes to render the geometry for the system. Wherever possible, you should pay attention to the rendering modes to allow for optimization of your geometry (for instance, selection passes do not require lighting). The default implementation merely ensures that matrix mode is currently model view. See: visitor.py, rendervisitor.py, renderpass.py, shadow/passes.py for definitions of the properties of the mode. """ ### Put your rendering code here def DoEventCascade( self ): """Customization point for generating non-GUI event cascades This method should only be called after self.lockScenegraph has been called. 
self.unlockScenegraph should then be called Most Contexts will use the eventhandler mix-in's version of this method. That provides support for the defered-execution of functions/method during the event cascade. """ return 0 def OnResize (self, *arguments): '''Resize the window when the windowing library says to''' self.triggerRedraw(1) def triggerPick( self): """Trigger a selection rendering pass If the context is not currently drawing, the selection render will occur immediately, otherwise it will occur the next time the rendering loop reaches the selection stage. """ contextLock.acquire() try: if (not self.drawing) and inContextThread(): self.OnDraw() elif threading: self.redrawRequest.set() finally: contextLock.release() def triggerRedraw( self, force=0 ): """Indicate to the context that it should redraw when possible If force is true, the rendering will begin immediately if the context is not already drawing. Otherwise only the indicator flag will be set. """ contextLock.acquire() try: self.alreadyDrawn = 0 finally: contextLock.release() if force and (not self.drawing) and inContextThread(): self.OnDraw() elif threading: self.redrawRequest.set() else: raise RuntimeError( """Unreasonable threading state!""" ) def shouldRedraw( self ): """Return whether or not the context contents need to be redrawn""" return not self.alreadyDrawn def suppressRedraw( self ): """Indicate to the context that there is no need to re-render This method signals to the context that there are no updates currently requiring redrawing of the context's contents. See: Context.shouldRedraw and Context.triggerRedraw """ self.alreadyDrawn = 1 def SwapBuffers (self): """Called by the rendering loop when the buffers should be swapped Each GUI library needs to override this method with the appropriate code for the library. 
""" def ViewPort( self, width, height ): """Set the size of the OpenGL rendering viewport for the context This implementation assumes that the context takes up the entire underlying window (i.e. that it starts at 0,0 and that width, height will represent the entire size of the window). """ assert inContextThread(), """ViewPort called from outside of the context/GUI thread! %s"""%( threading.currentThread()) self.setCurrent() try: self.viewportDimensions = width, height glViewport( 0,0, int(width), int(height) ) finally: self.unsetCurrent() if self.contextDefinition: self.contextDefinition.size = width,height def getViewPort( self ): """Method to retrieve the current dimensions of the context Return value is a width, height tuple. See Context.ViewPort for setting of this value. """ return self.viewportDimensions def addPickEvent( self, event ): """Add event to list of events to be processed by selection-render-mode This is a method of the Context, rather than the rendering pass (which might seem more elegant given that it is the rendering pass which deals with the events being registered) because the requests to render a pick event occur outside of the rendering loop. As a result, there is (almost) never an active context when the pick-event-request comes in. """ self.pickEvents[ (event.type, event.getKey()) ] = event def getPickEvents( self ): """Get the currently active pick-events""" return self.pickEvents def getSceneGraph( self ): """Get the scene graph for the context (or None) You must return an instance of: OpenGLContext.scenegraph.scenegraph.SceneGraph Normally you would create that using either a loader from OpenGLContext.loader: from OpenGLContext.loader import vrml97 def OnInit( self ): self.sg = vrml97.load( 'c:\\somefile\\world.wrl' ) or by using the classes in OpenGLContext.scenegraph.basenodes: from OpenGLContext.scenegraph import basenodes def OnInit( self ): self.sg = basenodes.sceneGraph( children = [ basenodes.Transform(...) 
], ) to define the scenegraph in Python code. """ return getattr( self, 'sg', None) def renderedChildren( self, types = None ): """Get the rendered children of the scenegraph""" sg = self.getSceneGraph() if not sg: return (ContextRenderNode,) else: return (sg,) ### app-framework stuff APPLICATION_NAME = 'OpenGLContext' def getApplicationName( cls ): """Retrieve the application name for configuration purposes""" return cls.APPLICATION_NAME getApplicationName = classmethod( getApplicationName ) def getUserAppDataDirectory( cls ): """Retrieve user-specific configuration directory Default implementation gives a directory-name in the user's (system-specific) "application data" directory named """ from OpenGLContext.browser import homedirectory base = homedirectory.appdatadirectory() if sys.platform == 'win32': name = cls.getApplicationName() else: # use a hidden directory on non-win32 systems # as we are storing in the user's home directory name = '.%s'%( cls.getApplicationName()) path = os.path.join( base, name, ) if not os.path.isdir( path ): os.makedirs( path, mode=0770 ) return path getUserAppDataDirectory = classmethod( getUserAppDataDirectory ) ttfFileRegistry = None def getTTFFiles( self ): """Get TrueType font-file registry object""" if self.ttfFileRegistry: return self.ttfFileRegistry from ttfquery import ttffiles registryFile = os.path.join(self.getUserAppDataDirectory(), 'font_metadata.cache') from OpenGLContext.scenegraph.text import ttfregistry registry = ttfregistry.TTFRegistry() if os.path.isfile( registryFile ): log.info( "Loading font metadata from cache %r", registryFile ) registry.load( registryFile ) if not registry.fonts: log.warn( "Re-scanning fonts, no fonts found in cache" ) registry.scan() registry.save() log.info( "Font metadata stored in cache %r", registryFile ) else: log.warn( "Scanning font metadata into cache %r, please wait", registryFile ) registry.scan() registry.save( registryFile ) log.info( "Font metadata stored in cache %r", 
registryFile ) # make this a globally-available object Context.ttfFileRegistry = registry return registry def getDefaultTTFFont( cls, type='sans' ): """Get the current user's preference for a default font""" import os directory = cls.getUserAppDataDirectory() filename = os.path.join( directory, 'defaultfont-%s.txt'%(type.lower(),) ) name = None try: name = open(filename).readline().strip() except IOError, err: pass if not name: name = None return name getDefaultTTFFont = classmethod( getDefaultTTFFont ) def setDefaultTTFFont( cls, name, type='sans' ): """Set the current user's preference for a default font""" import os directory = cls.getUserAppDataDirectory() filename = os.path.join( directory, 'defaultfont-%s.txt'%(type.lower(),) ) if not name: try: os.remove( filename ) except Exception, err: return False else: return True else: try: open(filename,'w').write( name ) except IOError, err: return False return True setDefaultTTFFont = classmethod( setDefaultTTFFont ) def getContextTypes( cls, type=plugins.InteractiveContext ): """Retrieve the set of defined context types type -- testing type key from setup.py for the registered modules returns list of setuptools entry-point objects which can be passed to getContextType( name ) to retrieve the actual context type. """ return type.all() getContextTypes = classmethod( getContextTypes ) def getContextType( cls, entrypoint=None, type=plugins.InteractiveContext, ): """Load a single context type via entry-point resolution returns a Context sub-class *or* None if there is no such context defined/available, will have a ContextMainLoop method for running the Context top-level loop. 
""" if entrypoint is None: entrypoint = cls.getDefaultContextType() or 'glut' if isinstance( entrypoint, (str,unicode)): for ep in cls.getContextTypes( type ): if entrypoint == ep.name: return cls.getContextType( ep, type=type ) return None try: classObject = entrypoint.load() except ImportError, err: return None else: return classObject getContextType = classmethod( getContextType ) def getDefaultContextType( cls ): """Get the current user's preference for a default context type""" import os directory = cls.getUserAppDataDirectory() filename = os.path.join( directory, 'defaultcontext.txt' ) name = None try: name = open(filename).readline().strip() except IOError, err: pass if not name: name = None return name getDefaultContextType = classmethod( getDefaultContextType ) def setDefaultContextType( cls, name ): """Set the current user's preference for a default font""" import os directory = cls.getUserAppDataDirectory() filename = os.path.join( directory, 'defaultcontext.txt' ) if not name: try: os.remove( filename ) except Exception, err: return False else: return True else: try: open(filename,'w').write( name ) except IOError, err: return False return True setDefaultContextType = classmethod( setDefaultContextType ) def ContextMainLoop( cls, *args, **named ): """Mainloop for the context, each GUI sub-class must override this""" raise NotImplementedError( """No mainloop specified for context class %r"""%( cls, )) ContextMainLoop = classmethod( ContextMainLoop ) ## def getUserContextPreferences( cls ): ## """Retrieve user-specific context preferences""" ## raise NotImplementedError( """Don't have preferences working yet""" ) @staticmethod def fromConfig( cfg ): """Given a ConfigParser instance, produce a configured sub-class""" from OpenGLContext import plugins from OpenGLContext import contextdefinition type = gui = None if cfg.has_option( 'context', 'type' ): type = cfg.get( 'context', 'type' ) if cfg.has_option( 'context', 'gui' ): gui = cfg.get( 'context', 'gui' 
) if type is None: type = 'vrml' for plug_type in [ plugins.InteractiveContext, plugins.VRMLContext, plugins.Context, ]: if type == plug_type.type_key: type = plug_type baseCls = Context.getContextType( gui, type ) baseCls = type( 'TestingContext', (baseCls,), { 'contextDefinition': contextdefinition.ContextDefinition.fromConfig( cfg, ), } ) return baseCls ### Context render-calling child... class _ContextRenderNode( nodetypes.Rendering, nodetypes.Children, node.Node ): """The Context object as a RenderNode Returned as the child of the Context if there is no getSceneGraph() result. """ def Render( self, mode ): """Delegate rendering to the mode.context.Render method""" return mode.context.Render( mode ) def sortKey( self, passes, matrix ): return (0,None) ContextRenderNode = _ContextRenderNode() def getCurrentContext( ): """Get the currently-rendering context This function allows code running during the render cycle to determine the current context. As a general rule, the context is available as rendermode.context from the render mode/pass which is passed to the rendering functions as an argument. Note: this function is deprecated, use the passed rendering mode/pass's context attribute instead. """ return Context.currentContext
{ "repo_name": "alexus37/AugmentedRealityChess", "path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGLContext/context.py", "copies": "1", "size": "35588", "license": "mit", "hash": -4316208307248437000, "line_mean": 37.0213675214, "line_max": 131, "alpha_frac": 0.615966056, "autogenerated": false, "ratio": 4.950340798442064, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.013046788890787043, "num_lines": 936 }
"""Abstract base class for Decision classes. See the NoDecision class and others in this module for examples. To create your own subclass of the Decision class you must customize the following class constants: - ENUM - FIELDS - SHAPES - DTYPE - RECORD_FIELDS The 'ENUM' field should be a python 'Enum' class created by subclassing and customizing 'Enum' in the normal pythonic way. The elements of the 'Enum' are the actual decision choices, and their numeric value is used for serialization. The 'FIELDS' constant is a specification of the number of fields that a decision record will have. All decision records should contain the 'decision_id' field which is the choice of decision. This class implements that and should be used as shown in the examples. In order that fields be serializable to different formats, we also require that they be a numpy array datatype. To support this we require the data shapes and data types for each field. Elements of SHAPES and DTYPES should be of a format recognizable by the numpy array constructor. To this we allow the additional option of specifying SHAPES as the python built-in name Ellipsis (AKA '...'). This will specify the shape as a variable length 1-dimensional array. The RECORD_FIELDS is used as a way to specify fields which are amenable to placement in simplified summary tables, i.e. simple non-compound values. The only method that needs to be implemented in the Decision is 'action'. This function actually implements the algorithm for taking actions on the decisions and instructions and is called from the resampler to perform them on the collection of walkers. """ from collections import namedtuple from enum import Enum from string import ascii_lowercase import logging import numpy as np # ABC for the Decision class class Decision(object): """Represents and provides methods for a set of decision values. """ ENUM = None """The enumeration of the decision types. 
Maps them to integers.""" DEFAULT_DECISION = None """The default decision to choose.""" FIELDS = ('decision_id',) """The names of the fields that go into the decision record.""" # suggestion for subclassing, FIELDS and others # FIELDS = super().FIELDS + ('target_idxs',) # etc. # An Ellipsis instead of fields indicate there is a variable # number of fields. SHAPES = ((1,),) """Field data shapes.""" DTYPES = (np.int,) """Field data types.""" RECORD_FIELDS = ('decision_id',) """The fields that could be used in a reduced table-like representation.""" ANCESTOR_DECISION_IDS = None """Specify the enum values where their walker state sample value is passed on in the next generation, i.e. after performing the action.""" @classmethod def default_decision(cls): return cls.DEFAULT_DECISION @classmethod def field_names(cls): """Names of the decision record fields.""" return cls.FIELDS @classmethod def field_shapes(cls): """Field data shapes.""" return cls.SHAPES @classmethod def field_dtypes(cls): """Field data types.""" return cls.DTYPES @classmethod def fields(cls): """Specs for each field. Returns ------- fields : list of tuples Field specs each spec is of the form (name, shape, dtype). """ return list(zip(cls.field_names(), cls.field_shapes(), cls.field_dtypes())) @classmethod def record_field_names(cls): """The fields that could be used in a reduced table-like representation.""" return cls.RECORD_FIELDS @classmethod def enum_dict_by_name(cls): """Get the decision enumeration as a dict mapping name to integer.""" if cls.ENUM is None: raise NotImplementedError d = {} for enum in cls.ENUM: d[enum.name] = enum.value return d @classmethod def enum_dict_by_value(cls): """Get the decision enumeration as a dict mapping integer to name.""" if cls.ENUM is None: raise NotImplementedError d = {} for enum in cls.ENUM: d[enum.value] = enum return d @classmethod def enum_by_value(cls, enum_value): """Get the enum name for an enum_value. 
Parameters ---------- enum_value : int Returns ------- enum_name : enum """ d = cls.enum_dict_by_value() return d[enum_value] @classmethod def enum_by_name(cls, enum_name): """Get the enum name for an enum_value. Parameters ---------- enum_name : enum Returns ------- enum_value : int """ d = cls.enum_dict_by_name() return d[enum_name] @classmethod def record(cls, enum_value, **fields): """Generate a record for the enum_value and the other fields. Parameters ---------- enum_value : int Returns ------- rec : dict of str: value """ assert enum_value in cls.enum_dict_by_value(), "value is not a valid Enumerated value" for field_key in fields.keys(): assert field_key in cls.FIELDS, \ "The field {} is not a field for that decision".format(field_key) assert field_key != 'decision_id', "'decision_id' cannot be an extra field" rec = {'decision_id' : enum_value} rec.update(fields) return rec @classmethod def action(cls, walkers, decisions): """Perform the instructions for a set of resampling records on walkers. The decisions are a collection of decision records which contain the decision value and the instruction values for a particular walker within its cohort (sample set). The collection is organized as a list of lists. The outer list corresponds to the steps of resampling within a cycle. The inner list is a list of decision records for a specific step of resampling, where the index of the decision record is the walker index. Parameters ---------- walkers : list of Walker objects The walkers you want to perform the decision instructions on. decisions : list of list of decision records The decisions for each resampling step and their instructions to apply to the walkers. Returns ------- resampled_walkers : list of Walker objects The resampled walkers. 
Raises ------ NotImplementedError : abstract method """ raise NotImplementedError @classmethod def parents(cls, step): """Given a step of resampling records (for a single resampling step) returns the parents of the children of this step. Parameters ---------- step : list of decision records The decision records for a step of resampling for each walker. Returns ------- walker_step_parents : list of int For each element, the index of it in the list corresponds to the child index and the value of the element is the index of it's parent before the decision action. """ # initialize a list for the parents of this stages walkers step_parents = [None for i in range(len(step))] # the rest of the stages parents are based on the previous stage for parent_idx, parent_rec in enumerate(step): # if the decision is an ancestor then the instruction # values will be the children if parent_rec[0] in cls.ANCESTOR_DECISION_IDS: # the first value of the parent record is the target # idxs child_idxs = parent_rec[1] for child_idx in child_idxs: step_parents[child_idx] = parent_idx return step_parents class NothingDecisionEnum(Enum): """Enumeration of the decision values for doing nothing.""" NOTHING = 0 """Do nothing with the walker.""" class NoDecision(Decision): """Decision for a resampling process that does no resampling.""" ENUM = NothingDecisionEnum DEFAULT_DECISION = ENUM.NOTHING FIELDS = Decision.FIELDS + ('target_idxs',) SHAPES = Decision.SHAPES + (Ellipsis,) DTYPES = Decision.DTYPES + (np.int,) RECORD_FIELDS = Decision.RECORD_FIELDS + ('target_idxs',) ANCESTOR_DECISION_IDS = (ENUM.NOTHING.value,) @classmethod def action(cls, walkers, decisions): # list for the modified walkers mod_walkers = [None for i in range(len(walkers))] # go through each decision and perform the decision # instructions for walker_idx, decision in enumerate(decisions): decision_value, instruction = decision if decision_value == cls.ENUM.NOTHING.value: # check to make sure a walker doesn't already exist # 
where you are going to put it if mod_walkers[instruction[0]] is not None: raise ValueError( "Multiple walkers assigned to position {}".format(instruction[0])) # put the walker in the position specified by the # instruction mod_walkers[instruction[0]] = walkers[walker_idx] return mod_walkers
{ "repo_name": "ADicksonLab/wepy", "path": "src/wepy/resampling/decisions/decision.py", "copies": "1", "size": "9590", "license": "mit", "hash": 6532357942551243000, "line_mean": 28.0606060606, "line_max": 94, "alpha_frac": 0.6256517205, "autogenerated": false, "ratio": 4.493908153701968, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5619559874201967, "avg_score": null, "num_lines": null }
"""Abstract base class for detectors. """ # standard imports from typing import Union, Tuple, List, Any import logging # third party imports import numpy as np # toolbox imports from ..base.data import Data from ..base.meta import Metadata from ..base.image import Image, Imagelike from .tool import Tool from .image import ImageTool # logging LOG = logging.getLogger(__name__) # A type for possible detections Detections = Union[Metadata] class Detector(Tool): # pylint: disable=too-many-ancestors """A general detector. A detector is intended to detect something in some given data. The basic detector interface (:py:meth:`detect`) simply maps given data to detections. What detections are and how they are represented will differ for specific subclasses (for example an ImageDetector typically returns a list of bounding boxes). """ # # Detector # def _process(self, data, **kwargs) -> Any: """Processing data with a :py:class:`Detector` means detecting. """ return self._detect(data, **kwargs) # FIXME[todo]: working on batches (data.is_batch). Here arises the # question what the result type should be for the functional API # (A): a list/tuple or some iterator, or even another structure # (a batch version of Metadata) def detect(self, data: Data, **kwargs) -> Detections: """Preprocess the given data and apply the detector. This method is intended for synchronous use - it dose neither alter the `data` object, nor the detector itself. Depending on the detector, it may be possible to run the method multiple times in parallel. Arguments --------- data: Data The data to be fed to the detector. This may be a :py:class:`Data` object or simple data array. Result ------ detection: Detections The dections. """ if not self.prepared: # FIXME[todo]: decorator @assert_prepared... raise RuntimeError("Running unprepared detector.") # FIXME[todo/hack]: the following will data batches # currently we simply flatten the batch, taking the first item. 
# The correct approach would be to really do detection on # the whole batch if data.is_batch: raise ValueError("Detector currently does not support " "batch detection.") LOG.info("Running detector '%s' on data %r", self.key, data) if not data: return None # obtain the preprocessed input data preprocessed_data = self.preprocess(data) print("detect:", type(data), type(preprocessed_data)) # do the actual processing detections = self._detect(preprocessed_data, **kwargs) LOG.info("Detector '%s' with %s detections", self.key, detections) detections = self._adapt_detections(detections, data) return detections def _detect(self, data: np.ndarray, **kwargs) -> Detections: """Do the actual detection. The detector will return a Metadata structure containing the detections as a list of :py:class:`Location`s (usually of type :py:class:`BoundingBox`) in the 'regions' property. """ raise NotImplementedError("Detector class '" + type(self).__name__ + "' is not implemented (yet).") def _detect_batch(self, data: np.ndarray, **kwargs) -> Detections: # FIXME[todo]: batch processing raise NotImplementedError("Detector class '" + type(self).__name__ + "' is not implemented (yet).") def _adapt_detections(self, detections: Detections, data: Data) -> Detections: raise NotImplementedError("Detector class '" + type(self).__name__ + "' is not implemented (yet).") # # Processor # def _preprocess_data(self, data: Data, **kwargs) -> None: """This method does the actual preprocessing. This method may be overwritten by subclasses to add attributes to the data object, without assigning values yet (this can be done in :py:meth:`_process_data` or :py:meth:`_process_batch`). This method may set the data object and notify observers, allowing them to observe how the data object gets filled during processing. """ super()._preprocess_data(data) self.add_data_attribute(data, 'detections') def _process_data(self, data: Data, **kwargs) -> None: """Process the given data. 
This will run the detector on the data and add the detection results as new attribute 'detections'. """ LOG.debug("Processing data %r with detector %s", data, self) # self.detect() includes preprocessing and postprocessing detections = self.detect(data) self.set_data_attribute(data, 'detections', detections) LOG.debug("Detections found 2: %s, %s", self.detections(data), data) def detections(self, data) -> Metadata: """Provide the detections from a data object that was processed by this :py:class:`Detector`. """ return self.get_data_attribute(data, 'detections') class ImageDetector(Detector, ImageTool): # pylint: disable=too-many-ancestors """A detector to be applied to image data. """ def __init__(self, size: Tuple[int, int] = None, **kwargs) -> None: super().__init__(**kwargs) self._size = size # # Implementation of the private API # external_result: Tuple[str] = ('detections', ) internal_arguments: Tuple[str] = ('_data', ) internal_result: Tuple[str] = ('_detections', ) def _preprocess(self, image: Imagelike, **kwargs) -> Data: data = super()._preprocess(image, **kwargs) data.add_attribute('_data', getattr(data, 'scaled', data.image)) return data def _postprocess(self, data: Data, name: str) -> None: # FIXME[todo]: batch processing if name == 'detections': if hasattr(data, '_detections'): detections = data._detections if self._size is not None and hasattr(data, 'image'): size = data.image.shape resize_ratio = max(self._size[0]/size[0], self._size[1]/size[1]) detections.scale(resize_ratio) else: detections = None data.add_attribute('detections', detections) elif name == 'mark': if not hasattr(data, 'detections'): self._postprocess(data, 'detections') data.add_attribute(name, self.mark_image(data.image, data.detections)) elif name == 'extract': if not hasattr(data, 'detections'): self._postprocess(data, 'detections') data.add_attribute(name, self.extract_from_image(data.image, data.detections)) else: super()._postprocess(data, name) # # FIXME[old]: # def 
_preprocess_old(self, array: np.ndarray, **kwargs) -> np.ndarray: """Preprocess the image. This will resize the image to the target size of this tool, if such a size is set. """ if array.ndim != 2 and array.ndim != 3: raise ValueError("The image provided has an illegal format: " f"shape={array.shape}, dtype={array.dtype}") # if self._size is not None: # resize_ratio = array.shape[1]/400.0 # array = imutils.resize(array, width=400) # FIXME[hack] return super()._preprocess(array, **kwargs) def _adapt_detections(self, detections: Detections, data: Data) -> Detections: if detections is None: return None # if we have scaled the input data, then we have to apply reverse # scaling to the detections. if self._size is not None: size = data.array.shape resize_ratio = max(self._size[0]/size[0], self._size[1]/size[1]) detections.scale(resize_ratio) return detections def _postprocess_data(self, data: Data, mark: bool = False, extract: bool = False, **kwargs) -> None: """Apply different forms of postprocessing to the data object, extending it by additional tool specific attributes. Arguments --------- mark: bool Visually mark the detections in a copy of the image and store the result in the data object as the tool specific attribute `marked`. extract: bool Extract a list of image patches corresponding to the detections from the image and store the result in the data object as the tool specific attribute `extractions`. """ if mark: self.mark_data(data) if extract: self.extract_data(data) # # Image specific methods # def detect_image(self, image: Imagelike, **kwargs) -> Detections: """Apply the detector to the given image. """ return self.detect(Image.as_data(image)) def process_image(self, image: Imagelike, **kwargs) -> Data: """Create an image data object and process it with this :py:class:`ImageDetector`. Result ------ The processed data object. 
""" data = Image.as_data(image) self.apply(data, **kwargs) return data # # Marking detections # def mark_image(self, image: Imagelike, detections: Detections = None, copy: bool = True) -> np.ndarray: """Mark the given detections in an image. Arguments --------- image: Imagelike The image into which the detections are to be drawn. detections: Detections The detections to draw. copy: bool A flag indicating if detections should be marked in a copy of the image (`True`) or into the original image object (`False`). Result ------ marked_image: np.ndarray An image in which the given detections are visually marked. """ if detections is None: detections = self._detect(image) array = Image.as_array(image, copy=copy) if detections: for index, region in enumerate(detections.regions): region.mark_image(array) return array def mark_data(self, data: Data, detections: Detections = None) -> None: """Extend the given `Data` image object by a tool specific attribute, called `marked`, holding a copy of the original image in which the detections are marked. This function assumes that the detect has already be applied to the given data object and the detections are stored in a tool specific attribute called `detections`. Arguments --------- data: Data The data object do be marked. detections: Detections The detections to mark in the image. If None are provided the detections from the tools specific attribute `detections` is used. """ if detections is None: detections = self.detections(data) marked_image = self.mark_image(data.array, detections, copy=True) self.add_data_attribute(data, 'mark', marked_image) def marked_image(self, data) -> np.ndarray: """Get a version of the image with visually marked detections. This method assumes that this image has already be stored as an attribute to the data object, e.g., by calling the method :py:meth:`mark_data`, or by provding the argument `mark=True` when calling :py:meth:`process`. 
""" return self.get_data_attribute(data, 'mark') # # Extracting detections # def extract_from_image(self, image: Imagelike, detections: Detections, copy: bool = True) -> List[np.ndarray]: """Extract detections as a list of image patches from a given image. Arguments --------- image: Imagelike The image into which the detections are to be drawn. detections: Detections The detections to draw. copy: bool A flag indicating if extracted images should be realized as views using the same memory as the original image (False), or if real copies should be created (True). In some situations (e.g., if the detection includes invalid regions outside the image), only copy is valid and will be done no matter the value of this argument. Result ------ extractions: List[np.ndarray] An list of extracted image regions. """ array = Image.as_array(image) extractions = [] if detections: for region in detections.regions: extractions.append(region.location.extract(array, copy=copy)) return extractions def extract_data(self, data: Data, detections: Detections = None) -> None: """Extend the given `Data` image object by a tool specific attribute, called `extractions`, holding a list of extracted image patches based on the detections done by this :py:class:`ImageDetector`. This function assumes that the detector has already be applied to the given data object and the detections are stored in a tool specific attribute called `detections`. Arguments --------- data: Data The data object do be marked. detections: Detections The detections to be extracted from the image. If None are provided the detections from the tools specific data attribute `detections` is used. """ if detections is None: detections = self.detections(data) extractions = self.extract_from_image(data, detections) self.add_data_attribute(data, 'extract', extractions) def extractions(self, data) -> List[np.ndarray]: """Get a list of image patches extracted from the original image based on detections of this :py:class:ImageDetector`. 
This method assumes that this list has already been stored as an tool specific attribute `extractions` in the data object, e.g., by calling the method :py:meth:`extract_data`, or by provding the argument `extract=True` when calling :py:meth:`process`. """ return self.get_data_attribute(data, 'extract')
{ "repo_name": "Petr-By/qtpyvis", "path": "dltb/tool/detector.py", "copies": "1", "size": "15187", "license": "mit", "hash": -3912227552129732600, "line_mean": 35.7723970944, "line_max": 78, "alpha_frac": 0.5916902614, "autogenerated": false, "ratio": 4.416109334108753, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5507799595508752, "avg_score": null, "num_lines": null }
"""Abstract base class for implementing xml enum types.""" # ***** BEGIN LICENSE BLOCK ***** # # Copyright (c) 2007-2012, Python File Format Interface # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # * Neither the name of the Python File Format Interface # project nor the names of its contributors may be used to endorse # or promote products derived from this software without specific # prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # ***** END LICENSE BLOCK ***** import logging import struct from pyffi.object_models.xml.basic import BasicBase from pyffi.object_models.editable import EditableComboBox class _MetaEnumBase(type): """This metaclass checks for the presence of _enumkeys, _enumvalues, and _numbytes attributes. 
It also adds enum class attributes. Used as metaclass of EnumBase.""" def __init__(cls, name, bases, dct): super(_MetaEnumBase, cls).__init__(name, bases, dct) # consistency checks if not '_enumkeys' in dct: raise TypeError('%s: missing _enumkeys attribute'%cls) if not '_enumvalues' in dct: raise TypeError('%s: missing _enumvalues attribute'%cls) if not '_numbytes' in dct: raise TypeError('%s: missing _numbytes attribute'%cls) # check storage type if cls._numbytes == 1: cls._struct = 'B' elif cls._numbytes == 2: cls._struct = 'H' elif cls._numbytes == 4: cls._struct = 'I' else: raise RuntimeError("unsupported enum numbytes") # template type? cls._is_template = False # does the type contain a Ref or a Ptr? cls._has_links = False # does the type contain a Ref? cls._has_refs = False # does the type contain a string? cls._has_strings = False # for other read/write checking cls._min = 0 cls._max = (1 << (cls._numbytes * 8)) - 1 # set enum values as class attributes for item, value in zip(cls._enumkeys, cls._enumvalues): setattr(cls, item, value) class EnumBase(BasicBase, EditableComboBox, metaclass=_MetaEnumBase): _enumkeys = [] _enumvalues = [] _numbytes = 1 # default width of an enum # # BasicBase methods # def __init__(self, **kwargs): super(EnumBase, self).__init__(**kwargs) self._value = self._enumvalues[0] def get_value(self): """Return stored value.""" return self._value def set_value(self, value): """Set value to C{value}.""" try: val = int(value) except ValueError: try: val = int(value, 16) # for '0x...' 
strings except ValueError: if value in self._enumkeys: val = getattr(self, value) else: raise ValueError( "cannot convert value '%s' to integer"%value) if not val in self._enumvalues: logger = logging.getLogger("pyffi.object_models.xml.enum") logger.error('invalid enum value (%i) for %s' % (val, self.__class__.__name__)) else: self._value = val def read(self, stream, data): """Read value from stream.""" self._value = struct.unpack(data._byte_order + self._struct, stream.read(self._numbytes))[0] def write(self, stream, data): """Write value to stream.""" stream.write(struct.pack(data._byte_order + self._struct, self._value)) def __str__(self): try: return self._enumkeys[self._enumvalues.index(self.get_value())] except ValueError: # not in _enumvalues list return "<INVALID (%i)>" % self.get_value() def get_size(self, data=None): """Return size of this type.""" return self._numbytes def get_hash(self, data=None): """Return a hash value for this value.""" return self.get_value() # # EditableComboBox methods # def get_editor_keys(self): """List or tuple of strings, each string describing an item.""" return self._enumkeys def set_editor_value(self, index): """Set value from item index.""" self.set_value(self._enumvalues[index]) def get_editor_value(self): """Get the item index from the enum value.""" return self._enumvalues.index(self._value) def get_detail_display(self): """Return object that can be used to display the instance.""" try: return self._enumkeys[self._enumvalues.index(self._value)] except ValueError: # value self._value is not in the self._enumvalues list return "<INVALID (0x%08X)>" % self._value
{ "repo_name": "griest024/PokyrimTools", "path": "pyffi-develop/pyffi/object_models/xml/enum.py", "copies": "1", "size": "6158", "license": "mit", "hash": -4735618094553145000, "line_mean": 34.5953757225, "line_max": 75, "alpha_frac": 0.6131860994, "autogenerated": false, "ratio": 4.312324929971989, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5425511029371989, "avg_score": null, "num_lines": null }
"""Abstract base class for kernel clients""" #----------------------------------------------------------------------------- # Copyright (C) 2013 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports import abc #----------------------------------------------------------------------------- # Main kernel client class #----------------------------------------------------------------------------- class KernelClientABC(object): """KernelManager ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.client.KernelClient` """ __metaclass__ = abc.ABCMeta @abc.abstractproperty def kernel(self): pass @abc.abstractproperty def shell_channel_class(self): pass @abc.abstractproperty def iopub_channel_class(self): pass @abc.abstractproperty def hb_channel_class(self): pass @abc.abstractproperty def stdin_channel_class(self): pass #-------------------------------------------------------------------------- # Channel management methods #-------------------------------------------------------------------------- @abc.abstractmethod def start_channels(self, shell=True, iopub=True, stdin=True, hb=True): pass @abc.abstractmethod def stop_channels(self): pass @abc.abstractproperty def channels_running(self): pass @abc.abstractproperty def shell_channel(self): pass @abc.abstractproperty def iopub_channel(self): pass @abc.abstractproperty def stdin_channel(self): pass @abc.abstractproperty def hb_channel(self): pass
{ "repo_name": "noslenfa/tdjangorest", "path": "uw/lib/python2.7/site-packages/IPython/kernel/clientabc.py", "copies": "2", "size": "2081", "license": "apache-2.0", "hash": 3791017616693626000, "line_mean": 24.6913580247, "line_max": 79, "alpha_frac": 0.4574723691, "autogenerated": false, "ratio": 5.594086021505376, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.7051558390605376, "avg_score": null, "num_lines": null }
"""Abstract base class for kernel clients""" #----------------------------------------------------------------------------- # Copyright (c) The Jupyter Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import abc from ipython_genutils.py3compat import with_metaclass #----------------------------------------------------------------------------- # Main kernel client class #----------------------------------------------------------------------------- class KernelClientABC(with_metaclass(abc.ABCMeta, object)): """KernelManager ABC. The docstrings for this class can be found in the base implementation: `jupyter_client.client.KernelClient` """ @abc.abstractproperty def kernel(self): pass @abc.abstractproperty def shell_channel_class(self): pass @abc.abstractproperty def iopub_channel_class(self): pass @abc.abstractproperty def hb_channel_class(self): pass @abc.abstractproperty def stdin_channel_class(self): pass @abc.abstractproperty def control_channel_class(self): pass #-------------------------------------------------------------------------- # Channel management methods #-------------------------------------------------------------------------- @abc.abstractmethod def start_channels(self, shell=True, iopub=True, stdin=True, hb=True, control=True): pass @abc.abstractmethod def stop_channels(self): pass @abc.abstractproperty def channels_running(self): pass @abc.abstractproperty def shell_channel(self): pass @abc.abstractproperty def iopub_channel(self): pass @abc.abstractproperty def stdin_channel(self): pass @abc.abstractproperty def hb_channel(self): pass @abc.abstractproperty def control_channel(self): pass
{ "repo_name": "sserrot/champion_relationships", "path": "venv/Lib/site-packages/jupyter_client/clientabc.py", "copies": "1", "size": "2261", "license": "mit", "hash": 6259654574437254000, "line_mean": 24.6931818182, "line_max": 88, "alpha_frac": 0.4781070323, "autogenerated": false, "ratio": 5.35781990521327, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6335926937513271, "avg_score": null, "num_lines": null }
"""Abstract base class for kernel managers.""" #----------------------------------------------------------------------------- # Copyright (C) 2013 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- import abc from IPython.utils.py3compat import with_metaclass class KernelManagerABC(with_metaclass(abc.ABCMeta, object)): """KernelManager ABC. The docstrings for this class can be found in the base implementation: `IPython.kernel.kernelmanager.KernelManager` """ @abc.abstractproperty def kernel(self): pass @abc.abstractproperty def shell_channel_class(self): pass @abc.abstractproperty def iopub_channel_class(self): pass @abc.abstractproperty def hb_channel_class(self): pass @abc.abstractproperty def stdin_channel_class(self): pass #-------------------------------------------------------------------------- # Channel management methods #-------------------------------------------------------------------------- @abc.abstractmethod def start_channels(self, shell=True, iopub=True, stdin=True, hb=True): pass @abc.abstractmethod def stop_channels(self): pass @abc.abstractproperty def channels_running(self): pass @abc.abstractproperty def shell_channel(self): pass @abc.abstractproperty def iopub_channel(self): pass @abc.abstractproperty def stdin_channel(self): pass @abc.abstractproperty def hb_channel(self): pass #-------------------------------------------------------------------------- # Kernel management #-------------------------------------------------------------------------- @abc.abstractmethod def start_kernel(self, **kw): pass @abc.abstractmethod def shutdown_kernel(self, now=False, restart=False): pass @abc.abstractmethod def restart_kernel(self, now=False, **kw): pass @abc.abstractproperty def has_kernel(self): pass @abc.abstractmethod def interrupt_kernel(self): pass 
@abc.abstractmethod def signal_kernel(self, signum): pass @abc.abstractmethod def is_alive(self): pass
{ "repo_name": "omni5cience/django-inlineformfield", "path": ".tox/py27/lib/python2.7/site-packages/IPython/kernel/managerabc.py", "copies": "7", "size": "2469", "license": "mit", "hash": 8951397862977182000, "line_mean": 22.5142857143, "line_max": 79, "alpha_frac": 0.5236938032, "autogenerated": false, "ratio": 4.938, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8961693803199999, "avg_score": null, "num_lines": null }
""" Abstract base class for overlays. This class is primarily used so that tools can easily distinguish between items underneath them. """ from traits.api import Instance from component import Component class AbstractOverlay(Component): """ The base class for overlays and underlays of the area. The only default additional feature of an overlay is that it implements an overlay() drawing method that overlays this component on top of another, without the components necessarily having an object containment-ownership relationship. """ # The component that this object overlays. This can be None. By default, if # this object is called to draw(), it tries to render onto this component. component = Instance(Component) # The default layer that this component draws into. draw_layer = "overlay" # The background color (overrides PlotComponent). # Typically, an overlay does not render a background. bgcolor = "transparent" #---------------------------------------------------------------------- # Abstract methods (to be implemented by subclasses) #---------------------------------------------------------------------- def overlay(self, other_component, gc, view_bounds=None, mode="normal"): """ Draws this component overlaid on another component. """ # Subclasses should implement this method. pass def _do_layout(self, component=None): """ Called by do_layout() to do an actual layout call; it bypasses some additional logic to handle null bounds and setting **_layout_needed**. """ pass #---------------------------------------------------------------------- # Concrete methods / reimplementations of Component methods #---------------------------------------------------------------------- def __init__(self, component=None, *args, **kw): if component is not None: self.component = component super(AbstractOverlay, self).__init__(*args, **kw) def do_layout(self, size=None, force=False, component=None): """ Tells this component to do a layout at a given size. 
This differs from the superclass Component.do_layout() in that it accepts an optional **component** argument. """ if self.layout_needed or force: if size is not None: self.bounds = size self._do_layout(component) self._layout_needed = False for underlay in self.underlays: if underlay.visible or underlay.invisible_layout: underlay.do_layout(component) for overlay in self.overlays: if overlay.visible or overlay.invisible_layout: overlay.do_layout(component) return def _draw(self, gc, view_bounds=None, mode="normal"): """ Draws the component, paying attention to **draw_order**. Overrides Component. """ if self.component is not None: self.overlay(self.component, gc, view_bounds, mode) return def _request_redraw(self): """ Overrides Enable Component. """ if self.component is not None: self.component.request_redraw() super(AbstractOverlay, self)._request_redraw() return # EOF
{ "repo_name": "tommy-u/enable", "path": "enable/abstract_overlay.py", "copies": "1", "size": "3341", "license": "bsd-3-clause", "hash": 7006842886356784000, "line_mean": 34.9247311828, "line_max": 79, "alpha_frac": 0.5899431308, "autogenerated": false, "ratio": 4.927728613569322, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6017671744369322, "avg_score": null, "num_lines": null }
""" Abstract base class for plot decorators and overlays. This class is primarily used so that tools can easily distinguish between data-related plot items and the decorators on them. """ from enable.api import Component from traits.api import Instance from plot_component import PlotComponent class AbstractOverlay(PlotComponent): """ The base class for overlays and underlays of the plot area. The only default additional feature of an overlay is that it implements an overlay() drawing method that overlays this component on top of another, without the components necessarily having an object containment-ownership relationship. """ # The component that this object overlays. This can be None. By default, if # this object is called to draw(), it tries to render onto this component. component = Instance(Component) # The default layer that this component draws into. draw_layer = "overlay" # The background color (overrides PlotComponent). # Typically, an overlay does not render a background. bgcolor = "transparent" def __init__(self, component=None, *args, **kw): if component is not None: self.component = component super(AbstractOverlay, self).__init__(*args, **kw) def overlay(self, other_component, gc, view_bounds=None, mode="normal"): """ Draws this component overlaid on another component. """ pass def _draw(self, gc, view_bounds=None, mode="normal"): """ Draws the component, paying attention to **draw_order**. If the overlay has a non-null .component, then renders as an overlay; otherwise, default to the standard PlotComponent behavior. Overrides PlotComponent. """ if self.component is not None: self.overlay(self.component, gc, view_bounds, mode) else: super(AbstractOverlay, self)._draw(gc, view_bounds, mode) return def _request_redraw(self): """ Overrides Enable Component. """ if self.component is not None: self.component.request_redraw() super(AbstractOverlay, self)._request_redraw() return # EOF
{ "repo_name": "tommy-u/chaco", "path": "chaco/abstract_overlay.py", "copies": "3", "size": "2206", "license": "bsd-3-clause", "hash": 4129242830442214000, "line_mean": 33.46875, "line_max": 79, "alpha_frac": 0.6745240254, "autogenerated": false, "ratio": 4.492871690427698, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6667395715827698, "avg_score": null, "num_lines": null }
"""Abstract base class for programs. """ from __future__ import print_function import os from . import limit import resource import signal import logging from .errors import ProgramError class Program(object): """Abstract base class for programs. """ runtime = 0 def run(self, infile='/dev/null', outfile='/dev/null', errfile='/dev/null', args=None, timelim=1000, memlim=1024): """Run the program. Args: infile (str): name of file to pass on stdin outfile (str): name of file to send stdout to errfile (str): name of file to send stderr ro args (list of str): additional command-line arguments to pass to the program timelim (int): CPU time limit in seconds memlim (int): memory limit in MB Returns: pair (status, runtime): status (int): exit status of the process runtime (float): user+sys runtime of the process, in seconds """ runcmd = self.get_runcmd(memlim=memlim) if runcmd == []: raise ProgramError('Could not figure out how to run %s' % self) if args is None: args = [] if self.should_skip_memory_rlimit(): memlim = None status, runtime = self.__run_wait(runcmd + args, infile, outfile, errfile, timelim, memlim) self.runtime = max(self.runtime, runtime) return status, runtime def code_size(self): """Subclasses should override this method with the total size of the source code.""" return 0 def should_skip_memory_rlimit(self): """Ugly workaround to accommodate Java -- the JVM will crash and burn if there is a memory rlimit applied and this will probably not change anytime soon [time of writing this: 2017-02-05], see e.g.: https://bugs.openjdk.java.net/browse/JDK-8071445 Subclasses of Program where the associated program is (or may be) a Java program need to override this method and return True (which will cause the memory rlimit to not be applied). 2019-02-22: Turns out sbcl for Common Lisp also wants to roam free and becomes sad when reined in by a memory rlimit. 
""" return False @staticmethod def __run_wait(argv, infile, outfile, errfile, timelim, memlim): logging.debug('run "%s < %s > %s 2> %s"', ' '.join(argv), infile, outfile, errfile) pid = os.fork() if pid == 0: # child try: # The Python interpreter internally sets some signal dispositions # to SIG_IGN (notably SIGPIPE), and unless we reset them manually # this leaks through to the program we exec. That can has some # funny side effects, like programs not crashing as expected when # trying to write to an interactive validator that has terminated # and closed the read end of a pipe. # # This *shouldn't* cause any verdict changes given the setup for # interactive problems, but reset them anyway, for sanity. if hasattr(signal, "SIGPIPE"): signal.signal(signal.SIGPIPE, signal.SIG_DFL) if hasattr(signal, "SIGXFZ"): signal.signal(signal.SIGXFZ, signal.SIG_DFL) if hasattr(signal, "SIGXFSZ"): signal.signal(signal.SIGXFSZ, signal.SIG_DFL) if timelim is not None: limit.try_limit(resource.RLIMIT_CPU, timelim, timelim + 1) if memlim is not None: limit.try_limit(resource.RLIMIT_AS, memlim * (1024**2), resource.RLIM_INFINITY) limit.try_limit(resource.RLIMIT_STACK, resource.RLIM_INFINITY, resource.RLIM_INFINITY) Program.__setfd(0, infile, os.O_RDONLY) Program.__setfd(1, outfile, os.O_WRONLY | os.O_CREAT | os.O_TRUNC) Program.__setfd(2, errfile, os.O_WRONLY | os.O_CREAT | os.O_TRUNC) os.execvp(argv[0], argv) except Exception as exc: print("Oops. Fatal error in child process:") print(exc) os.kill(os.getpid(), signal.SIGTERM) # Unreachable logging.error("Unreachable part of run_wait reached") os.kill(os.getpid(), signal.SIGTERM) (pid, status, rusage) = os.wait4(pid, 0) return status, rusage.ru_utime + rusage.ru_stime @staticmethod def __setfd(fd, filename, flag): tmpfd = os.open(filename, flag) os.dup2(tmpfd, fd) os.close(tmpfd)
{ "repo_name": "jsannemo/problemtools", "path": "problemtools/run/program.py", "copies": "2", "size": "4942", "license": "mit", "hash": 1130476951228096400, "line_mean": 38.8548387097, "line_max": 99, "alpha_frac": 0.5621205989, "autogenerated": false, "ratio": 4.238421955403087, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5800542554303088, "avg_score": null, "num_lines": null }
"""Abstract base class for programs. """ from __future__ import print_function import os import limit import resource import signal import logging from .errors import ProgramError class Program(object): """Abstract base class for programs. """ runtime = 0 def run(self, infile='/dev/null', outfile='/dev/null', errfile='/dev/null', args=None, timelim=1000, memlim=1024): """Run the program. Args: infile (str): name of file to pass on stdin outfile (str): name of file to send stdout to errfile (str): name of file to send stderr ro args (list of str): additional command-line arguments to pass to the program timelim (int): CPU time limit in seconds memlim (int): memory limit in MB Returns: pair (status, runtime): status (int): exit status of the process runtime (float): user+sys runtime of the process, in seconds """ runcmd = self.get_runcmd(memlim=memlim) if runcmd == []: raise ProgramError('Could not figure out how to run %s' % self) if args is None: args = [] if self.should_skip_memory_rlimit(): memlim = None status, runtime = self.__run_wait(runcmd + args, infile, outfile, errfile, timelim, memlim) self.runtime = max(self.runtime, runtime) return status, runtime def should_skip_memory_rlimit(self): """Ugly workaround to accommodate Java -- the JVM will crash and burn if there is a memory rlimit applied and this will probably not change anytime soon [time of writing this: 2017-02-05], see e.g.: https://bugs.openjdk.java.net/browse/JDK-8071445 Subclasses of Program where the associated program is (or may be) a Java program need to override this method and return True (which will cause the memory rlimit to not be applied). 
""" return False @staticmethod def __run_wait(argv, infile, outfile, errfile, timelim, memlim): logging.debug('run "%s < %s > %s 2> %s"', ' '.join(argv), infile, outfile, errfile) pid = os.fork() if pid == 0: # child try: if timelim is not None: limit.try_limit(resource.RLIMIT_CPU, timelim, timelim + 1) if memlim is not None: limit.try_limit(resource.RLIMIT_AS, memlim * (1024**2), resource.RLIM_INFINITY) limit.try_limit(resource.RLIMIT_STACK, resource.RLIM_INFINITY, resource.RLIM_INFINITY) Program.__setfd(0, infile, os.O_RDONLY) Program.__setfd(1, outfile, os.O_WRONLY | os.O_CREAT | os.O_TRUNC) Program.__setfd(2, errfile, os.O_WRONLY | os.O_CREAT | os.O_TRUNC) os.execvp(argv[0], argv) except Exception as exc: print("Oops. Fatal error in child process:") print(exc) os.kill(os.getpid(), signal.SIGTERM) # Unreachable logging.error("Unreachable part of run_wait reached") os.kill(os.getpid(), signal.SIGTERM) (pid, status, rusage) = os.wait4(pid, 0) return status, rusage.ru_utime + rusage.ru_stime @staticmethod def __setfd(fd, filename, flag): tmpfd = os.open(filename, flag) os.dup2(tmpfd, fd) os.close(tmpfd)
{ "repo_name": "godmar/problemtools", "path": "problemtools/run/program.py", "copies": "1", "size": "3684", "license": "mit", "hash": -2451829991607740000, "line_mean": 35.84, "line_max": 99, "alpha_frac": 0.5529315961, "autogenerated": false, "ratio": 4.111607142857143, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5164538738957143, "avg_score": null, "num_lines": null }
"""Abstract base class for programs. """ import os from . import limit import resource import signal import logging from .errors import ProgramError class Program(object): """Abstract base class for programs. """ runtime = 0 def run(self, infile='/dev/null', outfile='/dev/null', errfile='/dev/null', args=None, timelim=1000, memlim=1024): """Run the program. Args: infile (str): name of file to pass on stdin outfile (str): name of file to send stdout to errfile (str): name of file to send stderr ro args (list of str): additional command-line arguments to pass to the program timelim (int): CPU time limit in seconds memlim (int): memory limit in MB Returns: pair (status, runtime): status (int): exit status of the process runtime (float): user+sys runtime of the process, in seconds """ runcmd = self.get_runcmd(memlim=memlim) if runcmd == []: raise ProgramError('Could not figure out how to run %s' % self) if args is None: args = [] if self.should_skip_memory_rlimit(): memlim = None status, runtime = self.__run_wait(runcmd + args, infile, outfile, errfile, timelim, memlim) self.runtime = max(self.runtime, runtime) return status, runtime def code_size(self): """Subclasses should override this method with the total size of the source code.""" return 0 def should_skip_memory_rlimit(self): """Ugly workaround to accommodate Java -- the JVM will crash and burn if there is a memory rlimit applied and this will probably not change anytime soon [time of writing this: 2017-02-05], see e.g.: https://bugs.openjdk.java.net/browse/JDK-8071445 Subclasses of Program where the associated program is (or may be) a Java program need to override this method and return True (which will cause the memory rlimit to not be applied). 2019-02-22: Turns out sbcl for Common Lisp also wants to roam free and becomes sad when reined in by a memory rlimit. 
""" return False @staticmethod def __run_wait(argv, infile, outfile, errfile, timelim, memlim): logging.debug('run "%s < %s > %s 2> %s"', ' '.join(argv), infile, outfile, errfile) pid = os.fork() if pid == 0: # child try: # The Python interpreter internally sets some signal dispositions # to SIG_IGN (notably SIGPIPE), and unless we reset them manually # this leaks through to the program we exec. That can has some # funny side effects, like programs not crashing as expected when # trying to write to an interactive validator that has terminated # and closed the read end of a pipe. # # This *shouldn't* cause any verdict changes given the setup for # interactive problems, but reset them anyway, for sanity. if hasattr(signal, "SIGPIPE"): signal.signal(signal.SIGPIPE, signal.SIG_DFL) if hasattr(signal, "SIGXFZ"): signal.signal(signal.SIGXFZ, signal.SIG_DFL) if hasattr(signal, "SIGXFSZ"): signal.signal(signal.SIGXFSZ, signal.SIG_DFL) if timelim is not None: limit.try_limit(resource.RLIMIT_CPU, timelim, timelim + 1) if memlim is not None: limit.try_limit(resource.RLIMIT_AS, memlim * (1024**2), resource.RLIM_INFINITY) limit.try_limit(resource.RLIMIT_STACK, resource.RLIM_INFINITY, resource.RLIM_INFINITY) Program.__setfd(0, infile, os.O_RDONLY) Program.__setfd(1, outfile, os.O_WRONLY | os.O_CREAT | os.O_TRUNC) Program.__setfd(2, errfile, os.O_WRONLY | os.O_CREAT | os.O_TRUNC) os.execvp(argv[0], argv) except Exception as exc: print("Oops. Fatal error in child process:") print(exc) os.kill(os.getpid(), signal.SIGTERM) # Unreachable logging.error("Unreachable part of run_wait reached") os.kill(os.getpid(), signal.SIGTERM) (pid, status, rusage) = os.wait4(pid, 0) return status, rusage.ru_utime + rusage.ru_stime @staticmethod def __setfd(fd, filename, flag): tmpfd = os.open(filename, flag) os.dup2(tmpfd, fd) os.close(tmpfd)
{ "repo_name": "Kattis/problemtools", "path": "problemtools/run/program.py", "copies": "1", "size": "4904", "license": "mit", "hash": -2100037343985076700, "line_mean": 38.8699186992, "line_max": 99, "alpha_frac": 0.5605628059, "autogenerated": false, "ratio": 4.238547968885047, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0018350856812837401, "num_lines": 123 }
"""Abstract base class for resources. """ # FIXME[old/todo]: there are at least 3 different Installable classes # -> combine them into someting useful # standard imports from abc import ABC, abstractmethod import os import sys import time import importlib import logging # toolbox imports from .busy import BusyObservable from .fail import Failable # logging LOG = logging.getLogger(__name__) # FIXME[todo]: merge with other Resource classes # (in util.resources ans thirdparty)... class Installable(BusyObservable, Failable): """An installable is some resource may have some requirements to be used. It may provide methods to install such requirements. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._requirements = {} # # Requirements # def _add_requirement(self, name, what, *data) -> None: """Add a requirement for this :py:class:`Tool`. """ self._requirements[name] = (what,) + data # # Preparable # def _preparable(self) -> bool: """Check if required resources are available. """ for name, requirement in self._requirements.items(): if requirement[0] == 'file': if not os.path.exists(requirement[1]): LOG.warning("File requirement '%s' (filename='%s') " "for resource '%s' (%s) not found.", name, requirement[1], self.key, type(self).__name__) return False if requirement[0] == 'module': if requirement[1] in sys.modules: continue spec = importlib.util.find_spec(requirement[1]) if spec is None: LOG.warning("Module requirement '%s' (module=%s) " "for resource '%s' (%s) not found.", name, requirement[1], self.key, type(self).__name__) return False return True def _prepare(self, install: bool = False, **kwargs): # pylint: disable=arguments-differ """Load the required resources. """ super()._prepare(**kwargs) # FIXME[concept]: # In some situations, one requirement has to be prepared in # order to check for other requirements. 
# Example: checking the availability of an OpenCV data file # may require the 'cv2' module to be loaded in order to construct # the full path to that file. for requirement in self._requirements.values(): if requirement[0] == 'module' and requirement[1] not in globals(): globals()[requirement[1]] = \ importlib.import_module(requirement[1]) if not self.preparable: if install: self.install() else: raise RuntimeError("Resources required to prepare '" + type(self).__name__ + "' are not installed.") # # Installation # def install(self) -> None: """Install the resources required for this module. """ LOG.info("Installing requirements for resource '%s'.", self.__class__.__name__) start = time.time() self._install() end = time.time() LOG.info("Installation of requirements for resource '%s' " "finished after %.2fs", self.__class__.__name__, end-start) def _install(self) -> None: # FIXME[concept]: what is this method supposed to do # and which (sub)classes should implement this method. """Actual implementation of the installation procedure. """ # to be implemented by subclasses # raise NotImplementedError("Installation of resources for '" + # type(self).__name__ + # "' is not implemented (yet).") class Installable2(ABC): # FIXME[todo]: yet another implementation """A class depending on third party resources that can be installed. """ @property def installed(self) -> bool: """A property indicating whether the resource is fully installed and can be used. """ return self._installed() @abstractmethod def _installed(self) -> bool: """The actual check if the resource has been installed that is to be implemented by subclasses. """ return True def install(self, **kwargs) -> None: """Install the resource. """ if not self.installed: self._install(**kwargs) @abstractmethod def _install(self, **kwargs) -> None: """Do the actual installation. This method has to be implemented by subclasses. """
{ "repo_name": "Petr-By/qtpyvis", "path": "dltb/base/install.py", "copies": "1", "size": "4913", "license": "mit", "hash": -4117381987821258000, "line_mean": 31.9731543624, "line_max": 78, "alpha_frac": 0.5556686342, "autogenerated": false, "ratio": 4.788499025341131, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5844167659541131, "avg_score": null, "num_lines": null }
''' Abstract base class for subcommands that output to a file (or stdout). ''' from __future__ import absolute_import from abc import abstractmethod import argparse import io from bokeh.util.string import decode_utf8 from ..subcommand import Subcommand from ..util import build_single_handler_applications, die class FileOutputSubcommand(Subcommand): ''' Abstract subcommand to output applications as some type of file. ''' extension = None # subtype must set this to file extension @classmethod def files_arg(cls, output_type_name): ''' Returns a positional arg for ``files`` to specify file inputs to the command. Subclasses should include this to their class ``args``. Example: .. code-block:: python class Foo(FileOutputSubcommand): args = ( FileOutputSubcommand.files_arg("FOO"), # more args for Foo ) + FileOutputSubcommand.other_args() ''' return ('files', dict( metavar='DIRECTORY-OR-SCRIPT', nargs='+', help=("The app directories or scripts to generate %s for" % (output_type_name)), default=None )) @classmethod def other_args(cls): ''' Return args for ``-o`` / ``--output`` to specify where output should be written, and for a ``--args`` to pass on any additional command line args to the subcommand. Subclasses should append these to their class ``args``. Example: .. code-block:: python class Foo(FileOutputSubcommand): args = ( FileOutputSubcommand.files_arg("FOO"), # more args for Foo ) + FileOutputSubcommand.other_args() ''' return ( (('-o', '--output'), dict( metavar='FILENAME', action='append', type=str, help="Name of the output file or - for standard output." 
)), ('--args', dict( metavar='COMMAND-LINE-ARGS', nargs=argparse.REMAINDER, help="Any command line arguments remaining are passed on to the application handler", )), ) def filename_from_route(self, route, ext): ''' ''' if route == "/": base = "index" else: base = route[1:] return "%s.%s" % (base, ext) def invoke(self, args): ''' ''' argvs = { f : args.args for f in args.files} applications = build_single_handler_applications(args.files, argvs) if args.output is None: outputs = [] else: outputs = list(args.output) # copy so we can pop from it if len(outputs) > len(applications): die("--output/-o was given too many times (%d times for %d applications)" % (len(outputs), len(applications))) for (route, app) in applications.items(): doc = app.create_document() if len(outputs) > 0: filename = outputs.pop(0) else: filename = self.filename_from_route(route, self.extension) self.write_file(args, filename, doc) def write_file(self, args, filename, doc): ''' ''' contents = self.file_contents(args, doc) if filename == '-': print(decode_utf8(contents)) else: with io.open(filename, "w", encoding="utf-8") as file: file.write(decode_utf8(contents)) self.after_write_file(args, filename, doc) # can be overridden optionally def after_write_file(self, args, filename, doc): ''' ''' pass @abstractmethod def file_contents(self, args, doc): ''' Subtypes must override this to return the contents of the output file for the given doc. ''' raise NotImplementedError("file_contents")
{ "repo_name": "Karel-van-de-Plassche/bokeh", "path": "bokeh/command/subcommands/file_output.py", "copies": "6", "size": "4118", "license": "bsd-3-clause", "hash": 3638523416311359500, "line_mean": 26.4533333333, "line_max": 101, "alpha_frac": 0.5383681399, "autogenerated": false, "ratio": 4.642615558060879, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8180983697960879, "avg_score": null, "num_lines": null }
"""Abstract base class for tools. """ # standard imports from typing import Any, Union, Tuple, Iterator import time import logging from threading import Event # third party imports # toolbox imports from ..base.data import Datalike from ..base.register import RegisterClass from ..base.prepare import Preparable from ..base.data import Data, BatchWrapper, BatchDataItem # logging LOG = logging.getLogger(__name__) # FIXME[todo]: specify the observable interface: there should be # at least one change 'tool_changed' indicating that the tool # has changed in a way that it will now yield different results # to prior application. This may be caused by a change of configuration # parameters, exchange of the underlying engine, or the tool # becoming perpared ... class Tool(Preparable, metaclass=RegisterClass, method='tool_changed'): # pylint: disable=too-many-ancestors """:py:class:`Tool` is an abstract base class for tools. A Tool can be applied to process data. This abstract base class does not define any specific method for processing data, this should be done by subclasses (e.g., a Detector provides a detect method). In addition, :py:class:`Tool` provides some support for processing data with a Processor. This is an abstract base class and subclasses can (or have to) implement at least some of the following methods: :py:meth:`_preprocess_data`: Preprocess the given data. This method will be invoked before processing to bring the data into an appropriate format. This method should be overwritten if the Tool expects data to be in a specific format. :py:meth:`_process_data` and :py:meth:`_process_batch`: Do the actal processing and store results as tool-specific attributes in the given :py:class:`Data` object. Either `_process_data` or `_process_batch` have to be overwritten (but it is also allowed to owverwrite both). Class Attributes ---------------- external_result: Tuple[str] A tuple naming the values to be returned by an application of the :py:class:`Tool`. 
These values will be constructed by calling :py:meth:`_postprocess` on the intermediate values. internal_arguments: Tuple[str] = () A tuple naming the positional arguments for calling the internal processing function :py:meth:`_process`. Values for these names will be taken from the intermediate data structure, which should be filled by :py:class:`_preprocess` internal_result: Tuple[str] = () A name tuple naming the results of the internal processing. Values will be stored under this name in the intermediate data structure. Attributes ---------- timer: bool A flag indicating if timing information should be added to the :py:class:`Data` object during processing. """ def __init__(self, timer: bool = False, **kwargs): super().__init__(**kwargs) self.timer = timer LOG.info("New tool: %s (%s)", self.key, type(self).__name__) def __str__(self) -> str: return f"{type(self).__name__}[{self.key}]" def _prepare(self, **kwargs): # pylint: disable=arguments-differ """Prepare this tool. """ super()._prepare(**kwargs) # # Application API # external_result: Tuple[str] = () internal_arguments: Tuple[str] = () internal_result: Tuple[str] = () def __call__(self, *args, batch: bool = False, internal: bool = False, result: Union[str, Tuple[str]] = None, **kwargs) -> Any: """Apply the tool to the given arguments. Arguments --------- batch: bool A flag indicating that arguments are given a batch, do batch_processing. internal: bool A flag indicating that arguments are given in internal format, no preprocessing of the given data is required and no postprocessing is applied. result: Union[str, Tuple[str]] A description of the result values to return. If None is given, the Tools default will be used (if the `internal` flag is set, the internal result will be returned without postprocessing) Result ------ result: The result of applying this :py:class:`Tool`. This is usually a postprocessed result, as specified by the property :py:prop:`external_result`. 
If other return values are desired, those can be specified by the argument `result`. If the argument `internal` is `True`, instead of the postprocessed results, the internal result is returned, as specified by the property `internal` and computed by the :py:meth:`_process`. """ # FIXME[todo]: batch processing: # - a tool may implement (either or both) # _process_single() # _process_batch() # preprocessing, processing and postprocessing have to # deal with this # preprocessing data = self._do_preprocess(*args, internal=internal, batch=batch, result=result, **kwargs) internal_arguments = \ self._get_attributes(data, self.internal_arguments) result = data.result_ # processing if data.is_batch: internal_result = self._process(*internal_arguments) # self._process_batch(data, **kwargs) else: internal_result = self._process(*internal_arguments) # self._process_data(data, **kwargs) # postprocessing if internal and result is None: return internal_result self._add_attributes(data, self.internal_result, internal_result, simplify=True) return self._do_postprocess(data) def _process_data(self, data: Data, **kwargs) -> None: """Adapt the data. To be implemented by subclasses. Subclasses may augment the given data object with the result of their processing. Arguments --------- data: Data A :py:class:`Data` object that is guaranteed not to be a batch and to provide preprocessed data in its tool-specific 'preprocessed' attribute. """ if isinstance(data, BatchDataItem): raise ValueError("Use _process_batch to process batch data.") self._process_batch(BatchWrapper(data)) def _process_batch(self, batch: Data, **kwargs) -> None: """Process batch data. The base implementation just processes each batch element individually. Subclasses may overwrite this to do real batch processing. Arguments --------- batch: Data A :py:class:`Data` object that is guaranteed to be a batch and to provide preprocessed data in its tool-specific 'preprocessed' attribute. 
""" if isinstance(batch, BatchWrapper): raise ValueError("Use _process_data to process non-batch data.") for data in batch: self._process_data(data) def _do_preprocess(self, *args, internal: bool = False, batch: bool = False, result: Union[str, Tuple[str]] = None, **kwargs) -> Data: """Perform preprocessing of arguments. Arguments --------- internal: batch: result: """ if result is None and not internal: result = self.external_result elif isinstance(result, str): result = (result, ) if internal: data = Data(batch=batch) self._add_attributes(data, self.internal_arguments, args) else: data = self._preprocess(*args, batch=batch, **kwargs) data.add_attribute('result_', result) if 'duration' in result: data.add_attribute('start_', time.time()) return data def _do_postprocess(self, data: Data) -> Any: """Perform postprocessing and provide return value(s). Arguments --------- data: The (auxiliary) :py:class:`Data` object used for storing internal values. """ result = data.result_ for name in result: self._postprocess(data, name) return self._get_attributes(data, result, simplify=True) @staticmethod def _get_attributes(data: Data, what: Tuple[str], simplify: bool = False) -> Any: if simplify: if len(what) == 0: return None if len(what) == 1: return getattr(data, what[0]) return tuple(getattr(data, arg) for arg in what) @staticmethod def _add_attributes(data: Data, what: Tuple[str], args, simplify: bool = False) -> None: if simplify and len(what) == 0: pass elif simplify and len(what) == 1: data.add_attribute(what[0], args) else: for name, value in zip(what, args): data.add_attribute(name, value) # # Private interface (to be implemented by subclasses): # def _preprocess(self, *arg, batch: bool = False, **kwargs) -> Data: """Perform preprocessing of arguments. Preprocessed arguments are stored as attributes of an auxiliary :py:class:`Data` object. This method should be extended by subclasses to incorporate additional preprocessing operations. 
Arguments --------- batch: *args: positional arguments passed when invoking the tool. **kwargs: keyword arguments passed when invoking the tool. Result ------ data: The auxiliary :py:class:`Data` object to which preprocessed information have been added. Alle intermediate results obtained during application of the :py:class:`Tool` should also be stored in that object. """ data = Data(batch=batch) if self.timer: self.add_data_attribute(data, 'start', time.time()) return data def _process(self, *args, **kwargs) -> Any: """Do the actual processing. To be implemented by subclasses. Arguments --------- *args: The positional arguments of the functions. These are the (preprocessed) arguments named by the property :py:prop:`internal_arguments`. **kwargs: Optional keyword arguments passed when invoking the tool. """ raise NotImplementedError() def _postprocess(self, data: Data, what: str) -> None: """Perform postprocessing. This method is intended to be extended by subclasses. If doing so, such methods should call `super()._postprocess(data, what)` in case they can not handle the property `what` themself. Arguments --------- data: The auxiliary :py:class:`Data` object to which results of the postprocessing should be stored. what: An identifier specifying what kind of postprocessing operation should be performed on `data`. If the method can handle this type of processing, it should store the results as propery `what` in the `data` object and return. If the method can not handle that type of processing, it should call the method :py:meth:`_postprocess` of the super class. """ if what == 'duration': data.add_attribute('duration', time.time() - data.start_, batch=False) elif not hasattr(data, what): raise ValueError(f"Unknown property '{what}' for tool {self}") # # data processing # def apply(self, data: Data, *args, result: Union[str, Tuple[str]] = None, **kwargs) -> None: """Apply the tool to the given data object. Results are stored as data attributes. 
Arguments --------- data: The :py:class:`Data` object to which this :py:class:`Tool` should be applied. Results will be stored as attributes of that object. result: The name(s) of the attribute(s) under which the results should be stored in the `data` object. These names are prefixed by the name of this :py:class:`Tool`. """ # FIXME[todo]: batch data ... if result is None: result = self.external_result LOG.debug("Applying tool %r to data %r, result=%s", self, data, result) values = self(data, *args, result=result, **kwargs) # Store the result(s) in the data object if isinstance(result, str): result, values = (result,), (values, ) elif len(result) == 1: values = (values, ) if result is not None: for name, value in zip(result, values): # FIXME[hack]: batch handling batch = (data.is_batch and isinstance(value, list) and len(value) == len(data)) self.add_data_attribute(data, name, value, batch=batch) def add_data_attribute(self, data: Data, name: str, value: Any = None, batch: bool = True) -> None: """Add a tool specific attribute to a data object. """ data.add_attribute(self.key + '_' + name, value, batch=batch) def set_data_attribute(self, data: Data, name: str, value: Any, index: int = None) -> None: """Set a tool specific attribute in a data object. """ setattr(data if index is None else data[index], self.key + '_' + name, value) def get_data_attribute(self, data: Data, name: str, default: Any = None, index: int = None) -> Any: """Get a tool specific attribute from a data object. """ value = getattr(data if index is None else data[index], self.key + '_' + name, None) return default if value is None else value def add_data_attributes(self, data: Data, names: Tuple[str], values: Any) -> None: """Add a tool specific attribute to a data object. """ for name, value in zip(names, values): self.add_data_attribute(data, name, value) def duration(self, data: Data) -> float: """Provide the duration (in seconds) the tool needed for processing the given data. 
This property is only available after processing, and only if the timer was activated. """ return self.get_data_attribute(data, 'duration') class BatchTool(Tool): # FIXME[question/todo]: what is a batch tool supposed to be? """BatchTool """ def _do_preprocess(self, *args, internal: bool = False, batch: bool = False, result: Union[str, Tuple[str]] = None, **kwargs) -> Data: data = self._do_preprocess(*args, internal, batch, result, **kwargs) return data def _do_postprocess(self, data: Data) -> Any: return super()._do_postprocess(data) class IterativeTool(Tool): # pylint: disable=abstract-method """An iterative tool performs its operation as an iterative process. """ def __init__(self, **kwargs) -> None: super().__init__(**kwargs) self._stop_all = False # # public interface # def _process(self, *arguments, **kwargs) -> Datalike: """Apply the tool to the given data. The input data will not be modified. The tool specific result will be returned. """ result = None for result in self._steps(*arguments, **kwargs): pass return result def step(self, *args, **kwargs) -> Datalike: """Perform a single step of the iterative process. """ data = self._do_preprocess(*args, *kwargs) internal_arguments = \ self._get_attributes(data, self.internal_arguments) internal_result = self._step(*internal_arguments) self._add_attributes(data, self.internal_result, internal_result, simplify=True) return self._do_postprocess(data) def steps(self, *args, steps: int = None, stop: Event = None, **kwargs) -> Iterator[Datalike]: """Iterate the steps of the iterative process, providing an intermediate result at every step. 
""" data = self._do_preprocess(*args, **kwargs) internal_arguments = \ self._get_attributes(data, self.internal_arguments) for internal_result in self._steps(*internal_arguments, steps=steps, stop=stop): self._add_attributes(data, self.internal_result, internal_result, simplify=True) yield self._do_postprocess(data) # # private (internal) methods # def _step(self, *arguments) -> Any: # to be implemented by subclasses raise NotImplementedError() def _steps(self, *arguments, steps: int = None, stop: Event = None) -> Iterator[Any]: data, arguments = (arguments[0:len(self.internal_result)], arguments[len(self.internal_result):]) while True: data = self._step(*data, *arguments) yield data if steps is not None: steps -= 1 if steps < 0: break if stop is not None: if stop.isSet(): break if self._stop_all: break # FIXME[todo]: data specific stop criteria if len(self.internal_result) == 0: data = () elif len(self.internal_result) == 1: data = (data, ) # # data processing # def apply(self, data: Data, *args, result: Union[str, Tuple[str]] = None, stepwise: bool = False, **kwargs) -> None: # pylint: disable=arguments-differ """Perform iterative processing on the data object. After each step, the relevant attributes of the data object are updated (and interested observers are informed. """ if result is None: result = self.external_result if result is None: result = () elif isinstance(result, str): result = (result, ) if stepwise: for values in self.steps(data, *args, result=result, **kwargs): if len(self.internal_result) == 0: values = () elif len(self.internal_result) == 1: values = (values, ) for name, value in zip(result, values): self.add_data_attribute(data, name, value) else: super().apply(self, data, *args, result=result, **kwargs)
{ "repo_name": "Petr-By/qtpyvis", "path": "dltb/tool/tool.py", "copies": "1", "size": "19427", "license": "mit", "hash": -877440637735791200, "line_mean": 35.7240075614, "line_max": 79, "alpha_frac": 0.5772378648, "autogenerated": false, "ratio": 4.580759254892714, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5657997119692714, "avg_score": null, "num_lines": null }
"""Abstract base class for tools working on images. """ # standard imports from typing import Tuple from threading import Thread import logging # third party imports import numpy as np # toolbox imports from .tool import Tool from ..base.data import Data from ..base.image import Image, Imagelike, ImageGenerator from ..util.image import imscale # logging LOG = logging.getLogger(__name__) # FIXME[design]: there is also a class dltb.base.image.ImageTool # -> it would make sense to combine these classes class ImageTool(Tool): # pylint: disable=abstract-method """Abstract base class for tools that operate on images. Several convenience methods are provided by this class. """ def __init__(self, size: Tuple[int, int] = None, max_size: Tuple[int, int] = None, min_size: Tuple[int, int] = None, resize_policy: str = 'error', **kwargs) -> None: """ Arguments --------- size: The exact size an image is expected to have for processing with this :py:class:`ImageTool`. min_size: The minimal image size processable by this :py:class:`ImageTool`. min_size: The maximal image size processable by this :py:class:`ImageTool`. resize_policy: The resize policy to adapt, if the aspect ratio of the image does not fit to this tool. Options are: 'pad' = pad the borders on the shorter side (with 0s), 'crop' = cut away some parts from the longer side, 'distort' = use different scaling along horizontal and vertical axis, resulting in a change of aspect ratio, 'error' = raise a ValueError. 
""" super().__init__(**kwargs) if size is not None: if max_size is not None: LOG.warning("Argument 'max_size' (=%s) is ignored " "when 'size' (=%s) is given.", max_size, size) if min_size is not None: LOG.warning("Argument 'min_size' (=%s) is ignored " "when 'size' (=%s) is given.", min_size, size) self._max_size = size self._min_size = size else: if (min_size is not None and max_size is not None and (min_size[0] > max_size[0] or min_size[1] > max_size[1])): raise ValueError(f"Minimal size (={min_size}) is larger than" f"maximal size (={max_size}).") self._max_size = max_size self._min_size = min_size self._resize_policy = resize_policy @property def input_size(self) -> Tuple[int, int]: """Get the input size on which this ImageTool operates. """ return self._min_size def fit_image(self, image: Imagelike, policy: str = None) -> np.ndarray: """Resize the image to be suitable for processing with this :py:class:`ImageTool`. """ size = image.shape[:1:-1] if self._min_size is not None: min_scale_x = max(self._min_size[0] / size[0], 1.) min_scale_y = max(self._min_size[1] / size[1], 1.) min_scale = max(min_scale_x, min_scale_y) else: min_scale = 0.0 if self._max_size is not None: max_scale_x = min(self._max_size[0] / size[0], 1.) max_scale_y = min(self._max_size[1] / size[1], 1.) max_scale = min(max_scale_x, max_scale_y) else: max_scale = float('inf') if min_scale <= 1. <= max_scale: # The image fits into the desired size return image if min_scale <= max_scale: # Choose the scale closest to 1. scale = max_scale if max_scale < 1. else min_scale return imscale(image, scale=scale) # Here max_scale < min_scale, that is there is no way to fit # the image to a valid size and maintain the aspect ratio. # Different strategies can be applied here. 
if self._resize_policy == 'error': ValueError(f"Cannot fit image of size {size} into the " f"acceptable size for procssing with {self} " f"(min_size={self._min_size}, " f"max_size={self._max_size}).") if self._resize_policy == 'pad': scale_x = min_scale scale_y = min_scale elif self._resize_policy == 'crop': scale_x = max_scale scale_y = max_scale else: # if self._resize_policy == 'distort': # FIXME[todo]: find optimal scale (minimal distortion) # (this should be a compromise between minimal and # maximal scaling) # FIXME[hack] scale_x = min_scale_x if min_scale_x < 1. else max_scale_x scale_y = min_scale_y if min_scale_y < 1. else max_scale_y scaled_image = imscale(image, scale=(scale_x, scale_y)) if self._resize_policy == 'pad': # FIXME[todo]: do padding padded_image = scaled_image return padded_image if self._resize_policy == 'pad': # FIXME[todo]: do cropping cropped_image = scaled_image return cropped_image return scaled_image def _preprocess(self, image: Imagelike, *args, **kwargs) -> Data: array = Image.as_array(image) data = super()._preprocess(self, array, *args, **kwargs) data.add_attribute('image', array) if self._min_size is not None or self._max_size is not None: data.add_attribute('scaled', self.fit_image(array)) return data # FIXME[todo] names: # image_region <- region_of_image [BoundingBox] # image_size <- size_of_image [Size] # image_patch <- [np.ndarray] # FIXME[todo] -> BoundingBox def region_of_image(self, image: Imagelike) -> Tuple[int, int, int, int]: """The region of the given image that would be processed by the tool. 
Result ------ left (x1): int top (y1): int right (x2): int bottom (y2): int """ image = Image.as_array(image) return (40, 40, image.shape[1]-40, image.shape[0]-40) # FIXME[hack] def size_of_image(self, image: Imagelike) -> Tuple[int, int]: """The size to which the region of interest of the image will be transformed when fed to this tool Result ------ width: int height: int """ return (100, 100) # FIXME[hack] # FIXME[todo]: integrate this in the framework - combine with IterativeTool class IterativeImageTool(ImageGenerator): """Base class for tools that can iteratively create images: Design pattern 1 (observer) --------------------------- * loop - hold the current version of the image as a object state - loop over the following steps: 1, perform one step of image creation 2. notify observers Design pattern 2 (functional): ------------------------------ * iterator for generating N (or unlimited) images for image in tool: do something with image for image in tool(30): Stateless inremental image create API: * next_image = tool.next(image) * bool tool.finished(image) """ def __init__(self) -> None: super().__init__() self._step = 0 self._image = None self._thread = None self._stop = True def __call__(self) -> np.ndarray: """The actual image generation. To be impolemented by subclasses. Result ------ image: np.ndarray The next image generated in standard representation (RGB, uint8, 0-255). """ raise NotImplementedError(f"{self.__class__.__name__} claims to " "be an ImageTool, but does not implement " "the __next__ method.") # # loop API (stateful) # @property def image(self) -> np.ndarray: """The current image provided by this :py:class:`ImageTool` in standard format (RGB, uint8, 0-255). """ return self._image @property def step(self) -> int: """The current step performed by this :py:class:`ImageTool` """ return self._step def next(self) -> None: """Do one step adapting the current image. 
This changes the property `image` and notifies observers that a new image is available. """ self._image = self() self._step += 1 self.change('image_changed') def __next__(self) -> np.ndarray: """Create a new image by doing the next step. Result ------ image: np.ndarray The image in standard format (RGB, uint8, 0-255). """ self.next() return self._image def loop(self, threaded: bool = False, **kwargs) -> None: if self.looping: raise RuntimeError("Object is already looping.") if threaded: self._thread = Thread(target=self._loop, kwargs=kwargs) self._thread.start() else: self._loop(**kwargs) def _loop(self, stop: int = None, steps: int = None) -> None: """Run an loop Arguments --------- steps: int The number of steps to perform. stop: int The loop will stop once the internal step counter reaches this number. If no stop value is given """ self._stop = False while not self._stop: try: self.next() if steps is not None: steps -= 1 if steps <= 0: self.stop() if stop is not None and self._step >= stop: self.stop() except KeyboardInterrupt: self.stop() @property def looping(self) -> bool: return not self._stop def stop(self) -> None: self._stop = True if self._thread is not None: self._thread.join() self._thread = None def pause(self) -> None: self._stop = True class ImageOptimizer(ImageTool): """An image optimizer can incrementally optimize an image, e.g., using some gradient-based optimization strategy. The :py:class:`ImageOptimizer` provides an API for accessing image optimizers. Stateless vs. stateful optimizer -------------------------------- An :py:class:`ImageOptimizer` has an internal state, the current version of the image. An :py:class:`ImageOptimizer` may also provide different loss values and evaluation metrics that may be . Internal optimizer ------------------ An Image optimizer may employ some external engine to do the optimization. 
In such a situation, the image may need to be converted into an internal representation prior to optimization, and the result has to be converted back into a standard image. """ # FIXME[todo]: optimization values def __call__(self, image: np.ndarray) -> np.ndarray: internal_image = self._internalize(image) internal_result = self._internal_optimizer(internal_image) return self._externalize(internal_result) def __next__(self) -> np.ndarray: self._internal_image = self._internal_optimizer(self._internal_image) # FIXME[concept/design]: should we store and return the image? # - There may be observers! self._image = self._externalize(self._internal_image) return self._image
{ "repo_name": "Petr-By/qtpyvis", "path": "dltb/tool/image.py", "copies": "1", "size": "11834", "license": "mit", "hash": -2509120092226646500, "line_mean": 32.0558659218, "line_max": 78, "alpha_frac": 0.5562785195, "autogenerated": false, "ratio": 4.171307719421924, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5227586238921924, "avg_score": null, "num_lines": null }
# Abstract Base Class from which other classes will inherit. Handles input
from abc import abstractmethod

import abc


class Sort:
    """Base class for sorting demos: reads numbers from the user and
    prints the result of the subclass-provided sorting algorithm.

    NOTE(review): `@abstractmethod` has no effect unless the class uses
    `abc.ABCMeta` (e.g. `class Sort(abc.ABC)`); left unchanged so that
    `Sort()` remains instantiable for existing callers.
    """

    def __init__(self):
        # numbers collected from user input
        self.array = []

    def main(self):
        """
        handles the main logic
        :return:
        """
        input_array = self.handle_input()
        print(self.sorting_process(input_array))

    def handle_input(self):
        """Read a whitespace-separated series of integers from the user.

        Raises:
            ValueError: if any entered token is not a valid integer.

        Returns:
            list: the integers entered by the user.
        """
        array_input = input('Please enter a series of numbers '
                            'separated by spaces\n')
        for input_number in array_input.split():
            # FIX: parse each token once and catch only ValueError (the
            # original used a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit, and converted every token twice).
            try:
                self.array.append(int(input_number))
            except ValueError:
                raise ValueError('You did not enter a series of numbers, '
                                 'please try again')
        return self.array

    @abstractmethod
    def sorting_process(self, input_array):
        """
        This is where the sorting algorithm goes.
        """
        return None
{ "repo_name": "tchitchikov/data_structures_and_algorithms", "path": "sorting/base_class.py", "copies": "1", "size": "1282", "license": "apache-2.0", "hash": -2380139826011794400, "line_mean": 28.8139534884, "line_max": 77, "alpha_frac": 0.5538221529, "autogenerated": false, "ratio": 4.819548872180451, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.587337102508045, "avg_score": null, "num_lines": null }
"""Abstract base class :py:class:`Metadata` for different kind of metadata. """ # standard imports import os # Generic imports import numpy as np # toolbox imports from .image import Region class Metadata: """Metadata for a datum from a Datasource. """ _regions = None def __init__(self, description: str = None, label: str = None, **attributes) -> None: if description is not None: self._description = description if label is not None: self._label = label self.__dict__.update(attributes) def add_region(self, position, **attributes): """Add a spatial region with attributes to this :py:class:`Metadata` object. """ if self._regions is None: self._regions = [] self._regions.append(Region(position, **attributes)) def has_regions(self): """Check if there are regions registered with this :py:class:`Metadata` object. """ return self._regions is not None def __bool__(self) -> bool: """`True` if there is at least one region registered with this :py:class:`Metadata` object. """ return self._regions is not None and len(self._regions) > 0 def __len__(self) -> int: """The number of regions registered with this :py:class:`Metadata` object. """ return 0 if self._regions is None else len(self._regions) @property def regions(self): """The list of regions registered with this :py:class:`Metadata` object. """ return self._regions def scale(self, factor) -> None: """Scale all positions of this metadata by a given factor. Arguments --------- factor: The scaling factor. This can either be a float, or a pair of floats in which case the first number is the horizontal (x) scaling factor and the second numger is the vertical (y) scaling factor. """ if self.has_regions(): for region in self.regions: region.scale(factor) @property def description(self): """A description for this :py:class:`Metadata` object. """ return self._description @property def label(self): """A label for this :py:class:`Metadata` object. 
""" return self._label def has_attribute(self, name) -> bool: """Check if this :py:class:`Metadata` object as the given attribute. """ return name in self.__dict__ def set_attribute(self, name, value) -> None: """Set an attribute for this :py:class:`Metadata` object. """ self.__dict__[name] = value def get_attribute(self, name): """Get an attribute value for this :py:class:`Metadata` object. """ return self.__dict__[name] def __str__(self): description = "Metadata:" for name, value in self.__dict__.items(): description += os.linesep + " " + name + ": " if isinstance(value, np.ndarray): description += f"Array({value.shape}, dtype={value.dtype})" else: description += str(value) return description
{ "repo_name": "Petr-By/qtpyvis", "path": "dltb/base/meta.py", "copies": "1", "size": "3303", "license": "mit", "hash": 5367941384248945000, "line_mean": 28.2300884956, "line_max": 76, "alpha_frac": 0.565849228, "autogenerated": false, "ratio": 4.451482479784366, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 113 }
"""Abstract base for implementing voting subsystems""" from django.db import models from django.db.models import Sum, Count, QuerySet, ObjectDoesNotExist, F, Q from django.utils.translation import ugettext as __ from collective_blog import settings from collective_blog.utils.errors import PermissionCheckFailed class VotesQuerySet(QuerySet): """Queryset of votes Allows for routine operations like getting overall rating etc. """ def score_query(self): """Aggregate score and num_votes""" result = self.aggregate( score=Sum('vote'), num_votes=Count('vote') ) if result['score'] is None: result['score'] = 0 return result def score(self): """Sum all votes""" return self.score_query()['score'] def num_votes(self): """Count all votes""" return self.score_query()['num_votes'] class VoteManager(models.Manager): """Wrap objects to the `VotesQuerySet`""" def get_queryset(self): return VotesQuerySet(self.model) class AbstractVote(models.Model): """A vote on an object by a User""" SCORES = ( (+1, '+1'), (-1, '-1'), ) user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='votes_for_%(app_label)s_%(class)s') vote = models.SmallIntegerField(choices=SCORES) object = None # Should be overwritten objects = VoteManager() class Meta: unique_together = (('user', 'object'), ) abstract = True def __str__(self): return '%s: %s on %s' % (self.user, self.vote, self.object) @classmethod def vote_for(cls, user, obj, vote): """Create or update a vote :param user: Who votes. :param obj: For what votes. :param vote: +1, 0, or -1. 
""" if vote not in [-1, 0, 1]: raise PermissionCheckFailed(__("Wrong vote")) if user.is_anonymous(): raise PermissionCheckFailed(__("You should be logged in")) if not user.is_active: raise PermissionCheckFailed(__("Your account is disabled")) try: v = cls.objects.get(user=user, object=obj) delta = vote - v.vote if vote == 0: v.delete() else: v.vote = vote v.save() except ObjectDoesNotExist: if vote != 0: delta = vote v = cls.objects.create(user=user, object=obj, vote=vote) else: return if delta != 0: print(delta, cls._caches) for (base_model, field_name), query in cls._caches.items(): query = query(v) base_model.objects.filter(query).update( **{field_name: F(field_name) + delta} ) @classmethod def _register(cls, field_name, base_model, query): if not hasattr(cls, '_caches'): cls._caches = {} cls._caches[(base_model, field_name)] = query def _default_cache_query(v): return Q(pk=v.object.pk) class VoteCacheField(models.SmallIntegerField): def __init__(self, vote_model, query=_default_cache_query, default=0): """A field that caches the sum of all votes for a particular object Whenever an object voted using the `vote_model` model, all objects that match a `query` will be updated. """ self.vote_model = vote_model self.query = query super(VoteCacheField, self).__init__(default=default, editable=False) def deconstruct(self): """Returns enough information to recreate the field""" name, path, args, kwargs = super(VoteCacheField, self).deconstruct() kwargs.update(dict(vote_model=self.vote_model, query=self.query)) kwargs.pop('editable') return name, path, args, kwargs def contribute_to_class(self, cls, name, virtual_only=False): super(VoteCacheField, self).contribute_to_class(cls, name, virtual_only) # noinspection PyProtectedMember self.vote_model._register(self.name, cls, self.query)
{ "repo_name": "AmatanHead/collective-blog", "path": "s_voting/models.py", "copies": "1", "size": "4193", "license": "mit", "hash": -5912701660011295000, "line_mean": 28.5281690141, "line_max": 80, "alpha_frac": 0.5816837586, "autogenerated": false, "ratio": 4.031730769230769, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5113414527830769, "avg_score": null, "num_lines": null }
"""Abstract base interface for vehicle data sources.""" import threading import logging import string import sys import datetime from openxc.formats.binary import ProtobufStreamer, ProtobufFormatter from openxc.formats.json import JsonStreamer, JsonFormatter LOG = logging.getLogger(__name__) class MissingPayloadFormatError(Exception): pass class DataSource(threading.Thread): """Interface for all vehicle data sources. This inherits from Thread and when a source is added to a vehicle it attempts to call the ``start()`` method if it exists. If an implementer of DataSource needs some background process to read data, it's just a matter of defining a ``run()`` method. A data source requires a callback method to be specified. Whenever new data is received, it will pass it to that callback. """ def __init__(self, callback=None, log_mode=None, payload_format=None): """Construct a new DataSource. By default, DataSource threads are marked as ``daemon`` threads, so they will die as soon as all other non-daemon threads in the process have quit. 
Kwargs: callback - function to call with any new data received """ super(DataSource, self).__init__() self.callback = callback self.daemon = True self.running = True self._streamer = None self._formatter = None self._format = payload_format self.logger = SourceLogger(self, log_mode) @property def format(self): return self._format @format.setter def format(self, value): self._format = value if value == "json": self.streamer = JsonStreamer() self.formatter = JsonFormatter elif value == "protobuf": self.streamer = ProtobufStreamer() self.formatter = ProtobufFormatter @property def streamer(self): if self._streamer is None: raise MissingPayloadFormatError("Unable to auto-detect payload " "format, must specify manually with --format [json|protobuf]") return self._streamer @streamer.setter def streamer(self, value): self._streamer = value @property def formatter(self): if self._formatter is None: raise MissingPayloadFormatError("Unable to auto-detect payload " "format, must specify manually with --format [json|protobuf]") return self._formatter @formatter.setter def formatter(self, value): self._formatter = value @property def bytes_received(self): return self.streamer.bytes_received def start(self): self.logger.start() super(DataSource, self).start() def stop(self): self.logger.stop() self.running = False def read(self, timeout=None): """Read data from the source. Kwargs: timeout (float) - if the source implementation could potentially block, timeout after this number of seconds. """ raise NotImplementedError("Don't use DataSource directly") def read_logs(self, timeout=None): """Read log data from the source. Kwargs: timeout (float) - if the source implementation could potentially block, timeout after this number of seconds. 
""" raise NotImplementedError("Don't use DataSource directly") class SourceLogger(threading.Thread): FILENAME_TEMPLATE = "%d-%m-%Y.%H-%M-%S" def __init__(self, source, mode="off"): super(SourceLogger, self).__init__() self.daemon = True self.source = source self.mode = mode self.file = None self.running = True if self.mode == "file": filename = "openxc-logs-%s.txt" % datetime.datetime.now().strftime( self.FILENAME_TEMPLATE) self.file = open(filename, 'w') def stop(self): self.running = False def record(self, message): if self.mode is not None and self.mode != "off" and len(message) > 0: log_file = None if self.mode == "stderr": log_file = sys.stderr elif self.mode == "file" and self.file is not None: log_file = self.file print("LOG: %s" % message, file=log_file) def run(self): """Continuously read data from the source and attempt to parse a valid message from the buffer of bytes. When a message is parsed, passes it off to the callback if one is set. """ message_buffer = "" while self.running: try: message_buffer += self.source.read_logs() except DataSourceError as e: if self.running: LOG.warn("Can't read logs from data source -- stopping: %s", e) break except NotImplementedError as e: LOG.info("%s doesn't support logging" % self) break while True: if "\x00" not in message_buffer: break record, _, remainder = message_buffer.partition("\x00") self.record(record) message_buffer = remainder class BytestreamDataSource(DataSource): """A source that receives data is a series of bytes, with discrete messages separated by a newline character. Subclasses of this class need only to implement the ``read`` method. 
""" def __init__(self, **kwargs): super(BytestreamDataSource, self).__init__(**kwargs) self.corrupted_messages = 0 self.running = True def _message_valid(self, message): if not hasattr(message, '__iter__'): return False if not ('name' in message and 'value' in message or ('id' in message and 'data' in message) or ('id' in message and 'bus' in message) or 'command_response' in message): return False return True def parse_messages(self): while True: message = self.streamer.parse_next_message() if message is None: break if not self._message_valid(message): self.corrupted_messages += 1 break if self.callback is not None: self.callback(message) self._receive_command_response(message) def run(self): """Continuously read data from the source and attempt to parse a valid message from the buffer of bytes. When a message is parsed, passes it off to the callback if one is set. """ while self.running: payload = "" try: payload = self.read() except DataSourceError as e: if self.running: LOG.warn("Can't read from data source -- stopping: %s", e) break try: self.streamer except MissingPayloadFormatError: json_chars = ['\x00'] json_chars.extend(string.printable) if all((char in json_chars for char in payload)): self.format = "json" else: self.format = "protobuf" self.streamer.receive(payload) self.parse_messages() def _receive_command_response(self, message): # TODO the controller/source are getting a little mixed up since the # controller now needs to receive responses from the soruce side, maybe # just mix them again. the only exception to being both is a trace # source, and we can just leave the controller methods on that # unimplemented self.open_requests = getattr(self, 'open_requests', []) for open_request in self.open_requests: open_request.put(message) class DataSourceError(Exception): pass
{ "repo_name": "openxc/openxc-python", "path": "openxc/sources/base.py", "copies": "1", "size": "8038", "license": "bsd-3-clause", "hash": 8993140826312894000, "line_mean": 32.3526970954, "line_max": 83, "alpha_frac": 0.5879572033, "autogenerated": false, "ratio": 4.638199653779573, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0006359534689854159, "num_lines": 241 }
"""Abstract base model class(es).""" from peanuts.lib.database import db __all__ = ['Model'] class Model(db.Model): """A base model class.""" __abstract__ = True def get_dictionary(self, verbosity='all'): """Returns a JSON-serializable dictionary representation of the model. This can and should be overridden on individual model classes, defining how to respond to different verbosities and/or fields that should never be returned to the frontend (e.g password). Kwargs: verbosity (str): A string indicating how much information should be returned. This accepts only 'all' and 'none' unless the model base class stipulates otherwise. (default='all') """ return dict([ # This maps the model to its columns except for id, for which the # database mapping and python mapping differ. (c.name, str(getattr(self, c.name if c.name != 'id' else 'id_'))) for c in self.__table__.columns ]) if verbosity == 'all' else {}
{ "repo_name": "astex/peanuts", "path": "peanuts/models/base.py", "copies": "1", "size": "1144", "license": "mit", "hash": 6928335613869487000, "line_mean": 34.75, "line_max": 78, "alpha_frac": 0.5865384615, "autogenerated": false, "ratio": 4.612903225806452, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 32 }
"""Abstract base model.""" from datetime import datetime from pma_api.config import IGNORE_FIELD_PREFIX from pma_api.models import db from pma_api.models.string import EnglishString def prune_ignored_fields(kwargs): """Prune ignored fields. Args: kwargs (dict): Keyword arguments. """ to_pop = [k for k in kwargs.keys() if k.startswith(ApiModel.ignore_field_prefix)] for key in to_pop: kwargs.pop(key) class ApiModel(db.Model): """Abstract base model.""" __abstract__ = True ignore_field_prefix = IGNORE_FIELD_PREFIX def __init__(self, *args, **kwargs): """Perform common tasks on kwargs.""" self.prune_ignored_fields(kwargs) self.empty_to_none(kwargs) super().__init__(*args, **kwargs) @staticmethod def prune_ignored_fields(kwargs): """Prune ignored fields. Args: kwargs (dict): Keyword arguments. """ prune_ignored_fields(kwargs) @staticmethod def update_kwargs_date(kwargs, source_key, fstr): """Update dates to correct format. Args: kwargs (dict): Keyword arguments. source_key (str): The source date string. fstr (str): The format string for the date. """ string_date = kwargs[source_key] this_date = datetime.strptime(string_date, fstr) kwargs[source_key] = this_date @staticmethod def update_kwargs_english(kwargs, source_key, target_key): """Translate API query parameters to equivalent in model. API URL query parameters are in many case abstracted away from the db_models and underlying database. For example, 'survey' in the API would equate to 'survey_id' in the database model. Side effects: kwargs are modified so that new key of name matching 'target_key' argument is inserted, with value as the corresponding record id. Args: source_key (str): The API query parameter. target_key (str): The equivalent model field. kwargs (dict): The keyword argument representation of query parameters submitted by the API request. 
""" english = kwargs.pop(source_key) if english: record = EnglishString.query.filter_by(english=english).first() if record: kwargs[target_key] = record.id else: new_record = EnglishString.insert_unique(english) kwargs[target_key] = new_record.id else: kwargs[target_key] = None @staticmethod def set_kwargs_id(kwargs, source_key, target_key, model, required=True): """Set id of data model field based on code. If the target key is present and it has a value, use that. Otherwise use the source key and do a lookup. Args: source_key (str): Model 'code' field name. target_key (str): model 'id' field name. model (class): The corresponding SqlAlchemy model class. required (bool): True if target key should have an ID. kwargs (dict): The keyword argument representation of query parameters submitted by the API request. Raises: KeyError: If identification code for record was not supplied or did not resolve. """ code = kwargs.pop(source_key, None) fk_id = kwargs.pop(target_key, None) empty_code = code == '' or code is None empty_fk_id = fk_id == '' or fk_id is None if not empty_fk_id: kwargs[target_key] = fk_id elif empty_code and not required: kwargs[target_key] = None else: record = model.query.filter_by(code=code).first() if record: kwargs[target_key] = record.id else: msg = 'No record with code "{}" in "{}"' msg = msg.format(code, model.__tablename__) raise KeyError(msg) @staticmethod def empty_to_none(kwargs): """Convert any empty strings to None type. Args: kwargs (dict): Keyword arguments. """ for key in kwargs: if kwargs[key] == '': kwargs[key] = None @staticmethod def namespace(old_dict, prefix, index=None): """Namespaces keys in a dict by doing a prepend to key strings. Args: old_dict (dict): The original dictionary. prefix (str): Prefix to prepend. index (int): Optional index to append after the prefix. This is to handle situations where a field has one or more sibling fields that represent essentially the same variable, e.g. "characteristic1", "characteristic2". 
Returns: dict: Namespace formatted dictionary. """ if index is not None: prefix += str(index) new_dict = {'.'.join((prefix, k)): v for k, v in old_dict.items()} return new_dict @classmethod def get_by_code(cls, lookup): """Return an item by code or list of codes. Args: lookup (str or seq of str): The codes to lookup Returns: The records that match that code """ if lookup is None: return [] if isinstance(lookup, str): query = cls.query.filter(cls.code == lookup) else: # assume it is a sequence of codes query = cls.query.filter(cls.code.in_(lookup)) records = query.all() return records
{ "repo_name": "joeflack4/pma-api", "path": "pma_api/models/api_base.py", "copies": "1", "size": "5645", "license": "mit", "hash": 1016285649859986700, "line_mean": 32.8023952096, "line_max": 79, "alpha_frac": 0.5828166519, "autogenerated": false, "ratio": 4.379363847944143, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 167 }
"""Abstract base models used by the page management application.""" from __future__ import unicode_literals from django.db import models from django.shortcuts import render from django.utils.encoding import python_2_unicode_compatible from cms import externals from cms.apps.media.models import ImageRefField from cms.models.managers import OnlineBaseManager, PublishedBaseManager, SearchMetaBaseManager, PageBaseManager class PublishedBase(models.Model): """A model with publication controls.""" objects = PublishedBaseManager() class Meta: abstract = True class PublishedBaseSearchAdapter(externals.watson.SearchAdapter): """Base search adapter for PublishedBase derivatives.""" def get_live_queryset(self): """Selects only live models.""" return self.model.objects.all() class OnlineBase(PublishedBase): objects = OnlineBaseManager() is_online = models.BooleanField( "online", default=True, help_text=( "Uncheck this box to remove the page from the public website. " "Logged-in admin users will still be able to view this page by clicking the 'view on site' button." ), ) class Meta: abstract = True class OnlineBaseSearchAdapter(PublishedBaseSearchAdapter): """Base search adapter for OnlineBase derivatives.""" class SearchMetaBase(OnlineBase): """Base model for models used to generate a standalone HTML page.""" objects = SearchMetaBaseManager() # SEO fields. browser_title = models.CharField( max_length=1000, blank=True, help_text=( "The heading to use in the user's web browser. " "Leave blank to use the page title. " "Search engines pay particular attention to this attribute." ) ) meta_description = models.TextField( "description", blank=True, help_text="A brief description of the contents of this page.", ) sitemap_priority = models.FloatField( "priority", choices=( (1.0, "Very high"), (0.8, "High"), (0.5, "Medium"), (0.3, "Low"), (0.0, "Very low"), ), default=None, blank=True, null=True, help_text=( "The relative importance of this content in your site. 
Search engines use this " "as a hint when ranking the pages within your site." ), ) sitemap_changefreq = models.IntegerField( "change frequency", choices=( (1, "Always"), (2, "Hourly"), (3, "Daily"), (4, "Weekly"), (5, "Monthly"), (6, "Yearly"), (7, "Never") ), default=None, blank=True, null=True, help_text=( "How frequently you expect this content to be updated." "Search engines use this as a hint when scanning your site for updates." ), ) robots_index = models.BooleanField( "allow indexing", default=True, help_text=( "Use this to prevent search engines from indexing this page. " "Disable this only if the page contains information which you do not wish " "to show up in search results." ), ) robots_follow = models.BooleanField( "follow links", default=True, help_text=( "Use this to prevent search engines from following any links they find in this page. " "Disable this only if the page contains links to other sites that you do not wish to " "publicise." ), ) robots_archive = models.BooleanField( "allow archiving", default=True, help_text=( "Use this to prevent search engines from archiving this page. " "Disable this only if the page is likely to change on a very regular basis. " ), ) # Open graph fields og_title = models.CharField( verbose_name='title', blank=True, max_length=100, help_text='Title that will appear on the Facebook post, it is limited to 100 characters' 'because Facebook truncates the title to 88 characters.' ) og_description = models.TextField( verbose_name='description', blank=True, max_length=300, help_text='Description that will appear ont he Facebook post, it is limited to 300' 'characters but is recommended not to use anything over 200.' ) og_image = ImageRefField( verbose_name='image', blank=True, null=True, help_text='The recommended image size is 1200x627 (1.91/1 ratio) this gives you a big' 'stand out thumbnail. Using an image smaller than 400x209 will give you a very' 'small thumbnail and splits your post into 2 columns.' 
'' 'If you have text on the image make sure it is centered as Facebook crops images' 'to get the text centered so you may lose some of your image.' ) # Twitter card fields twitter_card = models.IntegerField( verbose_name='card', choices=[ (0, 'Summary'), (1, 'Photo'), (2, 'Video'), (3, 'Product'), (4, 'App'), (5, 'Gallery'), (6, 'Large Summary'), ], blank=True, null=True, default=None, help_text='The type of content on the page, most of the time summary will suffice' '' 'Before you can benefit with any of these fields make sure to go to ' 'https://dev.twitter.com/docs/cards/validation/validator and get approved' ) twitter_title = models.CharField( verbose_name='title', blank=True, max_length=70, help_text='The title that appears on the Twitter card, it is limited to 70 characters.' ) twitter_description = models.TextField( verbose_name='description', blank=True, max_length=200, help_text='Description that will appear on the Twitter card, it is limited to 200 characters' 'You don\'t need to focus on keywords as this does\'nt effect SEO so focus on' 'copy that compliments the tweet and title.' ) twitter_image = ImageRefField( verbose_name='image', blank=True, null=True, help_text='The minimum size it needs to be is 280x150, if you want to use a larger image' 'make sure the card type is set to "Large Summary"' ) def get_context_data(self): """Returns the SEO context data for this page.""" title = str(self) # Return the context. 
return { "meta_description": self.meta_description, "robots_index": self.robots_index, "robots_archive": self.robots_archive, "robots_follow": self.robots_follow, "title": self.browser_title or title, "header": title, "og_title": self.og_title, "og_description": self.og_description, "og_image": self.og_image, "twitter_card": self.twitter_card, "twitter_title": self.twitter_title, "twitter_description": self.twitter_description, "twitter_image": self.twitter_image } def render(self, request, template, context=None, **kwargs): """Renders a template as a HttpResponse using the context of this page.""" page_context = self.get_context_data() page_context.update(context or {}) return render(request, template, page_context, **kwargs) class Meta: abstract = True class SearchMetaBaseSearchAdapter(OnlineBaseSearchAdapter): """Search adapter for SearchMetaBase derivatives.""" def get_description(self, obj): """Returns the meta description.""" return obj.meta_description def get_live_queryset(self): """Selects only live models.""" return super(SearchMetaBaseSearchAdapter, self).get_live_queryset().filter( robots_index=True, ) @python_2_unicode_compatible class PageBase(SearchMetaBase): """ An enhanced SearchMetaBase with a sensible set of common features suitable for most pages. """ objects = PageBaseManager() # Base fields. slug = models.SlugField( help_text='A user friendly URL' ) title = models.CharField( max_length=1000, ) # Navigation fields. short_title = models.CharField( max_length=200, blank=True, help_text=( "A shorter version of the title that will be used in site navigation. " "Leave blank to use the full-length title." ), ) # SEO fields. def get_context_data(self): """Returns the SEO context data for this page.""" context_data = super(PageBase, self).get_context_data() context_data.update({ "title": self.browser_title or self.title, "header": self.title, }) return context_data # Base model methods. 
def __str__(self): """ Returns the short title of this page, falling back to the standard title. """ return self.short_title or self.title class Meta: abstract = True class PageBaseSearchAdapter(SearchMetaBaseSearchAdapter): """Search adapter for PageBase derivatives.""" def get_title(self, obj): """Returns the title of the page.""" return obj.title
{ "repo_name": "danielsamuels/cms", "path": "cms/models/base.py", "copies": "1", "size": "9777", "license": "bsd-3-clause", "hash": 4971717417932243000, "line_mean": 29.0830769231, "line_max": 111, "alpha_frac": 0.5911833896, "autogenerated": false, "ratio": 4.378414688759516, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0008274985458025495, "num_lines": 325 }
"""Abstract base models used by the page management application.""" from __future__ import unicode_literals import six from django.db import models from django.shortcuts import render from django.utils.encoding import python_2_unicode_compatible from cms import externals from cms.apps.media.models import ImageRefField from cms.models.managers import OnlineBaseManager, PublishedBaseManager, SearchMetaBaseManager, PageBaseManager class PublishedBase(models.Model): """A model with publication controls.""" objects = PublishedBaseManager() class Meta: abstract = True class PublishedBaseSearchAdapter(externals.watson.SearchAdapter): """Base search adapter for PublishedBase derivatives.""" def get_live_queryset(self): """Selects only live models.""" return self.model.objects.all() class OnlineBase(PublishedBase): objects = OnlineBaseManager() is_online = models.BooleanField( "online", default=True, help_text=( "Uncheck this box to remove the page from the public website. " "Logged-in admin users will still be able to view this page by clicking the 'view on site' button." ), ) class Meta: abstract = True class OnlineBaseSearchAdapter(PublishedBaseSearchAdapter): """Base search adapter for OnlineBase derivatives.""" class SearchMetaBase(OnlineBase): """Base model for models used to generate a standalone HTML page.""" objects = SearchMetaBaseManager() # SEO fields. browser_title = models.CharField( max_length=1000, blank=True, help_text=( "The heading to use in the user's web browser. " "Leave blank to use the page title. " "Search engines pay particular attention to this attribute." ) ) meta_description = models.TextField( "description", blank=True, help_text="A brief description of the contents of this page.", ) sitemap_priority = models.FloatField( "priority", choices=( (1.0, "Very high"), (0.8, "High"), (0.5, "Medium"), (0.3, "Low"), (0.0, "Very low"), ), default=None, blank=True, null=True, help_text=( "The relative importance of this content on your site. 
Search engines use this " "as a hint when ranking the pages within your site." ), ) sitemap_changefreq = models.IntegerField( "change frequency", choices=( (1, "Always"), (2, "Hourly"), (3, "Daily"), (4, "Weekly"), (5, "Monthly"), (6, "Yearly"), (7, "Never") ), default=None, blank=True, null=True, help_text=( "How frequently you expect this content to be updated. " "Search engines use this as a hint when scanning your site for updates." ), ) robots_index = models.BooleanField( "allow indexing", default=True, help_text=( "Uncheck to prevent search engines from indexing this page. " "Do this only if the page contains information which you do not wish " "to show up in search results." ), ) robots_follow = models.BooleanField( "follow links", default=True, help_text=( "Uncheck to prevent search engines from following any links they find in this page. " "Do this only if the page contains links to other sites that you do not wish to " "publicise." ), ) robots_archive = models.BooleanField( "allow archiving", default=True, help_text=( "Uncheck this to prevent search engines from archiving this page. " "Do this this only if the page is likely to change on a very regular basis. " ), ) # Open Graph fields og_title = models.CharField( verbose_name='title', blank=True, max_length=100, help_text='Title that will appear on Facebook posts. This is limited to 100 characters, ' 'but Facebook will truncate the title to 88 characters.' ) og_description = models.TextField( verbose_name='description', blank=True, max_length=300, help_text='Description that will appear on Facebook posts. It is limited to 300 ' 'characters, but it is recommended that you do not use anything over 200.' ) og_image = ImageRefField( verbose_name='image', blank=True, null=True, help_text='The recommended image size is 1200x627 (1.91:1 ratio); this gives you a big ' 'stand out thumbnail. Using an image smaller than 400x209 will give you a ' 'small thumbnail and will splits posts into 2 columns. 
' 'If you have text on the image make sure it is centered.' ) # Twitter card fields twitter_card = models.IntegerField( verbose_name='card', choices=[ (0, 'Summary'), (1, 'Photo'), (2, 'Video'), (3, 'Product'), (4, 'App'), (5, 'Gallery'), (6, 'Large Summary'), ], blank=True, null=True, default=None, help_text='The type of content on the page. Most of the time "Summary" will suffice. ' 'Before you can benefit from any of these fields make sure to go to ' 'https://dev.twitter.com/docs/cards/validation/validator and get approved.' ) twitter_title = models.CharField( verbose_name='title', blank=True, max_length=70, help_text='The title that appears on the Twitter card, it is limited to 70 characters.' ) twitter_description = models.TextField( verbose_name='description', blank=True, max_length=200, help_text='Description that will appear on Twitter cards. It is limited ' 'to 200 characters. This does\'nt effect SEO, so focus on copy ' 'that complements the tweet and title rather than on keywords.' ) twitter_image = ImageRefField( verbose_name='image', blank=True, null=True, help_text='The minimum size it needs to be is 280x150. If you want to use a larger image' 'make sure the card type is set to "Large Summary".' ) def get_context_data(self): """Returns the SEO context data for this page.""" title = six.text_type(self) # Return the context. 
return { "meta_description": self.meta_description, "robots_index": self.robots_index, "robots_archive": self.robots_archive, "robots_follow": self.robots_follow, "title": self.browser_title or title, "header": title, "og_title": self.og_title, "og_description": self.og_description, "og_image": self.og_image, "twitter_card": self.twitter_card, "twitter_title": self.twitter_title, "twitter_description": self.twitter_description, "twitter_image": self.twitter_image } def render(self, request, template, context=None, **kwargs): """Renders a template as a HttpResponse using the context of this page.""" page_context = self.get_context_data() page_context.update(context or {}) return render(request, template, page_context, **kwargs) class Meta: abstract = True class SearchMetaBaseSearchAdapter(OnlineBaseSearchAdapter): """Search adapter for SearchMetaBase derivatives.""" def get_description(self, obj): """Returns the meta description.""" return obj.meta_description def get_live_queryset(self): """Selects only live models.""" return super(SearchMetaBaseSearchAdapter, self).get_live_queryset().filter( robots_index=True, ) @python_2_unicode_compatible class PageBase(SearchMetaBase): """ An enhanced SearchMetaBase with a sensible set of common features suitable for most pages. """ objects = PageBaseManager() # Base fields. slug = models.SlugField( help_text='A user friendly URL' ) title = models.CharField( max_length=1000, ) # Navigation fields. short_title = models.CharField( max_length=200, blank=True, help_text=( "A shorter version of the title that will be used in site navigation. " "Leave blank to use the full-length title." ), ) # SEO fields. def get_context_data(self): """Returns the SEO context data for this page.""" context_data = super(PageBase, self).get_context_data() context_data.update({ "title": self.browser_title or self.title, "header": self.title, }) return context_data # Base model methods. 
def __str__(self): """ Returns the short title of this page, falling back to the standard title. """ return self.short_title or self.title class Meta: abstract = True class PageBaseSearchAdapter(SearchMetaBaseSearchAdapter): """Search adapter for PageBase derivatives.""" def get_title(self, obj): """Returns the title of the page.""" return obj.title
{ "repo_name": "dan-gamble/cms", "path": "cms/models/base.py", "copies": "1", "size": "9646", "license": "bsd-3-clause", "hash": 2837676956618484000, "line_mean": 28.8637770898, "line_max": 111, "alpha_frac": 0.5922662243, "autogenerated": false, "ratio": 4.352888086642599, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5445154310942599, "avg_score": null, "num_lines": null }
"""Abstract base models used by the page management application.""" from django.db import models from django.shortcuts import render from cms import externals from cms.models.managers import OnlineBaseManager, PublishedBaseManager, SearchMetaBaseManager, PageBaseManager class PublishedBase(models.Model): """A model with publication controls.""" objects = PublishedBaseManager() class Meta: abstract = True class PublishedBaseSearchAdapter(externals.watson.SearchAdapter): """Base search adapter for PublishedBase derivatives.""" def get_live_queryset(self): """Selects only live models.""" return self.model.objects.all() class OnlineBase(PublishedBase): objects = OnlineBaseManager() is_online = models.BooleanField( "online", default = True, help_text = ( "Uncheck this box to remove the page from the public website. " "Logged-in admin users will still be able to view this page by clicking the 'view on site' button." ), ) class Meta: abstract = True class OnlineBaseSearchAdapter(PublishedBaseSearchAdapter): """Base search adapter for OnlineBase derivatives.""" class SearchMetaBase(OnlineBase): """Base model for models used to generate a standalone HTML page.""" objects = SearchMetaBaseManager() # SEO fields. browser_title = models.CharField( max_length = 1000, blank = True, help_text = ( "The heading to use in the user's web browser. " "Leave blank to use the page title. " "Search engines pay particular attention to this attribute." ) ) meta_keywords = models.CharField( "keywords", max_length = 1000, blank = True, help_text = ( "A comma-separated list of keywords for this page. Use this to specify common mis-spellings " "or alternative versions of important words in this page." 
), ) meta_description = models.TextField( "description", blank = True, help_text = "A brief description of the contents of this page.", ) sitemap_priority = models.FloatField( "priority", choices = ( (1.0, "Very high"), (0.8, "High"), (0.5, "Medium"), (0.3, "Low"), (0.0, "Very low"), ), default = None, blank = True, null = True, help_text = ( "The relative importance of this content in your site. Search engines use this " "as a hint when ranking the pages within your site." ), ) sitemap_changefreq = models.IntegerField( "change frequency", choices = ( (1, "Always"), (2, "Hourly"), (3, "Daily"), (4, "Weekly"), (5, "Monthly"), (6, "Yearly"), (7, "Never") ), default = None, blank = True, null = True, help_text = ( "How frequently you expect this content to be updated." "Search engines use this as a hint when scanning your site for updates." ), ) robots_index = models.BooleanField( "allow indexing", default = True, help_text = ( "Use this to prevent search engines from indexing this page. " "Disable this only if the page contains information which you do not wish " "to show up in search results." ), ) robots_follow = models.BooleanField( "follow links", default = True, help_text = ( "Use this to prevent search engines from following any links they find in this page. " "Disable this only if the page contains links to other sites that you do not wish to " "publicise." ), ) robots_archive = models.BooleanField( "allow archiving", default = True, help_text = ( "Use this to prevent search engines from archiving this page. " "Disable this only if the page is likely to change on a very regular basis. " ), ) def get_context_data(self): """Returns the SEO context data for this page.""" title = unicode(self) # Return the context. 
return { "meta_description": self.meta_description, "meta_keywords": self.meta_keywords, "robots_index": self.robots_index, "robots_archive": self.robots_archive, "robots_follow": self.robots_follow, "title": self.browser_title or title, "header": title, } def render(self, request, template, context=None, **kwargs): """Renders a template as a HttpResponse using the context of this page.""" page_context = self.get_context_data() page_context.update(context or {}) return render(request, template, page_context, **kwargs) class Meta: abstract = True class SearchMetaBaseSearchAdapter(OnlineBaseSearchAdapter): """Search adapter for SearchMetaBase derivatives.""" def get_description(self, obj): """Returns the meta description.""" return obj.meta_description def get_live_queryset(self): """Selects only live models.""" return super(OnlineBaseSearchAdapter, self).get_live_queryset().filter( robots_index = True, ) class PageBase(SearchMetaBase): """ An enhanced SearchMetaBase with a sensible set of common features suitable for most pages. """ objects = PageBaseManager() # Base fields. url_title = models.SlugField( "URL title", ) title = models.CharField( max_length = 1000, ) # Navigation fields. short_title = models.CharField( max_length = 200, blank = True, help_text = ( "A shorter version of the title that will be used in site navigation. " "Leave blank to use the full-length title." ), ) # SEO fields. def get_context_data(self): """Returns the SEO context data for this page.""" context_data = super(PageBase, self).get_context_data() context_data.update({ "title": self.browser_title or self.title, "header": self.title, }) return context_data # Base model methods. def __unicode__(self): """ Returns the short title of this page, falling back to the standard title. 
""" return self.short_title or self.title class Meta: abstract = True class PageBaseSearchAdapter(SearchMetaBaseSearchAdapter): """Search adapter for PageBase derivatives.""" def get_title(self, obj): """Returns the title of the page.""" return obj.title
{ "repo_name": "etianen/cms", "path": "src/cms/models/base.py", "copies": "2", "size": "7140", "license": "bsd-3-clause", "hash": -7901820232362612000, "line_mean": 27.4501992032, "line_max": 111, "alpha_frac": 0.5703081232, "autogenerated": false, "ratio": 4.547770700636943, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6118078823836942, "avg_score": null, "num_lines": null }
"""AbstractBot""" import random import re # encoding=utf8 class AbstractBot(object): """Generic bot""" def __init__(self, trigger_word, provider, auto_feed, meme_provider, join_every): #Setup class props self.chat_count = {} self.participation_frequency = 5 self.trigger_word = trigger_word self.provider = provider self.auto_feed = auto_feed self.meme_provider = meme_provider self.join_every = join_every def process_incoming_message(self, chat_id, text): """Incoming message handlerr""" regex_trigger = re.compile("(" + self.trigger_word + ")", re.IGNORECASE) #Reload bot if text == "bot reload": self.provider.load() return "Reloaded", None #Bot match elif regex_trigger.findall(text): self.chat_count[chat_id] = 0 text = regex_trigger.sub('', text) text = self.provider.get_message(text) #Meme check meme = self.meme_provider.get_random_meme(text) return text, meme #Save new data else: chat_count_dic = self.chat_count if not chat_id in chat_count_dic: chat_count_dic[chat_id] = 0 chat_count_dic[chat_id] = chat_count_dic[chat_id] + 1 ##He'll join after a random number of messages between 5 and 15 frequency = random.randint(self.join_every, self.join_every * 2) if chat_count_dic[chat_id] > frequency: chat_count_dic[chat_id] = 0 text = self.provider.get_message(text) return text, None if self.auto_feed: self.provider.text_provider.add_text(text)
{ "repo_name": "kblok/TelegramBotFriend", "path": "abstract_bot.py", "copies": "1", "size": "1767", "license": "mit", "hash": -2740950448365965000, "line_mean": 31.7222222222, "line_max": 85, "alpha_frac": 0.5664968874, "autogenerated": false, "ratio": 3.8580786026200875, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4924575490020088, "avg_score": null, "num_lines": null }
"""Abstract boto-based datastore. The boto_ library provides interfaces to both Amazon S3 and Google Storage for Developers. This abstract base class gets most of the common work done. .. note:: **Installation**: Use of this module requires the open source boto_ package. .. _boto: http://code.google.com/p/boto/ """ from cloud_browser.app_settings import settings from cloud_browser.cloud import errors, base from cloud_browser.common import ROOT, SEP, requires, dt_from_header ############################################################################### # Constants / Conditional Imports ############################################################################### try: import boto # pylint: disable=F0401 except ImportError: boto = None # pylint: disable=C0103 ############################################################################### # Classes ############################################################################### # pylint: disable=invalid-name def boto_server_client_error_wrapper(operation): """Exception wrapper for catching BotoClientError, BotoServerError and NoObjectException. 
""" import sys from boto.exception import BotoClientError, BotoServerError def wrapped(*args, **kwargs): try: return operation(*args, **kwargs) except BotoServerError as error: if error.status == 404: raise errors.NoObjectException, \ errors.NoObjectException(error), \ sys.exc_info()[2] else: raise errors.StorageResponseException, \ errors.StorageResponseException(error), \ sys.exc_info()[2] except BotoClientError as error: raise errors.ClientException, \ errors.ClientException(error), \ sys.exc_info()[2] return wrapped class BotoExceptionWrapper(errors.CloudExceptionWrapper): """Boto :mod:`boto` exception translator.""" error_cls = errors.CloudException @requires(boto, 'boto') def translate(self, exc): """Return whether or not to do translation.""" from boto.exception import StorageResponseError if isinstance(exc, StorageResponseError): if exc.status == 404: return self.error_cls(unicode(exc)) return None class BotoKeyWrapper(errors.CloudExceptionWrapper): """Boto :mod:`boto` key exception translator.""" error_cls = errors.NoObjectException class BotoBucketWrapper(errors.CloudExceptionWrapper): """Boto :mod:`boto` bucket exception translator.""" error_cls = errors.NoContainerException class BotoObject(base.CloudObject): """Boto 'key' object wrapper.""" #: Exception translations. 
    wrap_boto_errors = BotoKeyWrapper()

    @classmethod
    def is_key(cls, result):
        """Return ``True`` if result is a key object.

        Abstract: provider-specific subclasses must implement this.
        """
        raise NotImplementedError

    @classmethod
    def is_prefix(cls, result):
        """Return ``True`` if result is a prefix object.

        Abstract: provider-specific subclasses must implement this.
        """
        raise NotImplementedError

    @wrap_boto_errors
    def _get_object(self):
        """Return native storage object (the underlying boto key)."""
        return self.container.native_container.get_key(self.name)

    @wrap_boto_errors
    def _read(self):
        """Return contents of object."""
        return self.native_obj.read()

    @classmethod
    def from_result(cls, container, result):
        """Create from ambiguous result.

        Dispatches on the result type: prefix -> subdirectory wrapper,
        key -> file wrapper; anything else is an error.
        """
        if result is None:
            raise errors.NoObjectException

        elif cls.is_prefix(result):
            return cls.from_prefix(container, result)

        elif cls.is_key(result):
            return cls.from_key(container, result)

        raise errors.CloudException("Unknown boto result type: %s" %
                                    type(result))

    @classmethod
    def from_prefix(cls, container, prefix):
        """Create from prefix object (a pseudo-directory listing entry)."""
        if prefix is None:
            raise errors.NoObjectException

        return cls(container,
                   name=prefix.name,
                   obj_type=cls.type_cls.SUBDIR)

    @classmethod
    def from_key(cls, container, key):
        """Create from key object (a real stored object / file)."""
        if key is None:
            raise errors.NoObjectException

        # ``last_modified`` may be absent on sparsely-populated keys.
        last_modified = dt_from_header(key.last_modified) \
            if key.last_modified else None

        # Get Key (1123): Tue, 13 Apr 2010 14:02:48 GMT
        # List Keys (8601): 2010-04-13T14:02:48.000Z
        return cls(container,
                   name=key.name,
                   size=key.size,
                   content_type=key.content_type,
                   content_encoding=key.content_encoding,
                   last_modified=last_modified,
                   obj_type=cls.type_cls.FILE)


class BotoContainer(base.CloudContainer):
    """Boto container wrapper."""
    #: Storage object child class.
    obj_cls = BotoObject

    #: Exception translations.
    wrap_boto_errors = BotoBucketWrapper()

    #: Maximum number of objects that can be listed or ``None``.
    #:
    #: :mod:`boto` transparently pages through objects, so there is no real
    #: limit to the number of object that can be displayed.  However, for
    #: practical reasons, we'll limit it to the same as Rackspace.
    max_list = 10000

    def get_safe_special_characters(self):
        """Object name safe characters.

        :rtype: ``str``
        """
        # NOTE(review): ``\-`` is a literal backslash + dash in a plain
        # string (not an escape) — presumably intended for use inside a
        # regex character class downstream; confirm against callers.
        return "!\-_.*'()"  # pylint: disable=anomalous-backslash-in-string

    @wrap_boto_errors
    def _get_container(self):
        """Return native container object."""
        return self.conn.native_conn.get_bucket(self.name)

    @boto_server_client_error_wrapper
    def get_objects(self, path, marker=None,
                    limit=settings.CLOUD_BROWSER_DEFAULT_LIST_LIMIT):
        """Get objects.

        :param path: Pseudo-directory prefix (normalized to end with SEP).
        :param marker: Pagination marker (last name of the previous page).
        :param limit: Maximum number of objects to return.
        """
        from itertools import islice

        path = path.rstrip(SEP) + SEP if path else path
        result_set = self.native_container.list(path, SEP, marker)

        # Get +1 results because marker and first item can match as we strip
        # the separator from results obscuring things. No real problem here
        # because boto masks any real request limits.
        results = list(islice(result_set, limit+1))
        if results:
            if marker and results[0].name.rstrip(SEP) == marker.rstrip(SEP):
                results = results[1:]
            else:
                results = results[:limit]

        return [self.obj_cls.from_result(self, r) for r in results]

    @wrap_boto_errors
    def get_object(self, path):
        """Get single object."""
        key = self.native_container.get_key(path)
        return self.obj_cls.from_key(self, key)

    def has_directory(self, path):
        """Check the directory exists or not.

        This method checks if there are keys whose name start with "path".
        If None, raise exception.

        Note: raises ``NoObjectException`` instead of returning ``False``
        when nothing matches.

        :param path: A string.
        """
        results = self.native_container.get_all_keys(prefix=path)
        if len(results)==0:
            raise errors.NoObjectException
        return True

    @boto_server_client_error_wrapper
    def get_directories_paths(self):
        """Get all the directories paths in the given container.

        Breadth/depth-agnostic traversal: repeatedly lists one prefix level
        and queues every discovered prefix for further expansion.

        :rtype: ``list[str]``
        """
        dirs_paths = [ROOT]
        prefixes = [ROOT]
        while prefixes:
            prefix = prefixes.pop()
            results = self.native_container.get_all_keys(
                prefix=prefix, delimiter=SEP)
            for result in results:
                if self.obj_cls.is_prefix(result):
                    dirs_paths.append(result.name)
                    prefixes.append(result.name)
        return sorted(set(dirs_paths))

    @boto_server_client_error_wrapper
    def _get_key_objects(self, path):
        """Get all the keys of files and subdirectories under the given path.

        This method is different from get_objects(). get_objects() returns
        key and prefix objects, and a directory is regarded as a prefix
        object. For _get_key_objects(), the elements' return type is boto
        Key object.

        :param path: A string.
        :return: A list of instances of boto Key objects.
        """
        keys = []
        has_more = True
        marker = path
        while has_more:
            current_keys = self.native_container.get_all_keys(marker=marker,
                                                              prefix=path)
            if len(current_keys) == 0:
                return keys
            # 1000 is assumed to be the listing page size — a full page
            # means there may be more results; TODO confirm against the
            # S3 list-objects max-keys default.
            has_more = len(current_keys) == 1000
            marker = current_keys[-1].name
            keys += current_keys
        return keys

    @boto_server_client_error_wrapper
    def filter_objects(self, objects):
        """Remove NoneType key objects from the objects list, which should be
        regarded as the parent directory. Set the user-defined metadata.

        Django-cloud-browser wraps boto objects with AwsObject. There are two
        types of boto objects, Key and Prefix. A Key object is regarded as a
        file and a Prefix object is regarded as a directory. Like the AWS S3
        console, we use a key (whose name ends with "/") to mock a directory
        so that we can have a hierarchical file system. The goal of this
        function is to remove the mock-directory key objects and set the
        properties of the Prefix objects from their corresponding Key
        objects, so that the browser can display a correct hierarchical file
        system.

        :param objects: A list of AwsObject objects.
        :return: A list of AwsObject objects.
        :rtype: :class:`cloud_browser.cloud.aws.AwsObject`
        """
        object_to_be_removed = []
        index = 0
        for obj in objects:
            # Set metadata last_modified for is_subdir object which has
            # corresponding real key stored in S3.
            key = None
            if obj.is_subdir:
                key = self.native_container.get_key("{}/".format(obj.name))
                if key:
                    # is_subdir object does not have last_modified metadata
                    # when initialized; use the metadata from corresponding
                    # boto key here.
                    obj.last_modified = dt_from_header(key.last_modified) \
                        if key.last_modified else None
            else:
                key = self.native_container.get_key(obj.name)
                # Remove directory key.
                if key is None:
                    object_to_be_removed.append(index)
            # Retrieve 'modified-by' for is_file and is_subdir objects.
            if key:
                obj.modified_by = key.metadata.get('modified-by', 'unknown')
            index += 1
        return [objects[i] for i in range(0, len(objects))
                if i not in object_to_be_removed]

    def is_safe_basename(self, base_name):
        """Verify that the base_name string path contains only safe
        characters ([0-9a-zA-Z], !, -, _, ., *, ', (, )).

        :param basename: A string.
        :return: ``True`` if key name string does not contain any unsafe
            characters.
        :rtype: ``bool``
        """
        import re
        # Space is a valid character
        # pylint: disable=anomalous-backslash-in-string
        if re.match("[a-zA-Z0-9!\-_.*'() ]+$", base_name):
            return True
        return False

    @boto_server_client_error_wrapper
    def mkdir(self, dir_path, username=None):
        """Create a new subdirectory under dir_path and set user-defined
        metadata: modified-by.

        :param dir_path: A string. To differentiate file and directory, the
            key name of a directory ends with "/", for example, "foo/bar/".
        :param username: A string.
        :rtype: :class:`cloud_browser.cloud.aws.AwsObject`
        """
        from boto.s3.key import Key
        key = Key(self.native_container, dir_path)
        if username:
            key.set_metadata('modified-by', username)
        # A directory is mocked by a zero-semantics placeholder key whose
        # name ends with "/".
        key.set_contents_from_string('It is a directory.')
        key.set_acl('public-read')
        return self.obj_cls.from_key(self, key)

    def _delete_directory(self, dir_src_path):
        """Delete the directory and all of the files and subdirectories
        under it.

        :param dir_src_path: A string ends with "/".
        :rtype: :class:`cloud_browser.cloud.aws.AwsObject`
        """
        keys = self._get_key_objects(dir_src_path)
        # Delete all the files and sub-dirs
        # pylint: disable=expression-not-assigned
        [key.delete() for key in keys if key]
        # Delete the directory itself if all of the files and sub-dirs are
        # successfully deleted
        key = self.native_container.get_key(dir_src_path)
        if key:
            # NOTE(review): relies on boto ``Key.delete()`` returning the
            # deleted key object — confirm against the boto API.
            return self.obj_cls.from_key(self, key.delete())
        else:
            raise errors.NoObjectException(
                "{} does not exist".format(dir_src_path))

    @boto_server_client_error_wrapper
    def delete(self, src_path, is_file):
        """If src_path is a file, delete it. If it's a directory, delete all
        paths under it and itself.

        :param src_path: A string.
        :param is_file: A boolean indicating the target AwsObject is a file
            or not.
        :rtype: :class:`cloud_browser.cloud.aws.AwsObject`
        """
        if is_file:
            key = self.native_container.get_key(src_path)
            if key:
                return self.obj_cls.from_key(self, key.delete())
            else:
                raise errors.NoObjectException(
                    "{} does not exist".format(src_path))
        else:
            return self._delete_directory("{}/".format(src_path))

    def _rename_object(self, parent_dir_path, src_path, new_basename):
        """Rename a file. Because AWS S3 is a key-value store, this method
        can also be used to rename a directory, whose key ends with "/".

        Implemented as copy-then-delete (S3 has no atomic rename).

        :param parent_dir_path: A string ends with "/", for example
            "foo/bar/", or ROOT directory, "".
        :param src_path: A string, for example a file "foo/bar/baz" or a
            directory "foo/bar/baz/".
        :param new_basename: A string.
            If renaming a file: "baz-rename", or a directory: "baz-rename/".
        :return: If the file is successfully renamed (copy and delete the
            original key).
        :rtype: :class:`cloud_browser.cloud.aws.AwsObject`
        """
        renamed = self.native_container.copy_key(
            parent_dir_path + new_basename,
            self.native_container.name,
            src_path,
            preserve_acl=True)
        key = self.native_container.get_key(src_path)
        if key:
            key.delete()
        return renamed

    def _rename_directory(self, parent_dir_path, dir_src_path, new_basename):
        """Rename the directory and all of the files and subdirectories
        under it.

        :param parent_dir_path: A string, for example "foo/bar/".
        :param dir_src_path: A string, for example "foo/bar/baz/".
        :param new_basename: A string, for example "baz-rename".
        :return: Renamed object, if the directory and all of the files and
            subdirectories under it are successfully renamed.
        :rtype: :class:`cloud_browser.cloud.aws.AwsObject`
        """
        from boto.exception import BotoServerError
        # Rename all the files and subdirectories under the target directory.
        # While error occurs, continue renaming only if 'key does not exist'.
        keys = self._get_key_objects(dir_src_path)
        for key in keys:
            try:
                self._rename_object(
                    "{}{}/".format(parent_dir_path, new_basename),
                    key.name,
                    key.name[len(dir_src_path):])
            except BotoServerError as error:
                if error.status == 404:
                    pass
        # If directory key exists, rename the directory itself and return the
        # renamed directory key. Otherwise, change the key name of first item
        # in '_get_key_objects' to the new directory name. Because in `rename`,
        # renaming a directory returns a Prefix object, class variable 'name'
        # is required.
        # NOTE(review): the fallback assumes ``keys`` is non-empty — an
        # empty directory with no placeholder key would raise IndexError;
        # confirm callers guarantee at least one key.
        try:
            self.get_object(dir_src_path)
            return self._rename_object(
                parent_dir_path, dir_src_path, "{}/".format(new_basename))
        except errors.NoObjectException:
            keys[0].name = "{}{}".format(parent_dir_path, new_basename)
            return keys[0]

    @boto_server_client_error_wrapper
    def rename(self, parent_dir_path, src_path, new_basename, is_file):
        """If src_path is a file, rename it.
        If it's a directory, rename all paths under it and itself.

        :param parent_dir_path: A string ends with "/" (excludes ROOT), for
            example "foo/bar/".
        :param src_path: A string, for example "foo/bar/baz". If renaming a
            directory, a delimiter "/" will be appended to the src_key_name.
            (The path of a CloudObject does not end with "/". Whether an
            instance of a CloudObject is a directory or a file is determined
            by the "is_file" or "is_subdir" property.)
        :param new_basename: A string, for example "baz-rename". It does not
            contain key prefix.
        :param is_file: A boolean indicating the target CloudObject is a
            file or not.
        :return: If successfully rename the src key.
        :rtype: :class:`cloud_browser.cloud.aws.AwsObject`
        """
        if is_file:
            return self.obj_cls.from_key(
                self,
                self._rename_object(parent_dir_path, src_path, new_basename))
        else:
            return self.obj_cls.from_prefix(
                self,
                self._rename_directory(parent_dir_path,
                                       "{}/".format(src_path),
                                       new_basename))

    @boto_server_client_error_wrapper
    def move(self, src_file_path, target_dir_path):
        """Move the file to the target directory.

        Copy-then-delete, like :meth:`_rename_object`.

        :param src_file_path: A string ends with "/" (excludes ROOT).
        :param target_dir_path: A string.
        :return: If successfully move the key to the target directory.
        :rtype: :class:`cloud_browser.cloud.aws.AwsObject`
        """
        moved = self.native_container.copy_key(
            target_dir_path + src_file_path.split("/")[-1],
            self.native_container.name,
            src_file_path,
            preserve_acl=True)
        key = self.native_container.get_key(src_file_path)
        if key:
            key.delete()
        return self.obj_cls.from_key(self, moved)

    @classmethod
    def from_bucket(cls, connection, bucket):
        """Create from bucket object."""
        if bucket is None:
            raise errors.NoContainerException

        # It appears that Amazon does not have a single-shot REST query to
        # determine the number of keys / overall byte size of a bucket.
        return cls(connection, bucket.name)


class BotoConnection(base.CloudConnection):
    """Boto connection wrapper."""
    #: Container child class.
cont_cls = BotoContainer #: Exception translations. wrap_boto_errors = BotoBucketWrapper() def _get_connection(self): """Return native connection object.""" raise NotImplementedError("Must create boto connection.") @wrap_boto_errors def _get_containers(self): """Return available containers.""" buckets = self.native_conn.get_all_buckets() return [self.cont_cls.from_bucket(self, b) for b in buckets] @wrap_boto_errors def _get_container(self, path): """Return single container.""" bucket = self.native_conn.get_bucket(path) return self.cont_cls.from_bucket(self, bucket) def get_upload_form(self, *args, **kwargs): """Return html format upload form.""" raise NotImplementedError
{ "repo_name": "lantip/aws-filemanager", "path": "cloud_browser/cloud/boto_base.py", "copies": "1", "size": "20265", "license": "mit", "hash": 2009144989407026000, "line_mean": 35.3824057451, "line_max": 79, "alpha_frac": 0.5843079201, "autogenerated": false, "ratio": 4.312619706320493, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5396927626420494, "avg_score": null, "num_lines": null }
# Abstract Building
class Pizza:
    """A pizza with a size in inches and a growing list of ingredients.

    Every pizza starts with the two base ingredients; the builder below
    appends extras.
    """

    def __init__(self, inches: int):
        self.ingredients = ["salsa de tomate", "queso"]
        self.inches = inches

    def __str__(self) -> str:
        """Spanish description: comma-separated ingredients, with the last
        comma replaced by " y" (Spanish "and")."""
        message = f'Mi pizza es de {self.inches}" con los siguientes ingredientes: ' + ', '.join(self.ingredients)
        # Replace the last comma with "y"
        last_comma = message.rfind(",")
        if last_comma == -1:
            # Fix: with fewer than two ingredients there is no comma;
            # str.rfind returns -1 and the slicing below would corrupt the
            # message (drop the last char and duplicate the whole string).
            return message
        return message[:last_comma] + " y" + message[last_comma+1:]


# Builder.
class PizzaBuilder:
    """Fluent builder for :class:`Pizza`: each add* method appends one
    ingredient and returns ``self`` so calls can be chained."""

    def __init__(self, inches: int):
        self.pizza = Pizza(inches)

    def addCheese(self) -> 'PizzaBuilder':
        """Add extra cheese."""
        self.pizza.ingredients.append('doble queso')
        return self

    def addPepperoni(self) -> 'PizzaBuilder':
        """Add pepperoni."""
        self.pizza.ingredients.append('pepperoni')
        return self

    def addSalami(self) -> 'PizzaBuilder':
        """Add salami."""
        self.pizza.ingredients.append('salami')
        return self

    def addPimientos(self) -> 'PizzaBuilder':
        """Add peppers."""
        self.pizza.ingredients.append('pimientos')
        return self

    def addCebolla(self) -> 'PizzaBuilder':
        """Add onion."""
        self.pizza.ingredients.append('cebolla')
        return self

    def addChampiñones(self) -> 'PizzaBuilder':
        """Add mushrooms."""
        self.pizza.ingredients.append('champiñones')
        return self

    def build(self) -> Pizza:
        """Return the assembled pizza."""
        return self.pizza
{ "repo_name": "AnhellO/DAS_Sistemas", "path": "Ago-Dic-2020/rodriguez-martinez-jesus-angel/primer-parcial/builder.py", "copies": "1", "size": "1125", "license": "mit", "hash": 5899140755982430000, "line_mean": 31.1142857143, "line_max": 110, "alpha_frac": 0.6500445236, "autogenerated": false, "ratio": 3.09366391184573, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.424370843544573, "avg_score": null, "num_lines": null }
"""Abstract class-based views""" import json from django.contrib.auth import get_user_model from django.http import HttpResponse from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_protect from django.views.generic import View from django.utils.translation import ugettext as __ from collective_blog.utils.errors import PermissionCheckFailed User = get_user_model() @method_decorator(csrf_protect, 'dispatch') class VoteView(View): # A model through which the votes counted (derived from the `AbstractVote`) model = None # An object that is being voted (set this up in the `get_object` method) object = None # A method that will be used to serialize the response data serialize = staticmethod(json.dumps) def get_score(self): """Reload this to use a cache field instead of dynamic lookup""" return self.model.objects.filter(object=self.object).score() def get_object(self, *args, **kwargs): raise NotImplementedError() def post(self, request, *args, **kwargs): if not request.is_ajax(): return HttpResponse('This page is ajax-only', status=418) try: try: vote = int(request.GET['vote']) assert vote in [-1, 0, 1] except (ValueError, AssertionError, KeyError): raise PermissionCheckFailed(__('Wrong vote')) self.object = self.get_object(*args, **kwargs) self.model.vote_for(request.user, self.object, vote) data = {'score': self.get_score(), 'state': vote} return HttpResponse(self.serialize(data)) except PermissionCheckFailed as e: return HttpResponse(e.note, status=400)
{ "repo_name": "AmatanHead/collective-blog", "path": "s_voting/views.py", "copies": "1", "size": "1760", "license": "mit", "hash": -5200476928130725000, "line_mean": 34.2, "line_max": 79, "alpha_frac": 0.6647727273, "autogenerated": false, "ratio": 4.200477326968974, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5365250054268974, "avg_score": null, "num_lines": null }
"""Abstract class defining cache of e-mail messages.""" import abc import typing as t from .message import Message from .folder import Folder class EmailCache(metaclass=abc.ABCMeta): """An object that stores e-mail messages.""" def __init__(self): self.folders = {} # type: t.Dict[str, Folder] # self.message_ids = {} # type: t.Mapping[str, t.List[int]] # self.messages = {} # type: t.Mapping[t.Tuple[str, int], Message] @abc.abstractmethod def update_folders(self): """Rebuild the folders dictionary.""" ... # @abc.abstractmethod # def retrieve_messages( # self, message_ids: t.List[int], folder: t.Optional[str] = None) -> t.List[Message]: # pass # # @abc.abstractmethod # def retrieve_message(self, message_id: int, folder: t.Optional[str] = None) -> Message: # pass def update_messages(self): for _, folder in self.folders.items(): self.update_messages_in(folder) @abc.abstractmethod def update_messages_in(self, folder: Folder): """Update messages stored in a given folder.""" ... def update(self): self.update_folders() self.update_messages()
{ "repo_name": "mbdevpl/maildaemon", "path": "maildaemon/email_cache.py", "copies": "1", "size": "1234", "license": "apache-2.0", "hash": -841956538512507100, "line_mean": 27.6976744186, "line_max": 97, "alpha_frac": 0.6061588331, "autogenerated": false, "ratio": 3.7737003058103977, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9877435606409817, "avg_score": 0.00048470650011612755, "num_lines": 43 }
"""Abstract classes and utility operations for building graph representations and data loaders (known as Sequence objects in Keras). Most users will not need to interact with this module.""" from abc import abstractmethod from inspect import signature from operator import itemgetter from typing import Union, Dict, List, Any import numpy as np from monty.json import MSONable from tensorflow.keras.utils import Sequence from pymatgen.core import Structure from pymatgen.analysis.local_env import NearNeighbors from megnet.data import local_env from megnet.utils.data import get_graphs_within_cutoff from megnet.utils.general import expand_1st, to_list class Converter(MSONable): """ Base class for atom or bond converter """ def convert(self, d: Any) -> Any: """ Convert the object d Args: d (Any): Any object d Returns: returned object """ raise NotImplementedError class StructureGraph(MSONable): """ This is a base class for converting converting structure into graphs or model inputs Methods to be implemented are follows: 1. convert(self, structure) This is to convert a structure into a graph dictionary 2. get_input(self, structure) This method convert a structure directly to a model input 3. get_flat_data(self, graphs, targets) This method process graphs and targets pairs and output model input list. 
""" def __init__( self, nn_strategy: Union[str, NearNeighbors] = None, atom_converter: Converter = None, bond_converter: Converter = None, **kwargs, ): """ Args: nn_strategy (str or NearNeighbors): NearNeighbor strategy atom_converter (Converter): atom converter bond_converter (Converter): bond converter **kwargs: """ if isinstance(nn_strategy, str): strategy = local_env.get(nn_strategy) parameters = signature(strategy).parameters param_dict = {i: j.default for i, j in parameters.items()} for i, j in kwargs.items(): if i in param_dict: setattr(self, i, j) param_dict.update({i: j}) self.nn_strategy = strategy(**param_dict) elif isinstance(nn_strategy, NearNeighbors): self.nn_strategy = nn_strategy elif nn_strategy is None: self.nn_strategy = None else: raise RuntimeError("Strategy not valid") self.atom_converter = atom_converter or self._get_dummy_converter() self.bond_converter = bond_converter or self._get_dummy_converter() def convert(self, structure: Structure, state_attributes: List = None) -> Dict: """ Take a pymatgen structure and convert it to a index-type graph representation The graph will have node, distance, index1, index2, where node is a vector of Z number of atoms in the structure, index1 and index2 mark the atom indices forming the bond and separated by distance. 
For state attributes, you can set structure.state = [[xx, xx]] beforehand or the algorithm would take default [[0, 0]] Args: state_attributes: (list) state attributes structure: (pymatgen structure) (dictionary) """ state_attributes = ( state_attributes or getattr(structure, "state", None) or np.array([[0.0, 0.0]], dtype="float32") ) index1 = [] index2 = [] bonds = [] if self.nn_strategy is None: raise RuntimeError("NearNeighbor strategy is not provided!") for n, neighbors in enumerate(self.nn_strategy.get_all_nn_info(structure)): index1.extend([n] * len(neighbors)) for neighbor in neighbors: index2.append(neighbor["site_index"]) bonds.append(neighbor["weight"]) atoms = self.get_atom_features(structure) if np.size(np.unique(index1)) < len(atoms): raise RuntimeError("Isolated atoms found in the structure") return {"atom": atoms, "bond": bonds, "state": state_attributes, "index1": index1, "index2": index2} @staticmethod def get_atom_features(structure) -> List[Any]: """ Get atom features from structure, may be overwritten Args: structure: (Pymatgen.Structure) pymatgen structure Returns: List of atomic numbers """ return np.array([i.specie.Z for i in structure], dtype="int32").tolist() def __call__(self, structure: Structure) -> Dict: """ Directly apply the converter to structure, alias to convert Args: structure (Structure): input structure Returns (dict): graph dictionary """ return self.convert(structure) def get_input(self, structure: Structure) -> List[np.ndarray]: """ Turns a structure into model input """ graph = self.convert(structure) return self.graph_to_input(graph) def graph_to_input(self, graph: Dict) -> List[np.ndarray]: """ Turns a graph into model input Args: (dict): Dictionary description of the graph Return: ([np.ndarray]): Inputs in the form needed by MEGNet """ gnode = [0] * len(graph["atom"]) gbond = [0] * len(graph["index1"]) return [ expand_1st(self.atom_converter.convert(graph["atom"])), expand_1st(self.bond_converter.convert(graph["bond"])), 
expand_1st(np.array(graph["state"])), expand_1st(np.array(graph["index1"], dtype=np.int32)), expand_1st(np.array(graph["index2"], dtype=np.int32)), expand_1st(np.array(gnode, dtype=np.int32)), expand_1st(np.array(gbond, dtype=np.int32)), ] @staticmethod def get_flat_data(graphs: List[Dict], targets: List = None) -> tuple: """ Expand the graph dictionary to form a list of features and targets tensors. This is useful when the model is trained on assembled graphs on the fly. Args: graphs: (list of dictionary) list of graph dictionary for each structure targets: (list of float or list) Optional: corresponding target values for each structure Returns: tuple(node_features, edges_features, global_values, index1, index2, targets) """ output = [] # Will be a list of arrays # Convert the graphs to matrices for feature in ["atom", "bond", "state", "index1", "index2"]: output.append([np.array(x[feature]) for x in graphs]) # If needed, add the targets if targets is not None: output.append([to_list(t) for t in targets]) return tuple(output) @staticmethod def _get_dummy_converter() -> "DummyConverter": return DummyConverter() def as_dict(self) -> Dict: """ Serialize to dict Returns: (dict) dictionary of information """ all_dict = super().as_dict() if "nn_strategy" in all_dict: nn_strategy = all_dict.pop("nn_strategy") all_dict.update({"nn_strategy": local_env.serialize(nn_strategy)}) return all_dict @classmethod def from_dict(cls, d: Dict) -> "StructureGraph": """ Initialization from dictionary Args: d (dict): dictionary Returns: StructureGraph object """ if "nn_strategy" in d: nn_strategy = d.pop("nn_strategy") nn_strategy_obj = local_env.deserialize(nn_strategy) d.update({"nn_strategy": nn_strategy_obj}) return super().from_dict(d) return super().from_dict(d) class StructureGraphFixedRadius(StructureGraph): """ This one uses a short cut to call find_points_in_spheres cython function in pymatgen. 
It is orders of magnitude faster than previous implementations """ def convert(self, structure: Structure, state_attributes: List = None) -> Dict: """ Take a pymatgen structure and convert it to a index-type graph representation The graph will have node, distance, index1, index2, where node is a vector of Z number of atoms in the structure, index1 and index2 mark the atom indices forming the bond and separated by distance. For state attributes, you can set structure.state = [[xx, xx]] beforehand or the algorithm would take default [[0, 0]] Args: state_attributes: (list) state attributes structure: (pymatgen structure) (dictionary) """ state_attributes = ( state_attributes or getattr(structure, "state", None) or np.array([[0.0, 0.0]], dtype="float32") ) atoms = self.get_atom_features(structure) index1, index2, _, bonds = get_graphs_within_cutoff(structure, self.nn_strategy.cutoff) if np.size(np.unique(index1)) < len(atoms): raise RuntimeError("Isolated atoms found in the structure") return {"atom": atoms, "bond": bonds, "state": state_attributes, "index1": index1, "index2": index2} @classmethod def from_structure_graph(cls, structure_graph: StructureGraph) -> "StructureGraphFixedRadius": """ Initialize from pymatgen StructureGraph Args: structure_graph (StructureGraph): pymatgen StructureGraph object Returns: StructureGraphFixedRadius object """ return cls( nn_strategy=structure_graph.nn_strategy, atom_converter=structure_graph.atom_converter, bond_converter=structure_graph.bond_converter, ) class DummyConverter(Converter): """ Dummy converter as a placeholder """ def convert(self, d: Any) -> Any: """ Dummy convert, does nothing to input Args: d (Any): input object Returns: d """ return d class EmbeddingMap(Converter): """ Convert an integer to a row vector in a feature matrix """ def __init__(self, feature_matrix: np.ndarray): """ Args: feature_matrix: (np.ndarray) A matrix of shape (N, M) """ self.feature_matrix = np.array(feature_matrix) def convert(self, 
int_array: np.ndarray) -> np.ndarray: """ convert atomic number to row vectors in the feature_matrix Args: int_array: (1d array) number array of length L Returns (matrix) L*M matrix with N the length of d and M the length of centers """ return self.feature_matrix[int_array] class GaussianDistance(Converter): """ Expand distance with Gaussian basis sit at centers and with width 0.5. """ def __init__(self, centers: np.ndarray = np.linspace(0, 5, 100), width=0.5): """ Args: centers: (np.array) centers for the Gaussian basis width: (float) width of Gaussian basis """ self.centers = centers self.width = width def convert(self, d: np.ndarray) -> np.ndarray: """ expand distance vector d with given parameters Args: d: (1d array) distance array Returns (matrix) N*M matrix with N the length of d and M the length of centers """ d = np.array(d) return np.exp(-((d[:, None] - self.centers[None, :]) ** 2) / self.width ** 2) class BaseGraphBatchGenerator(Sequence): """Base class for classes that generate batches of training data for MEGNet. Based on the Sequence class, which is the data loader equivalent for Keras. Implementations of this base class must implement the :meth:`_generate_inputs`, which generates the lists of graph descriptions for a batch. The :meth:`process_atom_features` function and related functions are used to modify the features for each atom, bond, and global features when creating a batch. 
""" def __init__( self, dataset_size: int, targets: np.ndarray, sample_weights: np.ndarray = None, batch_size: int = 128, is_shuffle: bool = True, ): """ Args: dataset_size (int): Number of entries in dataset targets (ndarray): Feature to be predicted for each network sample_weights (npdarray): sample weights batch_size (int): Maximum batch size is_shuffle (bool): Whether to shuffle the data after each step """ if targets is not None: self.targets = np.array(targets).reshape((dataset_size, -1)) else: self.targets = None if sample_weights is not None: self.sample_weights = np.array(sample_weights) else: self.sample_weights = None self.batch_size = batch_size self.total_n = dataset_size self.is_shuffle = is_shuffle self.max_step = int(np.ceil(self.total_n / batch_size)) self.mol_index = np.arange(self.total_n) if self.is_shuffle: self.mol_index = np.random.permutation(self.mol_index) def __len__(self) -> int: return self.max_step def _combine_graph_data( self, feature_list_temp: List[np.ndarray], connection_list_temp: List[np.ndarray], global_list_temp: List[np.ndarray], index1_temp: List[np.ndarray], index2_temp: List[np.ndarray], ) -> tuple: """Compile the matrices describing each graph into single matrices for the entire graph Beyond concatenating the graph descriptions, this operation updates the indices of each node to be sequential across all graphs so they are not duplicated between graphs Args: feature_list_temp ([ndarray]): List of features for each node connection_list_temp ([ndarray]): List of features for each connection global_list_temp ([ndarray]): List of global state for each graph index1_temp ([ndarray]): List of indices for the start of each bond index2_temp ([ndarray]): List of indices for the end of each bond Returns: (tuple): Input arrays describing the entire batch of networks: - ndarray: Features for each node - ndarray: Features for each connection - ndarray: Global state for each graph - ndarray: Indices for the start of each bond - ndarray: 
Indices for the end of each bond - ndarray: Index of graph associated with each node - ndarray: Index of graph associated with each connection """ # get atom's structure id gnode = [] for i, j in enumerate(feature_list_temp): gnode += [i] * len(j) # get bond features from a batch of structures # get bond's structure id gbond = [] for i, j in enumerate(connection_list_temp): gbond += [i] * len(j) # assemble atom features together feature_list_temp = np.concatenate(feature_list_temp, axis=0) feature_list_temp = self.process_atom_feature(feature_list_temp) # assemble bond feature together connection_list_temp = np.concatenate(connection_list_temp, axis=0) connection_list_temp = self.process_bond_feature(connection_list_temp) # assemble state feature together global_list_temp = np.concatenate(global_list_temp, axis=0) global_list_temp = self.process_state_feature(global_list_temp) # assemble bond indices index1 = [] index2 = [] offset_ind = 0 for ind1, ind2 in zip(index1_temp, index2_temp): index1 += [i + offset_ind for i in ind1] index2 += [i + offset_ind for i in ind2] offset_ind += max(ind1) + 1 # Compile the inputs in needed order inputs = ( expand_1st(feature_list_temp), expand_1st(connection_list_temp), expand_1st(global_list_temp), expand_1st(np.array(index1, dtype=np.int32)), expand_1st(np.array(index2, dtype=np.int32)), expand_1st(np.array(gnode, dtype=np.int32)), expand_1st(np.array(gbond, dtype=np.int32)), ) return inputs def on_epoch_end(self): """ code to be executed on epoch end """ if self.is_shuffle: self.mol_index = np.random.permutation(self.mol_index) def process_atom_feature(self, x: np.ndarray) -> np.ndarray: """ Args: x (np.ndarray): atom features Returns: processed atom features """ return x def process_bond_feature(self, x: np.ndarray) -> np.ndarray: """ Args: x (np.ndarray): bond features Returns: processed bond features """ return x def process_state_feature(self, x: np.ndarray) -> np.ndarray: """ Args: x (np.ndarray): state features Returns: 
processed state features """ return x def __getitem__(self, index: int) -> tuple: # Get the indices for this batch batch_index = self.mol_index[index * self.batch_size : (index + 1) * self.batch_size] # Get the inputs for each batch inputs = self._generate_inputs(batch_index) # Make the graph data inputs = self._combine_graph_data(*inputs) # Return the batch if self.targets is None: return inputs # get targets target_temp = itemgetter_list(self.targets, batch_index) target_temp = np.atleast_2d(target_temp) if self.sample_weights is None: return inputs, expand_1st(target_temp) sample_weights_temp = itemgetter_list(self.sample_weights, batch_index) # sample_weights_temp = np.atleast_2d(sample_weights_temp) return inputs, expand_1st(target_temp), expand_1st(sample_weights_temp) @abstractmethod def _generate_inputs(self, batch_index: list) -> tuple: """Get the graph descriptions for each batch Args: batch_index ([int]): List of indices for training batch Returns: (tuple): Input arrays describing each network: - [ndarray]: List of features for each node - [ndarray]: List of features for each connection - [ndarray]: List of global state for each graph - [ndarray]: List of indices for the start of each bond - [ndarray]: List of indices for the end of each bond """ pass class GraphBatchGenerator(BaseGraphBatchGenerator): """ A generator class that assembles several structures (indicated by batch_size) and form (x, y) pairs for model training. 
""" def __init__( self, atom_features: List[np.ndarray], bond_features: List[np.ndarray], state_features: List[np.ndarray], index1_list: List[int], index2_list: List[int], targets: np.ndarray = None, sample_weights: np.ndarray = None, batch_size: int = 128, is_shuffle: bool = True, ): """ Args: atom_features: (list of np.array) list of atom feature matrix, bond_features: (list of np.array) list of bond features matrix state_features: (list of np.array) list of [1, G] state features, where G is the global state feature dimension index1_list: (list of integer) list of (M, ) one side atomic index of the bond, M is different for different structures index2_list: (list of integer) list of (M, ) the other side atomic index of the bond, M is different for different structures, but it has to be the same as the corresponding index1. targets: (numpy array), N*1, where N is the number of structures sample_weights: (numpy array), N*1, where N is the number of structures batch_size: (int) number of samples in a batch """ super().__init__( len(atom_features), targets, sample_weights=sample_weights, batch_size=batch_size, is_shuffle=is_shuffle ) self.atom_features = atom_features self.bond_features = bond_features self.state_features = state_features self.index1_list = index1_list self.index2_list = index2_list def _generate_inputs(self, batch_index: list) -> tuple: """Get the graph descriptions for each batch Args: batch_index ([int]): List of indices for training batch Returns: (tuple): Input arrays describe each network: - [ndarray]: List of features for each nodes - [ndarray]: List of features for each connection - [ndarray]: List of global state for each graph - [ndarray]: List of indices for the start of each bond - [ndarray]: List of indices for the end of each bond """ # Get the features and connectivity lists for this batch feature_list_temp = itemgetter_list(self.atom_features, batch_index) connection_list_temp = itemgetter_list(self.bond_features, batch_index) 
global_list_temp = itemgetter_list(self.state_features, batch_index) index1_temp = itemgetter_list(self.index1_list, batch_index) index2_temp = itemgetter_list(self.index2_list, batch_index) return feature_list_temp, connection_list_temp, global_list_temp, index1_temp, index2_temp class GraphBatchDistanceConvert(GraphBatchGenerator): """ Generate batch of structures with bond distance being expanded using a Expansor """ def __init__( self, atom_features: List[np.ndarray], bond_features: List[np.ndarray], state_features: List[np.ndarray], index1_list: List[int], index2_list: List[int], targets: np.ndarray = None, sample_weights: np.ndarray = None, batch_size: int = 128, is_shuffle: bool = True, distance_converter: Converter = None, ): """ Args: atom_features: (list of np.array) list of atom feature matrix, bond_features: (list of np.array) list of bond features matrix state_features: (list of np.array) list of [1, G] state features, where G is the global state feature dimension index1_list: (list of integer) list of (M, ) one side atomic index of the bond, M is different for different structures index2_list: (list of integer) list of (M, ) the other side atomic index of the bond, M is different for different structures, but it has to be the same as the correponding index1. 
targets: (numpy array), N*1, where N is the number of structures sample_weights: (numpy array), N*1, where N is the number of structures batch_size: (int) number of samples in a batch is_shuffle: (bool) whether to shuffle the structure, default to True distance_converter: (bool) converter for processing the distances """ super().__init__( atom_features=atom_features, bond_features=bond_features, state_features=state_features, index1_list=index1_list, index2_list=index2_list, targets=targets, sample_weights=sample_weights, batch_size=batch_size, is_shuffle=is_shuffle, ) if distance_converter is None: raise ValueError("Distance converter cannot be None") self.distance_converter = distance_converter def process_bond_feature(self, x) -> np.ndarray: """ Convert bond distances into Gaussian expanded vectors Args: x (np.ndarray): input distance array Returns: expanded matrix """ return self.distance_converter.convert(x) def itemgetter_list(data_list: List, indices: List) -> tuple: """ Get indices of data_list and return a tuple Args: data_list (list): data list indices: (list) indices Returns: (tuple) """ it = itemgetter(*indices) if np.size(indices) == 1: return (it(data_list),) return it(data_list)
{ "repo_name": "materialsvirtuallab/megnet", "path": "megnet/data/graph.py", "copies": "1", "size": "24856", "license": "bsd-3-clause", "hash": -2417267303900003000, "line_mean": 36.153961136, "line_max": 116, "alpha_frac": 0.6013035082, "autogenerated": false, "ratio": 4.275197798417612, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0007595906918540791, "num_lines": 669 }
"""Abstract classes.""" from __future__ import absolute_import, unicode_literals import abc from collections import Callable from .five import with_metaclass __all__ = ['Thenable'] @with_metaclass(abc.ABCMeta) class Thenable(Callable): # pragma: no cover """Object that supports ``.then()``.""" __slots__ = () @abc.abstractmethod def then(self, on_success, on_error=None): raise NotImplementedError() @abc.abstractmethod def throw(self, exc=None, tb=None, propagate=True): raise NotImplementedError() @abc.abstractmethod def cancel(self): raise NotImplementedError() @classmethod def __subclasshook__(cls, C): if cls is Thenable: if any('then' in B.__dict__ for B in C.__mro__): return True return NotImplemented @classmethod def register(cls, other): # overide to return other so `register` can be used as a decorator type(cls).register(cls, other) return other @Thenable.register class ThenableProxy(object): """Proxy to object that supports ``.then()``.""" def _set_promise_target(self, p): self._p = p def then(self, on_success, on_error=None): return self._p.then(on_success, on_error) def cancel(self): return self._p.cancel() def throw1(self, exc=None): return self._p.throw1(exc) def throw(self, exc=None, tb=None, propagate=True): return self._p.throw(exc, tb=tb, propagate=propagate) @property def cancelled(self): return self._p.cancelled @property def ready(self): return self._p.ready @property def failed(self): return self._p.failed
{ "repo_name": "ammarkhann/FinalSeniorCode", "path": "lib/python2.7/site-packages/vine/abstract.py", "copies": "3", "size": "1723", "license": "mit", "hash": -9199506627753413000, "line_mean": 22.2837837838, "line_max": 74, "alpha_frac": 0.6204294835, "autogenerated": false, "ratio": 3.94279176201373, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.606322124551373, "avg_score": null, "num_lines": null }
"""Abstract classes.""" from __future__ import absolute_import, unicode_literals import abc from .five import with_metaclass, Callable __all__ = ['Thenable'] @with_metaclass(abc.ABCMeta) class Thenable(Callable): # pragma: no cover """Object that supports ``.then()``.""" __slots__ = () @abc.abstractmethod def then(self, on_success, on_error=None): raise NotImplementedError() @abc.abstractmethod def throw(self, exc=None, tb=None, propagate=True): raise NotImplementedError() @abc.abstractmethod def cancel(self): raise NotImplementedError() @classmethod def __subclasshook__(cls, C): if cls is Thenable: if any('then' in B.__dict__ for B in C.__mro__): return True return NotImplemented @classmethod def register(cls, other): # overide to return other so `register` can be used as a decorator type(cls).register(cls, other) return other @Thenable.register class ThenableProxy(object): """Proxy to object that supports ``.then()``.""" def _set_promise_target(self, p): self._p = p def then(self, on_success, on_error=None): return self._p.then(on_success, on_error) def cancel(self): return self._p.cancel() def throw1(self, exc=None): return self._p.throw1(exc) def throw(self, exc=None, tb=None, propagate=True): return self._p.throw(exc, tb=tb, propagate=propagate) @property def cancelled(self): return self._p.cancelled @property def ready(self): return self._p.ready @property def failed(self): return self._p.failed
{ "repo_name": "cloudera/hue", "path": "desktop/core/ext-py/vine-1.2.0/vine/abstract.py", "copies": "3", "size": "1699", "license": "apache-2.0", "hash": 6656787850085339000, "line_mean": 22.5972222222, "line_max": 74, "alpha_frac": 0.6168334314, "autogenerated": false, "ratio": 3.92378752886836, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.604062096026836, "avg_score": null, "num_lines": null }
"""Abstract classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import copy import yaml import random import subprocess import numpy as np import tensorflow as tf from tensorflow.contrib.tensorboard.plugins import projector from tensorflow.python.client import device_lib from utils import io_utils from chatbot.components import * from chatbot.globals import DEFAULT_FULL_CONFIG, OPTIMIZERS def gpu_found(): """Returns True if tensorflow finds at least 1 GPU.""" devices = device_lib.list_local_devices() return len([x.name for x in devices if x.device_type == 'GPU']) > 0 class Model(object): """Superclass of all subsequent model classes. """ def __init__(self, logger, dataset, params): """ Args: logger: returned by getLogger & called by subclasses. Passed here so we know what object to use for info/warn/error. dataset: object that inherits from data.Dataset. params: (dict) user-specified params that override those in DEFAULT_FULL_CONFIG above. """ self.log = logger self.__dict__['__params'] = Model.fill_params(dataset, params) # Make particularly useful ckpt directories for website configurations. if 'website_config' in self.ckpt_dir: self.ckpt_dir = Model._build_hparam_path( ckpt_dir=self.ckpt_dir, num_layers=self.num_layers, max_seq_len=self.max_seq_len) self.log.info("New ckpt dir:", self.ckpt_dir) # Configure gpu options if we are using one. if gpu_found(): self.log.info("GPU Found. Setting allow_growth to True.") gpu_config = tf.ConfigProto() gpu_config.gpu_options.allow_growth = True self.sess = tf.Session(config=gpu_config) else: self.log.warning("GPU not found. Not recommended for training.") self.sess = tf.Session() with self.graph.name_scope(tf.GraphKeys.SUMMARIES): self.global_step = tf.Variable(initial_value=0, trainable=False) self.learning_rate = tf.constant(self.learning_rate) # Create ckpt_dir if user hasn't already (if exists, has no effect). 
subprocess.call(['mkdir', '-p', self.ckpt_dir]) self.projector_config = projector.ProjectorConfig() # Good practice to set as None in constructor. self.loss = None self.file_writer = None self.merged = None self.train_op = None self.saver = None def compile(self): """ Configure training process and initialize model. Inspired by Keras. Either restore model parameters or create fresh ones. - Checks if we can both (1) find a checkpoint state, and (2) a valid V1/V2 checkpoint path. - If we can't, then just re-initialize model with fresh params. """ self.log.info("Checking for checkpoints . . .") checkpoint_state = tf.train.get_checkpoint_state(self.ckpt_dir) if not self.reset_model and checkpoint_state \ and tf.train.checkpoint_exists(checkpoint_state.model_checkpoint_path): print("Reading model parameters from", checkpoint_state.model_checkpoint_path) self.file_writer = tf.summary.FileWriter(self.ckpt_dir) self.saver = tf.train.Saver(tf.global_variables()) self.saver.restore(self.sess, checkpoint_state.model_checkpoint_path) else: print("Created model with fresh parameters:\n\t", self.ckpt_dir) # Recursively delete all files in output but keep directories. subprocess.call([ 'find', self.ckpt_dir, '-type', 'f', '-exec', 'rm', '{}', ';' ]) self.file_writer = tf.summary.FileWriter(self.ckpt_dir) # Add operation for calling all variable initializers. init_op = tf.global_variables_initializer() # Construct saver (adds save/restore ops to all). self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=3) # Add the fully-constructed graph to the event file. self.file_writer.add_graph(self.sess.graph) # Initialize all model variables. self.sess.run(init_op) # Store model config in ckpt dir for easy loading later. with open(os.path.join(self.ckpt_dir, 'config.yml'), 'w') as f: yaml.dump(getattr(self, "params"), f, default_flow_style=False) def save(self, summaries=None): """ Args: summaries: merged summary instance returned by session.run. 
""" if self.saver is None: raise ValueError("Tried saving model before defining a saver.") ckpt_fname = os.path.join(self.ckpt_dir, "{}.ckpt".format(self.data_name)) # Saves the state of all global variables in a ckpt file. self.saver.save(self.sess, ckpt_fname, global_step=self.global_step) if summaries is not None: self.file_writer.add_summary(summaries, self.global_step.eval(self.sess)) else: self.log.info("Save called without summaries.") def close(self, save_current=True): """Call then when training session is terminated. - Saves the current model/checkpoint state. - Freezes the model into a protobuf file in self.ckpt_dir. - Closes context managers for file_writing and session. """ # First save the checkpoint as usual. if save_current: self.save() # Freeze me, for I am infinite. self.freeze() # Be a responsible bot and close my file writer. self.file_writer.close() # Formally exit the session, farewell to all. self.sess.close() @property def graph(self): return self.sess.graph @staticmethod def fill_params(dataset, params): """For now, essentially just returns (already parsed) params, but placed here in case I want to customize later (likely). """ # Replace (string) specification of dataset with the actual instance. params['dataset'] = dataset params['dataset_params']['data_name'] = dataset.name if params['model_params']['ckpt_dir'] == 'out': params['model_params']['ckpt_dir'] += '/'+dataset.name # Define alias in case older models still use it. params['model_params']['is_chatting'] = params['model_params']['decode'] return params def freeze(self): """Useful for e.g. deploying model on website. Args: directory containing model ckpt files we'd like to freeze. """ if not tf.get_collection('freezer'): self.log.warning('No freezer found. Not saving a frozen model.') return # Note: output_node_names is only used to tell tensorflow what is can # throw away in the frozen graph (e.g. training ops). 
output_node_names = ",".join( [t.name.rstrip(':0') for t in tf.get_collection('freezer')]) self.log.info('Output node names: %r', output_node_names) # Save a graph with only the bare necessities for chat sessions. output_graph_def = tf.graph_util.convert_variables_to_constants( self.sess, self.graph.as_graph_def(), output_node_names.split(',')) output_fname = os.path.join(self.ckpt_dir, "frozen_model.pb") with tf.gfile.GFile(output_fname, 'wb') as f: f.write(output_graph_def.SerializeToString()) print("%d ops in the final graph." % len(output_graph_def.node)) subprocess.call(['cp', self.dataset.paths['vocab'], self.ckpt_dir]) def __getattr__(self, name): if name == 'params': camel_case = self.data_name.title().replace('_', '') replace_dict = {'dataset': "data."+camel_case} return {**self.__dict__['__params'], **replace_dict} elif name in DEFAULT_FULL_CONFIG: # Requesting a top-level key. return self.__dict__['__params'][name] else: for k in DEFAULT_FULL_CONFIG.keys(): if not isinstance(self.__dict__['__params'][k], dict): continue if name in self.__dict__['__params'][k]: return self.__dict__['__params'][k][name] raise AttributeError(name) @staticmethod def _build_hparam_path(ckpt_dir, **kwargs): """Returns relative path build from args for descriptive checkpointing. The new path becomes ckpt_dir appended with directories named by kwargs: - If a given kwargs[key] is a string, that is set as the appended dir name. - Otherwise, it gets formatted, e.g. for key='learning_rate' it may become 'learning_rate_0_001' Returns: ckpt_dir followed by sequentially appended directories, named by kwargs. """ kwargs = copy.deepcopy(kwargs) new_ckpt_dir = ckpt_dir for key in sorted(kwargs): if not isinstance(kwargs[key], str): dir_name = key + "_" + str(kwargs[key]).replace('.', '_') else: dir_name = kwargs[key] new_ckpt_dir = os.path.join(new_ckpt_dir, dir_name) return new_ckpt_dir class BucketModel(Model): """Abstract class. 
Any classes that extend BucketModel just need to customize their graph structure in __init__ and implement the step(...) function. The real motivation for making this was to be able to use the true Model abstract class for all classes in this directory, bucketed or not, r1.0 or r0.12. """ def __init__(self, logger, buckets, dataset, params): self.buckets = buckets super(BucketModel, self).__init__( logger=logger, dataset=dataset, params=params) def compile(self): """ Configure training process. Name was inspired by Keras. <3 """ if self.losses is None: raise ValueError("Tried compiling model before defining losses.") print("Configuring training operations. This may take some time . . . ") # Note: variables are trainable=True by default. params = tf.trainable_variables() # train_op will store the parameter (S)GD train_op. self.apply_gradients = [] optimizer = OPTIMIZERS[self.optimizer](self.learning_rate) for b in range(len(self.buckets)): gradients = tf.gradients(self.losses[b], params) # Gradient clipping is actually extremely simple, it basically just # checks if L2Norm(gradients) > max_gradient, and if it is, # it returns (gradients / L2Norm(gradients)) * max_grad. clipped_gradients, _ = tf.clip_by_global_norm( gradients, self.max_gradient) self.apply_gradients.append(optimizer.apply_gradients( zip(clipped_gradients, params),global_step=self.global_step)) super(BucketModel, self).compile() def check_input_lengths(self, inputs, expected_lengths): """ Raises: ValueError: if length of encoder_inputs, decoder_inputs, or target_weights disagrees with bucket size for the specified bucket_id. """ for input, length in zip(inputs, expected_lengths): if len(input) != length: raise ValueError("Input length doesn't match bucket size:" " %d != %d." % (len(input), length)) def get_batch(self, data, bucket_id): """Get a random batch of data from the specified bucket, prepare for step. Args: data: tuple of len(self.buckets). 
data[bucket_id] == [source_ids, target_ids] bucket_id: integer, which bucket to get the batch for. Returns: The triple (encoder_inputs, decoder_inputs, target_weights) for the constructed batch that has the proper format to call step(...) later. """ encoder_size, decoder_size = self.buckets[bucket_id] encoder_inputs, decoder_inputs = [], [] # Get a random batch of encoder and decoder inputs from data, # pad them if needed, reverse encoder inputs and add GO to decoder. for _ in range(self.batch_size): encoder_input, decoder_input = random.choice(data[bucket_id]) # BasicEncoder inputs are padded and then reversed. encoder_pad = [io_utils.PAD_ID] * (encoder_size - len(encoder_input)) encoder_inputs.append(list(reversed(encoder_input + encoder_pad))) # DynamicDecoder inputs get an extra "GO" symbol, and are padded then. decoder_pad= [io_utils.PAD_ID] * (decoder_size - len(decoder_input) - 1) decoder_inputs.append([io_utils.GO_ID] + decoder_input + decoder_pad) # Define some small helper functions before we re-index & weight. def inputs_to_unit(uid, inputs): """ Return re-indexed version of inputs array. Description in params below. :param uid: index identifier for input timestep/unit/node of interest. :param inputs: single batch of data; inputs[i] is i'th sentence. :return: re-indexed version of inputs as numpy array. """ return np.array([inputs[i][uid] for i in range(self.batch_size)], dtype=np.int32) batch_encoder_inputs = [inputs_to_unit(i, encoder_inputs) for i in range(encoder_size)] batch_decoder_inputs = [inputs_to_unit(i, decoder_inputs) for i in range(decoder_size)] batch_weights = list(np.ones(shape=(decoder_size, self.batch_size), dtype=np.float32)) # Set weight for the final decoder unit to 0.0 for all batches. for i in range(self.batch_size): batch_weights[-1][i] = 0.0 # Also set any decoder-input-weights to 0 that have PAD # as target decoder output. 
for unit_id in range(decoder_size - 1): ids_with_pad_target = [b for b in range(self.batch_size) if decoder_inputs[b][unit_id+1] == io_utils.PAD_ID] batch_weights[unit_id][ids_with_pad_target] = 0.0 return batch_encoder_inputs, batch_decoder_inputs, batch_weights def train(self, dataset): """ Train chatbot. """ from chatbot.legacy._train import train train(self, dataset) def decode(self): """ Create chat session between user & chatbot. """ from chatbot.legacy._decode import decode decode(self) def step(self, encoder_inputs, decoder_inputs, target_weights, bucket_id, forward_only=False): """Run a step of the model. Args: encoder_inputs: list of numpy int vectors to feed as encoder inputs. decoder_inputs: list of numpy int vectors to feed as decoder inputs. target_weights: list of numpy float vectors to feed as target weights. bucket_id: which bucket of the model to use. """ raise NotImplemented
{ "repo_name": "mckinziebrandon/DeepChatModels", "path": "chatbot/_models.py", "copies": "1", "size": "15359", "license": "mit", "hash": -7475259770316069000, "line_mean": 42.143258427, "line_max": 101, "alpha_frac": 0.6091542418, "autogenerated": false, "ratio": 4.138776610078146, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5247930851878146, "avg_score": null, "num_lines": null }
# Abstract classes.


class Event(object):
    def __init__(self, start_mark=None, end_mark=None):
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # Only `implicit` and `value` ever appear in the debug output; marks,
        # styles and the like are deliberately omitted.
        shown = [name for name in ['implicit', 'value'] if hasattr(self, name)]
        args = ', '.join('{0}={1!r}'.format(name, getattr(self, name))
                         for name in shown)
        return '{0}({1})'.format(self.__class__.__name__, args)


class NodeEvent(Event):
    def __init__(self, start_mark=None, end_mark=None):
        super(NodeEvent, self).__init__(start_mark, end_mark)


class CollectionStartEvent(NodeEvent):
    def __init__(self, implicit, start_mark=None, end_mark=None,
                 flow_style=None):
        super(CollectionStartEvent, self).__init__(start_mark, end_mark)
        self.tag = None
        self.implicit = implicit
        self.flow_style = flow_style


class CollectionEndEvent(Event):
    pass


# Implementations.


class StreamStartEvent(Event):
    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        super(StreamStartEvent, self).__init__(start_mark, end_mark)
        self.encoding = encoding


class StreamEndEvent(Event):
    pass


class DocumentStartEvent(Event):
    def __init__(self, start_mark=None, end_mark=None, explicit=None,
                 version=None, tags=None):
        super(DocumentStartEvent, self).__init__(start_mark, end_mark)
        self.explicit = explicit
        self.version = version
        self.tags = tags


class DocumentEndEvent(Event):
    def __init__(self, start_mark=None, end_mark=None, explicit=None):
        super(DocumentEndEvent, self).__init__(start_mark, end_mark)
        self.explicit = explicit


class AliasEvent(NodeEvent):
    pass


class ScalarEvent(NodeEvent):
    def __init__(self, implicit, value, start_mark=None, end_mark=None,
                 style=None):
        super(ScalarEvent, self).__init__(start_mark, end_mark)
        self.tag = None
        self.implicit = implicit
        self.value = value
        self.style = style


class SequenceStartEvent(CollectionStartEvent):
    pass


class SequenceEndEvent(CollectionEndEvent):
    pass


class MappingStartEvent(CollectionStartEvent):
    pass


class MappingEndEvent(CollectionEndEvent):
    pass
{ "repo_name": "keelerm84/powerline", "path": "powerline/lint/markedjson/events.py", "copies": "3", "size": "2044", "license": "mit", "hash": 8992677476957434000, "line_mean": 20.0721649485, "line_max": 67, "alpha_frac": 0.7025440313, "autogenerated": false, "ratio": 3.0553064275037367, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5257850458803737, "avg_score": null, "num_lines": null }
"""Abstract classes representing UI toolkit elements.""" from abc import ABC, abstractmethod, abstractproperty class UIWindow(ABC): """Class representing a window in the user interface toolkit.""" def __init__(self, ui, line, column, n_lines, n_columns): """Initialize an UIWindow object. Args: ui: UI object representing the user interface. line: Index of the vertical position of the window in the UI. column: Index of the horizontal position of the window in the UI. n_lines: Window's height. n_columns: Window's width. """ self._ui = ui self._line = line self._column = column self._n_lines = n_lines self._n_columns = n_columns self._cursor_show = False self._cursor = (0, 0) @property def cursor(self): """Position of the cursor.""" return self._cursor @cursor.setter def cursor(self, cursor): self._cursor = cursor def cursor_show(self): """Enable the cursor.""" self._cursor_show = True def cursor_hide(self): """Disable the cursor.""" self._cursor_show = False @abstractmethod def attributes_set(self, colors, properties): """Set the defaults attributes for the window. Args: colors: Default Color object for the window. properties: Default Property object for the window. """ return @abstractmethod def line_update(self, line, content, attributes): """Update a line. Args: line: Index of the line to be updated. content: New content of the line. attributes: List of attributes, one for each char in content. """ return @abstractmethod def line_insert(self, line, content, attributes): """Insert a line. Args: line: Index of the line to be inserted. content: New content of the line. attributes: List of attributes, one for each char in content. """ return @abstractmethod def line_delete(self, line): """Delete a line. Args: line: Index of the line to be deleted. """ return @abstractmethod def refresh(self): """Refresh the content of the window.""" return @abstractmethod def key_get(self): """Wait for a keypress from inside the window and return it. 
Returns: Key object representing the keypress. """ return class UI(ABC): """Class representing the user interface toolkit.""" @abstractmethod def __init__(self): """Initialize an UI object.""" self._ui_windows = list() @abstractproperty def max_lines(self): """Maximum number of lines in the UI.""" return @abstractproperty def max_columns(self): """Maximum number of columns in the UI.""" return @abstractmethod def refresh(self): """Refresh the UI.""" return @abstractmethod def window_create(self, line, column, n_lines, n_columns): """Create a new window. Args: line: Index of the vertical position of the window in the UI. column: Index of the horizontal position of the window in the UI. n_lines: Window's height. n_columns: Window's width. """ return
{ "repo_name": "AndreaOrru/Yugen", "path": "ui.py", "copies": "1", "size": "3511", "license": "bsd-2-clause", "hash": 434861101148135500, "line_mean": 25.2014925373, "line_max": 77, "alpha_frac": 0.5736257477, "autogenerated": false, "ratio": 4.706434316353888, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 134 }
"""Abstract class for autoregressive decoding. Either for the recurrent decoder, or for the transformer decoder. The autoregressive decoder uses the while loop to get the outputs. Descendants should only specify the initial state and the while loop body. """ from typing import ( NamedTuple, Callable, Tuple, cast, Type, List, Optional, Any) import numpy as np import tensorflow as tf from neuralmonkey.dataset import Dataset from neuralmonkey.decorators import tensor from neuralmonkey.model.model_part import ModelPart, FeedDict, InitializerSpecs from neuralmonkey.logging import log, warn from neuralmonkey.model.sequence import EmbeddedSequence from neuralmonkey.nn.utils import dropout from neuralmonkey.tf_utils import get_variable from neuralmonkey.vocabulary import Vocabulary, START_TOKEN, UNK_TOKEN_INDEX def extend_namedtuple(name: str, parent: Type, fields: List[Tuple[str, Type]]) -> Type: """Extend a named tuple to contain more elements.""" # pylint: disable=protected-access ext_fields = [(k, parent._field_types[k]) for k in parent._fields] + fields # pylint: enable=protected-access return cast(Type, NamedTuple(name, ext_fields)) LoopState = NamedTuple( "LoopState", [("histories", Any), ("constants", Any), ("feedables", Any)]) # pylint: enable=invalid-name # The LoopState is a structure that works with the tf.while_loop function # the decoder loop state stores all the information that is not invariant # for the decoder run. 
# pylint: disable=invalid-name
DecoderHistories = NamedTuple(
    "DecoderHistories",
    [("logits", tf.TensorArray),
     ("decoder_outputs", tf.TensorArray),
     ("outputs", tf.TensorArray),
     ("mask", tf.TensorArray)])  # float matrix, 0s and 1s

DecoderConstants = NamedTuple(
    "DecoderConstants",
    [("train_inputs", Optional[tf.Tensor])])

DecoderFeedables = NamedTuple(
    "DecoderFeedables",
    [("step", tf.Tensor),  # 1D int, number of the step
     ("finished", tf.Tensor),  # batch-sized, bool
     ("input_symbol", tf.Tensor),
     ("prev_logits", tf.Tensor)])


# pylint: disable=too-many-public-methods,too-many-instance-attributes
class AutoregressiveDecoder(ModelPart):
    """Base class for autoregressive (left-to-right) decoders.

    Subclasses provide the loop body (``get_body``) and the output
    dimension; this class implements the shared decoding ``tf.while_loop``
    machinery, the output projection, and the train/runtime losses.
    """

    # pylint: disable=too-many-arguments
    def __init__(self,
                 name: str,
                 vocabulary: Vocabulary,
                 data_id: str,
                 max_output_len: int,
                 dropout_keep_prob: float = 1.0,
                 embedding_size: int = None,
                 embeddings_source: EmbeddedSequence = None,
                 tie_embeddings: bool = False,
                 label_smoothing: float = None,
                 supress_unk: bool = False,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        """Initialize parameters common for all autoregressive decoders.

        Arguments:
            name: Name of the decoder. Should be unique accross all Neural
                Monkey objects.
            vocabulary: Target vocabulary.
            data_id: Target data series.
            max_output_len: Maximum length of an output sequence.
            dropout_keep_prob: Probability of keeping a value during dropout.
            embedding_size: Size of embedding vectors for target words.
            embeddings_source: Embedded sequence to take embeddings from.
            tie_embeddings: Use decoder.embedding_matrix also in place of the
                output decoding matrix.
            label_smoothing: Label smoothing parameter.
            supress_unk: If true, decoder will not produce symbols for unknown
                tokens.
            save_checkpoint: Path for saving this model part's checkpoint.
            load_checkpoint: Path for loading this model part's checkpoint.
            initializers: Specification of custom variable initializers.
        """
        ModelPart.__init__(self, name, save_checkpoint, load_checkpoint,
                           initializers)
        log("Initializing decoder, name: '{}'".format(name))

        self.vocabulary = vocabulary
        self.data_id = data_id
        self.max_output_len = max_output_len
        self.dropout_keep_prob = dropout_keep_prob
        self.embedding_size = embedding_size
        self.embeddings_source = embeddings_source
        self.label_smoothing = label_smoothing
        self.tie_embeddings = tie_embeddings
        self.supress_unk = supress_unk

        # check the values of the parameters (max_output_len, ...)
        if max_output_len <= 0:
            raise ValueError("Maximum sequence length must be "
                             "a positive integer.")

        if dropout_keep_prob < 0.0 or dropout_keep_prob > 1.0:
            # NOTE: a space was missing between the concatenated string
            # parts, producing "must bea real number".
            raise ValueError("Dropout keep probability must be "
                             "a real number in the interval [0,1].")

        if self.embedding_size is None and self.embeddings_source is None:
            raise ValueError("You must specify either embedding size or the "
                             "embedded sequence from which to reuse the "
                             "embeddings (e.g. set either 'embedding_size' or "
                             " 'embeddings_source' parameter)")

        if self.embeddings_source is not None:
            if self.embedding_size is not None:
                warn("Overriding the embedding_size parameter with the"
                     " size of the reused embeddings from the encoder.")

            self.embedding_size = (
                self.embeddings_source.embedding_matrix.get_shape()[1].value)

        with self.use_scope():
            self.train_mode = tf.placeholder(tf.bool, [], "train_mode")
            self.go_symbols = tf.placeholder(tf.int32, [None], "go_symbols")

            self.train_inputs = tf.placeholder(
                tf.int32, [None, None], "train_inputs")
            self.train_mask = tf.placeholder(
                tf.float32, [None, None], "train_mask")
    # pylint: enable=too-many-arguments

    @tensor
    def batch_size(self) -> tf.Tensor:
        return tf.shape(self.go_symbols)[0]

    @tensor
    def decoding_w(self) -> tf.Variable:
        # Output projection weights; reuses the (transposed) embedding
        # matrix when tie_embeddings is on.
        if (self.tie_embeddings
                and self.embedding_size != self.output_dimension):
            raise ValueError(
                "`embedding_size` must be equal to the output projection "
                "size when using the `tie_embeddings` option")

        with tf.name_scope("output_projection"):
            if self.tie_embeddings:
                return tf.transpose(self.embedding_matrix)

            return get_variable(
                "state_to_word_W",
                [self.output_dimension, len(self.vocabulary)],
                initializer=tf.glorot_uniform_initializer())

    @tensor
    def decoding_b(self) -> Optional[tf.Variable]:
        # With tied embeddings there is no trainable bias, only zeros.
        if self.tie_embeddings:
            return tf.zeros(len(self.vocabulary))

        with tf.name_scope("output_projection"):
            return get_variable(
                "state_to_word_b",
                [len(self.vocabulary)],
                initializer=tf.zeros_initializer())

    @tensor
    def embedding_matrix(self) -> tf.Variable:
        """Variables and operations for embedding of input words.

        If we are reusing word embeddings, this function takes the embedding
        matrix from the first encoder
        """
        if self.embeddings_source is not None:
            return self.embeddings_source.embedding_matrix

        assert self.embedding_size is not None
        return get_variable(
            name="word_embeddings",
            shape=[len(self.vocabulary), self.embedding_size],
            initializer=tf.glorot_uniform_initializer())

    def get_logits(self, state: tf.Tensor) -> tf.Tensor:
        """Project the decoder's output layer to logits over the vocabulary."""
        state = dropout(state, self.dropout_keep_prob, self.train_mode)
        logits = tf.matmul(state, self.decoding_w) + self.decoding_b

        if self.supress_unk:
            # Push the UNK logit towards -inf so it is never selected.
            # (The previous value -1e-9 was effectively zero, which made
            # the suppression a no-op.)
            unk_mask = tf.one_hot(
                UNK_TOKEN_INDEX, depth=len(self.vocabulary), on_value=-1e9)
            logits += unk_mask

        return logits

    @tensor
    def train_loop_result(self) -> Tuple[tf.Tensor, tf.Tensor,
                                         tf.Tensor, tf.Tensor]:
        return self.decoding_loop(train_mode=True)

    @tensor
    def train_logits(self) -> tf.Tensor:
        # THE LAST TRAIN INPUT IS NOT USED IN DECODING FUNCTION
        # (just as a target)
        return tuple(self.train_loop_result)[0]

    @tensor
    def train_output_states(self) -> tf.Tensor:
        return tuple(self.train_loop_result)[1]

    @tensor
    def train_logprobs(self) -> tf.Tensor:
        return tf.nn.log_softmax(self.train_logits)

    @tensor
    def train_xents(self) -> tf.Tensor:
        # Per-sentence cross-entropy, optionally with label smoothing.
        train_targets = tf.transpose(self.train_inputs)

        softmax_function = None
        if self.label_smoothing:
            softmax_function = (
                lambda labels, logits: tf.losses.softmax_cross_entropy(
                    tf.one_hot(labels, len(self.vocabulary)),
                    logits, label_smoothing=self.label_smoothing))

        return tf.contrib.seq2seq.sequence_loss(
            tf.transpose(self.train_logits, perm=[1, 0, 2]),
            train_targets,
            tf.transpose(self.train_mask),
            average_across_batch=False,
            softmax_loss_function=softmax_function)

    @tensor
    def train_loss(self) -> tf.Tensor:
        return tf.reduce_mean(self.train_xents)

    @property
    def cost(self) -> tf.Tensor:
        return self.train_loss

    @tensor
    def runtime_loop_result(self) -> Tuple[tf.Tensor, tf.Tensor,
                                           tf.Tensor, tf.Tensor]:
        return self.decoding_loop(train_mode=False)

    @tensor
    def runtime_logits(self) -> tf.Tensor:
        return tuple(self.runtime_loop_result)[0]

    @tensor
    def runtime_output_states(self) -> tf.Tensor:
        return tuple(self.runtime_loop_result)[1]

    @tensor
    def runtime_mask(self) -> tf.Tensor:
        return tuple(self.runtime_loop_result)[2]

    @tensor
    def decoded(self) -> tf.Tensor:
        # We disable generating of <pad> tokens at index 0
        # (self.runtime_logits[:, :, 1:]). This shifts the indices
        # of the decoded tokens (therefore, we add +1 to the decoded
        # output indices).
        # self.runtime_logits is of size [batch, sentence_len, vocabulary_size]
        return tf.argmax(self.runtime_logits[:, :, 1:], -1) + 1

    @tensor
    def runtime_xents(self) -> tf.Tensor:
        train_targets = tf.transpose(self.train_inputs)
        batch_major_logits = tf.transpose(self.runtime_logits, [1, 0, 2])
        min_time = tf.minimum(tf.shape(train_targets)[1],
                              tf.shape(batch_major_logits)[1])

        # NOTE if done properly, there should be padding of the shorter
        # sequence instead of cropping to the length of the shorter one
        return tf.contrib.seq2seq.sequence_loss(
            logits=batch_major_logits[:, :min_time],
            targets=train_targets[:, :min_time],
            weights=tf.transpose(self.train_mask)[:, :min_time],
            average_across_batch=False)

    @tensor
    def runtime_loss(self) -> tf.Tensor:
        return tf.reduce_mean(self.runtime_xents)

    @tensor
    def runtime_logprobs(self) -> tf.Tensor:
        return tf.nn.log_softmax(self.runtime_logits)

    @property
    def output_dimension(self) -> int:
        raise NotImplementedError("Abstract property")

    def get_initial_loop_state(self) -> LoopState:
        """Build the loop state for step 0 of the decoding while-loop."""
        dec_output_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True,
                                       size=0, name="decoder_outputs")
        logit_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True,
                                  size=0, name="logits")
        mask_ta = tf.TensorArray(dtype=tf.bool, dynamic_size=True,
                                 size=0, name="mask")
        outputs_ta = tf.TensorArray(dtype=tf.int32, dynamic_size=True,
                                    size=0, name="outputs")

        feedables = DecoderFeedables(
            step=tf.constant(0, tf.int32),
            finished=tf.zeros([self.batch_size], dtype=tf.bool),
            input_symbol=self.go_symbols,
            prev_logits=tf.zeros([self.batch_size, len(self.vocabulary)]))

        histories = DecoderHistories(
            logits=logit_ta,
            decoder_outputs=dec_output_ta,
            mask=mask_ta,
            outputs=outputs_ta)

        constants = DecoderConstants(train_inputs=self.train_inputs)

        return LoopState(
            histories=histories,
            constants=constants,
            feedables=feedables)

    def loop_continue_criterion(self, *args) -> tf.Tensor:
        """Decide whether to break out of the while loop.

        Arguments:
            loop_state: ``LoopState`` instance (see the docs for this module).
                Represents current decoder loop state.
        """
        loop_state = LoopState(*args)
        finished = loop_state.feedables.finished
        not_all_done = tf.logical_not(tf.reduce_all(finished))
        before_max_len = tf.less(loop_state.feedables.step,
                                 self.max_output_len)
        return tf.logical_and(not_all_done, before_max_len)

    def get_body(self, train_mode: bool, sample: bool = False) -> Callable:
        """Return the while loop body function."""
        raise NotImplementedError("Abstract method")

    def finalize_loop(self, final_loop_state: LoopState,
                      train_mode: bool) -> None:
        """Execute post-while loop operations.

        Arguments:
            final_loop_state: Decoder loop state at the end
                of the decoding loop.
            train_mode: Boolean flag, telling whether this is
                a training run.
        """

    def decoding_loop(self, train_mode: bool, sample: bool = False) -> Tuple[
            tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Run the decoding while loop.

        Calls get_initial_loop_state and constructs tf.while_loop
        with the continuation criterion returned from loop_continue_criterion,
        and body function returned from get_body.

        After finishing the tf.while_loop, it calls finalize_loop
        to further postprocess the final decoder loop state (usually
        by stacking TensorArrays containing decoding histories).

        Arguments:
            train_mode: Boolean flag, telling whether this is
                a training run.
            sample: Boolean flag, telling whether we should sample
                the output symbols from the output distribution instead
                of using argmax or gold data.
        """
        initial_loop_state = self.get_initial_loop_state()
        final_loop_state = tf.while_loop(
            self.loop_continue_criterion,
            self.get_body(train_mode, sample),
            initial_loop_state)

        self.finalize_loop(final_loop_state, train_mode)

        logits = final_loop_state.histories.logits.stack()
        decoder_outputs = final_loop_state.histories.decoder_outputs.stack()
        decoded = final_loop_state.histories.outputs.stack()

        # TODO mask should include also the end symbol
        mask = final_loop_state.histories.mask.stack()

        return logits, decoder_outputs, mask, decoded

    def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:
        """Populate the feed dictionary for the decoder object.

        Arguments:
            dataset: The dataset to use for the decoder.
            train: Boolean flag, telling whether this is a training run.
        """
        sentences = dataset.maybe_get_series(self.data_id)

        if sentences is None and train:
            raise ValueError("When training, you must feed "
                             "reference sentences")

        fd = {}  # type: FeedDict
        fd[self.train_mode] = train

        go_symbol_idx = self.vocabulary.get_word_index(START_TOKEN)
        fd[self.go_symbols] = np.full([len(dataset)], go_symbol_idx,
                                      dtype=np.int32)

        if sentences is not None:
            sentences_list = list(sentences)
            # train_mode=False, since we don't want to <unk>ize target words!
            inputs, weights = self.vocabulary.sentences_to_tensor(
                sentences_list, self.max_output_len, train_mode=False,
                add_start_symbol=False, add_end_symbol=True,
                pad_to_max_len=False)

            fd[self.train_inputs] = inputs
            fd[self.train_mask] = weights

        return fd
{ "repo_name": "juliakreutzer/bandit-neuralmonkey", "path": "neuralmonkey/decoders/autoregressive.py", "copies": "1", "size": "16671", "license": "bsd-3-clause", "hash": 2624161640536853500, "line_mean": 37.324137931, "line_max": 79, "alpha_frac": 0.6065023094, "autogenerated": false, "ratio": 4.1729662077597, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.52794685171597, "avg_score": null, "num_lines": null }
"""Abstract class for collective groups.""" from abc import ABCMeta from abc import abstractmethod from ray.util.collective.types import AllReduceOptions, BarrierOptions, \ ReduceOptions, AllGatherOptions, BroadcastOptions, ReduceScatterOptions class BaseGroup(metaclass=ABCMeta): def __init__(self, world_size, rank, group_name): """Init the process group with basic information. Args: world_size (int): The total number of processes in the group. rank (int): The rank of the current process. group_name (str): The group name. """ self._world_size = world_size self._rank = rank self._group_name = group_name @property def rank(self): """Return the rank of the current process.""" return self._rank @property def world_size(self): """Return the number of processes in this group.""" return self._world_size @property def group_name(self): """Return the group name of this group.""" return self._group_name def destroy_group(self): """GC the communicators.""" pass @classmethod def backend(cls): """The backend of this collective group.""" raise NotImplementedError() @abstractmethod def allreduce(self, tensor, allreduce_options=AllReduceOptions()): raise NotImplementedError() @abstractmethod def barrier(self, barrier_options=BarrierOptions()): raise NotImplementedError() @abstractmethod def reduce(self, tensor, reduce_options=ReduceOptions()): raise NotImplementedError() @abstractmethod def allgather(self, tensor_list, tensor, allgather_options=AllGatherOptions()): raise NotImplementedError() @abstractmethod def broadcast(self, tensor, broadcast_options=BroadcastOptions()): raise NotImplementedError() @abstractmethod def reducescatter(self, tensor, tensor_list, reducescatter_options=ReduceScatterOptions()): raise NotImplementedError() @abstractmethod def send(self, tensor, dst_rank): raise NotImplementedError() @abstractmethod def recv(self, tensor, src_rank): raise NotImplementedError()
{ "repo_name": "ray-project/ray", "path": "python/ray/util/collective/collective_group/base_collective_group.py", "copies": "1", "size": "2378", "license": "apache-2.0", "hash": 954405815908585200, "line_mean": 28, "line_max": 75, "alpha_frac": 0.6291000841, "autogenerated": false, "ratio": 4.954166666666667, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 82 }
# abstract class for physical objects in POR

import math

import pyglet

import settings
from collections import namedtuple
from utils import Point, Vec2d, point_in_rect


class Entity(pyglet.sprite.Sprite):
    """Base class for drawable, collidable game objects.

    Position is tracked in game coordinates (``self.gp``) and advanced
    by ``update`` according to the current velocity.
    """

    # just set this to the settings image value in the subclass.
    IMAGE = None
    name = None

    def __init__(self, *args, **kwargs):
        if self.IMAGE:
            args = (pyglet.resource.image(self.IMAGE),) + args
        super(Entity, self).__init__(*args, **kwargs)

        # center the image anchors
        self.image.anchor_x = self.image.width / 2.0
        self.image.anchor_y = self.image.height / 2.0

        # create the sprite
        self.sprite = pyglet.sprite.Sprite(img=self.image)

        self.velocity_x = 0.0
        self.velocity_y = 0.0

        self.gp = Point(settings.ENTITY_DEFAULT_GAME_POSITION_X,
                        settings.ENTITY_DEFAULT_GAME_POSITION_Y)
        self.init()

    def init(self):
        """ Called post init. Pls override me. """
        pass

    def collides_with(self, other_entity):
        """Return True if the bounding circles of both entities overlap."""
        # circular collision detection: collide when the distance between
        # centers is smaller than the sum of the two image radii
        collision_threshold = (self.image.width / 2.0
                               + other_entity.image.width / 2.0)
        distance = self.distance(other_entity)
        return distance < collision_threshold

    def collided_objects(self, objects):
        """Return a list of the given objects that collide with self."""
        # Wrap in list() so the result really is a list on Python 3,
        # where filter() returns a lazy iterator.
        return list(filter(self.collides_with, objects))

    def distance(self, other_entity):
        """Return the Euclidean distance between the two game positions."""
        # tnx pythagoras (math.hypot does the sqrt(dx**2 + dy**2) for us)
        x1, y1 = self.gp
        x2, y2 = other_entity.gp
        return math.hypot(x1 - x2, y1 - y2)

    def update(self, dt):
        """Advance the game position by one time step of length dt."""
        self.gp = Point(self.gp.x + dt * self.velocity_x,
                        self.gp.y + dt * self.velocity_y)

    def __repr__(self):
        classname = self.__class__.__name__
        x, y = self.gp
        return "{classname}({x}, {y})".format(**locals())


class ObjectList(object):
    """Collection of entities built from named map points.

    Entity classes are looked up in klass_dict by point name, falling
    back to the 'default' entry.
    """

    def __init__(self, klass_dict):
        self.objects = []
        self.visible = []
        self.klass_dict = klass_dict

    def add(self, points):
        """Instantiate and store one entity per map point."""
        for point in points:
            # points can be named (in Tiled) so you can have multiple object
            # types in the one level. handy for grouping all collideable
            # objects together.
            Klass = self.klass_dict.get(point.name, self.klass_dict['default'])
            new = Klass()
            new.name = point.name
            new.gp = Point(point.x, point.y)
            self.objects.append(new)

    def update_visible(self, viewport):
        """Cache the subset of objects whose position lies in viewport."""
        self.visible = [
            obj for obj in self.objects if point_in_rect(obj.gp, viewport)]

    def __getitem__(self, item):
        return self.objects[item]

    def pop(self, idx=None):
        """Remove and return the object at idx (the last one if omitted)."""
        # list.pop(None) raises TypeError, so a missing index must be
        # translated into "pop the last element" explicitly.
        if idx is None:
            return self.objects.pop()
        return self.objects.pop(idx)
{ "repo_name": "rozifus/TeamStrong13_4", "path": "por/entity.py", "copies": "1", "size": "2793", "license": "mit", "hash": 1651773766294494000, "line_mean": 30.0333333333, "line_max": 91, "alpha_frac": 0.589688507, "autogenerated": false, "ratio": 3.754032258064516, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9820986017218867, "avg_score": 0.004546949569129773, "num_lines": 90 }
"""Abstract class for writing chat clients.""" import aiohttp import asyncio import collections import itertools import json import logging import random import re import time import datetime import os from hangups import (javascript, parsers, exceptions, http_utils, channel, event, schemas) logger = logging.getLogger(__name__) ORIGIN_URL = 'https://talkgadget.google.com' IMAGE_UPLOAD_URL = 'http://docs.google.com/upload/photos/resumable' PVT_TOKEN_URL = 'https://talkgadget.google.com/talkgadget/_/extension-start' CHAT_INIT_URL = 'https://talkgadget.google.com/u/0/talkgadget/_/chat' CHAT_INIT_PARAMS = { 'prop': 'aChromeExtension', 'fid': 'gtn-roster-iframe-id', 'ec': '["ci:ec",true,true,false]', 'pvt': None, # Populated later } CHAT_INIT_REGEX = re.compile( r"(?:<script>AF_initDataCallback\((.*?)\);</script>)", re.DOTALL ) # Timeout to send for setactiveclient requests: ACTIVE_TIMEOUT_SECS = 120 # Minimum timeout between subsequent setactiveclient requests: SETACTIVECLIENT_LIMIT_SECS = 60 # Initial account data received after the client is first connected: InitialData = collections.namedtuple('InitialData', [ 'conversation_states', # [ClientConversationState] 'self_entity', # ClientEntity 'entities', # [ClientEntity] 'conversation_participants', # [ClientConversationParticipantData] 'sync_timestamp' # datetime ]) class Client(object): """Instant messaging client for Hangouts. Maintains a connections to the servers, emits events, and accepts commands. """ def __init__(self, cookies): """Create new client. cookies is a dictionary of authentication cookies. """ # Event fired when the client connects for the first time with # arguments (initial_data). self.on_connect = event.Event('Client.on_connect') # Event fired when the client reconnects after being disconnected with # arguments (). self.on_reconnect = event.Event('Client.on_reconnect') # Event fired when the client is disconnected with arguments (). 
self.on_disconnect = event.Event('Client.on_disconnect') # Event fired when a ClientStateUpdate arrives with arguments # (state_update). self.on_state_update = event.Event('Client.on_state_update') self._cookies = cookies proxy = os.environ.get('HTTP_PROXY') if proxy: self._connector = aiohttp.ProxyConnector(proxy) else: self._connector = aiohttp.TCPConnector() # hangups.channel.Channel instantiated in connect() self._channel = None # API key sent with every request: self._api_key = None # Parameters sent in request headers: self._header_date = None self._header_version = None self._header_id = None # String identifying this client: self._client_id = None # Account email address: self._email = None # Time in seconds that the client as last set as active: self._last_active_secs = 0.0 # ActiveClientState enum value or None: self._active_client_state = None # Future for Channel.listen self._listen_future = None ########################################################################## # Public methods ########################################################################## @asyncio.coroutine def connect(self): """Establish a connection to the chat server. Returns when an error has occurred, or Client.disconnect has been called. """ initial_data = yield from self._initialize_chat() self._channel = channel.Channel(self._cookies, self._connector) @asyncio.coroutine def _on_connect(): """Wrapper to fire on_connect with initial_data.""" yield from self.on_connect.fire(initial_data) self._channel.on_connect.add_observer(_on_connect) self._channel.on_reconnect.add_observer(self.on_reconnect.fire) self._channel.on_disconnect.add_observer(self.on_disconnect.fire) self._channel.on_message.add_observer(self._on_push_data) self._listen_future = asyncio.async(self._channel.listen()) try: yield from self._listen_future except asyncio.CancelledError: pass logger.info('disconnecting gracefully') @asyncio.coroutine def disconnect(self): """Gracefully disconnect from the server. 
When disconnection is complete, Client.connect will return. """ self._listen_future.cancel() self._connector.close() @asyncio.coroutine def set_active(self): """Set this client as active. While a client is active, no other clients will raise notifications. Call this method whenever there is an indication the user is interacting with this client. This method may be called very frequently, and it will only make a request when necessary. """ is_active = (self._active_client_state == schemas.ActiveClientState.IS_ACTIVE_CLIENT) timed_out = (time.time() - self._last_active_secs > SETACTIVECLIENT_LIMIT_SECS) if not is_active or timed_out: # Update these immediately so if the function is called again # before the API request finishes, we don't start extra requests. self._active_client_state = ( schemas.ActiveClientState.IS_ACTIVE_CLIENT ) self._last_active_secs = time.time() try: yield from self.setactiveclient(True, ACTIVE_TIMEOUT_SECS) except exceptions.NetworkError as e: logger.warning('Failed to set active client: {}'.format(e)) else: logger.info('Set active client for {} seconds' .format(ACTIVE_TIMEOUT_SECS)) ########################################################################## # Private methods ########################################################################## @asyncio.coroutine def _initialize_chat(self): """Request push channel creation and initial chat data. Returns instance of InitialData. The response body is a HTML document containing a series of script tags containing JavaScript objects. We need to parse the objects to get at the data. """ # We first need to fetch the 'pvt' token, which is required for the # initialization request (otherwise it will return 400). 
try: res = yield from http_utils.fetch( 'get', PVT_TOKEN_URL, cookies=self._cookies, connector=self._connector ) CHAT_INIT_PARAMS['pvt'] = javascript.loads(res.body.decode())[1] logger.info('Found PVT token: {}'.format(CHAT_INIT_PARAMS['pvt'])) except (exceptions.NetworkError, ValueError) as e: raise exceptions.HangupsError('Failed to fetch PVT token: {}' .format(e)) # Now make the actual initialization request: try: res = yield from http_utils.fetch( 'get', CHAT_INIT_URL, cookies=self._cookies, params=CHAT_INIT_PARAMS, connector=self._connector ) except exceptions.NetworkError as e: raise exceptions.HangupsError('Initialize chat request failed: {}' .format(e)) # Parse the response by using a regex to find all the JS objects, and # parsing them. Not everything will be parsable, but we don't care if # an object we don't need can't be parsed. data_dict = {} for data in CHAT_INIT_REGEX.findall(res.body.decode()): try: logger.debug("Attempting to load javascript: {}..." .format(repr(data[:100]))) data = javascript.loads(data) # pylint: disable=invalid-sequence-index data_dict[data['key']] = data['data'] except ValueError as e: try: data = data.replace("data:function(){return", "data:") data = data.replace("}}", "}") data = javascript.loads(data) data_dict[data['key']] = data['data'] except ValueError as e: raise # logger.debug('Failed to parse initialize chat object: {}\n{}' # .format(e, data)) # Extract various values that we will need. try: self._api_key = data_dict['ds:7'][0][2] self._email = data_dict['ds:34'][0][2] self._header_date = data_dict['ds:2'][0][4] self._header_version = data_dict['ds:2'][0][6] self._header_id = data_dict['ds:4'][0][7] _sync_timestamp = parsers.from_timestamp( # cgserp? # data_dict['ds:21'][0][1][4] # data_dict['ds:35'][0][1][4] data_dict['ds:21'][0][1][4] ) except KeyError as e: raise exceptions.HangupsError('Failed to get initialize chat ' 'value: {}'.format(e)) # Parse the entity representing the current user. 
self_entity = schemas.CLIENT_GET_SELF_INFO_RESPONSE.parse( # cgsirp? # data_dict['ds:20'][0] # data_dict['ds:35'][0] data_dict['ds:20'][0] ).self_entity # Parse every existing conversation's state, including participants. initial_conv_states = schemas.CLIENT_CONVERSATION_STATE_LIST.parse( # csrcrp? # data_dict['ds:19'][0][3] # data_dict['ds:36'][0][3] data_dict['ds:19'][0][3] ) initial_conv_parts = [] for conv_state in initial_conv_states: initial_conv_parts.extend(conv_state.conversation.participant_data) # Parse the entities for the user's contacts (doesn't include users not # in contacts). If this fails, continue without the rest of the # entities. initial_entities = [] try: entities = schemas.INITIAL_CLIENT_ENTITIES.parse( # cgserp? # data_dict['ds:21'][0] # data_dict['ds:37'][0] data_dict['ds:21'][0] ) except ValueError as e: logger.warning('Failed to parse initial client entities: {}' .format(e)) else: initial_entities.extend(entities.entities) initial_entities.extend(e.entity for e in itertools.chain( entities.group1.entity, entities.group2.entity, entities.group3.entity, entities.group4.entity, entities.group5.entity )) return InitialData(initial_conv_states, self_entity, initial_entities, initial_conv_parts, _sync_timestamp) def _get_cookie(self, name): """Return a cookie for raise error if that cookie was not provided.""" try: return self._cookies[name] except KeyError: raise KeyError("Cookie '{}' is required".format(name)) def _get_request_header(self): """Return request header for chat API request.""" return [ [6, 3, self._header_version, self._header_date], [self._client_id, self._header_id], None, "en" ] @asyncio.coroutine def _on_push_data(self, submission): """Parse ClientStateUpdate and call the appropriate events.""" for state_update in parsers.parse_submission(submission): if isinstance(state_update, dict) and 'client_id' in state_update: # Hack to receive client ID: self._client_id = state_update['client_id'] logger.info('Received new client_id: 
{}' .format(self._client_id)) else: self._active_client_state = ( state_update.state_update_header.active_client_state ) yield from self.on_state_update.fire(state_update) @asyncio.coroutine def _request(self, endpoint, body_json, use_json=True): """Make chat API request. Raises hangups.NetworkError if the request fails. """ url = 'https://clients6.google.com/chat/v1/{}'.format(endpoint) res = yield from self._base_request( url, 'application/json+protobuf', json.dumps(body_json), use_json=use_json ) return res @asyncio.coroutine def _base_request(self, url, content_type, data, use_json=True): """Make API request. Raises hangups.NetworkError if the request fails. """ headers = channel.get_authorization_headers(self._get_cookie('SAPISID')) headers['content-type'] = content_type required_cookies = ['SAPISID', 'HSID', 'SSID', 'APISID', 'SID'] cookies = {cookie: self._get_cookie(cookie) for cookie in required_cookies} params = { 'key': self._api_key, 'alt': 'json' if use_json else 'protojson', } res = yield from http_utils.fetch( 'post', url, headers=headers, cookies=cookies, params=params, data=data, connector=self._connector ) logger.debug('Response to request for {} was {}:\n{}' .format(url, res.code, res.body)) return res ########################################################################### # Raw API request methods ########################################################################### @asyncio.coroutine def syncallnewevents(self, timestamp): """List all events occuring at or after timestamp. This method requests protojson rather than json so we have one chat message parser rather than two. timestamp: datetime.datetime instance specifying the time after which to return all events occuring in. Raises hangups.NetworkError if the request fails. Returns a ClientSyncAllNewEventsResponse. 
""" res = yield from self._request('conversations/syncallnewevents', [ self._get_request_header(), # last_sync_timestamp parsers.to_timestamp(timestamp), [], None, [], False, [], 1048576 # max_response_size_bytes ], use_json=False) try: res = schemas.CLIENT_SYNC_ALL_NEW_EVENTS_RESPONSE.parse( javascript.loads(res.body.decode()) ) except ValueError as e: raise exceptions.NetworkError('Response failed to parse: {}' .format(e)) # can return 200 but still contain an error status = res.response_header.status if status != 1: raise exceptions.NetworkError('Response status is \'{}\'' .format(status)) return res @asyncio.coroutine def sendchatmessage( self, conversation_id, segments, image_id=None, otr_status=schemas.OffTheRecordStatus.ON_THE_RECORD ): """Send a chat message to a conversation. conversation_id must be a valid conversation ID. segments must be a list of message segments to send, in pblite format. otr_status determines whether the message will be saved in the server's chat history. Note that the OTR status of the conversation is irrelevant, clients may send messages with whatever OTR status they like. image_id is an option ID of an image retrieved from Client.upload_image. If provided, the image will be attached to the message. Raises hangups.NetworkError if the request fails. 
""" client_generated_id = random.randint(0, 2**32) body = [ self._get_request_header(), None, None, None, [], [ segments, [] ], [[image_id, False]] if image_id else None, [ [conversation_id], client_generated_id, otr_status.value, ], None, None, None, [] ] res = yield from self._request('conversations/sendchatmessage', body) # sendchatmessage can return 200 but still contain an error res = json.loads(res.body.decode()) res_status = res['response_header']['status'] if res_status != 'OK': raise exceptions.NetworkError('Unexpected status: {}' .format(res_status)) @asyncio.coroutine def upload_image(self, image_file, filename=None): """Upload an image that can be later attached to a chat message. image_file is a file-like object containing an image. The name of the uploaded file may be changed by specifying the filename argument. Raises hangups.NetworkError if the request fails. Returns ID of uploaded image. """ image_filename = (filename if filename else os.path.basename(image_file.name)) image_data = image_file.read() # Create image and request upload URL res1 = yield from self._base_request( IMAGE_UPLOAD_URL, 'application/x-www-form-urlencoded;charset=UTF-8', json.dumps({ "protocolVersion": "0.8", "createSessionRequest": { "fields": [{ "external": { "name": "file", "filename": image_filename, "put": {}, "size": len(image_data), } }] } })) upload_url = (json.loads(res1.body.decode())['sessionStatus'] ['externalFieldTransfers'][0]['putInfo']['url']) # Upload image data and get image ID res2 = yield from self._base_request( upload_url, 'application/octet-stream', image_data ) return (json.loads(res2.body.decode())['sessionStatus'] ['additionalInfo'] ['uploader_service.GoogleRupioAdditionalInfo'] ['completionInfo']['customerSpecificInfo']['photoid']) @asyncio.coroutine def setactiveclient(self, is_active, timeout_secs): """Set the active client. Raises hangups.NetworkError if the request fails. 
""" res = yield from self._request('clients/setactiveclient', [ self._get_request_header(), # is_active: whether the client is active or not is_active, # full_jid: user@domain/resource "{}/{}".format(self._email, self._client_id), # timeout_secs: timeout in seconds for this client to be active timeout_secs ]) res = json.loads(res.body.decode()) res_status = res['response_header']['status'] if res_status != 'OK': raise exceptions.NetworkError('Unexpected status: {}' .format(res_status)) ########################################################################### # UNUSED raw API request methods (by hangups itself) for reference ########################################################################### @asyncio.coroutine def removeuser(self, conversation_id): """Leave group conversation. conversation_id must be a valid conversation ID. Raises hangups.NetworkError if the request fails. """ client_generated_id = random.randint(0, 2**32) res = yield from self._request('conversations/removeuser', [ self._get_request_header(), None, None, None, [ [conversation_id], client_generated_id, 2 ], ]) res = json.loads(res.body.decode()) res_status = res['response_header']['status'] if res_status != 'OK': raise exceptions.NetworkError('Unexpected status: {}' .format(res_status)) @asyncio.coroutine def deleteconversation(self, conversation_id): """Delete one-to-one conversation. conversation_id must be a valid conversation ID. Raises hangups.NetworkError if the request fails. 
""" res = yield from self._request('conversations/deleteconversation', [ self._get_request_header(), [conversation_id], # Not sure what timestamp should be there, last time I have tried it # Hangouts client in GMail sent something like now() - 5 hours parsers.to_timestamp( datetime.datetime.now(tz=datetime.timezone.utc) ), None, [], ]) res = json.loads(res.body.decode()) res_status = res['response_header']['status'] if res_status != 'OK': raise exceptions.NetworkError('Unexpected status: {}' .format(res_status)) @asyncio.coroutine def settyping(self, conversation_id, typing=schemas.TypingStatus.TYPING): """Send typing notification. conversation_id must be a valid conversation ID. typing must be a hangups.TypingStatus Enum. Raises hangups.NetworkError if the request fails. """ res = yield from self._request('conversations/settyping', [ self._get_request_header(), [conversation_id], typing.value ]) res = json.loads(res.body.decode()) res_status = res['response_header']['status'] if res_status != 'OK': raise exceptions.NetworkError('Unexpected status: {}' .format(res_status)) @asyncio.coroutine def updatewatermark(self, conv_id, read_timestamp): """Update the watermark (read timestamp) for a conversation. Raises hangups.NetworkError if the request fails. """ res = yield from self._request('conversations/updatewatermark', [ self._get_request_header(), # conversation_id [conv_id], # latest_read_timestamp parsers.to_timestamp(read_timestamp), ]) res = json.loads(res.body.decode()) res_status = res['response_header']['status'] if res_status != 'OK': raise exceptions.NetworkError('Unexpected status: {}' .format(res_status)) @asyncio.coroutine def getselfinfo(self): """Return information about your account. Raises hangups.NetworkError if the request fails. 
""" res = yield from self._request('contacts/getselfinfo', [ self._get_request_header(), [], [] ]) return json.loads(res.body.decode()) @asyncio.coroutine def setfocus(self, conversation_id): """Set focus (occurs whenever you give focus to a client). Raises hangups.NetworkError if the request fails. """ res = yield from self._request('conversations/setfocus', [ self._get_request_header(), [conversation_id], 1, 20 ]) return json.loads(res.body.decode()) @asyncio.coroutine def searchentities(self, search_string, max_results): """Search for people. Raises hangups.NetworkError if the request fails. """ res = yield from self._request('contacts/searchentities', [ self._get_request_header(), [], search_string, max_results ]) return json.loads(res.body.decode()) @asyncio.coroutine def setpresence(self, online, mood=None): """Set the presence or mood of this client. Raises hangups.NetworkError if the request fails. """ res = yield from self._request('presence/setpresence', [ self._get_request_header(), [ # timeout_secs timeout in seconds for this presence 720, # client_presence_state: # 40 => DESKTOP_ACTIVE # 30 => DESKTOP_IDLE # 1 => NONE 1 if online else 40, ], None, None, # True if going offline, False if coming online [not online], # UTF-8 smiley like 0x1f603 [mood], ]) res = json.loads(res.body.decode()) res_status = res['response_header']['status'] if res_status != 'OK': raise exceptions.NetworkError('Unexpected status: {}' .format(res_status)) @asyncio.coroutine def querypresence(self, chat_id): """Check someone's presence status. Raises hangups.NetworkError if the request fails. """ res = yield from self._request('presence/querypresence', [ self._get_request_header(), [ [chat_id] ], [1, 2, 5, 7, 8] ]) return json.loads(res.body.decode()) @asyncio.coroutine def getentitybyid(self, chat_id_list): """Return information about a list of contacts. Raises hangups.NetworkError if the request fails. 
""" res = yield from self._request('contacts/getentitybyid', [ self._get_request_header(), None, [[str(chat_id)] for chat_id in chat_id_list], ], use_json=False) try: res = schemas.CLIENT_GET_ENTITY_BY_ID_RESPONSE.parse( javascript.loads(res.body.decode()) ) except ValueError as e: raise exceptions.NetworkError('Response failed to parse: {}' .format(e)) # can return 200 but still contain an error status = res.response_header.status if status != 1: raise exceptions.NetworkError('Response status is \'{}\'' .format(status)) return res @asyncio.coroutine def getconversation(self, conversation_id, event_timestamp, max_events=50): """Return conversation events. This is mainly used for retrieving conversation scrollback. Events occurring before event_timestamp are returned, in order from oldest to newest. Raises hangups.NetworkError if the request fails. """ res = yield from self._request('conversations/getconversation', [ self._get_request_header(), [[conversation_id], [], []], # conversationSpec False, # includeConversationMetadata True, # includeEvents None, # ??? max_events, # maxEventsPerConversation # eventContinuationToken (specifying timestamp is sufficient) [ None, # eventId None, # storageContinuationToken parsers.to_timestamp(event_timestamp), # eventTimestamp ] ], use_json=False) try: res = schemas.CLIENT_GET_CONVERSATION_RESPONSE.parse( javascript.loads(res.body.decode()) ) except ValueError as e: raise exceptions.NetworkError('Response failed to parse: {}' .format(e)) # can return 200 but still contain an error status = res.response_header.status if status != 1: raise exceptions.NetworkError('Response status is \'{}\'' .format(status)) return res @asyncio.coroutine def syncrecentconversations(self): """List the contents of recent conversations, including messages. Similar to syncallnewevents, but appears to return a limited number of conversations (20) rather than all conversations in a given date range. Raises hangups.NetworkError if the request fails. 
""" res = yield from self._request('conversations/syncrecentconversations', [self._get_request_header()]) return json.loads(res.body.decode()) @asyncio.coroutine def setchatname(self, conversation_id, name): """Set the name of a conversation. Raises hangups.NetworkError if the request fails. """ client_generated_id = random.randint(0, 2 ** 32) body = [ self._get_request_header(), None, name, None, [[conversation_id], client_generated_id, 1] ] res = yield from self._request('conversations/renameconversation', body) res = json.loads(res.body.decode()) res_status = res['response_header']['status'] if res_status != 'OK': logger.warning('renameconversation returned status {}' .format(res_status)) raise exceptions.NetworkError() @asyncio.coroutine def sendeasteregg(self, conversation_id, easteregg): """Send a easteregg to a conversation. easteregg may not be empty. Raises hangups.NetworkError if the request fails. """ body = [ self._get_request_header(), [conversation_id], [easteregg, None, 1] ] res = yield from self._request('conversations/easteregg', body) res = json.loads(res.body.decode()) res_status = res['response_header']['status'] if res_status != 'OK': logger.warning('easteregg returned status {}' .format(res_status)) raise exceptions.NetworkError() @asyncio.coroutine def createconversation(self, chat_id_list, force_group=False): """Create new conversation. conversation_id must be a valid conversation ID. chat_id_list is list of users which should be invited to conversation (except from yourself). New conversation ID is returned as res['conversation']['id']['id'] Raises hangups.NetworkError if the request fails. 
""" client_generated_id = random.randint(0, 2**32) body = [ self._get_request_header(), 1 if len(chat_id_list) == 1 and not force_group else 2, client_generated_id, None, [[str(chat_id), None, None, "unknown", None, []] for chat_id in chat_id_list] ] res = yield from self._request('conversations/createconversation', body) # can return 200 but still contain an error res = json.loads(res.body.decode()) res_status = res['response_header']['status'] if res_status != 'OK': raise exceptions.NetworkError('Unexpected status: {}' .format(res_status)) return res @asyncio.coroutine def adduser(self, conversation_id, chat_id_list): """Add user to existing conversation. conversation_id must be a valid conversation ID. chat_id_list is list of users which should be invited to conversation. Raises hangups.NetworkError if the request fails. """ client_generated_id = random.randint(0, 2**32) body = [ self._get_request_header(), None, [[str(chat_id), None, None, "unknown", None, []] for chat_id in chat_id_list], None, [ [conversation_id], client_generated_id, 2, None, 4 ] ] res = yield from self._request('conversations/adduser', body) # can return 200 but still contain an error res = json.loads(res.body.decode()) res_status = res['response_header']['status'] if res_status != 'OK': raise exceptions.NetworkError('Unexpected status: {}' .format(res_status)) return res
{ "repo_name": "j16sdiz/hangups", "path": "hangups/client.py", "copies": "1", "size": "32734", "license": "mit", "hash": 7052772239694569000, "line_mean": 37.1960326721, "line_max": 80, "alpha_frac": 0.5542249649, "autogenerated": false, "ratio": 4.503232906864768, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0002448323076000863, "num_lines": 857 }
"""Abstract class for writing chat clients.""" import aiohttp import asyncio import json import logging import random import time import datetime import os from hangups import (javascript, parsers, exceptions, http_utils, channel, event, hangouts_pb2, pblite, __version__) logger = logging.getLogger(__name__) ORIGIN_URL = 'https://talkgadget.google.com' IMAGE_UPLOAD_URL = 'http://docs.google.com/upload/photos/resumable' # Timeout to send for setactiveclient requests: ACTIVE_TIMEOUT_SECS = 120 # Minimum timeout between subsequent setactiveclient requests: SETACTIVECLIENT_LIMIT_SECS = 60 class Client(object): """Instant messaging client for Hangouts. Maintains a connections to the servers, emits events, and accepts commands. """ def __init__(self, cookies): """Create new client. cookies is a dictionary of authentication cookies. """ # Event fired when the client connects for the first time with # arguments (). self.on_connect = event.Event('Client.on_connect') # Event fired when the client reconnects after being disconnected with # arguments (). self.on_reconnect = event.Event('Client.on_reconnect') # Event fired when the client is disconnected with arguments (). self.on_disconnect = event.Event('Client.on_disconnect') # Event fired when a StateUpdate arrives with arguments (state_update). self.on_state_update = event.Event('Client.on_state_update') self._cookies = cookies proxy = os.environ.get('HTTP_PROXY') if proxy: self._connector = aiohttp.ProxyConnector(proxy) else: self._connector = aiohttp.TCPConnector() self._channel = channel.Channel(self._cookies, self._connector) # Future for Channel.listen self._listen_future = None self._request_header = hangouts_pb2.RequestHeader( # Ignore most of the RequestHeader fields since they aren't # required. 
client_version=hangouts_pb2.ClientVersion( major_version='hangups-{}'.format(__version__), ), language_code='en', ) # String identifying this client (populated later): self._client_id = None # String email address for this account (populated later): self._email = None # Active client management parameters: # Time in seconds that the client as last set as active: self._last_active_secs = 0.0 # ActiveClientState enum int value or None: self._active_client_state = None ########################################################################## # Public methods ########################################################################## @asyncio.coroutine def connect(self): """Establish a connection to the chat server. Returns when an error has occurred, or Client.disconnect has been called. """ # Forward the Channel events to the Client events. self._channel.on_connect.add_observer(self.on_connect.fire) self._channel.on_reconnect.add_observer(self.on_reconnect.fire) self._channel.on_disconnect.add_observer(self.on_disconnect.fire) self._channel.on_receive_array.add_observer(self._on_receive_array) # Listen for StateUpdate messages from the Channel until it # disconnects. self._listen_future = asyncio.async(self._channel.listen()) try: yield from self._listen_future except asyncio.CancelledError: pass self._connector.close() logger.info('Client.connect returning because Channel.listen returned') @asyncio.coroutine def disconnect(self): """Gracefully disconnect from the server. When disconnection is complete, Client.connect will return. """ logger.info('Disconnecting gracefully...') self._listen_future.cancel() try: yield from self._listen_future except asyncio.CancelledError: pass logger.info('Disconnected gracefully') @asyncio.coroutine def set_active(self): """Set this client as active. While a client is active, no other clients will raise notifications. Call this method whenever there is an indication the user is interacting with this client. 
This method may be called very frequently, and it will only make a request when necessary. """ is_active = (self._active_client_state == hangouts_pb2.ACTIVE_CLIENT_STATE_IS_ACTIVE) timed_out = (time.time() - self._last_active_secs > SETACTIVECLIENT_LIMIT_SECS) if not is_active or timed_out: # Update these immediately so if the function is called again # before the API request finishes, we don't start extra requests. self._active_client_state = ( hangouts_pb2.ACTIVE_CLIENT_STATE_IS_ACTIVE ) self._last_active_secs = time.time() # The first time this is called, we need to retrieve the user's # email address. if self._email is None: try: get_self_info_response = yield from self.getselfinfo() except exceptions.NetworkError as e: logger.warning('Failed to find email address: {}' .format(e)) return self._email = ( get_self_info_response.self_entity.properties.email[0] ) # If the client_id hasn't been received yet, we can't set the # active client. if self._client_id is None: logger.info( 'Cannot set active client until client_id is received' ) return try: yield from self.setactiveclient(True, ACTIVE_TIMEOUT_SECS) except exceptions.NetworkError as e: logger.warning('Failed to set active client: {}'.format(e)) else: logger.info('Set active client for {} seconds' .format(ACTIVE_TIMEOUT_SECS)) ########################################################################## # Private methods ########################################################################## def _get_cookie(self, name): """Return a cookie for raise error if that cookie was not provided.""" try: return self._cookies[name] except KeyError: raise KeyError("Cookie '{}' is required".format(name)) @asyncio.coroutine def _on_receive_array(self, array): """Parse channel array and call the appropriate events.""" if array[0] == 'noop': pass # This is just a keep-alive, ignore it. 
else: wrapper = json.loads(array[0]['p']) # Wrapper appears to be a Protocol Buffer message, but encoded via # field numbers as dictionary keys. Since we don't have a parser # for that, parse it ad-hoc here. if '3' in wrapper: # This is a new client_id. self._client_id = wrapper['3']['2'] logger.info('Received new client_id: %r', self._client_id) # Once client_id is received, the channel is ready to have # services added. yield from self._add_channel_services() if '2' in wrapper: pblite_message = json.loads(wrapper['2']['2']) if pblite_message[0] == 'cbu': # This is a (Client)BatchUpdate containing StateUpdate # messages. batch_update = hangouts_pb2.BatchUpdate() pblite.decode(batch_update, pblite_message, ignore_first_item=True) for state_update in batch_update.state_update: logger.debug('Received StateUpdate:\n%s', state_update) header = state_update.state_update_header self._active_client_state = header.active_client_state yield from self.on_state_update.fire(state_update) else: logger.info('Ignoring message: %r', pblite_message[0]) @asyncio.coroutine def _add_channel_services(self): """Add services to the channel. The services we add to the channel determine what kind of data we will receive on it. The "babel" service includes what we need for Hangouts. If this fails for some reason, hangups will never receive any events. This needs to be re-called whenever we open a new channel (when there's a new SID and client_id. """ logger.info('Adding channel services...') # Based on what Hangouts for Chrome does over 2 requests, this is # trimmed down to 1 request that includes the bare minimum to make # things work. map_list = [dict(p=json.dumps({"3": {"1": {"1": "babel"}}}))] yield from self._channel.send_maps(map_list) logger.info('Channel services added') @asyncio.coroutine def _pb_request(self, endpoint, request_pb, response_pb): """Send a Protocol Buffer formatted chat API request. Args: endpoint (str): The chat API endpoint to use. 
request_pb: The request body as a Protocol Buffer message. response_pb: The response body as a Protocol Buffer message. Raises: NetworkError: If the request fails. """ logger.debug('Sending Protocol Buffer request %s:\n%s', endpoint, request_pb) res = yield from self._base_request( 'https://clients6.google.com/chat/v1/{}'.format(endpoint), 'application/json+protobuf', # The request body is pblite. 'protojson', # The response should be pblite. json.dumps(pblite.encode(request_pb)) ) pblite.decode(response_pb, javascript.loads(res.body.decode()), ignore_first_item=True) logger.debug('Received Protocol Buffer response:\n%s', response_pb) status = response_pb.response_header.status if status != hangouts_pb2.RESPONSE_STATUS_OK: description = response_pb.response_header.error_description raise exceptions.NetworkError( 'Request failed with status {}: \'{}\'' .format(status, description) ) @asyncio.coroutine def _base_request(self, url, content_type, response_type, data): """Send a generic authenticated POST request. Args: url (str): URL of request. content_type (str): Request content type. response_type (str): The desired response format. Valid options are: 'json' (JSON), 'protojson' (pblite), and 'proto' (binary Protocol Buffer). 'proto' requires manually setting an extra header 'X-Goog-Encode-Response-If-Executable: base64'. data (str): Request body data. Returns: FetchResponse: Response containing HTTP code, cookies, and body. Raises: NetworkError: If the request fails. """ sapisid_cookie = self._get_cookie('SAPISID') headers = channel.get_authorization_headers(sapisid_cookie) headers['content-type'] = content_type required_cookies = ['SAPISID', 'HSID', 'SSID', 'APISID', 'SID'] cookies = {cookie: self._get_cookie(cookie) for cookie in required_cookies} params = { # "alternative representation type" (desired response format). 
'alt': response_type, } res = yield from http_utils.fetch( 'post', url, headers=headers, cookies=cookies, params=params, data=data, connector=self._connector ) return res def _get_request_header_pb(self): """Return populated RequestHeader message.""" # resource is allowed to be null if it's not available yet (the Chrome # client does this for the first getentitybyid call) if self._client_id is not None: self._request_header.client_identifier.resource = self._client_id return self._request_header def get_client_generated_id(self): """Return ID for client_generated_id fields.""" return random.randint(0, 2**32) ########################################################################### # Raw API request methods ########################################################################### @asyncio.coroutine def syncallnewevents(self, timestamp): """List all events occurring at or after timestamp. This method requests protojson rather than json so we have one chat message parser rather than two. timestamp: datetime.datetime instance specifying the time after which to return all events occurring in. Raises hangups.NetworkError if the request fails. Returns SyncAllNewEventsResponse. """ request = hangouts_pb2.SyncAllNewEventsRequest( request_header=self._get_request_header_pb(), last_sync_timestamp=parsers.to_timestamp(timestamp), max_response_size_bytes=1048576, ) response = hangouts_pb2.SyncAllNewEventsResponse() yield from self._pb_request('conversations/syncallnewevents', request, response) return response @asyncio.coroutine def sendchatmessage( self, conversation_id, segments, image_id=None, otr_status=hangouts_pb2.OFF_THE_RECORD_STATUS_ON_THE_RECORD, delivery_medium=None): """Send a chat message to a conversation. conversation_id must be a valid conversation ID. segments must be a list of message segments to send, in pblite format. otr_status determines whether the message will be saved in the server's chat history. 
Note that the OTR status of the conversation is irrelevant, clients may send messages with whatever OTR status they like. image_id is an option ID of an image retrieved from Client.upload_image. If provided, the image will be attached to the message. Raises hangups.NetworkError if the request fails. """ segments_pb = [] for segment_pblite in segments: segment_pb = hangouts_pb2.Segment() pblite.decode(segment_pb, segment_pblite) segments_pb.append(segment_pb) if delivery_medium is None: delivery_medium = hangouts_pb2.DeliveryMedium( medium_type=hangouts_pb2.DELIVERY_MEDIUM_BABEL, ) request = hangouts_pb2.SendChatMessageRequest( request_header=self._get_request_header_pb(), message_content=hangouts_pb2.MessageContent( segment=segments_pb, ), event_request_header=hangouts_pb2.EventRequestHeader( conversation_id=hangouts_pb2.ConversationId( id=conversation_id, ), client_generated_id=self.get_client_generated_id(), expected_otr=otr_status, delivery_medium=delivery_medium, event_type=hangouts_pb2.EVENT_TYPE_REGULAR_CHAT_MESSAGE, ), ) if image_id is not None: request.existing_media = hangouts_pb2.ExistingMedia( photo=hangouts_pb2.Photo(photo_id=image_id) ) response = hangouts_pb2.SendChatMessageResponse() yield from self._pb_request('conversations/sendchatmessage', request, response) return response @asyncio.coroutine def setactiveclient(self, is_active, timeout_secs): """Set the active client. Raises hangups.NetworkError if the request fails. """ request = hangouts_pb2.SetActiveClientRequest( request_header=self._get_request_header_pb(), is_active=is_active, full_jid="{}/{}".format(self._email, self._client_id), timeout_secs=timeout_secs, ) response = hangouts_pb2.SetActiveClientResponse() yield from self._pb_request('clients/setactiveclient', request, response) return response @asyncio.coroutine def updatewatermark(self, conv_id, read_timestamp): """Update the watermark (read timestamp) for a conversation. Raises hangups.NetworkError if the request fails. 
""" request = hangouts_pb2.UpdateWatermarkRequest( request_header=self._get_request_header_pb(), conversation_id=hangouts_pb2.ConversationId(id=conv_id), last_read_timestamp=parsers.to_timestamp(read_timestamp), ) response = hangouts_pb2.UpdateWatermarkResponse() yield from self._pb_request('conversations/updatewatermark', request, response) return response @asyncio.coroutine def getentitybyid(self, gaia_id_list): """Return information about a list of contacts. Raises hangups.NetworkError if the request fails. """ request = hangouts_pb2.GetEntityByIdRequest( request_header=self._get_request_header_pb(), batch_lookup_spec=[hangouts_pb2.EntityLookupSpec(gaia_id=gaia_id) for gaia_id in gaia_id_list], ) response = hangouts_pb2.GetEntityByIdResponse() yield from self._pb_request('contacts/getentitybyid', request, response) return response @asyncio.coroutine def renameconversation( self, conversation_id, name, otr_status=hangouts_pb2.OFF_THE_RECORD_STATUS_ON_THE_RECORD): """Rename a conversation. Raises hangups.NetworkError if the request fails. """ request = hangouts_pb2.RenameConversationRequest( request_header=self._get_request_header_pb(), new_name=name, event_request_header=hangouts_pb2.EventRequestHeader( conversation_id=hangouts_pb2.ConversationId( id=conversation_id, ), client_generated_id=self.get_client_generated_id(), expected_otr=otr_status, ), ) response = hangouts_pb2.RenameConversationResponse() yield from self._pb_request('conversations/renameconversation', request, response) return response @asyncio.coroutine def getconversation(self, conversation_id, event_timestamp, max_events=50): """Return conversation events. This is mainly used for retrieving conversation scrollback. Events occurring before event_timestamp are returned, in order from oldest to newest. Raises hangups.NetworkError if the request fails. 
""" request = hangouts_pb2.GetConversationRequest( request_header=self._get_request_header_pb(), conversation_spec=hangouts_pb2.ConversationSpec( conversation_id=hangouts_pb2.ConversationId(id=conversation_id) ), include_event=True, max_events_per_conversation=max_events, event_continuation_token=hangouts_pb2.EventContinuationToken( event_timestamp=parsers.to_timestamp(event_timestamp) ), ) response = hangouts_pb2.GetConversationResponse() yield from self._pb_request('conversations/getconversation', request, response) return response @asyncio.coroutine def upload_image(self, image_file, filename=None): """Upload an image that can be later attached to a chat message. image_file is a file-like object containing an image. The name of the uploaded file may be changed by specifying the filename argument. Raises hangups.NetworkError if the request fails. Returns ID of uploaded image. """ image_filename = (filename if filename else os.path.basename(image_file.name)) image_data = image_file.read() # Create image and request upload URL res1 = yield from self._base_request( IMAGE_UPLOAD_URL, 'application/x-www-form-urlencoded;charset=UTF-8', 'json', json.dumps({ "protocolVersion": "0.8", "createSessionRequest": { "fields": [{ "external": { "name": "file", "filename": image_filename, "put": {}, "size": len(image_data), } }] } })) upload_url = (json.loads(res1.body.decode())['sessionStatus'] ['externalFieldTransfers'][0]['putInfo']['url']) # Upload image data and get image ID res2 = yield from self._base_request( upload_url, 'application/octet-stream', 'json', image_data ) return (json.loads(res2.body.decode())['sessionStatus'] ['additionalInfo'] ['uploader_service.GoogleRupioAdditionalInfo'] ['completionInfo']['customerSpecificInfo']['photoid']) ########################################################################### # UNUSED raw API request methods (by hangups itself) for reference ########################################################################### 
@asyncio.coroutine def removeuser( self, conversation_id, otr_status=hangouts_pb2.OFF_THE_RECORD_STATUS_ON_THE_RECORD): """Leave group conversation. conversation_id must be a valid conversation ID. Raises hangups.NetworkError if the request fails. """ request = hangouts_pb2.RemoveUserRequest( request_header=self._get_request_header_pb(), event_request_header=hangouts_pb2.EventRequestHeader( conversation_id=hangouts_pb2.ConversationId( id=conversation_id, ), client_generated_id=self.get_client_generated_id(), expected_otr=otr_status, ), ) response = hangouts_pb2.RemoveUserResponse() yield from self._pb_request('conversations/removeuser', request, response) return response @asyncio.coroutine def deleteconversation(self, conversation_id): """Delete one-to-one conversation. One-to-one conversations are "sticky"; they can't actually be deleted. This API clears the event history of the specified conversation up to delete_upper_bound_timestamp, hiding it if no events remain. conversation_id must be a valid conversation ID. Raises hangups.NetworkError if the request fails. """ timestamp = parsers.to_timestamp( datetime.datetime.now(tz=datetime.timezone.utc) ) request = hangouts_pb2.DeleteConversationRequest( request_header=self._get_request_header_pb(), conversation_id=hangouts_pb2.ConversationId(id=conversation_id), delete_upper_bound_timestamp=timestamp ) response = hangouts_pb2.DeleteConversationResponse() yield from self._pb_request('conversations/deleteconversation', request, response) return response @asyncio.coroutine def settyping(self, conversation_id, typing=hangouts_pb2.TYPING_TYPE_STARTED): """Send typing notification. conversation_id must be a valid conversation ID. typing must be a hangups.TypingType Enum. Raises hangups.NetworkError if the request fails. 
""" request = hangouts_pb2.SetTypingRequest( request_header=self._get_request_header_pb(), conversation_id=hangouts_pb2.ConversationId(id=conversation_id), type=typing, ) response = hangouts_pb2.SetTypingResponse() yield from self._pb_request('conversations/settyping', request, response) return response @asyncio.coroutine def getselfinfo(self): """Return information about your account. Raises hangups.NetworkError if the request fails. """ request = hangouts_pb2.GetSelfInfoRequest( request_header=self._get_request_header_pb(), ) response = hangouts_pb2.GetSelfInfoResponse() yield from self._pb_request('contacts/getselfinfo', request, response) return response @asyncio.coroutine def setfocus(self, conversation_id): """Set focus to a conversation. Raises hangups.NetworkError if the request fails. """ request = hangouts_pb2.SetFocusRequest( request_header=self._get_request_header_pb(), conversation_id=hangouts_pb2.ConversationId(id=conversation_id), type=hangouts_pb2.FOCUS_TYPE_FOCUSED, timeout_secs=20, ) response = hangouts_pb2.SetFocusResponse() yield from self._pb_request('conversations/setfocus', request, response) return response @asyncio.coroutine def searchentities(self, search_string, max_results): """Search for people. Raises hangups.NetworkError if the request fails. """ request = hangouts_pb2.SearchEntitiesRequest( request_header=self._get_request_header_pb(), query=search_string, max_count=max_results, ) response = hangouts_pb2.SearchEntitiesResponse() yield from self._pb_request('contacts/searchentities', request, response) return response @asyncio.coroutine def setpresence(self, online, mood=None): """Set the presence or mood of this client. Raises hangups.NetworkError if the request fails. 
""" type_ = (hangouts_pb2.CLIENT_PRESENCE_STATE_DESKTOP_ACTIVE if online else hangouts_pb2.CLIENT_PRESENCE_STATE_DESKTOP_IDLE) request = hangouts_pb2.SetPresenceRequest( request_header=self._get_request_header_pb(), presence_state_setting=hangouts_pb2.PresenceStateSetting( timeout_secs=720, type=type_, ), ) if mood is not None: segment = ( request.mood_setting.mood_message.mood_content.segment.add() ) segment.type = hangouts_pb2.SEGMENT_TYPE_TEXT segment.text = mood response = hangouts_pb2.SetPresenceResponse() yield from self._pb_request('presence/setpresence', request, response) return response @asyncio.coroutine def querypresence(self, gaia_id): """Check someone's presence status. Raises hangups.NetworkError if the request fails. """ request = hangouts_pb2.QueryPresenceRequest( request_header=self._get_request_header_pb(), participant_id=[hangouts_pb2.ParticipantId(gaia_id=gaia_id)], field_mask=[hangouts_pb2.FIELD_MASK_REACHABLE, hangouts_pb2.FIELD_MASK_AVAILABLE, hangouts_pb2.FIELD_MASK_DEVICE], ) response = hangouts_pb2.QueryPresenceResponse() yield from self._pb_request('presence/querypresence', request, response) return response @asyncio.coroutine def syncrecentconversations(self, max_conversations=100, max_events_per_conversation=1): """List the contents of recent conversations, including messages. Similar to syncallnewevents, but returns a limited number of conversations rather than all conversations in a given date range. Can be used to retrieve archived conversations. Raises hangups.NetworkError if the request fails. 
""" request = hangouts_pb2.SyncRecentConversationsRequest( request_header=self._get_request_header_pb(), max_conversations=max_conversations, max_events_per_conversation=max_events_per_conversation, sync_filter=[hangouts_pb2.SYNC_FILTER_INBOX], ) response = hangouts_pb2.SyncRecentConversationsResponse() yield from self._pb_request('conversations/syncrecentconversations', request, response) return response @asyncio.coroutine def setconversationnotificationlevel(self, conversation_id, level): """Set the notification level of a conversation. Pass hangouts_pb2.NOTIFICATION_LEVEL_QUIET to disable notifications, or hangouts_pb2.NOTIFICATION_LEVEL_RING to enable them. Raises hangups.NetworkError if the request fails. """ request = hangouts_pb2.SetConversationNotificationLevelRequest( request_header=self._get_request_header_pb(), conversation_id=hangouts_pb2.ConversationId(id=conversation_id), level=level, ) response = hangouts_pb2.SetConversationNotificationLevelResponse() yield from self._pb_request( 'conversations/setconversationnotificationlevel', request, response ) return response @asyncio.coroutine def easteregg(self, conversation_id, easteregg): """Send an easteregg to a conversation. easteregg may not be empty. Raises hangups.NetworkError if the request fails. """ request = hangouts_pb2.EasterEggRequest( request_header=self._get_request_header_pb(), conversation_id=hangouts_pb2.ConversationId(id=conversation_id), easter_egg=hangouts_pb2.EasterEgg(message=easteregg), ) response = hangouts_pb2.EasterEggResponse() yield from self._pb_request('conversations/easteregg', request, response) return response @asyncio.coroutine def createconversation(self, chat_id_list, force_group=False): """Create new one-to-one or group conversation. chat_id_list is list of other users to invite to the conversation. Raises hangups.NetworkError if the request fails. 
""" is_group = len(chat_id_list) > 1 or force_group request = hangouts_pb2.CreateConversationRequest( request_header=self._get_request_header_pb(), type=(hangouts_pb2.CONVERSATION_TYPE_GROUP if is_group else hangouts_pb2.CONVERSATION_TYPE_ONE_TO_ONE), client_generated_id=self.get_client_generated_id(), invitee_id=[hangouts_pb2.InviteeID(gaia_id=chat_id) for chat_id in chat_id_list], ) response = hangouts_pb2.CreateConversationResponse() yield from self._pb_request('conversations/createconversation', request, response) return response @asyncio.coroutine def adduser(self, conversation_id, chat_id_list, otr_status=hangouts_pb2.OFF_THE_RECORD_STATUS_ON_THE_RECORD): """Add users to an existing group conversation. conversation_id must be a valid conversation ID. chat_id_list is list of users which should be invited to conversation. Raises hangups.NetworkError if the request fails. """ request = hangouts_pb2.AddUserRequest( request_header=self._get_request_header_pb(), invitee_id=[hangouts_pb2.InviteeID(gaia_id=chat_id) for chat_id in chat_id_list], event_request_header=hangouts_pb2.EventRequestHeader( conversation_id=hangouts_pb2.ConversationId( id=conversation_id, ), client_generated_id=self.get_client_generated_id(), expected_otr=otr_status, ), ) response = hangouts_pb2.AddUserResponse() yield from self._pb_request('conversations/adduser', request, response) return response
{ "repo_name": "AshenDrops/hangups", "path": "hangups/client.py", "copies": "3", "size": "33048", "license": "mit", "hash": -212136493430848320, "line_mean": 39.6995073892, "line_max": 79, "alpha_frac": 0.5912914549, "autogenerated": false, "ratio": 4.459919028340081, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6551210483240081, "avg_score": null, "num_lines": null }
"""Abstract class for writing chat clients.""" import asyncio import base64 import binascii import collections import json import logging import os import random import time import google.protobuf.message from hangups import (exceptions, http_utils, channel, event, hangouts_pb2, pblite, version) logger = logging.getLogger(__name__) IMAGE_UPLOAD_URL = 'https://docs.google.com/upload/photos/resumable' # Timeout to send for setactiveclient requests: ACTIVE_TIMEOUT_SECS = 120 # Minimum timeout between subsequent setactiveclient requests: SETACTIVECLIENT_LIMIT_SECS = 60 # API key for `key` parameter (from Hangouts web client) API_KEY = 'AIzaSyD7InnYR3VKdb4j2rMUEbTCIr2VyEazl6k' # Base URL for API requests: BASE_URL = 'https://chat-pa.clients6.google.com' class Client: """Instant messaging client for Hangouts. Maintains a connections to the servers, emits events, and accepts commands. Args: cookies (dict): Google session cookies. Get these using :func:`get_auth`. max_retries (int): (optional) Maximum number of connection attempts hangups will make before giving up. Defaults to 5. retry_backoff_base (int): (optional) The base term for the exponential backoff. The following equation is used when calculating the number of seconds to wait prior to each retry: retry_backoff_base^(# of retries attempted thus far) Defaults to 2. """ def __init__(self, cookies, max_retries=5, retry_backoff_base=2): self._max_retries = max_retries self._retry_backoff_base = retry_backoff_base self.on_connect = event.Event('Client.on_connect') """ :class:`.Event` fired when the client connects for the first time. """ self.on_reconnect = event.Event('Client.on_reconnect') """ :class:`.Event` fired when the client reconnects after being disconnected. """ self.on_disconnect = event.Event('Client.on_disconnect') """ :class:`.Event` fired when the client is disconnected. """ self.on_state_update = event.Event('Client.on_state_update') """ :class:`.Event` fired when an update arrives from the server. 
Args: state_update: A ``StateUpdate`` message. """ # http_utils.Session instance (populated by .connect()): self._session = None # Cookies required to initialize Session: self._cookies = cookies # channel.Channel instance (populated by .connect()): self._channel = None # Future for Channel.listen (populated by .connect()): self._listen_future = None self._request_header = hangouts_pb2.RequestHeader( # Ignore most of the RequestHeader fields since they aren't # required. Sending a recognized client_id is important because it # changes the behaviour of some APIs (eg. get_conversation will # filter out EVENT_TYPE_GROUP_LINK_SHARING_MODIFICATION without # it). client_version=hangouts_pb2.ClientVersion( client_id=hangouts_pb2.CLIENT_ID_WEB_HANGOUTS, major_version='hangups-{}'.format(version.__version__), ), language_code='en', ) # String identifying this client (populated later): self._client_id = None # String email address for this account (populated later): self._email = None # Active client management parameters: # Time in seconds that the client as last set as active: self._last_active_secs = 0.0 # ActiveClientState enum int value or None: self._active_client_state = None ########################################################################## # Public methods ########################################################################## async def connect(self): """Establish a connection to the chat server. Returns when an error has occurred, or :func:`disconnect` has been called. """ proxy = os.environ.get('HTTP_PROXY') self._session = http_utils.Session(self._cookies, proxy=proxy) try: self._channel = channel.Channel( self._session, self._max_retries, self._retry_backoff_base ) # Forward the Channel events to the Client events. 
self._channel.on_connect.add_observer(self.on_connect.fire) self._channel.on_reconnect.add_observer(self.on_reconnect.fire) self._channel.on_disconnect.add_observer(self.on_disconnect.fire) self._channel.on_receive_array.add_observer(self._on_receive_array) # Wrap the coroutine in a Future so it can be cancelled. self._listen_future = asyncio.ensure_future(self._channel.listen()) # Listen for StateUpdate messages from the Channel until it # disconnects. try: await self._listen_future except asyncio.CancelledError: # If this task is cancelled, we need to cancel our child task # as well. We don't need an additional yield because listen # cancels immediately. self._listen_future.cancel() logger.info( 'Client.connect returning because Channel.listen returned' ) finally: await self._session.close() async def disconnect(self): """Gracefully disconnect from the server. When disconnection is complete, :func:`connect` will return. """ logger.info('Graceful disconnect requested') # Cancel the listen task. We don't need an additional yield because # listen cancels immediately. self._listen_future.cancel() def get_request_header(self): """Return ``request_header`` for use when constructing requests. Returns: Populated request header. """ # resource is allowed to be null if it's not available yet (the Chrome # client does this for the first getentitybyid call) if self._client_id is not None: self._request_header.client_identifier.resource = self._client_id return self._request_header @staticmethod def get_client_generated_id(): """Return ``client_generated_id`` for use when constructing requests. Returns: Client generated ID. """ return random.randint(0, 2**32) async def set_active(self): """Set this client as active. While a client is active, no other clients will raise notifications. Call this method whenever there is an indication the user is interacting with this client. This method may be called very frequently, and it will only make a request when necessary. 
""" is_active = (self._active_client_state == hangouts_pb2.ACTIVE_CLIENT_STATE_IS_ACTIVE) timed_out = (time.time() - self._last_active_secs > SETACTIVECLIENT_LIMIT_SECS) if not is_active or timed_out: # Update these immediately so if the function is called again # before the API request finishes, we don't start extra requests. self._active_client_state = ( hangouts_pb2.ACTIVE_CLIENT_STATE_IS_ACTIVE ) self._last_active_secs = time.time() # The first time this is called, we need to retrieve the user's # email address. if self._email is None: try: get_self_info_request = hangouts_pb2.GetSelfInfoRequest( request_header=self.get_request_header(), ) get_self_info_response = await self.get_self_info( get_self_info_request ) except exceptions.NetworkError as e: logger.warning('Failed to find email address: {}' .format(e)) return self._email = ( get_self_info_response.self_entity.properties.email[0] ) # If the client_id hasn't been received yet, we can't set the # active client. if self._client_id is None: logger.info( 'Cannot set active client until client_id is received' ) return try: set_active_request = hangouts_pb2.SetActiveClientRequest( request_header=self.get_request_header(), is_active=True, full_jid="{}/{}".format(self._email, self._client_id), timeout_secs=ACTIVE_TIMEOUT_SECS, ) await self.set_active_client(set_active_request) except exceptions.NetworkError as e: logger.warning('Failed to set active client: {}'.format(e)) else: logger.info('Set active client for {} seconds' .format(ACTIVE_TIMEOUT_SECS)) async def upload_image(self, image_file, filename=None, *, return_uploaded_image=False): """Upload an image that can be later attached to a chat message. Args: image_file: A file-like object containing an image. filename (str): (optional) Custom name for the uploaded file. return_uploaded_image (bool): (optional) If True, return :class:`.UploadedImage` instead of image ID. Defaults to False. Raises: hangups.NetworkError: If the upload request failed. 
Returns: :class:`.UploadedImage` instance, or ID of the uploaded image. """ image_filename = filename or os.path.basename(image_file.name) image_data = image_file.read() # request an upload URL res = await self._base_request( IMAGE_UPLOAD_URL, 'application/x-www-form-urlencoded;charset=UTF-8', 'json', json.dumps({ "protocolVersion": "0.8", "createSessionRequest": { "fields": [{ "external": { "name": "file", "filename": image_filename, "put": {}, "size": len(image_data) } }] } }) ) try: upload_url = self._get_upload_session_status(res)[ 'externalFieldTransfers' ][0]['putInfo']['url'] except KeyError: raise exceptions.NetworkError( 'image upload failed: can not acquire an upload url' ) # upload the image data using the upload_url to get the upload info res = await self._base_request( upload_url, 'application/octet-stream', 'json', image_data ) try: raw_info = ( self._get_upload_session_status(res)['additionalInfo'] ['uploader_service.GoogleRupioAdditionalInfo'] ['completionInfo']['customerSpecificInfo'] ) image_id = raw_info['photoid'] url = raw_info['url'] except KeyError: raise exceptions.NetworkError( 'image upload failed: can not fetch upload info' ) result = UploadedImage(image_id=image_id, url=url) return result if return_uploaded_image else result.image_id ########################################################################## # Private methods ########################################################################## @staticmethod def _get_upload_session_status(res): """Parse the image upload response to obtain status. Args: res: http_utils.FetchResponse instance, the upload response Returns: dict, sessionStatus of the response Raises: hangups.NetworkError: If the upload request failed. 
""" response = json.loads(res.body.decode()) if 'sessionStatus' not in response: try: info = ( response['errorMessage']['additionalInfo'] ['uploader_service.GoogleRupioAdditionalInfo'] ['completionInfo']['customerSpecificInfo'] ) reason = '{} : {}'.format(info['status'], info['message']) except KeyError: reason = 'unknown reason' raise exceptions.NetworkError('image upload failed: {}'.format( reason )) return response['sessionStatus'] async def _on_receive_array(self, array): """Parse channel array and call the appropriate events.""" if array[0] == 'noop': pass # This is just a keep-alive, ignore it. else: wrapper = json.loads(array[0]['p']) # Wrapper appears to be a Protocol Buffer message, but encoded via # field numbers as dictionary keys. Since we don't have a parser # for that, parse it ad-hoc here. if '3' in wrapper: # This is a new client_id. self._client_id = wrapper['3']['2'] logger.info('Received new client_id: %r', self._client_id) # Once client_id is received, the channel is ready to have # services added. await self._add_channel_services() if '2' in wrapper: pblite_message = json.loads(wrapper['2']['2']) if pblite_message[0] == 'cbu': # This is a (Client)BatchUpdate containing StateUpdate # messages. batch_update = hangouts_pb2.BatchUpdate() pblite.decode(batch_update, pblite_message, ignore_first_item=True) for state_update in batch_update.state_update: logger.debug('Received StateUpdate:\n%s', state_update) header = state_update.state_update_header self._active_client_state = header.active_client_state await self.on_state_update.fire(state_update) else: logger.info('Ignoring message: %r', pblite_message[0]) async def _add_channel_services(self): """Add services to the channel. The services we add to the channel determine what kind of data we will receive on it. The "babel" service includes what we need for Hangouts. If this fails for some reason, hangups will never receive any events. 
The "babel_presence_last_seen" service is also required to receive presence notifications. This needs to be re-called whenever we open a new channel (when there's a new SID and client_id. """ logger.info('Adding channel services...') # Based on what Hangouts for Chrome does over 2 requests, this is # trimmed down to 1 request that includes the bare minimum to make # things work. services = ["babel", "babel_presence_last_seen"] map_list = [ dict(p=json.dumps({"3": {"1": {"1": service}}})) for service in services ] await self._channel.send_maps(map_list) logger.info('Channel services added') async def _pb_request(self, endpoint, request_pb, response_pb): """Send a Protocol Buffer formatted chat API request. Args: endpoint (str): The chat API endpoint to use. request_pb: The request body as a Protocol Buffer message. response_pb: The response body as a Protocol Buffer message. Raises: NetworkError: If the request fails. """ logger.debug('Sending Protocol Buffer request %s:\n%s', endpoint, request_pb) res = await self._base_request( '{}/chat/v1/{}'.format(BASE_URL, endpoint), 'application/x-protobuf', # Request body is Protocol Buffer. 'proto', # Response body is Protocol Buffer. request_pb.SerializeToString() ) try: response_pb.ParseFromString(base64.b64decode(res.body)) except binascii.Error as e: raise exceptions.NetworkError( 'Failed to decode base64 response: {}'.format(e) ) except google.protobuf.message.DecodeError as e: raise exceptions.NetworkError( 'Failed to decode Protocol Buffer response: {}'.format(e) ) logger.debug('Received Protocol Buffer response:\n%s', response_pb) status = response_pb.response_header.status if status != hangouts_pb2.RESPONSE_STATUS_OK: description = response_pb.response_header.error_description raise exceptions.NetworkError( 'Request failed with status {}: \'{}\'' .format(status, description) ) async def _base_request(self, url, content_type, response_type, data): """Send a generic authenticated POST request. 
Args: url (str): URL of request. content_type (str): Request content type. response_type (str): The desired response format. Valid options are: 'json' (JSON), 'protojson' (pblite), and 'proto' (binary Protocol Buffer). 'proto' requires manually setting an extra header 'X-Goog-Encode-Response-If-Executable: base64'. data (str): Request body data. Returns: FetchResponse: Response containing HTTP code, cookies, and body. Raises: NetworkError: If the request fails. """ headers = { 'content-type': content_type, # This header is required for Protocol Buffer responses. It causes # them to be base64 encoded: 'X-Goog-Encode-Response-If-Executable': 'base64', } params = { # "alternative representation type" (desired response format). 'alt': response_type, # API key (required to avoid 403 Forbidden "Daily Limit for # Unauthenticated Use Exceeded. Continued use requires signup"). 'key': API_KEY, } res = await self._session.fetch( 'post', url, headers=headers, params=params, data=data, ) return res ########################################################################### # API request methods - wrappers for self._pb_request for calling # particular APIs. ########################################################################### async def add_user(self, add_user_request): """Invite users to join an existing group conversation.""" response = hangouts_pb2.AddUserResponse() await self._pb_request('conversations/adduser', add_user_request, response) return response async def create_conversation(self, create_conversation_request): """Create a new conversation.""" response = hangouts_pb2.CreateConversationResponse() await self._pb_request('conversations/createconversation', create_conversation_request, response) return response async def delete_conversation(self, delete_conversation_request): """Leave a one-to-one conversation. One-to-one conversations are "sticky"; they can't actually be deleted. 
This API clears the event history of the specified conversation up to ``delete_upper_bound_timestamp``, hiding it if no events remain. """ response = hangouts_pb2.DeleteConversationResponse() await self._pb_request('conversations/deleteconversation', delete_conversation_request, response) return response async def easter_egg(self, easter_egg_request): """Send an easter egg event to a conversation.""" response = hangouts_pb2.EasterEggResponse() await self._pb_request('conversations/easteregg', easter_egg_request, response) return response async def get_conversation(self, get_conversation_request): """Return conversation info and recent events.""" response = hangouts_pb2.GetConversationResponse() await self._pb_request('conversations/getconversation', get_conversation_request, response) return response async def get_entity_by_id(self, get_entity_by_id_request): """Return one or more user entities. Searching by phone number only finds entities when their phone number is in your contacts (and not always even then), and can't be used to find Google Voice contacts. 
""" response = hangouts_pb2.GetEntityByIdResponse() await self._pb_request('contacts/getentitybyid', get_entity_by_id_request, response) return response async def get_group_conversation_url(self, get_group_conversation_url_request): """Get URL to allow others to join a group conversation.""" response = hangouts_pb2.GetGroupConversationUrlResponse() await self._pb_request('conversations/getgroupconversationurl', get_group_conversation_url_request, response) return response async def get_self_info(self, get_self_info_request): """Return info about the current user.""" response = hangouts_pb2.GetSelfInfoResponse() await self._pb_request('contacts/getselfinfo', get_self_info_request, response) return response async def get_suggested_entities(self, get_suggested_entities_request): """Return suggested contacts.""" response = hangouts_pb2.GetSuggestedEntitiesResponse() await self._pb_request('contacts/getsuggestedentities', get_suggested_entities_request, response) return response async def query_presence(self, query_presence_request): """Return presence status for a list of users.""" response = hangouts_pb2.QueryPresenceResponse() await self._pb_request('presence/querypresence', query_presence_request, response) return response async def remove_user(self, remove_user_request): """Remove a participant from a group conversation.""" response = hangouts_pb2.RemoveUserResponse() await self._pb_request('conversations/removeuser', remove_user_request, response) return response async def rename_conversation(self, rename_conversation_request): """Rename a conversation. Both group and one-to-one conversations may be renamed, but the official Hangouts clients have mixed support for one-to-one conversations with custom names. 
""" response = hangouts_pb2.RenameConversationResponse() await self._pb_request('conversations/renameconversation', rename_conversation_request, response) return response async def search_entities(self, search_entities_request): """Return user entities based on a query.""" response = hangouts_pb2.SearchEntitiesResponse() await self._pb_request('contacts/searchentities', search_entities_request, response) return response async def send_chat_message(self, send_chat_message_request): """Send a chat message to a conversation.""" response = hangouts_pb2.SendChatMessageResponse() await self._pb_request('conversations/sendchatmessage', send_chat_message_request, response) return response async def modify_otr_status(self, modify_otr_status_request): """Enable or disable message history in a conversation.""" response = hangouts_pb2.ModifyOTRStatusResponse() await self._pb_request('conversations/modifyotrstatus', modify_otr_status_request, response) return response async def send_offnetwork_invitation( self, send_offnetwork_invitation_request ): """Send an email to invite a non-Google contact to Hangouts.""" response = hangouts_pb2.SendOffnetworkInvitationResponse() await self._pb_request('devices/sendoffnetworkinvitation', send_offnetwork_invitation_request, response) return response async def set_active_client(self, set_active_client_request): """Set the active client.""" response = hangouts_pb2.SetActiveClientResponse() await self._pb_request('clients/setactiveclient', set_active_client_request, response) return response async def set_conversation_notification_level( self, set_conversation_notification_level_request ): """Set the notification level of a conversation.""" response = hangouts_pb2.SetConversationNotificationLevelResponse() await self._pb_request( 'conversations/setconversationnotificationlevel', set_conversation_notification_level_request, response ) return response async def set_focus(self, set_focus_request): """Set focus to a conversation.""" response = 
hangouts_pb2.SetFocusResponse() await self._pb_request('conversations/setfocus', set_focus_request, response) return response async def set_group_link_sharing_enabled( self, set_group_link_sharing_enabled_request ): """Set whether group link sharing is enabled for a conversation.""" response = hangouts_pb2.SetGroupLinkSharingEnabledResponse() await self._pb_request('conversations/setgrouplinksharingenabled', set_group_link_sharing_enabled_request, response) return response async def set_presence(self, set_presence_request): """Set the presence status.""" response = hangouts_pb2.SetPresenceResponse() await self._pb_request('presence/setpresence', set_presence_request, response) return response async def set_typing(self, set_typing_request): """Set the typing status of a conversation.""" response = hangouts_pb2.SetTypingResponse() await self._pb_request('conversations/settyping', set_typing_request, response) return response async def sync_all_new_events(self, sync_all_new_events_request): """List all events occurring at or after a timestamp.""" response = hangouts_pb2.SyncAllNewEventsResponse() await self._pb_request('conversations/syncallnewevents', sync_all_new_events_request, response) return response async def sync_recent_conversations( self, sync_recent_conversations_request ): """Return info on recent conversations and their events.""" response = hangouts_pb2.SyncRecentConversationsResponse() await self._pb_request('conversations/syncrecentconversations', sync_recent_conversations_request, response) return response async def update_watermark(self, update_watermark_request): """Update the watermark (read timestamp) of a conversation.""" response = hangouts_pb2.UpdateWatermarkResponse() await self._pb_request('conversations/updatewatermark', update_watermark_request, response) return response UploadedImage = collections.namedtuple('UploadedImage', ['image_id', 'url']) """Details about an uploaded image. Args: image_id (str): Image ID of uploaded image. 
url (str): URL of uploaded image. """
{ "repo_name": "tdryer/hangups", "path": "hangups/client.py", "copies": "2", "size": "28753", "license": "mit", "hash": 2540699604365556700, "line_mean": 40.4906204906, "line_max": 79, "alpha_frac": 0.5820610023, "autogenerated": false, "ratio": 4.684424894102314, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 693 }
# Abstract class that provides functionality for the various exchange classes

import logging
import time
import pickle
import yaml
from os.path import isfile

from gi.repository import GLib

from error import Error
from downloader import DownloadCommand

# Currency code -> display symbol used when rendering prices.
CURRENCY = {
    'usd': '$',
    'eur': '€',
    'btc': 'B',
    'thb': '฿',
    'gbp': '£',
    'eth': 'Ξ',
    'cad': '$',
    'jpy': '¥',
    'cny': '元',
    'inr': '₹'
}

# Ticker category key -> human-readable label shown by the indicator.
CATEGORY = {
    'cur': 'Now',
    'bid': 'Bid',
    'high': 'High',
    'low': 'Low',
    'ask': 'Ask',
    'vol': 'Vol',
    'first': 'First',
    'avg': 'Avg'
}


class Exchange(object):
    """Base class for exchange back-ends.

    Concrete subclasses override the identification attributes below and
    the private hook methods (``_get_discovery_url``, ``_parse_discovery``,
    ``_get_ticker_url``, ``_parse_ticker``) to adapt a specific exchange
    API to the indicator.
    """

    active = True
    name = "Must be overwritten"
    code = "Must be overwritten"
    default_label = "Must be overwritten"

    def __init__(self, indicator=None):
        # NOTE(review): although `indicator` defaults to None, the next line
        # dereferences it, so in practice a valid indicator is required.
        self.indicator = indicator
        self.downloader = indicator.coin.downloader
        self.timeout_id = None  # GLib timer source id while polling is active
        self.error = Error(self)
        self.started = False
        self.asset_pair = {}  # currently selected asset pair dict

    ##
    # Abstract methods to be overwritten by the child classes
    #
    @classmethod
    def _get_discovery_url(cls):
        """Return the asset-discovery URL, or None for static pair lists."""
        pass

    @classmethod
    def _parse_discovery(cls, data):
        """Turn a discovery API response into a list of asset pair dicts."""
        pass

    def _get_ticker_url(self):
        """Return the ticker URL for the currently selected asset pair."""
        pass

    @classmethod
    def _parse_ticker(cls, data):
        """Turn a ticker API response into a dict keyed by CATEGORY keys."""
        pass

    @classmethod
    def get_name(cls):
        return cls.name

    @classmethod
    def get_code(cls):
        return cls.code

    @classmethod
    def get_default_label(cls):
        return cls.default_label

    def get_asset_pair(self):
        return self.asset_pair

    def get_currency(self):
        """Return the quote currency code in lower case."""
        return self.asset_pair.get('quote').lower()

    def get_symbol(self):
        """Return the display symbol for the quote currency.

        Falls back to the upper-cased currency code when no symbol is known.
        """
        return CURRENCY.get(self.get_currency(), self.get_currency().upper())

    def get_icon(self):
        """Return the icon path for the base asset, if an icon exists."""
        asset = self.asset_pair.get('base', '').lower()
        asset_dir = "{}/resources/coin-icons/".format(
            self.indicator.coin.config.get('project_root'))
        if isfile(asset_dir + asset + '.png'):
            return asset_dir + asset + '.png'
        return asset_dir + 'unknown-coin.png'

    def get_volume_currency(self):
        """Return the currency the volume figure is denominated in."""
        return self.asset_pair.get(
            'volumecurrency', self.asset_pair.get('base'))

    def set_asset_pair(self, base, quote):
        """Select the asset pair matching base/quote (case-insensitive).

        If no match is found, falls back to the last known pair (this is
        what the original dangling loop variable effectively did) or to an
        empty dict when no pairs are cached at all, instead of raising
        NameError on an empty pair list.
        """
        pairs = self.get_asset_pairs()
        for candidate in pairs:
            if (candidate.get('base').upper() == base.upper()
                    and candidate.get('quote').upper() == quote.upper()):
                self.asset_pair = candidate
                break
        if not self.asset_pair:
            logging.warning(
                "User.conf specifies unavailable asset pair, trying default. "
                "Run Asset Discovery again.")
            self.asset_pair = pairs[-1] if pairs else {}

    def set_asset_pair_from_code(self, code):
        """Select the asset pair whose 'pair' code matches `code`."""
        for candidate in self.get_asset_pairs():
            if candidate.get('pair').upper() == code.upper():
                self.asset_pair = candidate
                break
        if not self.asset_pair:
            logging.warning(
                "User.conf specifies unavailable asset pair, trying default. "
                "Run Asset Discovery again.")
            self.asset_pair = {}

    @classmethod
    def find_asset_pair_by_code(cls, code):
        """Return the cached asset pair with the given 'pair' code, or {}."""
        for ap in cls.get_asset_pairs():
            if ap.get('pair') == code:
                return ap
        return {}

    @classmethod
    def find_asset_pair(cls, quote, base):
        """Return the cached asset pair with the given quote/base, or {}.

        Note: unlike set_asset_pair(), this comparison is case-sensitive.
        """
        for ap in cls.get_asset_pairs():
            if ap.get('quote') == quote and ap.get('base') == base:
                return ap
        return {}

    @classmethod
    def get_datafile(cls):
        """Return the path of this exchange's asset pair cache file."""
        return "./coin/data/{}.cache".format(cls.get_code())

    ##
    # Loads asset pairs from the cache file or,
    # failing that, returns an empty list
    #
    @classmethod
    def get_asset_pairs(cls):
        try:
            with open(cls.get_datafile(), "rb") as stream:
                asset_pairs = pickle.load(stream)
            return asset_pairs if asset_pairs else []
        except (IOError, pickle.PickleError, EOFError):
            # Missing or faulty data file, return empty array
            return []

    ##
    # Saves asset pairs to disk
    #
    @classmethod
    def store_asset_pairs(cls, asset_pairs):
        try:
            with open(cls.get_datafile(), 'wb') as stream:
                pickle.dump(asset_pairs, stream)
        except IOError:
            logging.error('Could not write to data file')

    ##
    # Discovers assets from the exchange's API url retrieved
    # through the instance-specific method _get_discovery_url()
    #
    @classmethod
    def discover_assets(cls, downloader, callback):
        if cls._get_discovery_url() is None:
            # Static pair list: no HTTP round-trip needed.
            cls.store_asset_pairs(cls._parse_discovery(None))
        else:
            command = DownloadCommand(cls._get_discovery_url(), callback)
            downloader.execute(command, cls._handle_discovery_result)

    ##
    # Deals with the result from the discovery HTTP request
    # Should probably be merged with _handle_result() later
    #
    @classmethod
    def _handle_discovery_result(cls, command):
        logging.debug("Response from {}: {}".format(command.url, command.error))
        if command.error:
            cls._handle_discovery_error(
                f'{cls.name}: API server returned an error: {command.error}')
        if command.response:
            data = command.response
            if data.status_code != 200:
                cls._handle_discovery_error(
                    'API server returned an error: ' + str(data.status_code))
            try:
                result = data.json()
                asset_pairs = cls._parse_discovery(result)
                cls.store_asset_pairs(asset_pairs)
            except Exception as e:
                cls._handle_discovery_error(str(e))

            command.callback()  # update the asset menus of all instances

    @classmethod
    def _handle_discovery_error(cls, msg):
        # logging.warn is a deprecated alias of logging.warning
        logging.warning("Asset Discovery: " + msg)

    ##
    # Start exchange
    #
    def start(self, error_refresh=None):
        """Begin polling prices; safe to call while already started."""
        if not self.started:
            self._check_price()

        self.started = True
        refresh = error_refresh if error_refresh else self.indicator.refresh_frequency
        self.timeout_id = GLib.timeout_add_seconds(refresh, self._check_price)

        return self

    ##
    # Stop exchange, reset errors
    #
    def stop(self):
        if self.timeout_id:
            GLib.source_remove(self.timeout_id)

        self.started = False
        self.indicator.alarm.deactivate()
        self.error.reset()

        return self

    ##
    # Restarts the exchange. This is necessary for restoring normal frequency as
    # False must be returned for the restart operation to be done only once
    #
    def restart(self):
        self.start()
        return False

    ##
    # This function is called frequently to get price updates from the API
    #
    def _check_price(self):
        self.pair = self.asset_pair.get('pair')
        timestamp = time.time()
        command = DownloadCommand(self._get_ticker_url(), self.indicator.update_gui)
        command.timestamp = timestamp
        command.error = self._handle_error
        command.validation = self.asset_pair
        self.downloader.execute(command, self._handle_result)
        logging.info('Request with TS: ' + str(timestamp))
        if not self.error.is_ok():
            self.timeout_id = None

        return self.error.is_ok()  # continues the timer if there are no errors

    def _handle_error(self, error):
        """Record a download error against this exchange's error tracker."""
        self.error.log(str(error))
        self.error.increment()

    def _handle_result(self, command):
        """Validate an asynchronous ticker response and publish its prices."""
        if not command.response:
            logging.info("No response from API server")
            return

        data = command.response

        # Check to see if the returning response is still valid
        # (user may have changed exchanges before the request finished)
        if not self.started:
            logging.info("Discarding packet for inactive exchange")
            return

        if command.validation is not self.asset_pair:  # we've already moved on
            logging.info("Discarding packet for wrong asset pair or exchange")
            return

        # also check if a newer response hasn't already been returned
        if command.timestamp < self.indicator.latest_response:  # older request
            logging.info("Discarding outdated packet")
            return

        if data.status_code != 200:
            self._handle_error('API server returned an error: ' + str(data.status_code))
            return

        try:
            asset = data.json()
        except Exception:
            # Before, a KeyError happened when an asynchronous response comes in
            # for a previously selected asset pair (see upstream issue #27)
            self._handle_error('Invalid response for ' + str(self.pair))
            return

        results = self._parse_ticker(asset)
        self.indicator.latest_response = command.timestamp
        logging.info(
            'Response comes in with timestamp ' + str(command.timestamp) +
            ', last response at ' + str(self.indicator.latest_response))

        for item in CATEGORY:
            if results.get(item):
                self.indicator.prices[item] = self._decimal_auto(results.get(item))

        self.error.reset()
        GLib.idle_add(command.callback)

    ##
    # Rounds a number to a meaningful number of decimal places
    # and returns it as a string
    #
    def _decimal_auto(self, number):
        number = float(number)
        max_decimals = self.indicator.coin.settings.get("max_decimals", 8)
        significant_digits = self.indicator.coin.settings.get("significant_digits", 3)
        # Stop at the decimal count where `significant_digits` digits become
        # visible; the loop variable deliberately survives the loop.
        for decimals in range(0, max_decimals + 1):
            if number * (10 ** decimals) >= 10 ** (significant_digits - 1):
                break
        return ('{0:.' + str(decimals) + 'f}').format(number)
{ "repo_name": "nilgradisnik/coinprice-indicator", "path": "coin/exchange.py", "copies": "1", "size": "10003", "license": "mit", "hash": -2242239406417129500, "line_mean": 29.006006006, "line_max": 101, "alpha_frac": 0.5870696557, "autogenerated": false, "ratio": 4.1001231021748055, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5187192757874806, "avg_score": null, "num_lines": null }
# Abstract class that represents a connection to another computer.
# TODO: Packet ordering, acks, RTT, packet throttling.
# We have 464 bits for UDP; 398 bits after our protocol headers.
class Connection(object):
    """Per-peer connection state (sequence counters, received packets)."""

    def __init__(self, ip, port, idhash=0):
        # BUG FIX: original assigned the builtin ``id`` instead of the ``ip``
        # argument (``self.ip = id``).
        self.ip = ip
        self.port = port
        self.idhash = idhash
        self.localpacketcount = 0
        self.remotepacketcount = 0
        self.receivedPackets = []
        self.goodrtt = True

    def buildDatagram(self, protocol):
        """Build a datagram carrying the protocol header and bump the
        local sequence counter (wrapping at 255).

        BUG FIX: original referenced ``self.packetcount``, which is never
        defined — ``__init__`` defines ``localpacketcount``; it was also
        missing the ``self`` parameter and used C-style ``//`` comments
        that are not valid Python.
        """
        datagram = PyDatagram()
        datagram.addUint8(protocol)
        datagram.addUint8(self.localpacketcount)
        datagram.addUint8(self.localpacketcount)   # ack
        datagram.addUint16(self.localpacketcount)  # ack32
        datagram.addUint16(self.idhash)
        self.localpacketcount += 1
        if self.localpacketcount > 255:
            self.localpacketcount = 0
        return datagram

    def sequenceMoreRecent(self, s1, s2, maxSequence):
        """Return True if sequence number ``s1`` is more recent than ``s2``,
        allowing for wrap-around (e.g. 254, 255, 0, 1 is ascending).

        BUG FIX: original used the C operator ``||`` instead of ``or`` and
        was missing the ``self`` parameter.
        """
        return ((s1 > s2) and (s1 - s2 <= maxSequence / 2)) or \
               ((s2 > s1) and (s2 - s1 > maxSequence / 2))


class ServerConnection(Connection):
    # Representation of a server on the client.

    def login(self, username, hashpassword):
        """Build a login datagram (protocol 100) with credentials appended.

        BUG FIX: original was missing the ``self`` parameter yet called
        ``self.buildDatagram``.
        """
        datagram = self.buildDatagram(100)
        datagram.addString(username)
        datagram.addString(hashpassword)
        return datagram
{ "repo_name": "croxis/Panda-Core-Technology", "path": "LoginServer/connection.py", "copies": "1", "size": "1441", "license": "mit", "hash": -5264834068007468000, "line_mean": 31.0444444444, "line_max": 102, "alpha_frac": 0.6405274115, "autogenerated": false, "ratio": 3.792105263157895, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4932632674657895, "avg_score": null, "num_lines": null }
"""Abstract collection for experiment and integration of all experiment types.""" import itertools import re from snovault import ( abstract_collection, calculated_property, collection, load_schema ) from snovault.validators import ( validate_item_content_post, validate_item_content_put, validate_item_content_patch, validate_item_content_in_place, no_validate_item_content_post, no_validate_item_content_put, no_validate_item_content_patch ) from snovault.crud_views import ( collection_add, item_edit, ) from pyramid.view import view_config from snovault.attachment import ItemWithAttachment from snovault.util import debug_log from .base import ( Item, ALLOW_SUBMITTER_ADD, get_item_or_none, lab_award_attribution_embed_list ) from .dependencies import DependencyEmbedder EXP_CATEGORIZER_SCHEMA = { "title": "Categorizer", "description": "Fields used as an additional level of categorization for an experiment", "type": "object", "properties": { "field": { "type": "string", "description": "The name of the field as to be displayed in tables." }, "value": { "type": "string", "description": "The value displayed for the field" }, "combined": { "type": "string", "description": "Combined field:value string used for categorization of this experiment." } } } def _build_experiment_embedded_list(): """ Helper function intended to be used to create the embedded list for experiment. All types should implement a function like this going forward. """ pass @abstract_collection( name='experiments', unique_key='accession', acl=ALLOW_SUBMITTER_ADD, properties={ 'title': "Experiments", 'description': 'Listing of all types of experiments.', }) class Experiment(Item): """The main experiment class. As a special reminder on this item, which is a "parent type" item, that when you linkTo this and embed fields from it, you are picking up default_embeds (display_title) and must embed ALL possible fields that could impact it by analyzing the child types. 
""" # TODO: Review embeds of this item for correctness. - Will 1/26/2021 item_type = 'experiment' base_types = ['Experiment'] + Item.base_types schema = load_schema('encoded:schemas/experiment.json') name_key = 'accession' rev = { 'experiment_sets': ('ExperimentSet', 'experiments_in_set') } aggregated_items = { 'badges': [ 'messages', 'badge.commendation', 'badge.warning', 'badge.uuid', 'badge.@id', 'badge.badge_icon', 'badge.description' ] } embedded_list = Item.embedded_list + lab_award_attribution_embed_list + [ # Badge linkTo 'badges.badge.title', 'badges.badge.commendation', 'badges.badge.warning', 'badges.badge.badge_classification', 'badges.badge.description', 'badges.badge.badge_icon', 'badges.messages', # ExperimentSet linkTo 'experiment_sets.experimentset_type', 'experiment_sets.@type', 'experiment_sets.accession', # Publication linkTo 'produced_in_pub.ID', 'produced_in_pub.title', 'produced_in_pub.abstract', 'produced_in_pub.journal', 'produced_in_pub.authors', 'produced_in_pub.short_attribution', 'produced_in_pub.date_published', # Publication linkTo 'publications_of_exp.ID', 'publications_of_exp.title', 'publications_of_exp.abstract', 'publications_of_exp.journal', 'publications_of_exp.authors', 'publications_of_exp.short_attribution', 'publications_of_exp.date_published', # Biosample linkTo 'biosample.accession', 'biosample.modifications_summary', # XXX: investigate these calc props for needed embeds 'biosample.treatments_summary', 'biosample.biosource_summary', 'biosample.biosample_type', # Biosouce linkTo (lots) 'biosample.biosource.*', # OntologyTerm linkTo 'biosample.biosource.cell_line.slim_terms', 'biosample.biosource.cell_line.synonyms', 'biosample.biosource.cell_line.preferred_name', 'biosample.biosource.cell_line.term_name', 'biosample.biosource.cell_line.term_id', # OntologyTerm linkTo 'biosample.biosource.tissue.slim_terms', 'biosample.biosource.tissue.synonyms', 'biosample.biosource.tissue.preferred_name', 
'biosample.biosource.tissue.term_name', 'biosample.biosource.tissue.term_id', # Organism linkTo 'biosample.biosource.individual.organism.name', 'biosample.biosource.individual.organism.scientific_name', # Modification linkTo 'biosample.modifications.modification_type', 'biosample.modifications.genomic_change', 'biosample.modifications.override_modification_name', 'biosample.modifications.description', # BioFeature linkTo 'biosample.modifications.target_of_mod.feature_type', 'biosample.modifications.target_of_mod.preferred_label', 'biosample.modifications.target_of_mod.cellular_structure', 'biosample.modifications.target_of_mod.organism_name', 'biosample.modifications.target_of_mod.relevant_genes', 'biosample.modifications.target_of_mod.feature_mods', 'biosample.modifications.target_of_mod.genome_location', # Treatment linkTo 'biosample.treatments.treatment_type', 'biosample.treatments.description', 'biosample.treatments.chemical', 'biosample.treatments.biological_agent', 'biosample.treatments.duration', 'biosample.treatments.duration_units', 'biosample.treatments.concentration', 'biosample.treatments.concentration_units', 'biosample.treatments.temperature', # Construct linkTo 'biosample.treatments.constructs.name', # Badge linkTo 'biosample.badges.badge.title', 'biosample.badges.badge.commendation', 'biosample.badges.badge.warning', 'biosample.badges.badge.badge_classification', 'biosample.badges.badge.description', 'biosample.badges.badge.badge_icon', 'biosample.badges.messages', # ExperimentType linkTo 'experiment_type.title', 'experiment_type.experiment_category', 'experiment_type.assay_subclass_short', # Files linkTo 'files.href', 'files.accession', 'files.uuid', 'files.file_size', 'files.upload_key', 'files.file_classification', 'files.file_type_detailed', 'files.paired_end', 'files.external_references.*', # FileFormat linkTo 'files.file_format.file_format', # QualityMetric linkTo 'files.quality_metric.Total Sequences', 'files.quality_metric.Sequence length', 
'files.quality_metric.url', 'files.quality_metric.overall_quality_status', 'files.quality_metric.quality_metric_summary.*', # FileProcessed linkTo 'processed_files.href', 'processed_files.accession', 'processed_files.uuid', 'processed_files.file_size', 'processed_files.upload_key', 'processed_files.file_classification', 'processed_files.file_type_detailed', 'processed_files.external_references.*', # FileFormat linkTo 'processed_files.file_format.file_format', # QualityMetric linkTo 'processed_files.quality_metric.url', 'processed_files.quality_metric.overall_quality_status', 'processed_files.quality_metric.quality_metric_summary.*', 'processed_files.quality_metric.Total reads', 'processed_files.quality_metric.qc_list.value.Total reads', # Object 'other_processed_files.title', 'other_processed_files.description', 'other_processed_files.type', 'other_processed_files.files.notes_to_tsv', # File linkTo "processed_files.href", "processed_files.accession", "processed_files.uuid", "processed_files.file_size", "processed_files.upload_key", "processed_files.file_format", "processed_files.file_classification", "processed_files.file_type_detailed", "processed_files.external_references.*", "processed_files.quality_metric.url", "processed_files.quality_metric.overall_quality_status", "processed_files.quality_metric.quality_metric_summary.*", "processed_files.quality_metric.Total reads", "processed_files.quality_metric.qc_list.value.Total reads", "processed_files.notes_to_tsv", "processed_files.open_data_url", "processed_files.track_and_facet_info.*", "other_processed_files.files.href", "other_processed_files.files.file_type_detailed", "other_processed_files.files.file_size", "other_processed_files.files.higlass_uid", "other_processed_files.files.genome_assembly", "other_processed_files.files.status", "other_processed_files.files.notes_to_tsv", "other_processed_files.files.open_data_url", "other_processed_files.files.track_and_facet_info.*", 
"other_processed_files.files.quality_metric.url", "other_processed_files.files.quality_metric.overall_quality_status", "other_processed_files.files.quality_metric.quality_metric_summary.*", # FileFormat linkTo "other_processed_files.files.file_format.file_format", # last modification (since just time, no invalidation) "other_processed_files.files.last_modified.date_modified", # QualityMetric linkTo 'other_processed_files.files.quality_metric.url', 'other_processed_files.files.quality_metric.overall_quality_status', 'other_processed_files.files.quality_metric.quality_metric_summary.*', # FileReference linkTo "reference_files.accession", "reference_files.file_type_detailed", "reference_files.file_size", "reference_files.file_classification", "reference_files.status" ] def generate_mapid(self, experiment_type, num): delim = '_' mapid = str(type(self).__name__) mapid = mapid + delim + ''.join(experiment_type.split()) return mapid + delim + str(num) def has_bad_status(self, status): bad_statuses = ["revoked", "deleted", "obsolete", "replaced"] return status in bad_statuses def find_current_sop_map(self, experiment_type, sop_coll=None): maps = [] suffnum = 1 mapid = self.generate_mapid(experiment_type, suffnum) if sop_coll is not None: while(True): m = sop_coll.get(mapid) if not m: break if not self.has_bad_status(m.properties.get('status')): maps.append(m) suffnum += 1 mapid = self.generate_mapid(experiment_type, suffnum) if len(maps) > 0: sopmap = maps[-1] try: status = sopmap.properties.get('status') if not self.has_bad_status(status): return sopmap except AttributeError: # pragma: no cover pass return None def _update(self, properties, sheets=None): sop_coll = None exp_type = self.registry['collections']['ExperimentType'] exp_type_title = exp_type.get(properties['experiment_type']).properties['title'] if 'sop_mapping' in properties.keys(): # check if the SopMap has bad Status sop_coll = self.registry['collections']['SopMap'] currmap = 
properties['sop_mapping'].get('sopmap') if currmap: try: if self.has_bad_status(sop_coll.get(currmap)['status']): # delete mapping from properties del properties['sop_mapping'] except AttributeError: # want to do some logging print("CHECK STATUS OF SOP MAP") if 'sop_mapping' not in properties.keys(): if sop_coll is None: sop_coll = self.registry['collections']['SopMap'] # if sop_mapping field not present see if it should be sopmap = self.find_current_sop_map(exp_type_title, sop_coll) properties['sop_mapping'] = {} if sopmap is not None: sop_mapping = str(sopmap.uuid) properties['sop_mapping']['sop_map'] = sop_mapping properties['sop_mapping']['has_sop'] = "Yes" else: properties['sop_mapping']['has_sop'] = "No" # update self first to ensure 'experiment_relation' are stored in self.properties super(Experiment, self)._update(properties, sheets) DicRefRelation = { "controlled by": "control for", "derived from": "source for", "control for": "controlled by", "source for": "derived from", "input of": "has input", "has input": "input of" } acc = str(self.uuid) if 'experiment_relation' in properties.keys(): for relation in properties["experiment_relation"]: switch = relation["relationship_type"] rev_switch = DicRefRelation[switch] related_exp = relation["experiment"] relationship_entry = {"relationship_type": rev_switch, "experiment": acc} rel_dic = {'experiment_relation': [relationship_entry, ]} target_exp = self.collection.get(related_exp) # case one we don't have relations if 'experiment_relation' not in target_exp.properties.keys(): target_exp.properties.update(rel_dic) target_exp.update(target_exp.properties) else: # case two we have relations but not the one we need for target_relation in target_exp.properties['experiment_relation']: if target_relation['experiment'] == acc: break else: # make data for new experiemnt_relation target_exp.properties['experiment_relation'].append(relationship_entry) target_exp.update(target_exp.properties) @calculated_property(schema={ 
"title": "Experiment Sets", "description": "Experiment Sets to which this experiment belongs.", "type": "array", "exclude_from": ["submit4dn", "FFedit-create"], "items": { "title": "Experiment Set", "type": ["string", "object"], "linkTo": "ExperimentSet" } }) def experiment_sets(self, request): return self.rev_link_atids(request, "experiment_sets") @calculated_property(schema={ "title": "Produced in Publication", "description": "The Publication in which this Experiment was produced.", "type": "string", "linkTo": "Publication" }) def produced_in_pub(self, request, references=None): # references field is the boss if it exists # in each case selecting the first member if multiple if references: return references[0] esets = [request.embed('/', str(uuid), '@@object') for uuid in self.experiment_sets(request)] # replicate experiment set is the boss reps = [eset for eset in esets if 'ExperimentSetReplicate' in eset['@type']] if reps: return reps[0].get('produced_in_pub') @calculated_property(schema={ "title": "Publications", "description": "Publications associated with this Experiment.", "type": "array", "items": { "title": "Publication", "type": "string", "linkTo": "Publication" } }) def publications_of_exp(self, request): esets = [request.embed('/', str(uuid), '@@object') for uuid in self.experiment_sets(request)] pubs = list(set(itertools.chain.from_iterable([eset.get('publications_of_set', []) for eset in esets]))) return pubs @calculated_property(schema=EXP_CATEGORIZER_SCHEMA) def experiment_categorizer(self, request, experiment_type, digestion_enzyme=None, targeted_factor=None): ''' The generalish case for if there is a targeted_factor use that and if not use enzyme - more specific cases in specific schemas ''' out_dict = { "field": "Default", "value": None } types4control = [ '/experiment-types/damid-seq/', '/experiment-types/chip-seq/', '/experiment-types/nad-seq/', '/experiment-types/cut-n-run/' ] if experiment_type in types4control and not targeted_factor: 
out_dict['field'] = 'Target' out_dict['value'] = 'None (Control)' elif targeted_factor: tstring = '' for tf in targeted_factor: target_props = request.embed(tf, '@@object') tstring += ', {}'.format(target_props['display_title']) out_dict['field'] = 'Target' out_dict['value'] = tstring[2:] elif digestion_enzyme: obj = request.embed('/', digestion_enzyme, '@@object') out_dict['field'] = 'Enzyme' out_dict['value'] = obj['display_title'] if out_dict['value'] is not None: out_dict['combined'] = out_dict['field'] + ': ' + out_dict['value'] return out_dict class Collection(Item.Collection): pass @collection( name='experiments-hi-c', unique_key='accession', properties={ 'title': 'Experiments Hi-C', 'description': 'Listing Hi-C Experiments', }) class ExperimentHiC(Experiment): """The experiment class for Hi-C experiments.""" item_type = 'experiment_hi_c' schema = load_schema('encoded:schemas/experiment_hi_c.json') embedded_list = Experiment.embedded_list + [ # Enzyme linkTo 'digestion_enzyme.name' ] name_key = 'accession' @calculated_property(schema={ "title": "Experiment summary", "description": "Summary of the experiment, including type, enzyme and biosource.", "type": "string", }) def experiment_summary(self, request, experiment_type, biosample, digestion_enzyme=None): sum_str = request.embed(experiment_type, '@@object')['display_title'] biosamp_props = request.embed(biosample, '@@object') biosource = biosamp_props['biosource_summary'] sum_str += (' on ' + biosource) if digestion_enzyme: de_props = request.embed(digestion_enzyme, '@@object') de_name = de_props['name'] sum_str += (' with ' + de_name) return sum_str @calculated_property(schema={ "title": "Display Title", "description": "A calculated title for every object in 4DN", "type": "string" }) def display_title(self, request, experiment_type, biosample, digestion_enzyme=None): return self.add_accession_to_title(self.experiment_summary(request, experiment_type, biosample, digestion_enzyme)) @collection( 
# Line below: continuation of the @collection decorator opened on the
# previous line — ExperimentCaptureC: embedded_list adds enzyme and
# targeted_regions (BioFeature/GenomicRegion/Gene/oligo-file) embeds;
# experiment_summary appends biosource and optional enzyme; line ends inside
# the display_title schema ("Display" / "Title" split across lines).
name='experiments-capture-c', unique_key='accession', properties={ 'title': 'Experiments Capture Hi-C', 'description': 'Listing Capture Hi-C Experiments', }) class ExperimentCaptureC(Experiment): """The experiment class for Capture Hi-C experiments.""" item_type = 'experiment_capture_c' schema = load_schema('encoded:schemas/experiment_capture_c.json') embedded_list = Experiment.embedded_list + [ # Enzyme linkTo 'digestion_enzyme.name', # Biofeature linkTo 'targeted_regions.target.feature_type', 'targeted_regions.target.preferred_label', 'targeted_regions.target.cellular_structure', 'targeted_regions.target.organism_name', # GenomicRegion linkTo 'targeted_regions.target.genome_location.genome_assembly', 'targeted_regions.target.genome_location.location_description', 'targeted_regions.target.genome_location.start_coordinate', 'targeted_regions.target.genome_location.end_coordinate', 'targeted_regions.target.genome_location.chromosome', # Object 'targeted_regions.target.feature_mods.mod_type', 'targeted_regions.target.feature_mods.mod_position', # Gene linkTo 'targeted_regions.target.relevant_genes.geneid', 'targeted_regions.target.relevant_genes.preferred_symbol', # File linkTo 'targeted_regions.oligo_file.file_format.*', 'targeted_regions.oligo_file.accession', 'targeted_regions.oligo_file.href', ] name_key = 'accession' @calculated_property(schema={ "title": "Experiment summary", "description": "Summary of the experiment, including type, enzyme and biosource.", "type": "string", }) def experiment_summary(self, request, experiment_type, biosample, digestion_enzyme=None): sum_str = request.embed(experiment_type, '@@object')['display_title'] biosamp_props = request.embed(biosample, '@@object') biosource = biosamp_props['biosource_summary'] sum_str += (' on ' + biosource) if digestion_enzyme: de_props = request.embed(digestion_enzyme, '@@object') de_name = de_props['name'] sum_str += (' with ' + de_name) return sum_str @calculated_property(schema={ "title": "Display 
# Line below: ExperimentCaptureC display_title and its
# experiment_categorizer override (sorted targeted-region display titles,
# else falls back to the generic enzyme/target logic); then the start of
# _build_experiment_repliseq_embedded_list (docstring continues onto the
# next line).
Title", "description": "A calculated title for every object in 4DN", "type": "string" }) def display_title(self, request, experiment_type, biosample, digestion_enzyme=None): return self.add_accession_to_title(self.experiment_summary(request, experiment_type, biosample, digestion_enzyme)) @calculated_property(schema=EXP_CATEGORIZER_SCHEMA) def experiment_categorizer(self, request, experiment_type, biosample, targeted_regions=None, digestion_enzyme=None): ''' Use targeted_regions information for capture-c''' if targeted_regions: regions = [] for tregion in targeted_regions: targetfeats = tregion.get('target', []) for feat in targetfeats: region = request.embed(feat, '@@object')['display_title'] regions.append(region) if regions: value = ', '.join(sorted(regions)) return { 'field': 'Target', 'value': value, 'combined': 'Target: ' + value } return super(ExperimentCaptureC, self).experiment_categorizer(request, experiment_type, digestion_enzyme) def _build_experiment_repliseq_embedded_list(): """ Helper function intended to be used to create the embedded list for ExperimentRepliseq. All types should implement a function like this going forward. 
# Line below: end of the repliseq embedded-list helper (Experiment embeds +
# DependencyEmbedder antibody embeds); ExperimentRepliseq: experiment_summary
# appends biosource, cell-cycle phase and stage fraction; display_title; and
# the start of its experiment_categorizer (fraction "X of N fractions"; ends
# mid-statement on a string concatenation).
""" antibody_embeds = DependencyEmbedder.embed_defaults_for_type( base_path='antibody', t='antibody') return ( Experiment.embedded_list + antibody_embeds ) @collection( name='experiments-repliseq', unique_key='accession', properties={ 'title': 'Experiments Repli-seq', 'description': 'Listing of Repli-seq Experiments', }) class ExperimentRepliseq(Experiment): """The experiment class for Repli-seq experiments.""" item_type = 'experiment_repliseq' schema = load_schema('encoded:schemas/experiment_repliseq.json') embedded_list = _build_experiment_repliseq_embedded_list() name_key = 'accession' @calculated_property(schema={ "title": "Experiment summary", "description": "Summary of the experiment, including type, enzyme and biosource.", "type": "string", }) def experiment_summary(self, request, experiment_type, biosample, cell_cycle_phase=None, stage_fraction=None): sum_str = request.embed(experiment_type, '@@object')['display_title'] biosamp_props = request.embed(biosample, '@@object') biosource = biosamp_props['biosource_summary'] sum_str += (' on ' + biosource) if cell_cycle_phase: sum_str += (' ' + cell_cycle_phase + '-phase') if stage_fraction: sum_str += (' ' + stage_fraction) return sum_str @calculated_property(schema={ "title": "Display Title", "description": "A calculated title for every object in 4DN", "type": "string" }) def display_title(self, request, experiment_type, biosample, cell_cycle_phase=None, stage_fraction=None): return self.add_accession_to_title(self.experiment_summary(request, experiment_type, biosample, cell_cycle_phase, stage_fraction)) @calculated_property(schema=EXP_CATEGORIZER_SCHEMA) def experiment_categorizer(self, request, experiment_type, biosample, stage_fraction=None, total_fractions_in_exp=None): ''' Use combination of fraction and total number of fractions''' if stage_fraction: value = stage_fraction + ' of ' if not total_fractions_in_exp: fraction = 'an unspecified number of fractions' else: fraction = str(total_fractions_in_exp) + 
# Line below: end of the Repliseq categorizer (falls back to the generic
# categorizer when no stage_fraction); the full ExperimentAtacseq collection
# (summary/display_title only); and the start of
# _build_experiment_chiapet_embedded_list (docstring continues onto the next
# line).
' fractions' value = value + fraction return { 'field': 'Fraction', 'value': value, 'combined': 'Fraction: ' + value } else: return super(ExperimentRepliseq, self).experiment_categorizer(request, experiment_type) @collection( name='experiments-atacseq', unique_key='accession', properties={ 'title': 'Experiments ATAC-seq', 'description': 'Listing ATAC-seq Experiments', }) class ExperimentAtacseq(Experiment): """The experiment class for ATAC-seq experiments.""" item_type = 'experiment_atacseq' schema = load_schema('encoded:schemas/experiment_atacseq.json') embedded_list = Experiment.embedded_list name_key = 'accession' @calculated_property(schema={ "title": "Experiment summary", "description": "Summary of the experiment, including type and biosource.", "type": "string", }) def experiment_summary(self, request, experiment_type, biosample): sum_str = request.embed(experiment_type, '@@object')['display_title'] biosamp_props = request.embed(biosample, '@@object') biosource = biosamp_props['biosource_summary'] sum_str += (' on ' + biosource) return sum_str @calculated_property(schema={ "title": "Display Title", "description": "A calculated title for every object in 4DN", "type": "string" }) def display_title(self, request, experiment_type, biosample): return self.add_accession_to_title(self.experiment_summary(request, experiment_type, biosample)) def _build_experiment_chiapet_embedded_list(): """ Helper function intended to be used to create the embedded list for ExperimentChiapet. All types should implement a function like this going forward. 
# Line below: end of the chiapet embedded-list helper (antibody embeds);
# ExperimentChiapet (summary appends "against <targets>" then biosource;
# display_title); then the @collection decorator and opening attributes of
# ExperimentDamid (line ends mid-statement at "name_key =").
""" antibody_embeds = DependencyEmbedder.embed_defaults_for_type( base_path='antibody', t='antibody') return ( Experiment.embedded_list + antibody_embeds ) @collection( name='experiments-chiapet', unique_key='accession', properties={ 'title': 'Experiments CHIA-pet', 'description': 'Listing CHIA-pet and PLAC-seq Experiments', }) class ExperimentChiapet(Experiment): """The experiment class for CHIA-pet and PLAC-seq experiments.""" item_type = 'experiment_chiapet' schema = load_schema('encoded:schemas/experiment_chiapet.json') embedded_list = _build_experiment_chiapet_embedded_list() name_key = 'accession' @calculated_property(schema={ "title": "Experiment summary", "description": "Summary of the experiment, including type and biosource.", "type": "string", }) def experiment_summary(self, request, experiment_type, biosample, targeted_factor=None): sum_str = request.embed(experiment_type, '@@object')['display_title'] if targeted_factor: tstring = '' for tf in targeted_factor: target_props = request.embed(tf, '@@object') tstring += ', {}'.format(target_props['display_title']) sum_str += (' against ' + tstring[2:]) biosamp_props = request.embed(biosample, '@@object') biosource = biosamp_props['biosource_summary'] sum_str += (' on ' + biosource) return sum_str @calculated_property(schema={ "title": "Display Title", "description": "A calculated title for every object in 4DN", "type": "string" }) def display_title(self, request, experiment_type, biosample, targeted_factor=None): return self.add_accession_to_title(self.experiment_summary(request, experiment_type, biosample, targeted_factor)) @collection( name='experiments-damid', unique_key='accession', properties={ 'title': 'Experiments DAM-ID', 'description': 'Listing DAM-ID Experiments', }) class ExperimentDamid(Experiment): """The experiment class for DAM-ID experiments.""" item_type = 'experiment_damid' schema = load_schema('encoded:schemas/experiment_damid.json') embedded_list = Experiment.embedded_list name_key = 
# Line below: continuation of ExperimentDamid (summary appends "with DAM-
# <fusion>" for a single target, a multiple-fusions note otherwise, then
# biosource; display_title); then the start of
# _build_experiment_seq_embedded_list (docstring continues onto the next
# line).
'accession' @calculated_property(schema={ "title": "Experiment summary", "description": "Summary of the experiment, including type and biosource.", "type": "string", }) def experiment_summary(self, request, experiment_type, biosample, targeted_factor=None): sum_str = request.embed(experiment_type, '@@object')['display_title'] if targeted_factor: if len(targeted_factor) == 1: tname = request.embed(targeted_factor[0], '@@object')['display_title'] fusion = tname.split(' ')[0] sum_str += (' with DAM-' + fusion) else: sum_str += (' with mulitiple DAM fusions') biosamp_props = request.embed(biosample, '@@object') biosource = biosamp_props['biosource_summary'] sum_str += (' on ' + biosource) return sum_str @calculated_property(schema={ "title": "Display Title", "description": "A calculated title for every object in 4DN", "type": "string" }) def display_title(self, request, experiment_type, biosample, targeted_factor=None): return self.add_accession_to_title(self.experiment_summary(request, experiment_type, biosample, targeted_factor)) def _build_experiment_seq_embedded_list(): """ Helper function intended to be used to create the embedded list for ExperimentSeq. All types should implement a function like this going forward. 
# Line below: end of the seq embedded-list helper (antibody embeds);
# ExperimentSeq (mixes in ItemWithAttachment; summary appends "against
# <targets>" then biosource; display_title); then the start of
# _build_experiment_tsaseq_embedded_list (docstring continues onto the next
# line).
""" antibody_embeds = DependencyEmbedder.embed_defaults_for_type( base_path='antibody', t='antibody') return ( Experiment.embedded_list + antibody_embeds ) @collection( name='experiments-seq', unique_key='accession', properties={ 'title': 'Experiments CHIPseq, RNAseq ...', 'description': 'Listing of ChIP and RNA seq type experiments', }) class ExperimentSeq(ItemWithAttachment, Experiment): """The experiment class for ChIPseq and RNAseq and potentially other types.""" item_type = 'experiment_seq' schema = load_schema('encoded:schemas/experiment_seq.json') embedded_list = _build_experiment_seq_embedded_list() name_key = 'accession' @calculated_property(schema={ "title": "Experiment summary", "description": "Summary of the experiment, including type and biosource.", "type": "string", }) def experiment_summary(self, request, experiment_type, biosample, targeted_factor=None): sum_str = request.embed(experiment_type, '@@object')['display_title'] if targeted_factor: tstring = '' for tf in targeted_factor: target_props = request.embed(tf, '@@object') tstring += ', {}'.format(target_props['display_title']) sum_str += (' against ' + tstring[2:]) biosamp_props = request.embed(biosample, '@@object') biosource = biosamp_props['biosource_summary'] sum_str += (' on ' + biosource) return sum_str @calculated_property(schema={ "title": "Display Title", "description": "A calculated title for every object in 4DN", "type": "string" }) def display_title(self, request, experiment_type, biosample, targeted_factor=None): return self.add_accession_to_title(self.experiment_summary(request, experiment_type, biosample, targeted_factor)) def _build_experiment_tsaseq_embedded_list(): """ Helper function intended to be used to create the embedded list for ExperimentTsaseq. All types should implement a function like this going forward. 
# Line below: end of the tsaseq embedded-list helper (primary + secondary
# antibody embeds); ExperimentTsaseq (mixes in ItemWithAttachment; same
# summary/display_title shape as ExperimentSeq); then the start of
# _build_experiment_mic_embedded_list (docstring continues onto the next
# line).
""" antibody_embeds = DependencyEmbedder.embed_defaults_for_type( base_path='antibody', t='antibody') secondary_antibody_embeds = DependencyEmbedder.embed_defaults_for_type( base_path='secondary_antibody', t='antibody') return ( Experiment.embedded_list + antibody_embeds + secondary_antibody_embeds ) @collection( name='experiments-tsaseq', unique_key='accession', properties={ 'title': 'Experiments TSA-Seq', 'description': 'Listing of TSA-seq type experiments', }) class ExperimentTsaseq(ItemWithAttachment, Experiment): """The experiment class for TSA-seq.""" item_type = 'experiment_tsaseq' schema = load_schema('encoded:schemas/experiment_tsaseq.json') embedded_list = _build_experiment_tsaseq_embedded_list() name_key = 'accession' @calculated_property(schema={ "title": "Experiment summary", "description": "Summary of the experiment, including type and biosource.", "type": "string", }) def experiment_summary(self, request, experiment_type, biosample, targeted_factor=None): sum_str = request.embed(experiment_type, '@@object')['display_title'] if targeted_factor: tstring = '' for tf in targeted_factor: target_props = request.embed(tf, '@@object') tstring += ', {}'.format(target_props['display_title']) sum_str += (' against ' + tstring[2:]) biosamp_props = request.embed(biosample, '@@object') biosource = biosamp_props['biosource_summary'] sum_str += (' on ' + biosource) return sum_str @calculated_property(schema={ "title": "Display Title", "description": "A calculated title for every object in 4DN", "type": "string" }) def display_title(self, request, experiment_type, biosample, targeted_factor=None): return self.add_accession_to_title(self.experiment_summary(request, experiment_type, biosample, targeted_factor)) def _build_experiment_mic_embedded_list(): """ Helper function intended to be used to create the embedded list for ExperimentMic. All types should implement a function like this going forward. 
# Line below: end of the mic embedded-list helper (imaging-path embeds plus
# microscope-settings and sample-image embeds); ExperimentMic collection
# opening and its experiment_summary (line ends mid-statement at
# "biosource =").
""" imaging_path_embeds = DependencyEmbedder.embed_for_type( base_path='imaging_paths.path', t='imaging_path', additional_embeds=['imaging_rounds', 'experiment_type.title']) return (Experiment.embedded_list + imaging_path_embeds + [ # Files linkTo 'files.accession', # detect display_title diff # MicroscopeSettings linkTo 'files.microscope_settings.ch00_light_source_center_wl', 'files.microscope_settings.ch01_light_source_center_wl', 'files.microscope_settings.ch02_light_source_center_wl', 'files.microscope_settings.ch03_light_source_center_wl', 'files.microscope_settings.ch04_light_source_center_wl', 'files.microscope_settings.ch00_lasers_diodes', 'files.microscope_settings.ch01_lasers_diodes', 'files.microscope_settings.ch02_lasers_diodes', 'files.microscope_settings.ch03_lasers_diodes', 'files.microscope_settings.ch04_lasers_diodes', # Image linkTo 'sample_image.title', 'sample_image.caption', 'sample_image.microscopy_file.accession', 'sample_image.microscopy_file.omerolink', 'sample_image.attachment.href', 'sample_image.attachment.type', 'sample_image.attachment.md5sum', 'sample_image.attachment.download', 'sample_image.attachment.width', 'sample_image.attachment.height', ] ) @collection( name='experiments-mic', unique_key='accession', properties={ 'title': 'Microscopy Experiments', 'description': 'Listing of Microscopy Experiments', }) class ExperimentMic(Experiment): """The experiment class for Microscopy experiments.""" item_type = 'experiment_mic' schema = load_schema('encoded:schemas/experiment_mic.json') embedded_list = _build_experiment_mic_embedded_list() name_key = 'accession' @calculated_property(schema={ "title": "Experiment summary", "description": "Summary of the experiment, including type, enzyme and biosource.", "type": "string", }) def experiment_summary(self, request, experiment_type, biosample): sum_str = request.embed(experiment_type, '@@object')['display_title'] biosamp_props = request.embed(biosample, '@@object') biosource = 
# Line below: end of ExperimentMic (display_title; experiment_categorizer
# aggregates imaging-path targets, summing numeric prefixes like
# "50 TADs" + "40 TADs" -> "90 TADs"); the module-level `clone` action
# (allowed when the user has 'create' permission); and the def line of
# validate_exp_type_validity_for_experiment — its body is beyond this chunk,
# so it is documented no further here.
biosamp_props['biosource_summary'] sum_str += (' on ' + biosource) return sum_str @calculated_property(schema={ "title": "Display Title", "description": "A calculated title for every object in 4DN", "type": "string" }) def display_title(self, request, experiment_type, biosample): return self.add_accession_to_title(self.experiment_summary(request, experiment_type, biosample)) @calculated_property(schema=EXP_CATEGORIZER_SCHEMA) def experiment_categorizer(self, request, experiment_type, biosample, imaging_paths=None): ''' Use the target(s) in the imaging path''' if imaging_paths: path_targets = [] for pathobj in imaging_paths: path = get_item_or_none(request, pathobj['path'], 'imaging_path') for target in path.get('target', []): summ = get_item_or_none(request, target, 'bio_feature')['display_title'] path_targets.append(summ) if path_targets: value = [] sum_targets = {} for target in path_targets: # check if target starts with numbers, e.g. '50 TADs', '40 TADs' # sum them if there are more: '90 TADs' split_target = re.split(r'(^[0-9]+)', target, maxsplit=1) if len(split_target) > 1: t_num, t_name = split_target[1:3] sum_targets[t_name] = sum_targets.setdefault(t_name, 0) + int(t_num) elif target not in value: value.append(target) if sum_targets: value = [str(n) + t for t, n in sum_targets.items()] + value value = ', '.join(value) return { 'field': 'Target', 'value': value, 'combined': 'Target: ' + value } return super(ExperimentMic, self).experiment_categorizer(request, experiment_type) @calculated_property(context=Experiment, category='action') def clone(context, request): """If the user submits for any lab, allow them to clone This is like creating, but keeps previous fields""" if request.has_permission('create'): return { 'name': 'clone', 'title': 'Clone', 'profile': '/profiles/{ti.name}.json'.format(ti=context.type_info), 'href': '{item_uri}#!clone'.format(item_uri=request.resource_path(context)), } def validate_exp_type_validity_for_experiment(context, request): 
"""Check if the specified experiment type (e.g. in situ Hi-C) is allowed for the experiment schema (e.g. ExperimentHiC). """ data = request.json if 'experiment_type' in data: exp_type_item = get_item_or_none(request, data['experiment_type'], 'experiment-types') if not exp_type_item: # pragma: no cover # item level validation will take care of generating the error return exp_type_name = exp_type_item['title'] allowed_types = exp_type_item.get('valid_item_types', []) exp = context.type_info.name if exp not in allowed_types: msg = 'Experiment Type {} is not allowed for {}'.format(exp_type_name, exp) request.errors.add('body', 'Experiment: invalid experiment type', msg) else: request.validated.update({}) @view_config(context=Experiment.Collection, permission='add', request_method='POST', validators=[validate_item_content_post, validate_exp_type_validity_for_experiment]) @view_config(context=Experiment.Collection, permission='add_unvalidated', request_method='POST', validators=[no_validate_item_content_post], request_param=['validate=false']) @debug_log def experiment_add(context, request, render=None): return collection_add(context, request, render) @view_config(context=Experiment, permission='edit', request_method='PUT', validators=[validate_item_content_put, validate_exp_type_validity_for_experiment]) @view_config(context=Experiment, permission='edit', request_method='PATCH', validators=[validate_item_content_patch, validate_exp_type_validity_for_experiment]) @view_config(context=Experiment, permission='edit_unvalidated', request_method='PUT', validators=[no_validate_item_content_put], request_param=['validate=false']) @view_config(context=Experiment, permission='edit_unvalidated', request_method='PATCH', validators=[no_validate_item_content_patch], request_param=['validate=false']) @view_config(context=Experiment, permission='index', request_method='GET', validators=[validate_item_content_in_place, validate_exp_type_validity_for_experiment], 
request_param=['check_only=true']) @debug_log def experiment_edit(context, request, render=None): return item_edit(context, request, render)
{ "repo_name": "hms-dbmi/fourfront", "path": "src/encoded/types/experiment.py", "copies": "2", "size": "43569", "license": "mit", "hash": 4173908480317674500, "line_mean": 38.5004533092, "line_max": 138, "alpha_frac": 0.6191099176, "autogenerated": false, "ratio": 3.9078841151672794, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.000758795151638835, "num_lines": 1103 }
"""Abstract definition of database connections""" import functools import pathlib @functools.lru_cache(maxsize=None) def db(alias): """Returns a database configuration by alias""" from . import config databases = config.databases() if alias not in databases: raise KeyError(f'database alias "{alias}" not configured') return databases[alias] class DB: """Generic database connection definition""" def __repr__(self) -> str: return (f'<{self.__class__.__name__}: ' + ', '.join([f'{var}={"*****" if (var == "password" or "secret" in var) else getattr(self, var)}' for var in vars(self) if getattr(self, var)]) + '>') class PostgreSQLDB(DB): def __init__(self, host: str = None, port: int = None, database: str = None, user: str = None, password: str = None, sslmode: str = None, sslrootcert: str = None, sslcert: str = None, sslkey: str = None): """ Connection information for a PostgreSQL database For the SSL options, see https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-PROTECTION """ self.host = host self.database = database self.port = port self.user = user self.password = password self.sslmode = sslmode self.sslrootcert = sslrootcert self.sslcert = sslcert self.sslkey = sslkey class RedshiftDB(PostgreSQLDB): def __init__(self, host: str = None, port: int = None, database: str = None, user: str = None, password: str = None, aws_access_key_id=None, aws_secret_access_key=None, aws_s3_bucket_name=None): """ Connection information for a RedShift database The aws_* parameters are for copying to Redshift from stdin via an s3 bucket (requires the https://pypi.org/project/awscli/) package to be installed) """ self.aws_access_key_id = aws_access_key_id self.aws_secret_access_key = aws_secret_access_key self.aws_s3_bucket_name = aws_s3_bucket_name super(RedshiftDB, self).__init__(host, port, database, user, password) class BigQueryDB(DB): def __init__(self, service_account_json_file_name: str, location: str = None, project: str = None, dataset: str = None, 
gcloud_gcs_bucket_name=None, use_legacy_sql: bool = False): """ Connection information for a BigQueryDB database Enabling the BigQuery API and Service account json credentials are required. For more: https://cloud.google.com/bigquery/docs/quickstarts/quickstart-client-libraries#before-you-begin Args: service_account_json_file_name: The name of the private key file provided by Google when creating a service account (in json format) location: Default geographic location to use when creating datasets or determining where jobs should run project: Default project to use for requests. dataset: Default dataset to use for requests. gcloud_gcs_bucket_name: The Google Cloud Storage bucked used as cache for loading data use_legacy_sql: (default: false) If true, use the old BigQuery SQL dialect is used. """ self.service_account_json_file_name = service_account_json_file_name self.location = location self.project = project self.dataset = dataset self.gcloud_gcs_bucket_name = gcloud_gcs_bucket_name self.use_legacy_sql = use_legacy_sql class MysqlDB(DB): def __init__(self, host: str = None, port: int = None, database: str = None, user: str = None, password: str = None, ssl: bool = None, charset: str = None): self.host = host self.database = database self.port = port self.user = user self.password = password self.ssl = ssl self.charset = charset class SQLServerDB(DB): def __init__(self, host: str = None, port: int = None, database: str = None, user: str = None, password: str = None, odbc_driver: str = None): # NOTE: The support for named instances is not added because the command `sqsh` does not support it self.host = host self.port = port self.database = database self.user = user self.password = password if odbc_driver is None: self.odbc_driver = 'ODBC Driver 17 for SQL Server' # default odbc driver else: self.odbc_driver = odbc_driver class OracleDB(DB): def __init__(self, host: str = None, port: int = 0, endpoint: str = None, user: str = None, password: str = None): self.host = 
host self.port = port self.endpoint = endpoint self.user = user self.password = password class SQLiteDB(DB): def __init__(self, file_name: pathlib.Path) -> None: self.file_name = file_name
{ "repo_name": "mara/mara-db", "path": "mara_db/dbs.py", "copies": "1", "size": "5021", "license": "mit", "hash": -1206131181235530200, "line_mean": 37.9224806202, "line_max": 144, "alpha_frac": 0.6122286397, "autogenerated": false, "ratio": 4.013589128697042, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0018129628165835412, "num_lines": 129 }
# Abstract Duck class with subclasses
#
# Strategy-pattern demo: a Duck delegates flying and quacking to
# interchangeable behaviour objects that can be swapped at runtime.

from .fly_behaviours import FlyNoWay, FlyWithWings
from .quack_behaviours import Quack


# Abstract Duck class
class Duck:
    """Base duck: holds pluggable fly/quack behaviours.

    Subclasses must install concrete behaviour objects (in __init__ or via
    the setters) before perform_quack()/perform_fly() are called, and must
    override display().
    """

    def __init__(self):
        # Behaviours start unset; perform_*() would raise AttributeError
        # until concrete strategies are installed.
        self.quack_behaviour = None
        self.fly_behaviour = None

    def perform_quack(self):
        """Delegate quacking to the current quack behaviour."""
        self.quack_behaviour.quack()

    def perform_fly(self):
        """Delegate flying to the current fly behaviour."""
        self.fly_behaviour.fly()

    def set_quack_behaviour(self, qb):
        """Swap in a new quack behaviour at runtime."""
        self.quack_behaviour = qb

    def set_fly_behaviour(self, fb):
        """Swap in a new fly behaviour at runtime."""
        self.fly_behaviour = fb

    def swim(self):
        print("All ducks float, even decoys!")

    def display(self):
        """Subclass responsibility — identify this duck."""
        raise NotImplementedError()


class ModelDuck(Duck):
    """A decoy duck: cannot fly, but quacks."""

    def __init__(self):
        # BUG FIX: was `super(Duck, self).__init__()`, which resolves past
        # Duck in the MRO (to object.__init__) and silently skips
        # Duck.__init__.  Also matches MallardDuck's correct call style.
        super(ModelDuck, self).__init__()
        self.fly_behaviour = FlyNoWay()
        self.quack_behaviour = Quack()

    def display(self):
        print("I'm a model duck")


class MallardDuck(Duck):
    """A real mallard: quacks and flies with wings."""

    def __init__(self):
        super(MallardDuck, self).__init__()
        self.quack_behaviour = Quack()
        self.fly_behaviour = FlyWithWings()
{ "repo_name": "ajbrzoz/Head-First-Design-Patterns", "path": "strategy/ducks.py", "copies": "1", "size": "1086", "license": "mit", "hash": 3818985126727487000, "line_mean": 22.1063829787, "line_max": 50, "alpha_frac": 0.6040515654, "autogenerated": false, "ratio": 3.2612612612612613, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4365312826661261, "avg_score": null, "num_lines": null }
"""Abstract factory example.""" import abc import argparse from patterns.abstractf import AbstractFactory class Greeting(metaclass=abc.ABCMeta): """Abstract greeting class.""" def __init__(self): """Make a Greeting object.""" pass @abc.abstractmethod def greet(self): """Print greeting to sys.stdout (abstract method).""" pass class EnglishGreeting(Greeting): """English greeting class.""" def greet(self): """Print English greeting to sys.stdout.""" print('Hello!') class MandarinGreeting(Greeting): """Mandarin greeting class.""" def greet(self): """Print Mandarin greeting to sys.stdout.""" print('Ni hao!') def chooser(): """Abstract factory chooser.""" if lang == 'english': return EnglishGreeting elif lang == 'mandarin': return MandarinGreeting arg_parser = argparse.ArgumentParser(description='give a greeting') arg_parser.add_argument('language', choices=['english', 'mandarin'], help='language to give greeting in') lang = arg_parser.parse_args().language greeting_factory = AbstractFactory(chooser) greeting_factory().greet()
{ "repo_name": "jmanuel1/patterns", "path": "examples/abstractf.py", "copies": "1", "size": "1205", "license": "mit", "hash": 4728653018725022000, "line_mean": 20.9090909091, "line_max": 68, "alpha_frac": 0.6423236515, "autogenerated": false, "ratio": 4.043624161073826, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 55 }
"""`AbstractFactory` providers example.""" import abc import dataclasses import random from typing import List from dependency_injector import containers, providers class AbstractCacheClient(metaclass=abc.ABCMeta): ... @dataclasses.dataclass class RedisCacheClient(AbstractCacheClient): host: str port: int db: int @dataclasses.dataclass class MemcachedCacheClient(AbstractCacheClient): hosts: List[str] port: int prefix: str @dataclasses.dataclass class Service: cache: AbstractCacheClient class Container(containers.DeclarativeContainer): cache_client_factory = providers.AbstractFactory(AbstractCacheClient) service_factory = providers.Factory( Service, cache=cache_client_factory, ) if __name__ == '__main__': container = Container() cache_type = random.choice(['redis', 'memcached']) if cache_type == 'redis': container.cache_client_factory.override( providers.Factory( RedisCacheClient, host='localhost', port=6379, db=0, ), ) elif cache_type == 'memcached': container.cache_client_factory.override( providers.Factory( MemcachedCacheClient, hosts=['10.0.1.1'], port=11211, prefix='my_app', ), ) service = container.service_factory() print(service.cache) # The output depends on cache_type variable value. # # If the value is 'redis': # RedisCacheClient(host='localhost', port=6379, db=0) # # If the value is 'memcached': # MemcachedCacheClient(hosts=['10.0.1.1'], port=11211, prefix='my_app') # # If the value is None: # Error: AbstractFactory(<class '__main__.AbstractCacheClient'>) must be # overridden before calling
{ "repo_name": "ets-labs/python-dependency-injector", "path": "examples/providers/abstract_factory.py", "copies": "2", "size": "1880", "license": "bsd-3-clause", "hash": -9022913677638524000, "line_mean": 22.7974683544, "line_max": 76, "alpha_frac": 0.6234042553, "autogenerated": false, "ratio": 4.168514412416852, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5791918667716851, "avg_score": null, "num_lines": null }
"""`AbstractFactory` providers example.""" import cache import dependency_injector.providers as providers # Define abstract cache client factory: cache_client_factory = providers.AbstractFactory(cache.AbstractCacheClient) if __name__ == '__main__': # Override abstract factory with redis client factory: cache_client_factory.override( providers.Factory( cache.RedisCacheClient, host='localhost', port=6379, db=0, ), ) redis_cache = cache_client_factory() print(redis_cache) # <cache.RedisCacheClient object at 0x10975bc50> # Override abstract factory with memcache client factory: cache_client_factory.override( providers.Factory( cache.MemcacheCacheClient, hosts=['10.0.1.1', '10.0.1.2', '10.0.1.3'], port=11211, prefix='my_app', ), ) memcache_cache = cache_client_factory() print(memcache_cache) # <cache.MemcacheCacheClient object at 0x10975bc90>
{ "repo_name": "rmk135/dependency_injector", "path": "examples/providers/abstract_factory/example.py", "copies": "2", "size": "1032", "license": "bsd-3-clause", "hash": -2189781245469923800, "line_mean": 27.6666666667, "line_max": 75, "alpha_frac": 0.6346899225, "autogenerated": false, "ratio": 3.954022988505747, "config_test": false, "has_no_keywords": true, "few_assignments": false, "quality_score": 0.5588712911005748, "avg_score": null, "num_lines": null }
# Abstract - Genetic Algorithm Learning
#
# Problem statement: Given a sequence of numbers of length n generate the best
# candidate sequence whose sum is equal to X
#
# Terms in genetic algorithms:
# * Individual or Candidate: one suggested solution -- here, a list of numbers.
# * Population: the collection of all individuals.
# * Fitness function: scores an individual by how close it gets to the target.
# * Evolve function: produces a new, hopefully fitter, population.
#
# NOTE: ported from Python 2 (print statements, builtin `reduce`) to Python 3.

from random import randint


def individual(length, min, max):
    """Create one candidate: `length` random integers in [min, max].

    (`min`/`max` shadow builtins but are kept for backward compatibility
    with existing keyword callers.)
    """
    return [randint(min, max) for _ in range(length)]


def population(count, length, min, max):
    """Create a population of random individuals.

    count : the number of individuals in a population
    length : the length of each individual
    min : minimum range of individuals value
    max : maximum range of individuals value
    """
    return [individual(length, min, max) for _ in range(count)]


def fitness(individual, target):
    """Distance of the individual's sum from `target`; 0 is a perfect fit.

    individual : the individual to evaluate (a list)
    target : the target sum the individuals are aiming for
    """
    # sum() replaces the former reduce(add, individual, 0): the Python 2
    # builtin `reduce` no longer exists in Python 3, and sum is clearer.
    return abs(target - sum(individual))


def fitness_for_pop(pop, target):
    """Fitness score of every individual in the population."""
    return [fitness(x, target) for x in pop]


def evolve(pop, target):
    """Return the (best, worst) fitness scores present in the population."""
    scores = fitness_for_pop(pop, target)
    return (min(scores), max(scores))


def grade(pop, target):
    """Average fitness of the population (lower is better).

    BUG FIX: the original computed `summed / len(pop) * 1.0`, which under
    Python 2 floor-divided before multiplying by 1.0, yielding the floor of
    the mean; true division gives the intended average.
    """
    return sum(fitness(x, target) for x in pop) / len(pop)


pop = population(4, 5, 0, 100)
target = 200

print(fitness_for_pop(pop, 200))
print(evolve(pop, target))
# print(grade(pop, target))
# print(pop)
{ "repo_name": "creativcoder/AlgorithmicProblems", "path": "Python/genetic.py", "copies": "1", "size": "2003", "license": "mit", "hash": 2564486660046518000, "line_mean": 23.4268292683, "line_max": 168, "alpha_frac": 0.7224163754, "autogenerated": false, "ratio": 3.179365079365079, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4401781454765079, "avg_score": null, "num_lines": null }
"""Abstract graphs in ReGraph. This module contains abstract classes for graph objects in ReGraph. Such graph objects represent simple graphs with dictionary-like attributes on nodes and edges. """ import json import os import warnings from abc import ABC, abstractmethod from regraph.exceptions import (ReGraphError, GraphError, GraphAttrsWarning, ) from regraph.utils import (load_nodes_from_json, load_edges_from_json, generate_new_id, normalize_attrs, safe_deepcopy_dict, set_attrs, add_attrs, remove_attrs, merge_attributes, keys_by_value, ) class Graph(ABC): """Abstract class for graph objects in ReGraph.""" @abstractmethod def nodes(self, data=False): """Return the list of nodes.""" pass @abstractmethod def edges(self, data=False): """Return the list of edges.""" pass @abstractmethod def get_node(self, n): """Get node attributes. Parameters ---------- s : hashable Source node id. t : hashable, Target node id. """ pass @abstractmethod def get_edge(self, s, t): """Get edge attributes. Parameters ---------- s : hashable Source node id. t : hashable Target node id. """ pass @abstractmethod def add_node(self, node_id, attrs=None): """Abstract method for adding a node. Parameters ---------- node_id : hashable Prefix that is prepended to the new unique name. attrs : dict, optional Node attributes. """ pass @abstractmethod def remove_node(self, node_id): """Remove node. Parameters ---------- node_id : hashable Node to remove. """ pass @abstractmethod def add_edge(self, s, t, attrs=None, **attr): """Add an edge to a graph. Parameters ---------- s : hashable Source node id. t : hashable Target node id. attrs : dict Edge attributes. """ pass @abstractmethod def remove_edge(self, source_id, target_id): """Remove edge from the graph. Parameters ---------- s : hashable Source node id. t : hashable Target node id. """ pass @abstractmethod def update_node_attrs(self, node_id, attrs, normalize=True): """Update attributes of a node. 
Parameters ---------- node_id : hashable Node to update. attrs : dict New attributes to assign to the node """ pass @abstractmethod def update_edge_attrs(self, s, t, attrs, normalize=True): """Update attributes of a node. Parameters ---------- s : hashable Source node of the edge to update. t : hashable Target node of the edge to update. attrs : dict New attributes to assign to the node """ pass @abstractmethod def successors(self, node_id): """Return the set of successors.""" pass @abstractmethod def predecessors(self, node_id): """Return the set of predecessors.""" pass @abstractmethod def find_matching(self, pattern, nodes=None): """Find matching of a pattern in a graph.""" pass def print_graph(self): """Pretty-print the graph.""" print("\nNodes:\n") for n in self.nodes(): print(n, " : ", self.get_node(n)) print("\nEdges:\n") for (n1, n2) in self.edges(): print(n1, '->', n2, ' : ', self.get_edge(n1, n2)) return def __str__(self): """String representation of the graph.""" return "Graph({} nodes, {} edges)".format( len(self.nodes()), len(self.edges())) def __eq__(self, graph): """Eqaulity operator. Parameters ---------- graph : regraph.Graph Another graph object Returns ------- bool True if two graphs are equal, False otherwise. """ if set(self.nodes()) != set(graph.nodes()): return False if set(self.edges()) != set(graph.edges()): return False for node in self.nodes(): if self.get_node(node) != graph.get_node(node): return False for s, t in self.edges(): if self.get_edge(s, t) != graph.get_edge(s, t): return False return True def __ne__(self, graph): """Non-equality operator.""" return not (self == graph) def get_node_attrs(self, n): """Get node attributes. Parameters ---------- n : hashable Node id. """ return self.get_node(n) def get_edge_attrs(self, s, t): """Get edge attributes. Parameters ---------- graph : networkx.(Di)Graph s : hashable, source node id. t : hashable, target node id. 
""" return self.get_edge(s, t) def in_edges(self, node_id): """Return the set of in-coming edges.""" return [(p, node_id) for p in self.predecessors(node_id)] def out_edges(self, node_id): """Return the set of out-going edges.""" return [(node_id, s) for s in self.successors(node_id)] def add_nodes_from(self, node_list): """Add nodes from a node list. Parameters ---------- node_list : iterable Iterable containing a collection of nodes, optionally, with their attributes """ for n in node_list: if type(n) != str: try: node_id, node_attrs = n self.add_node(node_id, node_attrs) except (TypeError, ValueError): self.add_node(n) else: self.add_node(n) def add_edges_from(self, edge_list): """Add edges from an edge list. Parameters ---------- edge_list : iterable Iterable containing a collection of edges, optionally, with their attributes """ for e in edge_list: if len(e) == 2: self.add_edge(e[0], e[1]) elif len(e) == 3: self.add_edge(e[0], e[1], e[2]) else: raise ReGraphError( "Was expecting 2 or 3 elements per tuple, got %s." % str(len(e)) ) def exists_edge(self, s, t): """Check if an edge exists. Parameters ---------- s : hashable Source node id. t : hashable Target node id. """ # print("\n\n\n\n\n\n\n\n\n\n", s, t, self.edges()) return((s, t) in self.edges()) def set_node_attrs(self, node_id, attrs, normalize=True, update=True): """Set node attrs. Parameters ---------- node_id : hashable Id of the node to update attrs : dict Dictionary with new attributes to set normalize : bool, optional Flag, when set to True attributes are normalized to be set-valued. True by default update : bool, optional Flag, when set to True attributes whose keys are not present in attrs are removed, True by default Raises ------ GraphError If a node `node_id` does not exist. 
""" if node_id not in self.nodes(): raise GraphError("Node '{}' does not exist!".format(node_id)) node_attrs = safe_deepcopy_dict(self.get_node(node_id)) set_attrs(node_attrs, attrs, normalize, update) self.update_node_attrs(node_id, node_attrs, normalize) def add_node_attrs(self, node, attrs): """Add new attributes to a node. Parameters ---------- node : hashable Id of a node to add attributes to. attrs : dict Attributes to add. Raises ------ GraphError If a node `node_id` does not exist. """ if node not in self.nodes(): raise GraphError("Node '{}' does not exist!".format(node)) node_attrs = safe_deepcopy_dict(self.get_node(node)) add_attrs(node_attrs, attrs, normalize=True) self.update_node_attrs(node, node_attrs) def remove_node_attrs(self, node_id, attrs): """Remove attrs of a node specified by attrs_dict. Parameters ---------- node_id : hashable Node whose attributes to remove. attrs : dict Dictionary with attributes to remove. Raises ------ GraphError If a node with the specified id does not exist. """ if node_id not in self.nodes(): raise GraphError("Node '%s' does not exist!" % str(node_id)) elif attrs is None: warnings.warn( "You want to remove attrs from '{}' with an empty attrs_dict!".format( node_id), GraphAttrsWarning ) node_attrs = safe_deepcopy_dict(self.get_node(node_id)) remove_attrs(node_attrs, attrs, normalize=True) self.update_node_attrs(node_id, node_attrs) def set_edge_attrs(self, s, t, attrs, normalize=True, update=True): """Set edge attrs. Parameters ---------- attrs : dict Dictionary with new attributes to set normalize : bool, optional Flag, when set to True attributes are normalized to be set-valued. True by default update : bool, optional Flag, when set to True attributes whose keys are not present in attrs are removed, True by default Raises ------ GraphError If an edge between `s` and `t` does not exist. 
""" if not self.exists_edge(s, t): raise GraphError( "Edge {}->{} does not exist".format(s, t)) edge_attrs = safe_deepcopy_dict(self.get_edge(s, t)) set_attrs(edge_attrs, attrs, normalize, update) self.update_edge_attrs(s, t, edge_attrs, normalize=normalize) def set_edge(self, s, t, attrs, normalize=True, update=True): """Set edge attrs. Parameters ---------- s : hashable Source node id. t : hashable Target node id. attrs : dictionary Dictionary with attributes to set. Raises ------ GraphError If an edge between `s` and `t` does not exist. """ self.set_edge_attrs(s, t, attrs, normalize, update) def add_edge_attrs(self, s, t, attrs): """Add attributes of an edge in a graph. Parameters ---------- s : hashable Source node id. t : hashable Target node id. attrs : dict Dictionary with attributes to remove. Raises ------ GraphError If an edge between `s` and `t` does not exist. """ if not self.exists_edge(s, t): raise GraphError( "Edge {}->{} does not exist".format(s, t)) edge_attrs = safe_deepcopy_dict(self.get_edge(s, t)) add_attrs(edge_attrs, attrs, normalize=True) self.update_edge_attrs(s, t, edge_attrs) def remove_edge_attrs(self, s, t, attrs): """Remove attrs of an edge specified by attrs. Parameters ---------- s : hashable Source node id. t : hashable Target node id. attrs : dict Dictionary with attributes to remove. Raises ------ GraphError If an edge between `s` and `t` does not exist. """ if not self.exists_edge(s, t): raise GraphError( "Edge {}->{} does not exist".format(s, t)) edge_attrs = safe_deepcopy_dict(self.get_edge(s, t)) remove_attrs(edge_attrs, attrs, normalize=True) self.update_edge_attrs(s, t, edge_attrs) def clone_node(self, node_id, name=None): """Clone node. Create a new node, a copy of a node with `node_id`, and reconnect it with all the adjacent nodes of `node_id`. Parameters ---------- node_id : hashable, Id of a node to clone. name : hashable, optional Id for the clone, if is not specified, new id will be generated. 
Returns ------- new_node : hashable Id of the new node corresponding to the clone Raises ------ GraphError If node wiht `node_id` does not exists or a node with `name` (clone's name) already exists. """ if node_id not in self.nodes(): raise GraphError("Node '{}' does not exist!".format(node_id)) # generate new name for a clone if name is None: i = 1 new_node = str(node_id) + str(i) while new_node in self.nodes(): i += 1 new_node = str(node_id) + str(i) else: if name in self.nodes(): raise GraphError("Node '{}' already exists!".format(name)) else: new_node = name self.add_node(new_node, self.get_node(node_id)) # Connect all the edges self.add_edges_from( set([(n, new_node) for n, _ in self.in_edges(node_id) if (n, new_node) not in self.edges()])) self.add_edges_from( set([(new_node, n) for _, n in self.out_edges(node_id) if (new_node, n) not in self.edges()])) # Copy the attributes of the edges for s, t in self.in_edges(node_id): self.set_edge( s, new_node, safe_deepcopy_dict(self.get_edge(s, t))) for s, t in self.out_edges(node_id): self.set_edge( new_node, t, safe_deepcopy_dict(self.get_edge(s, t))) return new_node def relabel_node(self, node_id, new_id): """Relabel a node in the graph. Parameters ---------- node_id : hashable Id of the node to relabel. new_id : hashable New label of a node. """ if new_id in self.nodes(): raise ReGraphError( "Cannot relabel '{}' to '{}', '{}' ".format( node_id, new_id, new_id) + "already exists in the graph") self.clone_node(node_id, new_id) self.remove_node(node_id) def merge_nodes(self, nodes, node_id=None, method="union", edge_method="union"): """Merge a list of nodes. Parameters ---------- nodes : iterable Collection of node id's to merge. node_id : hashable, optional Id of a new node corresponding to the result of merge. 
method : optional Method of node attributes merge: if `"union"` the resulting node will contain the union of all attributes of the merged nodes, if `"intersection"`, the resulting node will contain their intersection. Default value is `"union"`. edge_method : optional Method of edge attributes merge: if `"union"` the edges that were merged will contain the union of all attributes, if `"intersection"` -- their ntersection. Default value is `"union"`. """ if len(nodes) > 1: if method is None: method = "union" if edge_method is None: method = "union" # Generate name for new node if node_id is None: node_id = "_".join(sorted([str(n) for n in nodes])) if node_id in self.nodes(): node_id = self.generate_new_node_id(node_id) elif node_id in self.nodes() and (node_id not in nodes): raise GraphError( "New name for merged node is not valid: " "node with name '%s' already exists!" % node_id ) # Merge data attached to node according to the method specified # restore proper connectivity if method == "union": attr_accumulator = {} elif method == "intersection": attr_accumulator = safe_deepcopy_dict( self.get_node(nodes[0])) else: raise ReGraphError("Merging method '{}' is not defined!".format( method)) self_loop = False self_loop_attrs = {} source_nodes = set() target_nodes = set() source_dict = {} target_dict = {} for node in nodes: attr_accumulator = merge_attributes( attr_accumulator, self.get_node(node), method) in_edges = self.in_edges(node) out_edges = self.out_edges(node) # manage self loops for s, t in in_edges: if s in nodes: self_loop = True if len(self_loop_attrs) == 0: self_loop_attrs = self.get_edge(s, t) else: self_loop_attrs = merge_attributes( self_loop_attrs, self.get_edge(s, t), edge_method) for s, t in out_edges: if t in nodes: self_loop = True if len(self_loop_attrs) == 0: self_loop_attrs = self.get_edge(s, t) else: self_loop_attrs = merge_attributes( self_loop_attrs, self.get_edge(s, t), edge_method) source_nodes.update( [n if n not in nodes else node_id for 
n, _ in in_edges]) target_nodes.update( [n if n not in nodes else node_id for _, n in out_edges]) for edge in in_edges: if not edge[0] in source_dict.keys(): attrs = self.get_edge(edge[0], edge[1]) source_dict.update({edge[0]: attrs}) else: attrs = merge_attributes( source_dict[edge[0]], self.get_edge(edge[0], edge[1]), edge_method) source_dict.update({edge[0]: attrs}) for edge in out_edges: if not edge[1] in target_dict.keys(): attrs = self.get_edge(edge[0], edge[1]) target_dict.update({edge[1]: attrs}) else: attrs = merge_attributes( target_dict[edge[1]], self.get_edge(edge[0], edge[1]), edge_method) target_dict.update({edge[1]: attrs}) self.remove_node(node) self.add_node(node_id, attr_accumulator) if self_loop: self.add_edges_from([(node_id, node_id)]) self.set_edge(node_id, node_id, self_loop_attrs) for n in source_nodes: if not self.exists_edge(n, node_id): self.add_edge(n, node_id) for n in target_nodes: if not self.exists_edge(node_id, n): self.add_edge(node_id, n) # Attach accumulated attributes to edges for node, attrs in source_dict.items(): if node not in nodes: self.set_edge(node, node_id, attrs) for node, attrs in target_dict.items(): if node not in nodes: self.set_edge(node_id, node, attrs) return node_id else: raise ReGraphError( "More than two nodes should be specified for merging!") def copy_node(self, node_id, copy_id=None): """Copy node. Create a copy of a node in a graph. A new id for the copy is generated by regraph.primitives.unique_node_id. Parameters ---------- node_id : hashable Node to copy. Returns ------- new_name Id of the copy node. """ if copy_id is None: copy_id = self.generate_new_node_id(node_id) if copy_id in self.nodes(): raise ReGraphError( "Cannot create a copy of '{}' with id '{}', ".format( node_id, copy_id) + "node '{}' already exists in the graph".format(copy_id)) attrs = self.get_node(node_id) self.add_node(copy_id, attrs) return copy_id def relabel_nodes(self, mapping): """Relabel graph nodes inplace given a mapping. 
Similar to networkx.relabel.relabel_nodes: https://networkx.github.io/documentation/development/_modules/networkx/relabel.html Parameters ---------- mapping: dict A dictionary with keys being old node ids and their values being new id's of the respective nodes. Raises ------ ReGraphError If new id's do not define a set of distinct node id's. """ unique_names = set(mapping.values()) if len(unique_names) != len(self.nodes()): raise ReGraphError( "Attempt to relabel nodes failed: the IDs are not unique!") temp_names = {} # Relabeling of the nodes: if at some point new ID conflicts # with already existing ID - assign temp ID for key, value in mapping.items(): if key != value: if value not in self.nodes(): new_name = value else: new_name = self.generate_new_node_id(value) temp_names[new_name] = value self.relabel_node(key, new_name) # Relabeling the nodes with the temp ID to their new IDs for key, value in temp_names.items(): if key != value: self.relabel_node(key, value) return def generate_new_node_id(self, basename): """Generate new unique node identifier.""" return generate_new_id(self.nodes(), basename) def filter_edges_by_attributes(self, attr_key, attr_cond): """Filter graph edges by attributes. Removes all the edges of the graph (inplace) that do not satisfy `attr_cond`. Parameters ---------- attrs_key : hashable Attribute key attrs_cond : callable Condition for an attribute to satisfy: callable that returns `True` if condition is satisfied, `False` otherwise. 
""" for (s, t) in self.edges(): edge_attrs = self.get_edge(s, t) if (attr_key not in edge_attrs.keys() or not attr_cond(edge_attrs[attr_key])): self.remove_edge(s, t) def to_json(self): """Create a JSON representation of a graph.""" j_data = {"edges": [], "nodes": []} # dump nodes for node in self.nodes(): node_data = {} node_data["id"] = node node_attrs = self.get_node(node) if node_attrs is not None: attrs = {} for key, value in node_attrs.items(): attrs[key] = value.to_json() node_data["attrs"] = attrs j_data["nodes"].append(node_data) # dump edges for s, t in self.edges(): edge_data = {} edge_data["from"] = s edge_data["to"] = t edge_attrs = self.get_edge(s, t) if edge_attrs is not None: attrs = {} for key, value in edge_attrs.items(): attrs[key] = value.to_json() edge_data["attrs"] = attrs j_data["edges"].append(edge_data) return j_data def to_d3_json(self, attrs=True, node_attrs_to_attach=None, edge_attrs_to_attach=None, nodes=None): """Create a JSON representation of a graph.""" j_data = {"links": [], "nodes": []} if nodes is None: nodes = self.nodes() # dump nodes for node in nodes: node_data = {} node_data["id"] = node if attrs: node_attrs = self.get_node(node) normalize_attrs(node_attrs) attrs_json = dict() for key, value in node_attrs.items(): attrs_json[key] = value.to_json() node_data["attrs"] = attrs_json else: node_attrs = self.get_node(node) if node_attrs_to_attach is not None: for key in node_attrs_to_attach: if key in node_attrs.keys(): node_data[key] = list(node_attrs[key]) j_data["nodes"].append(node_data) # dump edges for s, t in self.edges(): if s in nodes and t in nodes: edge_data = {} edge_data["source"] = s edge_data["target"] = t if attrs: edge_attrs = self.get_edge(s, t) normalize_attrs(edge_attrs) attrs_json = dict() for key, value in edge_attrs.items(): attrs_json[key] = value.to_json() edge_data["attrs"] = attrs_json else: if edge_attrs_to_attach is not None: for key in edge_attrs_to_attach: edge_attrs = self.get_edge(s, t) if key in 
edge_attrs.keys(): edge_data[key] = list(edge_attrs[key]) j_data["links"].append(edge_data) return j_data def export(self, filename): """Export graph to JSON file. Parameters ---------- filename : str Name of the file to save the json serialization of the graph """ with open(filename, 'w') as f: j_data = self.to_json() json.dump(j_data, f) return @classmethod def from_json(cls, json_data): """Create a NetworkX graph from a json-like dictionary. Parameters ---------- json_data : dict JSON-like dictionary with graph representation """ graph = cls() graph.add_nodes_from(load_nodes_from_json(json_data)) graph.add_edges_from(load_edges_from_json(json_data)) return graph @classmethod def load(cls, filename): """Load a graph from a JSON file. Create a `networkx.(Di)Graph` object from a JSON representation stored in a file. Parameters ---------- filename : str Name of the file to load the json serialization of the graph Returns ------- Graph object Raises ------ ReGraphError If was not able to load the file """ if os.path.isfile(filename): with open(filename, "r+") as f: j_data = json.loads(f.read()) return cls.from_json(j_data) else: raise ReGraphError( "Error loading graph: file '{}' does not exist!".format( filename) ) def rewrite(self, rule, instance=None): """Perform SqPO rewiting of the graph with a rule. Parameters ---------- rule : regraph.Rule SqPO rewriting rule instance : dict, optional Instance of the input rule. 
If not specified, the identity map of the rule's left-hand side is used """ if instance is None: instance = { n: n for n in self.lhs.nodes() } # Restrictive phase p_g = dict() cloned_lhs_nodes = set() # Clone nodes for lhs, p_nodes in rule.cloned_nodes().items(): for i, p in enumerate(p_nodes): if i == 0: p_g[p] = instance[lhs] cloned_lhs_nodes.add(lhs) else: clone_id = self.clone_node(instance[lhs]) p_g[p] = clone_id # Delete nodes and add preserved nodes to p_g dictionary removed_nodes = rule.removed_nodes() for n in rule.lhs.nodes(): if n in removed_nodes: self.remove_node(instance[n]) elif n not in cloned_lhs_nodes: p_g[keys_by_value(rule.p_lhs, n)[0]] =\ instance[n] # Delete edges for u, v in rule.removed_edges(): self.remove_edge(p_g[u], p_g[v]) # Remove node attributes for p_node, attrs in rule.removed_node_attrs().items(): self.remove_node_attrs( p_g[p_node], attrs) # Remove edge attributes for (u, v), attrs in rule.removed_edge_attrs().items(): self.remove_edge_attrs(p_g[u], p_g[v], attrs) # Expansive phase rhs_g = dict() merged_nodes = set() # Merge nodes for rhs, p_nodes in rule.merged_nodes().items(): merge_id = self.merge_nodes( [p_g[p] for p in p_nodes]) merged_nodes.add(rhs) rhs_g[rhs] = merge_id # Add nodes and add preserved nodes to rhs_g dictionary added_nodes = rule.added_nodes() for n in rule.rhs.nodes(): if n in added_nodes: if n in self.nodes(): new_id = self.generate_new_node_id(n) else: new_id = n new_id = self.add_node(new_id) rhs_g[n] = new_id elif n not in merged_nodes: rhs_g[n] = p_g[keys_by_value(rule.p_rhs, n)[0]] # Add edges for u, v in rule.added_edges(): if (rhs_g[u], rhs_g[v]) not in self.edges(): self.add_edge(rhs_g[u], rhs_g[v]) # Add node attributes for rhs_node, attrs in rule.added_node_attrs().items(): self.add_node_attrs( rhs_g[rhs_node], attrs) # Add edge attributes for (u, v), attrs in rule.added_edge_attrs().items(): self.add_edge_attrs( rhs_g[u], rhs_g[v], attrs) return rhs_g def number_of_edges(self, u, v): """Return 
number of directed edges from u to v.""" return 1 def ancestors(self, t): """Return the set of ancestors.""" current_level = set(self.predecessors(t)) visited = set() while len(current_level) > 0: next_level = set() for el in current_level: if el not in visited: visited.add(el) next_level.update([ p for p in self.predecessors(el) if p not in visited]) current_level = next_level return visited def descendants(self, s): """Return the set of ancestors.""" current_level = set(self.successors(s)) visited = set() while len(current_level) > 0: next_level = set() for el in current_level: if el not in visited: visited.add(el) next_level.update([ p for p in self.successors(el) if p not in visited]) current_level = next_level return visited
{ "repo_name": "Kappa-Dev/ReGraph", "path": "regraph/graphs.py", "copies": "1", "size": "33284", "license": "mit", "hash": 3002577695356826600, "line_mean": 30.4891201514, "line_max": 91, "alpha_frac": 0.4856988343, "autogenerated": false, "ratio": 4.416080668701075, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5401779503001075, "avg_score": null, "num_lines": null }
"""Abstract grid representations.""" from pygame import Rect class Grid (object): """A representation of a 2D grid of rectangular integer-sized tiles. Used for aligning mouse input, graphics, etc. on a grid. Grid(ntiles, tile_size, gap = 0) :arg ntiles: ``(x, y)`` number of tiles in the grid, or a single number for a square grid. :arg tile_size: ``(tile_width, tile_height)`` integers giving the size of every tile, or a single number for square tiles. ``tile_width`` and ``tile_height`` can also be functions that take the column/row index and return the width/height of that column/row respectively, or lists (or anything supporting indexing) that perform the same task. :arg gap: ``(col_gap, row_gap)`` integers giving the gap between columns and rows respectively, or a single number for the same gap in both cases. As with ``tile_size``, this can be a tuple of functions (or lists) which take the index of the preceding column/row and return the gap size. ``col`` and ``row`` arguments to all methods may be negative to wrap from the end of the row/column, like list indices. """ def __init__ (self, ntiles, tile_size, gap = 0): if isinstance(ntiles, int): ntiles = (ntiles, ntiles) else: ntiles = tuple(ntiles[:2]) #: The ``(x, y)`` number of tiles in the grid. 
self.ntiles = ntiles def expand (obj, length): # expand an int/list/function to the given length if isinstance(obj, int): return (obj,) * length elif callable(obj): return tuple(obj(i) for i in xrange(length)) else: return tuple(obj[:length]) if isinstance(tile_size, int) or callable(tile_size): tx = ty = tile_size else: tx, ty = tile_size self._tile_size = (expand(tx, ntiles[0]), expand(ty, ntiles[1])) if isinstance(gap, int) or callable(tile_size): gx = gy = gap else: gx, gy = gap self._gap = (expand(gx, ntiles[0] - 1), expand(gy, ntiles[1] - 1)) @property def ncols (self): """The number of tiles in a row.""" return self.ntiles[0] @property def nrows (self): """The number of tiles in a column.""" return self.ntiles[1] def _size (self, axis): return sum(self._tile_size[axis]) + sum(self._gap[axis]) @property def w (self): """The total width of the grid.""" return self._size(0) @property def h (self): """The total height of the grid.""" return self._size(1) @property def size (self): """The total ``(width, height)`` size of the grid.""" return (self.w, self.h) def _tile_pos (self, axis, index): return sum(ts + gap for ts, gap in zip(self._tile_size[axis][:index], self._gap[axis][:index])) def tile_x (self, col): """Get the x position of the tile in the column with the given index. This is the position of the left side of the tile relative to the left side of the grid. """ return self._tile_pos(0, col) def tile_y (self, row): """Get the y position of the tile in the row with the given index. This is the position of the top side of the tile relative to the top side of the grid. """ return self._tile_pos(1, row) def tile_pos (self, col, row): """Get the ``(x, y)`` position of the tile in the given column and row. This is the top-left corner of the tile relative to the top-left corner of the grid. 
""" return (self.tile_x(col), self.tile_y(row)) def tile_size (self, col, row): """Get the ``(width, height)`` size of the given tile.""" return (self._tile_size[0][col], self._tile_size[1][row]) def tile_rect (self, col, row): """Get a Pygame rect for the tile in the given column and row. This is relative to the top-left corner of the grid. """ return Rect(self.tile_pos(col, row), self.tile_size(col, row)) def tile_rects (self, pos=False): """Iterator over :meth:`tile_rect` for all tiles. :arg pos: whether to yield ``(col, row, tile_rect)`` instead of just ``tile_rect``. """ ts = self._tile_size gap = self._gap x = 0 # add extra element to gap so we iterate over the last tile for col, (w, gap_x) in enumerate(zip(ts[0], gap[0] + (0,))): y = 0 for row, (h, gap_y) in enumerate(zip(ts[1], gap[1] + (0,))): r = Rect(x, y, w, h) yield (col, row, r) if pos else r y += h + gap_y x += w + gap_x def tile_at (self, x, y): """Return the ``(col, row)`` tile at the point ``(x, y)``, or ``None``.""" if x < 0 or y < 0: return None pos = (x, y) tile = [] for axis, pos in enumerate((x, y)): current_pos = 0 ts = self._tile_size[axis] gap = self._gap[axis] + (0,) for i in xrange(self.ntiles[axis]): current_pos += ts[i] # now we're at the end of a tile if current_pos > pos: # pos is within the previous tile tile.append(i) break current_pos += gap[i] # now we're at the start of a tile if current_pos > pos: # pos is within the previous gap return None else: # didn't find a tile: point is past the end return None return tuple(tile) def align (self, graphic, col, row, alignment=0, pad=0, offset=0): """Align a graphic or surface within a tile. align(self, graphic, col, row, alignment=0, pad=0, offset=0) -> aligned_rect ``alignment``, ``pad`` and ``offset`` are as taken by :func:`align_rect <engine.util.align_rect>`. :arg graphic: a :class:`gfx.Graphic <engine.gfx.graphic.Graphic>` instance or a Pygame surface. 
In the former case, the graphic is moved (but it is not cropped to fit in the tile). :arg col: column of the tile. :arg row: row of the tile. :return: a Pygame rect clipped within the tile giving the area the graphic should be put in. """ if isinstance(graphic, Graphic): rect = graphic.rect else: rect = graphic.get_rect() pos = align_rect(rect, self.tile_rect(col, row), alignment, pad, offset) if isinstance(graphic, Graphic): graphic.pos = pos return Rect(pos, rect.size) class InfiniteGrid (object): """A representation of an infinite 2D grid of rectangular tiles. InfiniteGrid(tile_size, gap=0) :arg tile_size: ``(tile_width, tile_height)`` numbers giving the size of every tile, or a single number for square tiles. :arg gap: ``(col_gap, row_gap)`` numbers giving the gap between columns and rows respectively, or a single number for the same gap in both cases. The grid expands in all directions, so ``col`` and ``row`` arguments to methods may be negative, and tile/gap sizes may be floats. """ def __init__ (self, tile_size, gap=0): if isinstance(tile_size, (int, float)): tile_size = (tile_size, tile_size) else: tile_size = tuple(tile_size[:2]) if any(x < 0 for x in tile_size): raise ValueError('tile sizes must be positive') #: ``tile_size`` as taken by the constructor. self.tile_size = tile_size if isinstance(gap, (int, float)): gap = (gap, gap) else: gap = tuple(gap[:2]) if any(g < 0 for g in gap): raise ValueError('tile gaps must be positive') #: ``gap`` as taken by the constructor. self.gap = gap def tile_x (self, col): """Get the x position of the tile in the column with the given index. This is the position of the left side of the tile relative to the left side of column ``0``. """ return (self.tile_size[0] * self.gap[0]) * col def tile_y (self, row): """Get the y position of the tile in the row with the given index. This is the position of the top side of the tile relative to the top side of row ``0``. 
""" return (self.tile_size[1] * self.gap[1]) * row def tile_pos (self, col, row): """Get the ``(x, y)`` position of the tile in the given column and row. This is the top-left corner of the tile relative to the top-left corner of the tile ``(0, 0)``. """ return (self.tile_x(col), self.tile_y(row)) def tile_rect (self, col, row): """Get a Pygame-style rect for the tile in the given column and row. This is relative to tile ``(0, 0)``, and elements can be floats. """ return self.tile_pos(col, row) + self.tile_size def tile_rects (self, rect, pos=False): """Iterator over :meth:`tile_rect` for tiles that intersect ``rect``. :arg rect: ``(x, y, w, h)`` with elements possibly floats. :arg pos: whether to yield ``(col, row, tile_rect)`` instead of just ``tile_rect``. """ ts = self.tile_size gap = self.gap # compute offsets x0 = (rect[0] // (ts[0] + gap[0])) * (ts[0] + gap[0]) y0 = (rect[1] // (ts[1] + gap[1])) * (ts[1] + gap[1]) # do the loop xr = rect[0] + rect[2] yb = rect[1] + rect[3] x = x0 col = 0 while True: y = y0 row = 0 while True: yield (col, row, r) if pos else r y += ts[1] + gap[1] if y >= yb: break row += 1 x += ts[0] + gap[0] if x >= xr: break col += 1 def tile_at (self, x, y): """Return the ``(col, row)`` tile at the point ``(x, y)``, or ``None``. Returns ``None`` within gaps between tiles. """ ts = self.tile_size gap = self.gap pos = (x, y) tile = [] for axis in (0, 1): this_tile, offset = divmod(pos[axis], float(ts[axis] + gap[axis])) if offset < ts[axis]: # in the tile tile.append(this_tile) else: # in the gap return None return tuple(tile) def align (self, graphic, col, row, alignment=0, pad=0, offset=0): """Align a graphic or surface within a tile. align(self, graphic, col, row, alignment=0, pad=0, offset=0) -> aligned_rect ``alignment``, ``pad`` and ``offset`` are as taken by :func:`align_rect <engine.util.align_rect>`. :arg graphic: a :class:`gfx.Graphic <engine.gfx.graphic.Graphic>` instance or a Pygame surface. 
In the former case, the graphic is moved (but it is not cropped to fit in the tile). :arg col: column of the tile. :arg row: row of the tile. :return: a Pygame rect clipped within the tile giving the area the graphic should be put in. """ if isinstance(graphic, Graphic): rect = graphic.rect else: rect = graphic.get_rect() pos = align_rect(rect, self.tile_rect(col, row), alignment, pad, offset) if isinstance(graphic, Graphic): graphic.pos = pos return Rect(pos, rect.size)
{ "repo_name": "ikn/pygame-template", "path": "game/engine/util/grid.py", "copies": "1", "size": "11655", "license": "bsd-3-clause", "hash": 5091474990720968000, "line_mean": 31.9237288136, "line_max": 79, "alpha_frac": 0.5516087516, "autogenerated": false, "ratio": 3.6929657794676807, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9732604502967055, "avg_score": 0.002394005620125145, "num_lines": 354 }
"""Abstract Hacker News Item Author: Rylan Santinon """ from abc import ABCMeta, abstractmethod #pylint: disable=abstract-class-not-used class HnItem(object): """Abstract item""" __metaclass__ = ABCMeta @abstractmethod def get_schema(self): """Return the item's schema""" pass def is_deleted(self): """Return True if this item is deleted""" #pylint: disable=no-member return not not self.json.get('deleted') or \ not not self.json.get('dead') def get_field_by_name(self, name): """Get field by name Parameter ---------- name : str The name of the field Returns ------- str The value held in the field Raises ------ RuntimeError if `name` is not in the schema """ #pylint: disable=no-member schema = self.get_schema() if schema.has_field(name) or self.is_special_field(name): try: return self.json[name] except KeyError: return '' else: raise RuntimeError("No field named %r in %r" \ % (name, schema)) def get(self, name): """Same as get_field_by_name""" return self.get_field_by_name(name) def is_special_field(self, name): """Return True if name is a field like 'deleted' or 'dead'""" return name == 'deleted' or name == 'dead' @abstractmethod def __repr__(self): pass
{ "repo_name": "davande/hackernews-top", "path": "hnapi/items/hnitem.py", "copies": "1", "size": "1562", "license": "apache-2.0", "hash": 3949960647015661000, "line_mean": 23.7936507937, "line_max": 69, "alpha_frac": 0.528809219, "autogenerated": false, "ratio": 4.233062330623306, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0030474876023917673, "num_lines": 63 }
"""Abstract Handler with helper methods.""" from clang.cindex import CursorKind, TypeKind from ctypeslib.codegen import typedesc from ctypeslib.codegen.util import log_entity import logging log = logging.getLogger('handler') # DEBUG import code class CursorKindException(TypeError): """When a child node of a VAR_DECL is parsed as an initialization value, when its not actually part of that initiwlization value.""" pass class InvalidDefinitionError(TypeError): """When a structure is invalid in the source code, sizeof, alignof returns negatives value. We detect it and do our best.""" pass class DuplicateDefinitionException(KeyError): """When we encounter a duplicate declaration/definition name.""" pass ################################################################ class ClangHandler(object): """ Abstract class for handlers. """ def __init__(self, parser): self.parser = parser self._unhandled = [] def register(self, name, obj): return self.parser.register(name, obj) def get_registered(self, name): return self.parser.get_registered(name) def is_registered(self, name): return self.parser.is_registered(name) def remove_registered(self, name): return self.parser.remove_registered(name) def set_location(self, obj, cursor): """ Location is also used for codegeneration ordering.""" if (hasattr(cursor, 'location') and cursor.location is not None and cursor.location.file is not None): obj.location = (cursor.location.file.name, cursor.location.line) return def set_comment(self, obj, cursor): """ If a comment is available, add it to the typedesc.""" if isinstance(obj, typedesc.T): obj.comment = cursor.brief_comment return def make_python_name(self, name): """Transforms an USR into a valid python name.""" # FIXME see cindex.SpellingCache for k, v in [('<', '_'), ('>', '_'), ('::', '__'), (',', ''), (' ', ''), ("$", "DOLLAR"), (".", "DOT"), ("@", "_"), (":", "_"), ('-', '_')]: if k in name: # template name = name.replace(k, v) # FIXME: test case ? 
I want this func to be neutral on C valid # names. if name.startswith("__"): return "_X" + name if len(name) == 0: raise ValueError elif name[0] in "01234567879": return "_" + name return name def get_unique_name(self, cursor): name = '' if hasattr(cursor, 'displayname'): name = cursor.displayname elif hasattr(cursor, 'spelling'): name = cursor.spelling if name == '' and hasattr( cursor, 'get_usr'): # FIXME: should not get Type _id = cursor.get_usr() if _id == '': # anonymous is spelling == '' return None name = self.make_python_name(_id) if cursor.kind == CursorKind.STRUCT_DECL: name = 'struct_%s' % (name) elif cursor.kind == CursorKind.UNION_DECL: name = 'union_%s' % (name) elif cursor.kind == CursorKind.CLASS_DECL: name = 'class_%s' % (name) elif cursor.kind == CursorKind.TYPE_REF: name = name.replace(' ', '_') return name def is_fundamental_type(self, t): return (not self.is_pointer_type(t) and t.kind in self.parser.ctypes_typename.keys()) def is_pointer_type(self, t): return t.kind == TypeKind.POINTER def is_array_type(self, t): return (t.kind == TypeKind.CONSTANTARRAY or t.kind == TypeKind.INCOMPLETEARRAY or t.kind == TypeKind.VARIABLEARRAY or t.kind == TypeKind.DEPENDENTSIZEDARRAY) def is_unexposed_type(self, t): return t.kind == TypeKind.UNEXPOSED def is_literal_cursor(self, t): return (t.kind == CursorKind.INTEGER_LITERAL or t.kind == CursorKind.FLOATING_LITERAL or t.kind == CursorKind.IMAGINARY_LITERAL or t.kind == CursorKind.STRING_LITERAL or t.kind == CursorKind.CHARACTER_LITERAL) def get_literal_kind_affinity(self, literal_kind): ''' return the list of fundamental types that are adequate for which this literal_kind is adequate''' if literal_kind == CursorKind.INTEGER_LITERAL: return [TypeKind.USHORT, TypeKind.UINT, TypeKind.ULONG, TypeKind.ULONGLONG, TypeKind.UINT128, TypeKind.SHORT, TypeKind.INT, TypeKind.LONG, TypeKind.LONGLONG, TypeKind.INT128, ] elif literal_kind == CursorKind.STRING_LITERAL: return [TypeKind.CHAR16, TypeKind.CHAR32, 
TypeKind.CHAR_S, TypeKind.SCHAR, TypeKind.WCHAR] # DEBUG elif literal_kind == CursorKind.CHARACTER_LITERAL: return [TypeKind.CHAR_U, TypeKind.UCHAR] elif literal_kind == CursorKind.FLOATING_LITERAL: return [TypeKind.FLOAT, TypeKind.DOUBLE, TypeKind.LONGDOUBLE] elif literal_kind == CursorKind.IMAGINARY_LITERAL: return [] return [] def get_ctypes_name(self, typekind): return self.parser.get_ctypes_name(typekind) def get_ctypes_size(self, typekind): return self.parser.get_ctypes_size(typekind) def parse_cursor(self, cursor): return self.parser.parse_cursor(cursor) def parse_cursor_type(self, _cursor_type): return self.parser.parse_cursor_type(_cursor_type) ################################ # do-nothing element handlers @log_entity def _pass_through_children(self, node, **args): for child in node.get_children(): self.parser.startElement(child) return True def _do_nothing(self, node, **args): name = self.get_unique_name(node) #import code # code.interact(local=locals()) log.warning('_do_nothing for %s/%s' % (node.kind.name, name)) return True ########################################### # TODO FIXME: 100% cursor/type Kind coverage def __getattr__(self, name, **args): if name not in self._unhandled: log.warning('%s is not handled' % (name)) self._unhandled.append(name) return self._do_nothing
{ "repo_name": "luzfcb/ctypeslib", "path": "ctypeslib/codegen/handler.py", "copies": "1", "size": "6508", "license": "mit", "hash": 3872297691283285500, "line_mean": 33.6170212766, "line_max": 80, "alpha_frac": 0.5766748617, "autogenerated": false, "ratio": 4.0447482908638905, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5121423152563891, "avg_score": null, "num_lines": null }
"""Abstract handling of git repositories.""" # Python 2 compatability imports try: from pathlib import Path except ImportError: from pathlib2 import Path try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse import collections import hashlib import os import shutil import subprocess GIT_BINARY = "/usr/bin/git" Worktree = collections.namedtuple( "Worktree", [ "path", "head", "branch", ] ) def repo_url_hash_path_builder(working_directory, url): """Returns a unique repo Path in the given working_directory based on a two level hash of the repo URL. """ url_hash = hashlib.md5(url.encode("utf-8")).hexdigest() parent_directory = url_hash[:2] repository_directory = url_hash[2:] return Path(working_directory)\ .joinpath(parent_directory)\ .joinpath(repository_directory) def branch_hash_worktree_path_builder(branch): """Returns a relative path for a worktree based on the hash of the branch name. """ return hashlib.md5(branch.encode("utf-8")).hexdigest() class RepositoryLocation: def __init__( self, working_directory, repository_url, path_builder_func=repo_url_hash_path_builder): url_hash = hashlib.md5(repository_url.encode("utf-8")).hexdigest() self.working_directory = working_directory self.url = repository_url self.repository_url = urlparse(repository_url) self.worktree_path_builder = branch_hash_worktree_path_builder # root_path is the container directory that will hold the core clone # along with any worktrees created later. self.root_path = path_builder_func(working_directory, repository_url) # path is where the core clone is located. Defaults to a full hash # of the repo URL. self.path = self.root_path / url_hash @property def exists(self): return self.path.exists() def create(self): if self.exists: return False self.path.mkdir(parents=True, exist_ok=True) return True def remove(self): shutil.rmtree(str(self.root_path)) # Remove the parent directory if empty. 
try: self.path.parent.rmdir() except OSError: # Parent directory isn't empty, do nothing. pass def fq_path(self, relative_path): return self.path / relative_path def path_exists(self, relative_path): return self.fq_path(relative_path).exists() def search(self, pattern): return self.path.glob(pattern) def worktree_path(self, branch_name): wt_path = self.worktree_path_builder(branch_name) return self.root_path / wt_path class RepositoryError(Exception): """A generic container for errors generated by the git command.""" class Repository: def __init__( self, repository_location, binary, bare=False, clone=False): self._url = repository_location.url self.location = repository_location self.binary = binary self.bare = bare self.created = False # Clone the repo on initialization. NOTE: the properties # accessed by the methods below need to be defined before calling # the methods. if not self.ready and clone: self.clone(self.bare) @property def ready(self): if not self.location.exists: return False if self.bare: try: self._git("rev-parse", ["--git-dir"]) except subprocess.CalledProcessError: return False return True else: if not self.location.path_exists(".git"): return False return True def _git(self, command, arguments=None, cwd=None): if cwd is None: cwd = str(self.location.path) if arguments is None: arguments = [] args = [self.binary, command] args.extend(arguments) result = subprocess.check_output( args=args, cwd=cwd, universal_newlines=True ) return result def _ready_target_location(self): self.created = self.location.create() def _remove_target_location(self): self.location.remove() def _hard_reset(self): self._git("reset", ["--hard", "HEAD"]) def _checkout(self, reference): self._git("checkout", [reference]) def _pull(self, cwd=None): self._git("pull", cwd=cwd) def _split_branch_name(self, reference): return reference.split("refs/heads/")[1] def clone(self, bare=False): self._ready_target_location() args = [self._url, str(self.location.path)] if bare: 
        args.insert(0, "--bare")

        try:
            self._git("clone", args)
        except subprocess.CalledProcessError as exc:
            message = exc.stderr
            # Clone failed, so cleanup the directories, but only if we created
            # the directory for the clone attempt.
            if self.created:
                self.location.remove()

            raise RepositoryError(message)

    def update(self):
        """Pull the latest changes from remote."""
        self._pull()

    def update_to(self, reference):
        """Make a given reference active.

        This is equivalent to a checkout and pull on the given reference.
        The reference can be any git reference (commit, branch, tag, ...).
        """
        self._checkout(reference)
        self._pull()

    def export(self, destination):
        """Export all the files of the current working copy to the given
        destination.

        This will ensure that none of the git specific files are copied
        to the target destination.
        """
        # Ensure trailing slash is present so checkout-index treats the
        # prefix as a directory.
        destination = os.path.join(destination, "")

        self._git(
            "checkout-index",
            ["-a", "-f", "--prefix={0}".format(destination)]
        )

    def latest_commit(self, cwd=None):
        """Return the commit hash that HEAD currently resolves to.

        ``cwd`` lets callers query a worktree instead of the main checkout.
        """
        result = self._git("rev-parse", ["--verify", "HEAD"], cwd=cwd)
        return result.strip()

    def branches(self):
        """Return the list of branch names advertised by the remote."""
        result = self._git("ls-remote", ["--heads"])
        if not result:
            return []

        branches = result.strip().split("\n")
        branches = [self._split_branch_name(b) for b in branches]

        return branches

    @property
    def active_branch(self):
        """Name of the branch HEAD points at, or None when it cannot be
        determined (e.g. no output from symbolic-ref)."""
        result = self._git("symbolic-ref", ["HEAD"])
        if not result:
            return None

        branch = result.strip()
        return branch.split("refs/heads/")[1]

    def tags(self):
        """Return the list of tag names advertised by the remote."""
        result = self._git("ls-remote", ["--tags"])
        if not result:
            return []

        tags = result.strip().split("\n")
        tags = [t.split("refs/tags/")[1] for t in tags]

        return tags

    def worktrees(self, branch_name=None):
        """List the worktrees of this repository.

        Without ``branch_name`` a list of Worktree objects is returned.
        With ``branch_name`` the single matching Worktree is returned, and
        RepositoryError is raised if no worktree matches.
        """
        result = self._git("worktree", ["list", "--porcelain"])
        if not result:
            return []

        worktrees = []
        current_wt = {}
        for line in result.strip().splitlines():
            if not line:
                # Line is empty, this is a worktree boundary in the
                # porcelain output.
                worktrees.append(current_wt)
                current_wt = {}
                continue

            # NOTE(review): this unpack assumes every non-empty porcelain
            # line is a "key value" pair; git can emit value-less lines
            # such as "bare" or "detached" -- confirm against the git
            # versions in use.
            key, value = line.split(" ")
            current_wt[key] = value
        # The loop above only flushes on blank separator lines, so append
        # the final (unterminated) entry here.
        worktrees.append(current_wt)

        tuples = []
        for d in worktrees:
            wt = Worktree(
                path=d["worktree"],
                head=d["HEAD"],
                branch=self._split_branch_name(d["branch"])
            )
            if branch_name == wt.branch:
                return wt
            tuples.append(wt)

        if branch_name is not None:
            # If branch_name was specified but we got here without finding and
            # returning it then it is invalid.
            msg = "Unknown worktree: '{0}'".format(branch_name)
            raise RepositoryError(msg)

        return tuples

    def add_worktree(self, branch_name):
        """Create a worktree (and local branch) tracking
        origin/<branch_name>; return the new Worktree."""
        path = str(self.location.worktree_path(branch_name))

        # TODO: Make references to origin more flexible.
        self._git(
            "worktree",
            [
                "add",
                "-b",
                branch_name,
                path,
                "origin/{0}".format(branch_name)
            ]
        )

        head = self.latest_commit(cwd=path)
        return Worktree(path=path, head=head, branch=branch_name)

    def remove_worktree(self, branch_name):
        """Delete the worktree for ``branch_name`` along with the local
        branch that was created for it."""
        wt = self.worktrees(branch_name)
        path = wt.path

        # Remove the worktree off the file system.
        shutil.rmtree(path)

        # Force git to update the worktrees now.
        self._git("worktree", ["prune"])

        # Remove the local branch that was created.
        self._git("branch", ["-d", branch_name])

    def update_worktree(self, branch_name):
        """Pull the latest changes inside the worktree for ``branch_name``."""
        wt = self.worktrees(branch_name)
        path = wt.path
        self._pull(cwd=path)

    def clean(self, thorough=False):
        """Cleans the current working copy of the repository.

        If thorough is specified this method completely removes the
        directory of the working copy. Otherwise it just performs a hard
        git reset to HEAD.
        """
        if thorough:
            self._remove_target_location()
        else:
            self._hard_reset()

    def fq_path(self, relative_path):
        """Returns a fully qualified path given a path relative to the
        repository root.
        """
        return self.location.fq_path(relative_path)

    def search(self, pattern):
        """Returns a list of file paths matching the given pattern."""
        return self.location.search(pattern)


def setup_repository(
        working_directory, url, binary=GIT_BINARY, bare=False, clone=False):
    """A helper function to construct a Repository with a
    RepositoryLocation built from ``working_directory`` and ``url``.
    """
    location = RepositoryLocation(working_directory, url)
    return Repository(location, binary, bare=bare, clone=clone)
{ "repo_name": "jpaidoussi/columbia-git", "path": "columbia/git.py", "copies": "1", "size": "10191", "license": "cc0-1.0", "hash": 7706432021325047000, "line_mean": 28.625, "line_max": 78, "alpha_frac": 0.581787852, "autogenerated": false, "ratio": 4.278337531486146, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5360125383486146, "avg_score": null, "num_lines": null }
# abstract heap, minheap, and maxheap -- implementations using Node objects
# NOTE: with Node objects, can support delete() operation with O(lg n)

class Node(object):
    """A heap entry that remembers its owning heap and its slot index."""

    __slots__ = ['value', 'heap', 'idx']

    def __init__(self, value):
        self.value = value


def parent(i):
    """Slot index of the parent of slot i."""
    return (i - 1) // 2


def left(i):
    """Slot index of the left child of slot i."""
    return 2 * i + 1


def right(i):
    """Slot index of the right child of slot i."""
    return 2 * i + 2


class AbstractHeap(object):
    """Array-backed binary heap; ordering is delegated to compare()."""

    def __init__(self):
        self.a = []

    def compare(self, l, r):
        """Subclasses answer: must node l sit above node r?"""
        raise NotImplementedError("AbstractHeap does not implement comparison!")

    def swap(self, i, j):
        # Update each node's cached slot index first, then exchange the
        # array entries so both stay consistent.
        self.a[i].idx = j
        self.a[j].idx = i
        self.a[i], self.a[j] = self.a[j], self.a[i]

    def upheap(self, i):
        # Bubble slot i towards the root while it out-ranks its parent.
        while i != 0:
            above = parent(i)
            if not self.compare(self.a[i], self.a[above]):
                break
            self.swap(i, above)
            i = above

    def downheap(self, i):
        # Sink slot i while either child should sit above it.
        size = len(self.a)
        while True:
            best = i
            for child in (left(i), right(i)):
                if child < size and self.compare(self.a[child], self.a[best]):
                    best = child
            if best == i:
                return
            self.swap(i, best)
            i = best

    def insert(self, v):
        # New nodes start in the last slot and bubble up into place.
        slot = len(self.a)
        v.heap = self
        v.idx = slot
        self.a.append(v)
        self.upheap(slot)

    def peek(self):
        # An empty heap yields the sentinel value 1000 instead of raising.
        if not self.a:
            return 1000
        return self.a[0].value

    def extract(self):
        # NOTE(review): raises IndexError on an empty heap, and the node
        # moved into the root keeps a stale .idx until a later swap --
        # both behaviors inherited from the original implementation.
        if len(self.a) == 1:
            return self.a.pop()
        top = self.a[0]
        self.a[0] = self.a.pop()
        self.downheap(0)
        return top

    def delete(self, node):
        """Remove an arbitrary node from the heap in O(lg n)."""
        slot = node.idx
        if slot == len(self.a) - 1:
            self.a.pop()
            return
        self.swap(slot, len(self.a) - 1)
        self.a.pop()
        # Restore the invariant at the vacated slot, bubbling in whichever
        # direction is required.
        if slot != 0 and self.compare(self.a[slot], self.a[parent(slot)]):
            self.upheap(slot)
        else:
            self.downheap(slot)


class MaxHeap(AbstractHeap):
    """Heap whose root holds the largest value."""

    def compare(self, l, r):
        return l.value > r.value


class MinHeap(AbstractHeap):
    """Heap whose root holds the smallest value."""

    def compare(self, l, r):
        return l.value < r.value
{ "repo_name": "dimakuv/python-algos", "path": "heap/nodeheap.py", "copies": "1", "size": "2134", "license": "mit", "hash": -7411420327723815000, "line_mean": 24.7108433735, "line_max": 80, "alpha_frac": 0.5248359888, "autogenerated": false, "ratio": 3.142857142857143, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9129472903320639, "avg_score": 0.007644045667300797, "num_lines": 83 }
# abstract heap

def parent(i):
    """Index of the parent of node i in the implicit array layout."""
    # Floor division keeps the result an int on both Python 2 and 3;
    # the previous "/" produced a float index under Python 3.
    return (i - 1) // 2

def left(i):
    """Index of the left child of node i."""
    return i * 2 + 1

def right(i):
    """Index of the right child of node i."""
    return i * 2 + 2

class AbstractHeap(object):
    """priority queue using heap (abstract parent class)"""

    def __init__(self, a=None):
        self.a = a[:] if a else []  # copy list if provided

    def compare(self, l, r):
        """Return True when l must sit above r; subclasses implement this."""
        raise NotImplementedError("AbstractHeap does not implement comparison!")

    def upheap(self, i):
        """Bubble element i up until the heap property holds (O(lg n))."""
        if i == 0:
            return
        p = parent(i)
        if self.compare(self.a[i], self.a[p]):
            self.a[i], self.a[p] = self.a[p], self.a[i]
            self.upheap(p)

    def downheap(self, i):
        """Sink element i down until the heap property holds (O(lg n)).

        Assumption: the subtrees rooted at the left & right children of i
        already satisfy the heap property.
        """
        l, r = left(i), right(i)
        toswap = i
        if l < len(self.a) and self.compare(self.a[l], self.a[toswap]):
            toswap = l
        if r < len(self.a) and self.compare(self.a[r], self.a[toswap]):
            toswap = r
        if toswap != i:
            self.a[i], self.a[toswap] = self.a[toswap], self.a[i]
            self.downheap(toswap)

    def buildmaxheap(self):
        """Heapify the backing array bottom-up in O(n)."""
        lastparent = parent(len(self.a) - 1)
        # Traverse from last parent to root to downheap subtrees;
        # range() replaces the Python-2-only xrange().
        for i in reversed(range(lastparent + 1)):
            self.downheap(i)

    def insert(self, v):
        """Append v and restore the heap property."""
        i = len(self.a)
        self.a.append(v)
        self.upheap(i)

    def peek(self):
        """Return the top element without removing it, or None if empty."""
        return self.a[0] if len(self.a) > 0 else None

    def extract(self):
        """Remove and return the top element, or None if empty."""
        if len(self.a) == 0:
            return None
        if len(self.a) == 1:
            return self.a.pop()
        res = self.a[0]
        self.a[0] = self.a.pop()
        self.downheap(0)
        return res

    def empty(self):
        """True when the heap holds no elements."""
        return len(self.a) == 0

    def __str__(self):
        return str(self.a)
{ "repo_name": "dimakuv/python-algos", "path": "heap/abstractheap.py", "copies": "1", "size": "1803", "license": "mit", "hash": -6973945221679498000, "line_mean": 24.7571428571, "line_max": 80, "alpha_frac": 0.540765391, "autogenerated": false, "ratio": 3.202486678507993, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4243252069507993, "avg_score": null, "num_lines": null }
""" **abstract_heated_cavity_phasechange_simulation.py** applies Phaseflow to benchmark problems on the unit square with hot and cold vertical walls, and adiabatic horizontal walls. """ import phaseflow import fenics class AbstractHeatedCavityPhaseChangeSimulation(phaseflow.abstract_phasechange_simulation.AbstractPhaseChangeSimulation): def __init__(self, time_order = 1, integration_measure = fenics.dx(metadata={"quadrature_degree": 8}), initial_uniform_gridsize = 20, setup_solver = True): self.hot_wall_temperature = fenics.Constant(1., name = "T_h") self.cold_wall_temperature = fenics.Constant(-0.01, name = "T_c") self.initial_concentration = fenics.Constant(1., name = "C0") class HotWall(fenics.SubDomain): def inside(self, x, on_boundary): return on_boundary and fenics.near(x[0], 0.) class ColdWall(fenics.SubDomain): def inside(self, x, on_boundary): return on_boundary and fenics.near(x[0], 1.) class Walls(fenics.SubDomain): def inside(self, x, on_boundary): return on_boundary self._HotWall = HotWall self.hot_wall = self._HotWall() self._ColdWall = ColdWall self.cold_wall = self._ColdWall() self._Walls = Walls self.walls = self._Walls() self.initial_uniform_gridsize = initial_uniform_gridsize super().__init__( time_order = time_order, integration_measure = integration_measure, setup_solver = setup_solver) def coarse_mesh(self): M = self.initial_uniform_gridsize return fenics.UnitSquareMesh(M, M) def boundary_conditions(self): return [ fenics.DirichletBC( self.function_space.sub(1), (0., 0.), self.walls), fenics.DirichletBC( self.function_space.sub(2), self.hot_wall_temperature, self.hot_wall), fenics.DirichletBC( self.function_space.sub(2), self.cold_wall_temperature, self.cold_wall)] def deepcopy(self): sim = super().deepcopy() sim.hot_wall_temperature.assign(self.hot_wall_temperature) sim.cold_wall_temperature.assign(self.cold_wall_temperature) sim.initial_concentration.assign(self.initial_concentration) sim.hot_wall = self._HotWall() sim.cold_wall = 
self._ColdWall() sim.walls = self._Walls() return sim def cold_wall_heat_flux_integrand(self): nhat = fenics.FacetNormal(self.mesh.leaf_node()) p, u, T, C = fenics.split(self.solution.leaf_node()) mesh_function = fenics.MeshFunction( "size_t", self.mesh.leaf_node(), self.mesh.topology().dim() - 1) cold_wall_id = 2 self.cold_wall.mark(mesh_function, cold_wall_id) dot, grad = fenics.dot, fenics.grad ds = fenics.ds( domain = self.mesh.leaf_node(), subdomain_data = mesh_function, subdomain_id = cold_wall_id) return dot(grad(T), nhat)*ds
{ "repo_name": "geo-fluid-dynamics/phaseflow-fenics", "path": "phaseflow/abstract_heated_cavity_phasechange_simulation.py", "copies": "1", "size": "3577", "license": "mit", "hash": -2556043628665461000, "line_mean": 28.3278688525, "line_max": 121, "alpha_frac": 0.5342465753, "autogenerated": false, "ratio": 3.9921875, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.50264340753, "avg_score": null, "num_lines": null }
"""Abstract hierarchies of graphs in ReGraph. This module contains abstact data structures for graph hierarchies. A graph hierarchy is a DAG, whose nodes are graphs and whose directed edges represent homomorphisms between graphs. In addition, hierarchies are equipped with relations on graphs (which can be thought of as undirected edges or, alternatively, spans). """ from abc import ABC, abstractmethod import copy import json import os from regraph.exceptions import (HierarchyError, ReGraphError, InvalidHomomorphism, RewritingError) from regraph.category_utils import (compose, check_homomorphism, relation_to_span, pushout, get_unique_map_to_pullback, get_unique_map_from_pushout, is_monic, pullback, image_factorization, get_unique_map_to_pullback_complement) from regraph.rules import Rule from regraph.utils import (attrs_from_json, attrs_to_json, keys_by_value, normalize_typing_relation, test_strictness) class Hierarchy(ABC): """Abstract class for graph hierarchy objects in ReGraph. A graph hierarchy is a DAG, where nodes are graphs with attributes and edges are homomorphisms representing graph typing in the system. 
""" @abstractmethod def graphs(self, data=False): """Return a list of graphs in the hierarchy.""" pass @abstractmethod def typings(self, data=False): """Return a list of graph typing edges in the hierarchy.""" pass @abstractmethod def relations(self, data=False): """Return a list of relations.""" pass @abstractmethod def successors(self, node_id): """Return the set of successors.""" pass @abstractmethod def predecessors(self, node_id): """Return the set of predecessors.""" pass @abstractmethod def get_graph(self, graph_id): """Get a graph object associated to the node 'graph_id'.""" pass @abstractmethod def get_typing(self, source_id, target_id): """Get a typing dict associated to the edge 'source_id->target_id'.""" pass @abstractmethod def get_relation(self, left_id, right_id): """Get a relation dict associated to the rel 'left_id->target_id'.""" pass @abstractmethod def get_graph_attrs(self, graph_id): """Get attributes of a graph in the hierarchy. Parameters ---------- graph_id : hashable Id of the graph """ pass @abstractmethod def set_graph_attrs(self, node_id, attrs): """Set attributes of a graph in the hierarchy. Parameters ---------- graph_id : hashable Id of the graph """ pass @abstractmethod def get_typing_attrs(self, source, target): """Get attributes of a typing in the hierarchy. Parameters ---------- source : hashable Id of the source graph target : hashable Id of the target graph """ pass @abstractmethod def set_typing_attrs(self, source, target, attrs): """Set attributes of a typing in the hierarchy. Parameters ---------- source : hashable Id of the source graph target : hashable Id of the target graph """ pass @abstractmethod def get_relation_attrs(self, left, right): """Get attributes of a reltion in the hierarchy. Parameters ---------- left : hashable Id of the left graph right : hashable Id of the right graph """ pass @abstractmethod def set_relation_attrs(self, left, right, attrs): """Set attributes of a relation in the hierarchy. 
Parameters ---------- left : hashable Id of the left graph right : hashable Id of the right graph """ pass @abstractmethod def set_node_relation(self, left_graph, right_graph, left_node, right_node): """Set relation for a particular node. Parameters ---------- """ pass @abstractmethod def add_graph(self, graph_id, graph, attrs=None): """Add a new graph to the hierarchy. Parameters ---------- graph_id : hashable Id of a new node in the hierarchy graph : regraph.Graph Graph object corresponding to the new node of the hierarchy attrs : dict, optional Dictionary containing attributes of the new node """ pass @abstractmethod def add_graph_from_data(self, graph_id, node_list, edge_list, attrs=None): """Add a new graph to the hierarchy from the input node/edge lists. Parameters ---------- graph_id : hashable Id of a new node in the hierarchy node_list : iterable List of nodes (with attributes) edge_list : iterable List of edges (with attributes) graph_attrs : dict, optional Dictionary containing attributes of the new node """ pass @abstractmethod def add_empty_graph(self, graph_id, attrs=None): """"Add a new empty graph to the hierarchy. Parameters ---------- graph_id : hashable Id of a new node in the hierarchy graph_attrs : dict, optional Dictionary containing attributes of the new node """ pass @abstractmethod def add_typing(self, source, target, mapping, attrs=None): """Add homomorphism to the hierarchy. 
Parameters ---------- source : hashable Id of the source graph node of typing target : hashable Id of the target graph node of typing mapping : dict Dictionary representing a mapping of nodes from the source graph to target's nodes attrs : dict Dictionary containing attributes of the new typing edge Raises ------ HierarchyError This error is raised in the following cases: * source or target ids are not found in the hierarchy * a typing edge between source and target already exists * addition of an edge between source and target creates a cycle or produces paths that do not commute with some already existing paths InvalidHomomorphism If a homomorphisms from a graph at the source to a graph at the target given by `mapping` is not a valid homomorphism. """ pass @abstractmethod def add_relation(self, left, right, relation, attrs=None): """Add relation to the hierarchy. This method adds a relation between two graphs in the hierarchy corresponding to the nodes with ids `left` and `right`, the relation itself is defined by a dictionary `relation`, where a key is a node in the `left` graph and its corresponding value is a set of nodes from the `right` graph to which the node is related. Relations in the hierarchy are symmetric (see example below). 
Parameters ---------- left Id of the hierarchy's node represening the `left` graph right Id of the hierarchy's node represening the `right` graph relation : dict Dictionary representing a relation of nodes from `left` to the nodes from `right`, a key of the dictionary is assumed to be a node from `left` and its value a set of ids of related nodes from `right` attrs : dict Dictionary containing attributes of the new relation Raises ------ HierarchyError This error is raised in the following cases: * node with id `left`/`right` is not defined in the hierarchy; * node with id `left`/`right` is not a graph; * a relation between `left` and `right` already exists; * some node ids specified in `relation` are not found in the `left`/`right` graph. """ pass @abstractmethod def remove_graph(self, graph_id, reconnect=False): """Remove graph from the hierarchy. Removes a graph from the hierarchy, if the `reconnect` parameter is set to True, adds typing from the predecessors of the removed node to all its successors, by composing the homomorphisms (for every predecessor `p` and for every successor 's' composes two homomorphisms `p`->`node_id` and `node_id`->`s`, then removes `node_id` and all its incident edges, by which makes node's removal a procedure of 'forgetting' one level of 'abstraction'). 
Parameters ---------- node_id Id of a graph to remove reconnect : bool Reconnect the descendants of the removed node to its predecessors Raises ------ HierarchyError If graph with `node_id` is not defined in the hierarchy """ pass @abstractmethod def remove_typing(self, s, t): """Remove a typing from the hierarchy.""" pass @abstractmethod def remove_relation(self, left, right): """Remove a relation from the hierarchy.""" pass @abstractmethod def bfs_tree(self, graph, reverse=False): """BFS tree from the graph to all other reachable graphs.""" pass @abstractmethod def shortest_path(self, source, target): """Shortest path from 'source' to 'target'.""" pass @abstractmethod def copy_graph(self, graph_id, new_graph_id, attach_graphs=[]): """Create a copy of a graph in a hierarchy.""" pass @abstractmethod def relabel_graph_node(self, graph_id, node, new_name): """Rename a node in a graph of the hierarchy.""" pass @abstractmethod def relabel_graph(self, graph_id, new_graph_id): """Relabel a graph in the hierarchy. Parameters ---------- graph_id : hashable Id of the graph to relabel new_graph_id : hashable New graph id to assign to this graph """ pass @abstractmethod def relabel_graphs(self, mapping): """Relabel graphs in the hierarchy. Parameters ---------- mapping: dict A dictionary with keys being old graph ids and their values being new id's of the respective graphs. Raises ------ ReGraphError If new id's do not define a set of distinct graph id's. 
""" pass @abstractmethod def _update_mapping(self, source, target, mapping): """Update the mapping dictionary from source to target.""" pass @abstractmethod def _update_relation(self, left, right, relation): """Update the relation dictionaries (left and right).""" pass # Implemented methods def __str__(self): """String representation of the hierarchy.""" res = "" res += "\nGraphs:\n" for n, attrs in self.graphs(True): res += "\n{} {}\n".format(n, attrs) res += "\nTyping homomorphisms: \n" for n1, n2, attrs in self.typings(True): res += "{} -> {}: {}\n".format( n1, n2, attrs) res += "\nRelations:\n" for n1, n2, attrs in self.relations(True): res += "{}-{}: {}\n".format( n1, n2, attrs) return res def __eq__(self, hierarchy): """Hierarchy equality test.""" for node, attrs in self.graphs(True): if node not in hierarchy.graphs(): return False if attrs != hierarchy.get_graph_attrs(node): return False for s, t, attrs in self.typings(True): if (s, t) not in hierarchy.edges(): return False if attrs != hierarchy.get_typing_attrs(s, t): return False for n1, n2, attrs in self.relations(True): if (n1, n2) not in hierarchy.relations() and\ (n2, n1) not in hierarchy.relations(): return False if attrs != hierarchy.get_relation_attrs(n1, n2): return False return True def __ne__(self, hierarchy): """Non-equality operator.""" return not (self == hierarchy) def add_graph_from_json(self, graph_id, json_data, attrs=None): """Add a new graph to the hirarchy from its JSON-reprsentation. 
Parameters ---------- graph_id : hashable Id of the new graph json_data : dict JSON-like dictionary containing the representation of the graph attrs : dict Attributes to attach to the new graph """ node_list = [] edge_list = [] for n in json_data["nodes"]: node_list.append((n["id"], attrs_from_json(n["attrs"]))) for e in json_data["edges"]: edge_list.append((e["from"], e["to"], attrs_from_json(e["attrs"]))) self.add_graph_from_data(graph_id, node_list, edge_list, attrs) def to_json(self, rename_nodes=None): """Return json representation of the hierarchy. Parameters ---------- rename_nodes : dict, optional Dictionary specifying mapping of node ids from the original graph to its JSON-representation """ json_data = { "graphs": [], "typing": [], "relations": [] } for node, attrs in self.graphs(True): if rename_nodes and node in rename_nodes.keys(): node_id = rename_nodes[node] else: node_id = node json_data["graphs"].append({ "id": node_id, "graph": self.get_graph(node).to_json(), "attrs": attrs_to_json(attrs) }) for s, t, attrs in self.typings(True): if rename_nodes and s in rename_nodes.keys(): s_id = rename_nodes[s] else: s_id = s if rename_nodes and t in rename_nodes.keys(): t_id = rename_nodes[t] else: t_id = t json_data["typing"].append({ "from": s_id, "to": t_id, "mapping": self.get_typing(s, t), "attrs": attrs_to_json(attrs) }) visited = set() for u, v, attrs in self.relations(True): if rename_nodes and u in rename_nodes.keys(): u_id = rename_nodes[u] else: u_id = u if rename_nodes and v in rename_nodes.keys(): v_id = rename_nodes[v] else: v_id = v if not (u, v) in visited and not (v, u) in visited: visited.add((u, v)) json_data["relations"].append({ "from": u_id, "to": v_id, "rel": { a: list(b) for a, b in self.get_relation(u, v).items() }, "attrs": attrs_to_json(attrs) }) return json_data @classmethod def from_json(cls, json_data, ignore=None): """Create a hierarchy object from JSON-representation. 
Parameters ---------- json_data : dict JSON-like dict containing representation of a hierarchy ignore : dict, optional Dictionary containing components to ignore in the process of converting from JSON, dictionary should respect the following format: { "graphs": <collection of ids of graphs to ignore>, "rules": <collection of ids of rules to ignore>, "typing": <collection of tuples containing typing edges to ignore>, "rule_typing": <collection of tuples containing rule typing edges to ignore>>, "relations": <collection of tuples containing relations to ignore>, } Returns ------- hierarchy : regraph.hierarchies.Hierarchy """ hierarchy = cls() # add graphs for graph_data in json_data["graphs"]: if ignore is not None and\ "graphs" in ignore.keys() and\ graph_data["id"] in ignore["graphs"]: pass else: if "attrs" not in graph_data.keys(): attrs = dict() else: attrs = attrs_from_json(graph_data["attrs"]) hierarchy.add_graph_from_json( graph_data["id"], graph_data["graph"], attrs) # add typing for typing_data in json_data["typing"]: if ignore is not None and\ "typing" in ignore.keys() and\ (typing_data["from"], typing_data["to"]) in ignore["typing"]: pass else: if "attrs" not in typing_data.keys(): attrs = dict() else: attrs = attrs_from_json(typing_data["attrs"]) hierarchy.add_typing( typing_data["from"], typing_data["to"], typing_data["mapping"], attrs) # add relations for relation_data in json_data["relations"]: from_g = relation_data["from"] to_g = relation_data["to"] if ignore is not None and\ "relations" in ignore.keys() and\ ((from_g, to_g) in ignore["relations"] or (to_g, from_g) in ignore["relations"]): pass else: if "attrs" not in relation_data.keys(): attrs = dict() else: attrs = attrs_from_json(relation_data["attrs"]) if (from_g, to_g) not in hierarchy.relations(): hierarchy.add_relation( relation_data["from"], relation_data["to"], {a: set(b) for a, b in relation_data["rel"].items()}, attrs ) return hierarchy @classmethod def load(cls, filename, ignore=None): 
"""Load the hierarchy from a file. Parameters ---------- filename : str Path to the file containing JSON-representation of the hierarchy ignore : dict Dictionary with graph elemenets to ignore when loading Returns ------- hierarchy : regraph.hierarchies.Hierarchy """ if os.path.isfile(filename): with open(filename, "r+") as f: json_data = json.loads(f.read()) hierarchy = cls.from_json(json_data, ignore) return hierarchy else: raise ReGraphError("File '{}' does not exist!".format(filename)) def export(self, filename): """Export the hierarchy to a file.""" with open(filename, 'w') as f: j_data = self.to_json() json.dump(j_data, f) def adjacent_relations(self, g): """Return a list of related graphs.""" if g not in self.graphs(): raise HierarchyError( "Graph node '{}' does not exist in the hierarchy!".format(g)) return [ r for l, r in self.relations() if l == g ] + [ l for l, r in self.relations() if r == g ] def node_type(self, graph_id, node_id): """Get a list of the immediate types of a node.""" if graph_id not in self.graphs(): raise HierarchyError( "Graph '{}' is not defined in the hierarchy!".format(graph_id) ) if node_id not in self.get_graph(graph_id).nodes(): raise HierarchyError( "Graph '{}'' does not have a node with id '{}'!".format( graph_id, node_id) ) types = dict() for successor in self.successors(graph_id): mapping = self.get_typing(graph_id, successor) if node_id in mapping.keys(): types[successor] = mapping[node_id] return types def get_ancestors(self, graph_id): """Return ancestors of a graph with the typing morphisms.""" ancestors = dict() for pred in self.predecessors(graph_id): typing = self.get_typing(pred, graph_id) pred_ancestors = self.get_ancestors(pred) if pred in ancestors.keys(): ancestors.update(pred_ancestors) else: ancestors[pred] = typing for anc, anc_typing in pred_ancestors.items(): if anc in ancestors.keys(): ancestors[anc].update(compose(anc_typing, typing)) else: ancestors[anc] = compose(anc_typing, typing) return ancestors def 
get_descendants(self, graph_id, maybe=None): """Return descendants of a graph with the typing morphisms.""" descendants = dict() for successor in self.successors(graph_id): mapping = self.get_typing(graph_id, successor) typing_descendants = self.get_descendants(successor, maybe) if successor in descendants.keys(): descendants[successor].update(mapping) else: descendants[successor] = mapping for anc, typ in typing_descendants.items(): if anc in descendants.keys(): descendants[anc].update(compose(mapping, typ)) else: descendants[anc] = compose(mapping, typ) return descendants def compose_path_typing(self, path): """Compose homomorphisms along the path. Parameters ---------- path : list List of nodes of the hierarchy forming a path Returns ------- If source node of the path is a graph homomorphism : dict Dictionary containg the typing of the nodes from the source graph of the path by the nodes of the target graph if source node of the path is a rule lhs_homomorphism : dict Dictionary containg the typing of the nodes from the left-hand side of the source rule of the path by the nodes of the target graph rhs_homomorphism : dict Dictionary containg the typing of the nodes from the right-hand side of the source rule of the path by the nodes of the target graph """ s = path[0] t = path[1] homomorphism = self.get_typing(s, t) for i in range(2, len(path)): s = path[i - 1] t = path[i] homomorphism = compose( homomorphism, self.get_typing(s, t) ) return homomorphism def _get_backward_propagation_rule(self, origin_id, ancestor, rule, instance=None, p_typing=None): """Compute a propagation rule to the ancestor.""" if p_typing is None: p_typing = {} origin_typing = self.get_typing(ancestor, origin_id) # Compute L_G l_g, l_g_g, l_g_l = pullback( self.get_graph(ancestor), rule.lhs, self.get_graph(origin_id), origin_typing, instance) # Compute canonical P_G canonical_p_g, p_g_l_g, p_g_p = pullback( l_g, rule.p, rule.lhs, l_g_l, rule.p_lhs) # Remove controlled things from P_G if 
ancestor in p_typing.keys(): l_g_factorization = { keys_by_value(l_g_g, k)[0]: v for k, v in p_typing[ancestor].items() } p_g_nodes_to_remove = set() for n in canonical_p_g.nodes(): l_g_node = p_g_l_g[n] # If corresponding L_G node is specified in # the controlling relation, remove all # the instances of P nodes not mentioned # in this relations if l_g_node in l_g_factorization.keys(): p_nodes = l_g_factorization[l_g_node] if p_g_p[n] not in p_nodes: del p_g_p[n] del p_g_l_g[n] p_g_nodes_to_remove.add(n) for n in p_g_nodes_to_remove: canonical_p_g.remove_node(n) rule = Rule(p=canonical_p_g, lhs=l_g, p_lhs=p_g_l_g) return rule, l_g_g, l_g_l, p_g_p def get_rule_hierarchy(self, origin_id, rule, instance=None, p_typing=None, rhs_typing=None): """Find rule hierarchy corresponding to the input rewriting. Parameters ---------- graph_id : hashable Id of the graph to rewrite rule : regraph.Rule Rewriting rule instance : dict, optional Instance of the rule in the graph. If not specified, the identity of the left-hand side is used p_typing : dict Relations controlling backward propagation. The keys are ancestors of the rewritten graph, values are dictionaries containing individual relations between the nodes of a given ancestor and the preserved part of the rule rhs_typing : dict Relation controlling forward propagation. The keys are descendants of the rewritten graph, values are dictionaries containing individual relations between the right-hand side of the rule and the nodes of a given descendant Returns ------- rule_hierarchy : dictionary Dictionary contains two keys: (1) `rules` whose value is a dictionary with id's of the graphs in the hierarchy and the computed propagation rules; (2) `rule_homomorphisms` whose value is a dictionary with pairs of graphs in the hierarchy and the computed homomorphisms between rules. 
""" instance, p_typing, rhs_typing = self._check_rule_instance_typing( origin_id, rule, instance, p_typing, rhs_typing, False) instances = {origin_id: instance} rule_hierarchy = { "rules": {origin_id: rule}, "rule_homomorphisms": {} } # Compute rules and their map to the original rule l_g_ls = {} # LHS's of rule liftings to LHS of the original rule p_g_ps = {} # interfaces of rule liftings to LHS of the original rule ancestors = self.get_ancestors(origin_id) descendants = self.get_descendants(origin_id) # Compute rule liftings for ancestor, origin_typing in ancestors.items(): # Compute L_G l_g, l_g_g, l_g_l = pullback( self.get_graph(ancestor), rule.lhs, self.get_graph(origin_id), origin_typing, instance) # Compute canonical P_G canonical_p_g, p_g_l_g, p_g_p = pullback( l_g, rule.p, rule.lhs, l_g_l, rule.p_lhs) # Remove controlled things from P_G if ancestor in p_typing.keys(): l_g_factorization = { keys_by_value(l_g_g, k)[0]: v for k, v in p_typing[ancestor].items() } p_g_nodes_to_remove = set() for n in canonical_p_g.nodes(): l_g_node = p_g_l_g[n] # If corresponding L_G node is specified in # the controlling relation, remove all # the instances of P nodes not mentioned # in this relations if l_g_node in l_g_factorization.keys(): p_nodes = l_g_factorization[l_g_node] if p_g_p[n] not in p_nodes: del p_g_p[n] del p_g_l_g[n] p_g_nodes_to_remove.add(n) for n in p_g_nodes_to_remove: canonical_p_g.remove_node(n) rule_hierarchy["rules"][ancestor] =\ Rule(p=canonical_p_g, lhs=l_g, p_lhs=p_g_l_g) instances[ancestor] = l_g_g l_g_ls[ancestor] = l_g_l p_g_ps[ancestor] = p_g_p l_l_ts = {} # Original rule LHS to the LHS of rule projections p_p_ts = {} # Original rule interface to the inter. 
of rule projections r_r_ts = {} # Original rule RHS to the RHS of rule projections # Compute rule projections for descendant, origin_typing in descendants.items(): # Compute canonical P_T l_t, l_l_t, l_t_t = image_factorization( rule.lhs, self.get_graph(descendant), compose(instance, origin_typing)) # Compute canonical R_T r_t, l_t_r_t, r_r_t = pushout( rule.p, l_t, rule.rhs, compose(rule.p_lhs, l_l_t), rule.p_rhs) # Modify P_T and R_T according to the controlling # relation rhs_typing if descendant in rhs_typing.keys(): r_t_factorization = { r_r_t[k]: v for k, v in rhs_typing[descendant].items() } for n in l_t.nodes(): if l_t_r_t[n] not in r_t_factorization: r_t_factorization[l_t_r_t[n]] = {l_t_t[n]} else: if not {l_t_t[n]}.issubset(r_t_factorization[l_t_r_t[n]]): r_t_factorization[l_t_r_t[n]].add(l_t_t[n]) added_t_nodes = set() for n in r_t.nodes(): if n in r_t_factorization.keys(): # If corresponding R_T node is specified in # the controlling relation add nodes of T # that type it to P t_nodes = r_t_factorization[n] for t_node in t_nodes: if t_node not in l_t_t.values() and\ t_node not in added_t_nodes: new_p_node = l_t.generate_new_node_id( t_node) l_t.add_node(new_p_node) added_t_nodes.add(t_node) l_t_r_t[new_p_node] = n l_t_t[new_p_node] = t_node else: l_t_r_t[keys_by_value(l_t_t, t_node)[0]] = n # check if some nodes need merging inverse_factorization = dict() for k, v in r_t_factorization.items(): for vv in v: if vv not in inverse_factorization: inverse_factorization[vv] = set([k]) else: inverse_factorization[vv].add(k) for k, v in inverse_factorization.items(): if len(v) > 1: merged_node = r_t.merge_nodes(v) for vv in v: l_t_nodes = keys_by_value(l_t_r_t, vv) for l_t_node in l_t_nodes: l_t_r_t[l_t_node] = merged_node r_nodes = keys_by_value(r_r_t, vv) for r_node in r_nodes: r_r_t[r_node] = merged_node rule_hierarchy["rules"][descendant] =\ Rule(lhs=l_t, p=l_t, rhs=r_t, p_rhs=l_t_r_t) instances[descendant] = l_t_t l_l_ts[descendant] = l_l_t 
p_p_ts[descendant] = {k: l_l_t[v] for k, v in rule.p_lhs.items()} r_r_ts[descendant] = r_r_t # Compute homomorphisms between rules for graph_id, graph_rule in rule_hierarchy["rules"].items(): if graph_id in ancestors: for successor in self.successors(graph_id): old_typing = self.get_typing(graph_id, successor) if successor == origin_id: graph_lhs_successor_lhs = l_g_ls[graph_id] graph_p_successor_p = p_g_ps[graph_id] rule_hierarchy["rule_homomorphisms"][ (graph_id, successor)] = ( graph_lhs_successor_lhs, graph_p_successor_p, compose( graph_p_successor_p, rule_hierarchy["rules"][successor].p_rhs) ) else: l_graph_successor = compose( instances[graph_id], old_typing) # already lifted to the successor if successor in ancestors: graph_rule = rule_hierarchy["rules"][graph_id] suc_rule = rule_hierarchy["rules"][successor] graph_lhs_successor_lhs = get_unique_map_to_pullback( suc_rule.lhs.nodes(), instances[successor], l_g_ls[successor], compose( instances[graph_id], old_typing), l_g_ls[graph_id]) graph_p_successor_p = get_unique_map_to_pullback( suc_rule.p.nodes(), suc_rule.p_lhs, p_g_ps[successor], compose( graph_rule.p_lhs, graph_lhs_successor_lhs), p_g_ps[graph_id]) rule_hierarchy["rule_homomorphisms"][ (graph_id, successor)] = ( graph_lhs_successor_lhs, graph_p_successor_p, graph_p_successor_p ) elif successor in descendants: rule_hierarchy["rule_homomorphisms"][(graph_id, successor)] = ( compose(l_g_ls[graph_id], l_l_ts[successor]), compose(p_g_ps[graph_id], p_p_ts[successor]), compose( compose(p_g_ps[graph_id], rule.p_rhs), r_r_ts[successor]) ) # didn't touch the successor or projected to it else: pass if graph_id in descendants: for predecessor in self.predecessors(graph_id): old_typing = self.get_typing(predecessor, graph_id) if predecessor == origin_id: predecessor_l_graph_l = l_l_ts[graph_id] predecessor_p_graph_p = p_p_ts[graph_id] predecessor_rhs_graph_rhs = r_r_ts[graph_id] rule_hierarchy["rule_homomorphisms"][ (predecessor, graph_id)] = ( 
predecessor_l_graph_l, predecessor_p_graph_p, predecessor_rhs_graph_rhs ) else: # already projected to the predecessor if predecessor in descendants: l_pred_graph = compose( instances[predecessor], old_typing) predecessor_l_graph_l = {} for k, v in instances[ predecessor].items(): predecessor_l_graph_l[k] = keys_by_value( instances[graph_id], l_pred_graph[k])[0] predecessor_rhs_graph_rhs = get_unique_map_from_pushout( rule_hierarchy["rules"][predecessor].rhs.nodes(), rule_hierarchy["rules"][predecessor].p_rhs, r_r_ts[predecessor], compose( predecessor_l_graph_l, rule_hierarchy["rules"][graph_id].p_rhs), r_r_ts[graph_id]) rule_hierarchy["rule_homomorphisms"][ (predecessor, graph_id)] = ( predecessor_l_graph_l, predecessor_l_graph_l, predecessor_rhs_graph_rhs ) # didn't touch the predecessor or lifter to it else: pass return rule_hierarchy, instances def refine_rule_hierarchy(self, rule_hierarchy, instances): """Refine the input rule hierarchy to its reversible version. Parameters ---------- rule_hierarchy : dict Rule hierarchy to refine instances : dict of dict Dictionary containing ids of the graphs in the hierarchy as keys and dictionaries represening instances of the corresponding rules Returns ------- new_instances : dict of dict Dictionary containing ids of the graphs in the hierarchy as keys and dictionaries represening new instances of the corresponding refined rules """ new_lhs_instances = {} new_rules = {} new_rule_homomorphisms = {} # Refine individual rules for graph, rule in rule_hierarchy["rules"].items(): # refine rule new_lhs_instance = rule.refine( self.get_graph(graph), instances[graph]) new_lhs_instances[graph] = new_lhs_instance # Update rule homomorphisms for (source, target), (lhs_h, p_h, rhs_h) in rule_hierarchy[ "rule_homomorphisms"].items(): typing = self.get_typing(source, target) source_rule = rule_hierarchy["rules"][source] target_rule = rule_hierarchy["rules"][target] for node in source_rule.lhs.nodes(): if node not in lhs_h.keys(): 
source_node = new_lhs_instances[source][node] target_node = typing[source_node] target_lhs_nodes = keys_by_value( new_lhs_instances[target], target_node) if len(target_lhs_nodes) == 1: target_lhs_node = target_lhs_nodes[0] else: target_rule._add_node_lhs(target_node) target_lhs_node = target_node new_lhs_instances[target][target_lhs_node] = target_node lhs_h[node] = target_lhs_node if node in source_rule.p_lhs.values(): source_p_node = keys_by_value( source_rule.p_lhs, node)[0] target_p_node = keys_by_value( target_rule.p_lhs, target_lhs_node)[0] p_h[source_p_node] = target_p_node source_rhs_node = source_rule.p_rhs[source_p_node] target_rhs_node = target_rule.p_rhs[target_p_node] rhs_h[source_rhs_node] = target_rhs_node # Add necessary node attrs for node in source_rule.lhs.nodes(): t_node = lhs_h[node] target_rule._add_node_attrs_lhs(t_node, source_rule.lhs.get_node(node)) # Add necessary edges for s_lhs_s, s_lhs_t in source_rule.lhs.edges(): t_lhs_s = lhs_h[s_lhs_s] t_lhs_t = lhs_h[s_lhs_t] if not target_rule.lhs.exists_edge(t_lhs_s, t_lhs_t): target_rule._add_edge_lhs( t_lhs_s, t_lhs_t, source_rule.lhs.get_edge(s_lhs_s, s_lhs_t)) else: target_rule._add_edge_attrs_lhs( t_lhs_s, t_lhs_t, source_rule.lhs.get_edge(s_lhs_s, s_lhs_t)) # If merge is performed, add all the instances of the merged nodes for (source, target), (lhs_h, p_h, rhs_h) in rule_hierarchy[ "rule_homomorphisms"].items(): source_rule = rule_hierarchy["rules"][source] target_rule = rule_hierarchy["rules"][target] typing = self.get_typing(source, target) if len(target_rule.merged_nodes()) > 0: for rhs_node, p_nodes in target_rule.merged_nodes().items(): # rhs_node_instances = keys_by_value(typing, rhs_node) for p_node in p_nodes: lhs_node_instance = new_lhs_instances[target][ target_rule.p_lhs[p_node]] source_nodes = [ n for n in keys_by_value(typing, lhs_node_instance) ] source_nodes_to_add = [ s for s in source_nodes if s not in new_lhs_instances[source].values() ] for n in source_nodes_to_add: 
lhs_s_node = source_rule.lhs.generate_new_node_id(n) p_s_node, rhs_s_node = source_rule._add_node_lhs( lhs_s_node) # Add the instances of the new lhs node new_lhs_instances[source][lhs_s_node] = n # Add maps to the target for the new lhs node lhs_h[lhs_s_node] = target_rule.p_lhs[p_node] p_h[p_s_node] = p_node rhs_h[rhs_s_node] = rhs_node # Add identity rules where needed if len(rule_hierarchy["rules"]) == 0: # if the are no rules in the rule hierarchy, # create the identity rule for every graph for graph in self.graphs(): rule_hierarchy["rules"][graph] = Rule.identity_rule() new_lhs_instances[graph] = dict() for (s, t) in self.typings(): rule_hierarchy["rule_homomorphisms"][(s, t)] = ( dict(), dict(), dict()) else: # add identity rules where needed # to preserve the info on p/rhs_typing # add ancestors that are not included in rule hierarchy for graph, rule in rule_hierarchy["rules"].items(): for ancestor, typing in self.get_ancestors(graph).items(): if ancestor not in rule_hierarchy["rules"] and\ ancestor not in new_rules: if len(rule.merged_nodes()) > 0: # We create a non-empty rule that preserves # the info on typing l_pred, l_pred_pred, l_pred_l_graph = pullback( self.get_graph(ancestor), rule.lhs, self.get_graph(graph), typing, new_lhs_instances[graph]) new_rules[ancestor] = Rule(p=l_pred, lhs=l_pred) new_lhs_instances[ancestor] = l_pred_pred r_pred_r_graph = { v: rule.p_rhs[k] for k, v in l_pred_l_graph.items() } else: # We create an empty rule if no merges are performed new_rules[ancestor] = Rule.identity_rule() new_lhs_instances[ancestor] = dict() l_pred_l_graph = dict() r_pred_r_graph = dict() for successor in self.successors(ancestor): if successor in rule_hierarchy["rules"]: if successor == graph: new_rule_homomorphisms[ (ancestor, graph)] = ( l_pred_l_graph, l_pred_l_graph, r_pred_r_graph ) else: path = self.shortest_path(graph, successor) lhs_h, p_h, rhs_h = rule_hierarchy[ "rule_homomorphisms"][ (path[0], path[1])] for i in range(2, len(path)): 
new_lhs_h, new_p_h, new_rhs_h =\ rule_hierarchy[ "rule_homomorphisms"][ (path[i - 1], path[i])] lhs_h = compose(lhs_h, new_lhs_h) p_h = compose(p_h, new_p_h) rhs_h = compose(rhs_h, new_rhs_h) new_rule_homomorphisms[ (ancestor, successor)] = ( compose(l_pred_l_graph, lhs_h), compose(l_pred_l_graph, p_h), compose(r_pred_r_graph, rhs_h) ) if successor in new_rules: lhs_h = { k: keys_by_value( new_lhs_instances[successor], self.get_typing( ancestor, successor)[v])[0] for k, v in new_lhs_instances[ ancestor].items() } new_rule_homomorphisms[ (ancestor, successor)] = ( lhs_h, lhs_h, lhs_h ) for predecessor in self.predecessors(ancestor): if predecessor in rule_hierarchy["rules"] or\ predecessor in new_rules: lhs_h = { k: keys_by_value( new_lhs_instances[ancestor], self.get_typing( predecessor, ancestor)[v])[0] for k, v in new_lhs_instances[ predecessor].items() } new_rule_homomorphisms[ (predecessor, ancestor)] = ( lhs_h, lhs_h, lhs_h ) for descendant, typing in self.get_descendants(graph).items(): if descendant not in rule_hierarchy["rules"] and\ descendant not in new_rules: l_suc, l_graph_l_suc, l_suc_suc = image_factorization( rule.lhs, self.get_graph(descendant), compose( new_lhs_instances[graph], typing)) new_rules[descendant] = Rule(p=l_suc, lhs=l_suc) new_lhs_instances[descendant] = l_suc_suc p_graph_p_suc = { k: l_graph_l_suc[v] for k, v in rule.p_lhs.items() } for predecessor in self.predecessors(descendant): if predecessor in rule_hierarchy["rules"]: if predecessor == graph: new_rule_homomorphisms[ (predecessor, descendant)] = ( l_graph_l_suc, p_graph_p_suc, p_graph_p_suc ) else: path = self.shortest_path( predecessor, graph) lhs_h, p_h, rhs_h = rule_hierarchy[ "rule_homomorphisms"][ (path[0], path[1])] for i in range(2, len(path)): new_lhs_h, new_p_h, new_rhs_h =\ rule_hierarchy[ "rule_homomorphisms"][ (path[i - 1], path[i])] lhs_h = compose(lhs_h, new_lhs_h) p_h = compose(p_h, new_p_h) rhs_h = compose(rhs_h, new_rhs_h) new_rule_homomorphisms[ (predecessor, 
descendant)] = ( compose(lhs_h, l_graph_l_suc), compose(p_h, p_graph_p_suc), compose(rhs_h, p_graph_p_suc) ) if predecessor in new_rules: lhs_h = { k: keys_by_value( new_lhs_instances[descendant], self.get_typing( predecessor, descendant)[v])[0] for k, v in new_lhs_instances[ predecessor].items() } new_rule_homomorphisms[ (predecessor, descendant)] = ( lhs_h, lhs_h, lhs_h ) for successor in self.successors(descendant): if successor in rule_hierarchy["rules"] or\ successor in new_rules: lhs_h = { k: keys_by_value( new_lhs_instances[successor], self.get_typing( descendant, successor)[v])[0] for k, v in new_lhs_instances[ descendant].items() } new_rule_homomorphisms[ (descendant, successor)] = ( lhs_h, lhs_h, lhs_h ) rule_hierarchy["rules"].update(new_rules) rule_hierarchy["rule_homomorphisms"].update( new_rule_homomorphisms) return new_lhs_instances def unique_graph_id(self, prefix): """Generate a new graph id starting with a prefix.""" if prefix not in self.graphs(): return prefix i = 0 while "{}_{}".format(prefix, i) in self.graphs(): i += 1 return "{}_{}".format(prefix, i) def duplicate_subgraph(self, graph_dict, attach_graphs=[]): """Duplicate a subgraph induced by the set of nodes. 
Parameters ---------- graph_dict : dict Dictionary contaning names of graphs to duplicate as keys and their new IDs in the hierarchy as values attach_graphs : list, optional List of not duplicated graph IDs that should be reattached to the duplicated graphs, if empty, duplicated subgraph is disconnected from the rest of the hierarchy """ old_graphs = self.graphs() for original, new in graph_dict.items(): if new in old_graphs: raise HierarchyError( "Cannot duplicate the graph '{}' as '{}': ".format( original, new) + "the graph '{}' ".format(new) + "already exists in the hierarchy!") # copy graphs for original, new in graph_dict.items(): self.copy_graph(original, new, attach_graphs) # copy typing between duplicated graphs visited = set() for g in graph_dict.keys(): preds = [ p for p in self.predecessors(g) if p in graph_dict.keys() and (p, g) not in visited] sucs = [ p for p in self.successors(g) if p in graph_dict.keys() and (g, p) not in visited] for s in sucs: self.add_typing( graph_dict[g], graph_dict[s], self.get_typing(g, s)) visited.add((g, s)) for p in preds: self.add_typing( graph_dict[p], graph_dict[g], self.get_typing(p, g)) visited.add((p, g)) def relation_to_span(self, left, right, edges=False, attrs=False): """Convert relation to a span. This method computes the span of the form `left` <- `common` -> `right` from a binary symmetric relation between two graphs in the hierarchy. 
Parameters ---------- left Id of the hierarchy's node represening the `left` graph right Id of the hierarchy's node represening the `right` graph edges : bool, optional If True, maximal set of edges is added to the common part graph attrs : bool, optional If True, maximal dict of attrs is added to the nodes of the common part graph Returns ------- common : nx.(Di)Graph Graph representing the common part graph induced by the relation left_h : dict Homomorphism from the common part graph to the left graph of the relation right_h : dict Homomorphism from the common part graph to the right graph of the relation Raises ------ HierarchyError If nodes corresponding to either `left` or `right` ids do not exist in the hierarchy, or there is no relation between them. """ if left not in self.nodes(): raise HierarchyError( "Node '{}' is not defined in the hierarchy!".format(left)) if right not in self.nodes(): raise HierarchyError( "Node '{}' is not defined in the hierarchy!".format(right)) if (left, right) not in self.relations() and\ (right, left) not in self.relations(): raise HierarchyError( "Relation between graphs '{}' and '{}' is not defined".format( left, right) ) common, left_h, right_h = relation_to_span( self.get_graph(left), self.get_graph(right), self.get_relation(left, right), edges, attrs) return common, left_h, right_h def _get_graph_pattern_typing(self, graph_id, pattern, pattern_typing, advanced=False): # Check that 'typing_graph' and 'pattern_typing' are correctly # specified descendants = self.get_descendants(graph_id) if pattern_typing is not None: for typing_graph, _ in pattern_typing.items(): if typing_graph not in descendants.keys(): raise HierarchyError( "Pattern typing graph '{}' is not in " "the (transitive) typing graphs of '{}'!".format( typing_graph, graph_id) ) # Check pattern typing is a valid homomorphism if not advanced: for typing_graph, mapping in pattern_typing.items(): try: check_homomorphism( pattern, self.get_graph(typing_graph), mapping 
) except InvalidHomomorphism as e: raise ReGraphError( "Specified pattern is not valid in the " "hierarchy (it produces the following error: " "{}) ".format(e) ) graph_typing = { typing_graph: self.get_typing(graph_id, typing_graph) for typing_graph in pattern_typing.keys() } return graph_typing def find_matching(self, graph_id, pattern, pattern_typing=None, nodes=None): """Find an instance of a pattern in a specified graph. Parameters ---------- graph_id : hashable Id of a graph in the hierarchy to search for matches pattern : Graph object A pattern to match pattern_typing : dict A dictionary that specifies a typing of a pattern, keys of the dictionary -- graph id that types a pattern, this graph should be among parents of the `graph_id` graph; values are mappings of nodes from pattern to the typing graph; nodes : iterable Subset of nodes where matching should be performed Returns ------- instances : list of dict List of matched instances """ if pattern_typing is None: pattern_typing = dict() graph_typing = self._get_graph_pattern_typing( graph_id, pattern, pattern_typing) instances = self.get_graph(graph_id).find_matching( pattern, nodes, graph_typing, pattern_typing) return instances def advanced_find_matching(self, graph_id, pattern_dict, pattern_typing=None, nodes=None): """Find matching of a pattern in a graph in an advanced way.""" if pattern_typing is None: pattern_typing = dict() graph_typing = self._get_graph_pattern_typing( graph_id, pattern_dict, pattern_typing, True) instances = self.get_graph(graph_id).advanced_find_matching( pattern_dict, nodes, graph_typing, pattern_typing) return instances def rewrite(self, graph_id, rule, instance, p_typing=None, rhs_typing=None, strict=False): """Rewrite and propagate the changes backward & forward. Rewriting in the hierarchy cosists of an application of the SqPO-rewriting rule (given by the 'rule' parameter) to a graph in the hierarchy. 
Such rewriting often triggers a set of changes that are applied to other graphs and homomorphisms in the hierarchy, which are necessary to ensure that the hierarchy stays consistent. If the rule is restrictive (deletes nodes/edges/attrs or clones nodes), in general, the respective changes to all the graphs (transitively) typed by the graph subject to rewriting are made. On the other hand, if the rule is relaxing (adds nodes/edges/attrs or merges nodes), in general, the respective changes to all the graphs that (tansitively) type the graph subject to rewriting are made. Parameters ---------- graph_id Id of the graph in the hierarchy to rewrite rule : regraph.rule.Rule Rule object to apply instance : dict, optional Dictionary containing an instance of the lhs of the rule in the graph subject to rewriting, by default, tries to construct identity morphism of the nodes of the pattern p_typing : dict, optional Dictionary containing typing of graphs in the hierarchy by the interface of the rule, keys are ids of hierarchy graphs, values are dictionaries containing the mapping of nodes from the hierarchy graphs to the inteface nodes (note that a node from a graph can be typed by a set of nodes in the interface of the rule, e.g. if we want to perform cloning of some types, etc). rhs_typing : dict, optional Dictionary containing typing of the rhs by graphs of the hierarchy, keys are ids of hierarchy graphs, values are dictionaries containing the mapping of nodes from the rhs to the nodes of the typing graph given by the respective key of the value (note that a node from the rhs can be typed by a set of nodes of some graph, e.g. if we want to perform merging of some types, etc). 
strict : bool, optional Rewriting is strict when propagation down is not allowed Raises ------ HierarchyError If the graph is not in the database RewritingError If the provided p and rhs typing are inconsistent """ # Type check the input rule, its instance and typing instance, p_typing, rhs_typing = self._check_rule_instance_typing( graph_id, rule, instance, p_typing, rhs_typing, strict) # Perform a restrictive rewrite p_g_m, g_m_g = self._restrictive_rewrite(graph_id, rule, instance) # Propagate backward and fix broken homomorphisms self._propagate_backward( graph_id, rule, instance, p_g_m, g_m_g, p_typing) # Propagate forward and fix broken homomorphisms rhs_g_prime = self._expansive_rewrite_and_propagate_forward( graph_id, rule, instance, p_g_m, rhs_typing) return rhs_g_prime def apply_rule_hierarchy(self, rule_hierarchy, instances): """Apply rule hierarchy. Parameters ---------- rule_hierarchy : dict Dictionary containing the input rule hierarchy instances : dict Dictionary containing an instance for every rule in the hierarchy Returns ------- rhs_instances : dict Dictionary containing the RHS instances for every graph in the hierarchy """ # Check if the rule hierarchy is applicable self._check_applicability(rule_hierarchy, instances) updated_graphs = {} # Apply rules to the hierarchy for graph_id, rule in rule_hierarchy["rules"].items(): instance = instances[graph_id] graph_obj = self.get_graph(graph_id) if rule.is_restrictive(): p_g_m, g_m_g = self._restrictive_rewrite( graph_id, rule, instance) else: p_g_m = { k: instance[v] for k, v in rule.p_lhs.items() } g_m_g = { n: n for n in graph_obj.nodes() } updated_graphs[graph_id] = { "p_g_m": p_g_m, "g_m_g": g_m_g } # Restore homomorphisms after restrictive rewrite # and backward propagation for (source, target), (_, p_h, _) in rule_hierarchy[ "rule_homomorphisms"].items(): old_typing = self.get_typing(source, target) source_m_target_m = get_unique_map_to_pullback_complement( updated_graphs[target]["p_g_m"], 
updated_graphs[target]["g_m_g"], p_h, updated_graphs[source]["p_g_m"], compose( updated_graphs[source]["g_m_g"], old_typing) ) self._update_mapping(source, target, source_m_target_m) for graph_id, rule in rule_hierarchy["rules"].items(): graph_obj = self.get_graph(graph_id) if rule.is_relaxing(): r_g_prime, g_m_g_prime = self._expansive_rewrite( graph_id, rule, updated_graphs[graph_id]["p_g_m"]) else: g_m_g_prime = { n: n for n in graph_obj.nodes() } r_g_prime = { v: updated_graphs[graph_id]["p_g_m"][k] for k, v in rule.p_rhs.items() } updated_graphs[graph_id] = { "g_m_g_prime": g_m_g_prime, "r_g_prime": r_g_prime } # Restore homomorphisms after expansive rewrite # and forward propagation for (source, target), (_, _, rhs_h) in rule_hierarchy[ "rule_homomorphisms"].items(): old_typing = self.get_typing(source, target) source_p_target_p = get_unique_map_from_pushout( self.get_graph(source).nodes(), updated_graphs[source]["g_m_g_prime"], updated_graphs[source]["r_g_prime"], compose( old_typing, updated_graphs[target]["g_m_g_prime"]), compose( rhs_h, updated_graphs[target]["r_g_prime"]) ) self._update_mapping(source, target, source_p_target_p) return {k: v["r_g_prime"] for k, v in updated_graphs.items()} def _check_rule_instance_typing(self, origin_id, rule, instance, p_typing, rhs_typing, strict): # Normalize the instance, p_typing and rhs_typing if instance is None: instance = { n: n for n in rule.lhs.nodes() } if p_typing is None: p_typing = dict() else: p_typing = normalize_typing_relation(p_typing) if rhs_typing is None: rhs_typing = dict() else: rhs_typing = normalize_typing_relation(rhs_typing) # Check that the instance is valid try: check_homomorphism( rule.lhs, self.get_graph(origin_id), instance, total=True ) except InvalidHomomorphism as e: raise RewritingError( "Homomorphism from the pattern to the instance subgraph " "is not valid, got: '{}'".format(e)) # Check that the instance is a mono if not is_monic(instance): raise RewritingError( "Homomorphism from 
the pattern to the instance subgraph " "is not injective") # Check p_typing does not retype nodes for graph_id, typing in p_typing.items(): graph_to_origin = self.get_typing(graph_id, origin_id) for k, v in typing.items(): for vv in v: if graph_to_origin[k] != instance[rule.p_lhs[vv]]: raise RewritingError( "The specified typing of '{}' ".format(graph_id) + "by the interface is not valid: " "node '{}' is typed by '{}' ".format( k, graph_to_origin[k]) + "in the origin of rewriting, while the interface " "node '{}' is typed by '{}'.".format( vv, instance[rule.p_lhs[vv]])) # Check composability of p_typing for graph_id, typing in p_typing.items(): predecessors = self.predecessors(graph_id) for pred in predecessors: if pred not in p_typing: # check that the typing of 'graph_id' is canonical canonical = False for graph_n, p_nodes in typing.items(): if len(p_nodes) > 0: lhs_n = rule.p_lhs[list(p_nodes)[0]] canonical_clones = set(keys_by_value(rule.p_lhs, lhs_n)) if p_nodes == canonical_clones: canonical = False if not canonical: raise RewritingError( "Typing of '{}' by the interface ".format( graph_id) + "is not composable with the " "typig of '{}': ".format(pred) + "propagation to '{}' ".format(pred) + "is canonical and produces instances for {}, ".format( canonical_clones) + "while propagation to '{}' ".format( graph_id) + "produces only for '{}' ".format( p_nodes) ) successors = self.successors(graph_id) for suc in successors: suc_typing = self.get_typing(graph_id, suc) # check p_typing for suc is composable for graph_n, p_nodes in typing.items(): suc_n = suc_typing[graph_n] if suc in p_typing and suc_n in p_typing[suc]: suc_p_nodes = p_typing[suc][suc_n] if not p_nodes.issubset(suc_p_nodes): raise RewritingError( "Typing of '{}' by the interface ".format( graph_id) + "is not composable with the " "typig of '{}': ".format(suc) + "propagation to the node " "'{}' of '{}' ".format(graph_n, graph_id) + "will produce instances for {} ".format( p_nodes) + "while propagation 
to '{}' ".format( suc_n) + "typing it produces only {} ".format( suc_p_nodes) ) else: # ok, because suc_n is canonically cloned pass # Autocomplete and check rhs_typing new_rhs_typing = {} for graph_id, typing in rhs_typing.items(): for descendant, descendant_typing in self.get_descendants( graph_id).items(): if descendant not in rhs_typing: # autocomplete descendant typing in the new rhs typing new_rhs_typing[descendant] = { rhs_n: { descendant_typing[graph_n] for graph_n in graph_ns } for rhs_n, graph_ns in typing.items() } else: # autocomplete descendant typing with missing rhs nodes # and check that already specified types for descendant # are composable with the typing for 'graph_id' descendant_rhs_typing = rhs_typing[descendant] for rhs_n, graph_ns in typing.items(): if rhs_n in descendant_rhs_typing: descendant_ns = descendant_rhs_typing[rhs_n] for graph_n in graph_ns: if descendant_typing[graph_n] not in descendant_ns: raise RewritingError( "Typing of the RHS " "by '{}' is not composable ".format( graph_id) + "with its typing by '{}': ".format( descendant) + "node '{}' is typed by '{}' ".format( rhs_n, graph_n) + "in '{}' that is not typed by ".format( graph_id) + "either of {} from '{}'".format( descendant_ns, descendant) ) else: new_rhs_typing[descendant] = rhs_typing[descendant] new_rhs_typing[descendant][rhs_n] = { descendant_typing[graph_n] for graph_n in graph_ns } for g, t in new_rhs_typing.items(): rhs_typing[g] = t # Check rhs_typing does not retype nodes for graph_id, typing in rhs_typing.items(): origin_to_graph = self.get_typing(origin_id, graph_id) for k, v in typing.items(): p_nodes = keys_by_value(rule.p_rhs, k) if len(p_nodes) > 0: graph_nodes = set([ origin_to_graph[instance[rule.p_lhs[p_node]]] for p_node in p_nodes]) if graph_nodes != v: raise RewritingError( "The specified typing of the RHS " "by the graph '{}' ".format(graph_id) + "is not valid: " "node '{}' is a typed by {} ".format( k, graph_nodes) + "in the origin of rewriting, while 
it is " "typed by {} in the typing.".format(v)) # If rewriting is strict, check p_typing types all clones, # there are no instances of removed elements and # and rhs typing types all new nodes and no different # types are merged if strict is True: test_strictness(self, origin_id, rule, instance, p_typing, rhs_typing) return instance, p_typing, rhs_typing def _restrictive_rewrite(self, graph_id, rule, instance): """Perform a restrictive rewrite of the specified graph. This method rewrites the graph and updates its typing by the immediate successors. Note that as the result of this update, some homomorphisms (from ancestors) are broken! """ # Extract the restrictive part of the rule restrictive_rule = Rule(p=rule.p, lhs=rule.lhs, p_lhs=rule.p_lhs) g = self.get_graph(graph_id) p_g_m = g.rewrite( restrictive_rule, instance) g_m_g = { v: instance[restrictive_rule.p_lhs[k]] for k, v in p_g_m.items() } g_m_g.update( { n: n for n in g.nodes() if n not in p_g_m.values() }) self._restrictive_update_incident_homs(graph_id, g_m_g) self._restrictive_update_incident_rels(graph_id, g_m_g) return p_g_m, g_m_g def _expansive_rewrite(self, graph_id, rule, instance): """Perform an expansive rewrite of the specified graph. This method rewrites the graph and updates its typing by the immediate predecessors. Note that as the result of this update, some homomorphisms (to descendants) are broken! 
""" # Extract the expansive part of the rule expansive_rule = Rule(p=rule.p, rhs=rule.rhs, p_rhs=rule.p_rhs) g = self.get_graph(graph_id) pred_typings = { p: self.get_typing(p, graph_id) for p in self.predecessors(graph_id) } adj_relations = { a: self.get_relation(graph_id, a) for a in self.adjacent_relations(graph_id) } r_g_prime = g.rewrite( expansive_rule, instance) g_m_g_prime = { v: r_g_prime[expansive_rule.p_rhs[k]] for k, v in instance.items() } g_m_g_prime.update( { n: n for n in g.nodes() if n not in instance.values() }) self._expansive_update_incident_homs(graph_id, g_m_g_prime, pred_typings) self._expansive_update_incident_rels(graph_id, g_m_g_prime, adj_relations) return r_g_prime, g_m_g_prime def _compose_backward(self, pred_id, graph_id, g_m_g, graph_m_origin_m, pred_m_origin_m, pred_typing): graph_nodes = self.get_graph(graph_id).nodes() return get_unique_map_to_pullback( graph_nodes, g_m_g, graph_m_origin_m, pred_typing, pred_m_origin_m) def _propagate_backward(self, origin_id, rule, instance, p_origin_m, origin_m_origin, p_typing): """Peform backward propagation of the original rewriting. 
Parameters ---------- Returns ------- """ g_m_gs = {origin_id: origin_m_origin} g_m_origin_ms = {} for graph_id in self.bfs_tree(origin_id, reverse=True): graph_p_typing = {} if graph_id in p_typing.keys(): graph_p_typing = p_typing[graph_id] origin_typing = self.get_typing(graph_id, origin_id) g_m_g = self._get_identity_map(graph_id) g_m_origin_m = copy.deepcopy(origin_typing) # Propagate node clones if len(rule.cloned_nodes()) > 0: self._propagate_clone( origin_id, graph_id, p_origin_m, origin_m_origin, graph_p_typing, g_m_g, g_m_origin_m) # Propagate node deletes if len(rule.removed_nodes()) > 0: self._propagate_node_removal( origin_id, graph_id, rule, instance, g_m_g, g_m_origin_m) # Propagate node attrs deletes if len(rule.removed_node_attrs()) > 0: self._propagate_node_attrs_removal( origin_id, graph_id, rule, p_origin_m, g_m_origin_m) # Propagate edge deletes if len(rule.removed_edges()) > 0: self._propagate_edge_removal( origin_id, graph_id, g_m_origin_m) # Propagate edge attrs deletes if len(rule.removed_edge_attrs()) > 0: self._propagate_edge_attrs_removal( origin_id, graph_id, rule, p_origin_m, g_m_origin_m) g_m_gs[graph_id] = g_m_g g_m_origin_ms[graph_id] = g_m_origin_m # Reconnect broken homomorphisms by composability for graph_id, g_m_g in g_m_gs.items(): for pred in self.predecessors(graph_id): pred_typing = self.get_typing(pred, graph_id) if graph_id != origin_id: pred_graph = self._compose_backward( pred, graph_id, g_m_g, g_m_origin_ms[graph_id], g_m_origin_ms[pred], pred_typing) else: pred_graph = g_m_origin_ms[pred] self._update_mapping(pred, graph_id, pred_graph) def _expansive_rewrite_and_propagate_forward(self, origin_id, rule, instance, p_origin_m, rhs_typing): bfs_tree = self.bfs_tree(origin_id) bfs_tree.reverse() g_g_primes = {} rhs_g_primes = {} for graph in bfs_tree: origin_typing = self.get_typing(origin_id, graph) rhs_graph_typing = {} if graph in rhs_typing.keys(): rhs_graph_typing = rhs_typing[graph] g_g_prime = { n: n for n in 
self.get_graph(graph).nodes() } rhs_g_prime = compose(p_origin_m, origin_typing) pred_typings = { p: self.get_typing(p, graph) for p in self.predecessors(graph) } adj_relations = { a: self.get_relation(graph, a) for a in self.adjacent_relations(graph) } # Propagate node merges if len(rule.merged_nodes()) > 0: self._propagate_merge( origin_id, graph, rule, p_origin_m, g_g_prime, rhs_g_prime) # Propagate node additions if len(rule.added_nodes()) > 0: self._propagate_node_addition( origin_id, graph, rule, rhs_graph_typing, g_g_prime, rhs_g_prime) self._expansive_update_incident_homs( graph, g_g_prime, pred_typings) self._expansive_update_incident_rels( graph, g_g_prime, adj_relations) keys_to_remove = set() keys_to_add = dict() for k, v in rhs_g_prime.items(): if k not in rule.rhs.nodes(): if k in rule.p.nodes(): keys_to_add[rule.p_rhs[k]] = v keys_to_remove.add(k) for k in keys_to_remove: del rhs_g_prime[k] for k, v in keys_to_add.items(): rhs_g_prime[k] = v # Propagate node attrs additions if len(rule.added_node_attrs()) > 0: self._propagate_node_attrs_addition( origin_id, graph, rule, rhs_g_prime) # Propagate edge additions if len(rule.added_edges()) > 0: self._propagate_edge_addition( origin_id, graph, rule, rhs_g_prime) # Propagate edge attrs additions if len(rule.added_edge_attrs()) > 0: self._propagate_edge_attrs_addition( origin_id, graph, rule, rhs_g_prime) g_g_primes[graph] = g_g_prime rhs_g_primes[graph] = rhs_g_prime # Perform an expansive rewrite rhs_origin_prime, origin_m_origin_prime = self._expansive_rewrite( origin_id, rule, p_origin_m) rhs_g_primes[origin_id] = rhs_origin_prime g_g_primes[origin_id] = origin_m_origin_prime # Reconnect broken homomorphisms by composability for graph_id, g_g_prime in g_g_primes.items(): graph_nodes = self.get_graph(graph_id).nodes() for suc in self.successors(graph_id): suc_typing = self.get_typing(graph_id, suc) for rhs_node in rule.rhs.nodes(): g_nodes = keys_by_value( g_g_prime, rhs_g_primes[graph_id][rhs_node]) 
suc_node = rhs_g_primes[suc][rhs_node] for n in g_nodes: suc_typing[n] = suc_node graph_suc = get_unique_map_from_pushout( graph_nodes, rhs_g_primes[graph_id], g_g_prime, rhs_g_primes[suc], suc_typing) self._update_mapping(graph_id, suc, graph_suc) return rhs_origin_prime @staticmethod def _produce_clones(origin_clones, p_origin_m, g, origin_typing, graph_p, local_g_m_g, local_g_m_origin_m): cloned_nodes = {} for n, clones in origin_clones.items(): nodes_to_clone = keys_by_value(origin_typing, n) for node in nodes_to_clone: if node in graph_p.keys(): p_nodes = graph_p[node] else: p_nodes = [ keys_by_value(p_origin_m, c)[0] for c in clones ] for i, p_node in enumerate(p_nodes): if i == 0: cloned_nodes[node] = p_node local_g_m_g[node] = node local_g_m_origin_m[node] = p_origin_m[p_node] else: new_name = g.clone_node(node) cloned_nodes[new_name] = p_node local_g_m_g[new_name] = node local_g_m_origin_m[new_name] = p_origin_m[p_node] return cloned_nodes def _propagate_clone(self, origin_id, node_id, p_origin_m, origin_m_origin, p_typing, g_m_g, g_m_origin_m): """Propagate clones from 'origin_id' to 'graph_id'. 
Perform a controlled propagation of clones to 'graph' Parameters ---------- origin_id : hashable ID of the graph corresponding to the origin of rewriting graph_id : hashable ID of the graph where propagation is performed p_origin_m : dict Instance of rule's interface inside the updated origin origin_m_origin : dict Map from the updated origin to the initial origin p_typing : dict Controlling relation from the nodes of 'graph_id' to the nodes of the interfaces Returns ------- g_m_g : dict Map from the updated 'graph_id' to the 'graph_id' """ cloned_origin_nodes = {} for n in set(origin_m_origin.values()): clones = keys_by_value(origin_m_origin, n) if len(clones) > 1: cloned_origin_nodes[n] = clones graph = self.get_graph(node_id) origin_typing = self.get_typing(node_id, origin_id) cloned_origin_nodes = {} for n in set(origin_m_origin.values()): clones = keys_by_value(origin_m_origin, n) if len(clones) > 1: cloned_origin_nodes[n] = clones self._produce_clones( cloned_origin_nodes, p_origin_m, graph, origin_typing, p_typing, g_m_g, g_m_origin_m) self._restrictive_update_incident_homs(node_id, g_m_g) self._restrictive_update_incident_rels(node_id, g_m_g) return g_m_g, g_m_origin_m def _propagate_node_removal(self, origin_id, node_id, rule, instance, g_m_g, g_m_origin_m): """Propagate node removal from 'origin_id' to 'graph_id'. 
Parameters ---------- origin_id : hashable ID of the graph corresponding to the origin of rewriting graph_id : hashable ID of the graph where propagation is performed origin_m_origin : dict Map from the updated origin to the initial origin Returns ------- g_m_g : dict Map from the updated 'graph_id' to the 'graph_id' """ graph = self.get_graph(node_id) origin_typing = self.get_typing(node_id, origin_id) for lhs_node in rule.removed_nodes(): origin_n = instance[lhs_node] graph_nodes = keys_by_value(origin_typing, origin_n) for node in graph_nodes: graph.remove_node(node) del g_m_g[node] del g_m_origin_m[node] for node in graph.nodes(): if node not in origin_typing.keys(): graph.remove_node(node) del g_m_g[node] del g_m_origin_m[node] self._restrictive_update_incident_homs(node_id, g_m_g) self._restrictive_update_incident_rels(node_id, g_m_g) return g_m_g, g_m_origin_m def _propagate_node_attrs_removal(self, origin_id, node_id, rule, p_origin_m, g_m_origin_m): """Propagate node attrs removal from 'origin_id' to 'graph_id'. Parameters ---------- origin_id : hashable ID of the graph corresponding to the origin of rewriting graph_id : hashable ID of the graph where propagation is performed rule : regraph.Rule Original rewriting rule instance : dict Original instance """ graph = self.get_graph(node_id) # origin_typing = self.get_typing(node_id, origin_id) for p_node, attrs in rule.removed_node_attrs().items(): nodes_to_remove_attrs = keys_by_value( g_m_origin_m, p_origin_m[p_node]) for node in nodes_to_remove_attrs: graph.remove_node_attrs(node, attrs) def _propagate_edge_removal(self, origin_id, node_id, g_m_origin_m): """Propagate edge removal from 'origin_id' to 'graph_id'. 
        Parameters
        ----------
        origin_id : hashable
            ID of the graph corresponding to the origin of rewriting
        node_id : hashable
            ID of the node where propagation is performed
        p_origin_m : dict
            Instance of rule's interface inside the updated origin
        origin_m_origin : dict
            Map from the updated origin to the initial origin
        """
        graph = self.get_graph(node_id)
        origin_graph = self.get_graph(origin_id)

        # Collect first, delete second: removing edges while iterating
        # graph.edges() would mutate the container being traversed.
        edges_to_remove = set()
        for s, t in graph.edges():
            origin_s = g_m_origin_m[s]
            origin_t = g_m_origin_m[t]
            # An edge survives only if its image still exists in the
            # (already rewritten) origin graph.
            if (origin_s, origin_t) not in origin_graph.edges():
                edges_to_remove.add((s, t))
        for s, t in edges_to_remove:
            graph.remove_edge(s, t)

    def _propagate_edge_attrs_removal(self, origin_id, node_id, rule,
                                      p_origin_m, g_m_origin_m):
        """Propagate edge attrs removal from 'origin_id' to 'graph_id'.

        Parameters
        ----------
        origin_id : hashable
            ID of the graph corresponding to the origin of rewriting
        graph_id : hashable
            ID of the graph where propagation is performed
        rule : regraph.Rule
            Original rewriting rule
        p_origin_m : dict
            Instance of rule's interface inside the updated origin
        """
        graph = self.get_graph(node_id)

        # For each edge whose attrs were removed by the rule, strip the
        # same attrs from every preimage edge present in this graph.
        for (p_u, p_v), attrs in rule.removed_edge_attrs().items():
            us = keys_by_value(g_m_origin_m, p_origin_m[p_u])
            vs = keys_by_value(g_m_origin_m, p_origin_m[p_v])
            for u in us:
                for v in vs:
                    # Not every (u, v) pair is an actual edge here.
                    if (u, v) in graph.edges():
                        graph.remove_edge_attrs(u, v, attrs)

    def _propagate_merge(self, origin_id, graph_id, rule, p_origin_m,
                         g_g_prime, rhs_g_prime):
        """Propagate merges from 'origin_id' to 'graph_id'.
Perform a propagation of merges to 'graph' Parameters ---------- origin_id : hashable ID of the graph corresponding to the origin of rewriting graph_id : hashable ID of the graph where propagation is performed rule : regraph.Rule Original rewriting rule p_origin_m : dict Instance of rule's interface inside the updated origin rhs_origin_prime : dict Instance of rule's rhs inside the updated origin g_g_prime : dict Map from the nodes of the graph 'graph_id' to the updated graph rhs_g_prime : dict Map from the RHS to the updated graph with 'graph_id' """ graph = self.get_graph(graph_id) for rhs_node, p_nodes in rule.merged_nodes().items(): graph_nodes = set([ rhs_g_prime[p_node] for p_node in p_nodes ]) if len(graph_nodes) > 1: merged_node_id = graph.merge_nodes(graph_nodes) for n in graph_nodes: g_g_prime[n] = merged_node_id for p_node in p_nodes: if p_node in rhs_g_prime: del rhs_g_prime[p_node] rhs_g_prime[rhs_node] = merged_node_id map_to_merge_node = set() for rhs_n, g_prime_n in rhs_g_prime.items(): if g_prime_n in graph_nodes: map_to_merge_node.add(rhs_n) for n in map_to_merge_node: rhs_g_prime[n] = merged_node_id else: for p_node in p_nodes: if p_node in rhs_g_prime: del rhs_g_prime[p_node] rhs_g_prime[rhs_node] = list(graph_nodes)[0] def _propagate_node_addition(self, origin_id, graph_id, rule, rhs_typing, g_g_prime, rhs_g_prime): """Propagate node additions from 'origin_id' to 'graph_id'. 
Perform a propagation of additions to 'graph' Parameters ---------- origin_id : hashable ID of the graph corresponding to the origin of rewriting graph_id : hashable ID of the graph where propagation is performed rule : regraph.Rule Original rewriting rule rhs_origin_prime : dict Instance of rule's rhs inside the updated origin rhs_typing : dict Typing of the nodes from the rhs in 'graph_id' rhs_g_prime : dict Map from the RHS to the updated graph with 'graph_id' """ graph = self.get_graph(graph_id) for rhs_node in rule.added_nodes(): if rhs_node in rhs_typing: if len(rhs_typing[rhs_node]) == 1: rhs_g_prime[rhs_node] = list( rhs_typing[rhs_node])[0] else: nodes_to_merge = rhs_typing[rhs_node] new_node_id = graph.merge_nodes(nodes_to_merge) for n in nodes_to_merge: g_g_prime[n] = new_node_id p_nodes = keys_by_value(rhs_g_prime, n) for p_node in p_nodes: if p_node in rhs_g_prime: del rhs_g_prime[p_node] rhs_g_prime[rhs_node] = new_node_id map_to_merge_node = set() for rhs_n, g_prime_n in rhs_g_prime.items(): if g_prime_n in nodes_to_merge: map_to_merge_node.add(rhs_n) for n in map_to_merge_node: rhs_g_prime[n] = new_node_id else: new_node_id = graph.generate_new_node_id(rhs_node) graph.add_node(new_node_id) rhs_g_prime[rhs_node] = new_node_id def _propagate_node_attrs_addition(self, origin_id, graph_id, rule, rhs_g_prime): """Propagate node attrs additions from 'origin_id' to 'graph_id'. 
        Perform a propagation of additions to 'graph'

        Parameters
        ----------
        origin_id : hashable
            ID of the graph corresponding to the origin of rewriting
        graph_id : hashable
            ID of the graph where propagation is performed
        rule : regraph.Rule
            Original rewriting rule
        rhs_origin_prime : dict
            Instance of rule's rhs inside the updated origin
        origin_prime_g_prime : dict
            Map from the updated origin to the updated graph with 'graph_id'
        """
        graph = self.get_graph(graph_id)

        # Added attrs land on the image of the RHS node in this graph.
        for rhs_node, attrs in rule.added_node_attrs().items():
            graph.add_node_attrs(
                rhs_g_prime[rhs_node], attrs)

    def _propagate_edge_addition(self, origin_id, graph_id, rule,
                                 rhs_g_prime):
        """Propagate edge additions from 'origin_id' to 'graph_id'.

        Perform a propagation of additions to 'graph'

        Parameters
        ----------
        origin_id : hashable
            ID of the graph corresponding to the origin of rewriting
        graph_id : hashable
            ID of the graph where propagation is performed
        rule : regraph.Rule
            Original rewriting rule
        rhs_origin_prime : dict
            Instance of rule's rhs inside the updated origin
        origin_prime_g_prime : dict
            Map from the updated origin to the updated graph with 'graph_id'
        """
        graph = self.get_graph(graph_id)

        for s, t in rule.added_edges():
            g_s = rhs_g_prime[s]
            g_t = rhs_g_prime[t]
            # Skip edges that already exist (e.g. merged endpoints may
            # map two RHS edges onto the same pair).
            if (g_s, g_t) not in graph.edges():
                graph.add_edge(g_s, g_t)

    def _propagate_edge_attrs_addition(self, origin_id, graph_id, rule,
                                       rhs_g_prime):
        """Propagate edge attrs additions from 'origin_id' to 'graph_id'.
Perform a propagation of additions to 'graph' Parameters ---------- origin_id : hashable ID of the graph corresponding to the origin of rewriting graph_id : hashable ID of the graph where propagation is performed """ graph = self.get_graph(graph_id) for (s, t), attrs in rule.added_edge_attrs().items(): g_s = rhs_g_prime[s] g_t = rhs_g_prime[t] graph.add_edge_attrs(g_s, g_t, attrs) def _get_identity_map(self, graph_id): return { n: n for n in self.get_graph(graph_id).nodes() } def _restrictive_update_incident_homs(self, node_id, g_m_g): for suc in self.successors(node_id): typing = self.get_typing(node_id, suc) self._update_mapping(node_id, suc, compose(g_m_g, typing)) def _restrictive_update_incident_rels(self, graph_id, g_m_g): for related_g in self.adjacent_relations(graph_id): rel = self.get_relation(graph_id, related_g) new_rel = dict() for node in self.get_graph(graph_id).nodes(): old_node = g_m_g[node] ns = keys_by_value(g_m_g, old_node) if old_node in rel.keys(): new_rel[node] = rel[old_node] self._update_relation(graph_id, related_g, new_rel) def _expansive_update_incident_homs(self, graph_id, g_m_g_prime, pred_typings): for pred, typing in pred_typings.items(): self._update_mapping( pred, graph_id, compose(typing, g_m_g_prime)) def _expansive_update_incident_rels(self, graph_id, g_m_g_prime, adj_relations): for related_g, rel in adj_relations.items(): new_rel = dict() for node in self.get_graph(graph_id).nodes(): if node in g_m_g_prime: new_node = g_m_g_prime[node] if node in rel.keys(): new_rel[new_node] = rel[node] self._update_relation(graph_id, related_g, new_rel) def _check_applicability(self, rule_hierarchy, instances): """Check if a rule hierarchy is applicable.""" # Check that instances commute for (s, t) in self.typings(): if s in instances and t in instances: typing = self.get_typing(s, t) lhs_s_t1 = compose(instances[s], typing) lhs_s_t2 = compose( rule_hierarchy["rule_homomorphisms"][(s, t)][0], instances[t]) if (lhs_s_t1 != lhs_s_t2): raise 
RewritingError( "Instance of a specified rule for '{}' ({}) ".format( s, instances[s]) + "is not compatible with such instance for '{}' ({})".format( t, instances[t])) # Check the applicability t_rule = rule_hierarchy["rules"][t] if t_rule.is_restrictive(): # Check the square L_S -> S -> T and L_S -> L_T -> T is a PB for lhs_t_node in t_rule.lhs.nodes(): t_node = instances[t][lhs_t_node] s_nodes = keys_by_value(typing, t_node) if lhs_t_node in t_rule.removed_nodes(): for s_node in s_nodes: error = False if s_node not in instances[s].values(): error = True elif keys_by_value(instances[s], s_node)[ 0] not in rule_hierarchy[ "rules"][s].removed_nodes(): error = True if error: raise RewritingError( "Specified rule hierarchy is not applicable " "the typing of the node '{}' ".format(s_node) + "from the graph '{}' is removed ".format(s) + "by rewriting of '{}', but ".format(t) + "this node is not removed by the rule " + "applied to '{}'".format(s) ) if lhs_t_node in t_rule.cloned_nodes(): for s_node in s_nodes: if s_node not in instances[s].values(): raise RewritingError( "Specified rule hierarchy is not applicable " "the typing of the node '{}' ".format(s_node) + "from the graph '{}' is cloned ".format(s) + "by rewriting of '{}', but the ".format(t) + "retyping of this node is not specified, i.e. " + "'{}' is not in the instances of ".format(s_node) + "the rule applied to '{}'".format(s) ) def relabel_nodes(self, graph, mapping): """Relabel nodes of a graph in the hierarchy.""" graph = self.get_graph(graph) graph.relabel_nodes(mapping) def graphs_typed_by_node(self, graph_id, node_id): """Get graphs typed by 'node_id' in 'graph_id'.""" graphs = [] for p in self.predecessors(graph_id): p_typing = self.get_typing(p, graph_id) if node_id in p_typing.values(): graphs.append(p) return graphs
{ "repo_name": "Kappa-Dev/ReGraph", "path": "regraph/hierarchies.py", "copies": "1", "size": "106492", "license": "mit", "hash": -8490334144673295000, "line_mean": 39.6613211149, "line_max": 91, "alpha_frac": 0.4767963791, "autogenerated": false, "ratio": 4.498458159084189, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5475254538184189, "avg_score": null, "num_lines": null }
# Abstract image generator using random functions # Nathan Reed, July 2012 import uuid, io import numpy as np, optparse, random, time from PIL import Image # Parse command-line options parser = optparse.OptionParser() parser.add_option('-o', '--output', dest='outputPath', help='Write output to FILE', metavar='FILE') parser.add_option('-d', '--dims', dest='dims', default='512x512', help='Image width x height, e.g. 320x240') parser.add_option('-s', '--seed', dest='seed', default=int(1000 * time.time()), help='Random seed (uses system time by default)') options, _ = parser.parse_args() dX, dY = (int(n) for n in options.dims.lower().split('x')) try: options.seed = int(options.seed) except ValueError: pass random.seed(options.seed) if not options.outputPath: options.outputPath = str(options.seed) + '.png' # Generate x and y images, with 3D shape so operations will correctly broadcast. xArray = np.linspace(0.0, 1.0, dX).reshape((1, dX, 1)) yArray = np.linspace(0.0, 1.0, dY).reshape((dY, 1, 1)) # Adaptor functions for the recursive generator # Note: using python's random module because numpy's doesn't handle seeds longer than 32 bits. def randColor(): return np.array([random.random(), random.random(), random.random()]).reshape((1, 1, 3)) def xVar(): return xArray def yVar(): return yArray def safeDivide(a, b): return np.divide(a, np.maximum(b, 0.001)) # Recursively build an image using a random function. Functions are built as a parse tree top-down, # with each node chosen randomly from the following list. The first element in each tuple is the # number of required recursive calls and the second element is the function to evaluate the result. 
functions = (
    (0, randColor),
    (0, xVar),
    (0, yVar),
    (1, np.sin),
    (1, np.cos),
    (2, np.add),
    (2, np.subtract),
    (2, np.multiply),
    (2, safeDivide),
)
depthMin = 2
depthMax = 10

def build_img(depth = 0):
    """Recursively build and evaluate a random image-expression tree.

    Leaf functions (arity 0) are only eligible once depth >= depthMin and
    branch functions (arity > 0) only while depth < depthMax, bounding the
    tree to a non-trivial, finite depth.
    """
    funcs = [f for f in functions if
                (f[0] > 0 and depth < depthMax) or
                (f[0] == 0 and depth >= depthMin)]
    nArgs, func = random.choice(funcs)
    args = [build_img(depth + 1) for n in range(nArgs)]
    return func(*args)

def save_img():
    """Generate one random image and return it as PNG-encoded bytes."""
    img = build_img()

    # Ensure it has the right dimensions. Image arrays are laid out as
    # (rows, cols, channels) == (dY, dX, 3), so the replication count for
    # axis 0 uses dY and for axis 1 uses dX. (Bug fix: the previous code
    # swapped these, which only produced correct output for square dims.)
    img = np.tile(img, (dY // img.shape[0], dX // img.shape[1], 3 // img.shape[2]))

    # Convert to 8-bit, hand to PIL and encode as PNG entirely in memory.
    img8Bit = np.uint8(np.rint(img.clip(0.0, 1.0) * 255.0))
    imgByteArr = io.BytesIO()
    Image.fromarray(img8Bit).save(imgByteArr, format='PNG')
    return imgByteArr.getvalue()
{ "repo_name": "Hurence/log-island", "path": "logisland-docker/loggen/randimg.py", "copies": "2", "size": "2758", "license": "apache-2.0", "hash": 2412504988775157000, "line_mean": 32.475, "line_max": 129, "alpha_frac": 0.6682378535, "autogenerated": false, "ratio": 2.975188781014024, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9527791941130669, "avg_score": 0.023126938676671063, "num_lines": 80 }
"""Abstract interface for generative models. """ # standard imports from typing import Iterable, Union, Tuple from collections.abc import Iterable as AbstractIterable import os import logging # third-party imports import numpy as np # Toolbox imports from dltb.base.busy import BusyObservable, busy from dltb.base.data import Data from dltb.base.image import Image, ImageObservable # logging LOG = logging.getLogger(__name__) class Generator: """A :py:class:`Generator` represents a generative model. In general terms, a generative model describes a data distribution and allows to sample from that distribution. Sampling can be seen as a creative process, generating new data. The :py:class:`Generator` class allows to sample by calling the :py:meth:`generate` method. Currently this class aims at generative models realized as random variable, that is a mapping from a feature space Z into the dataspace X, or short: a function X=f(Z). A subclass implementing a generative model should overwrite at least one of the methdos :py:class:`_generate_single` (sample a single datapoint) or :py:class:`_generate_batch` (sample a batch of data points). In the context of deep learning, prominent approaches to generative models are generative adversarial networks (GAN), variational autoencoders (VAE), autoregressive architectures, and normalized flow. There are subclasses providing specialized interfaces to these models. """ _feature_dimensions: int = None Data: type = Data @property def feature_dimensions(self) -> int: """The dimensionality of the feature vector. """ return self._feature_dimensions @property def data_shape(self) -> Tuple[int]: """The dimensionality of the data vector. """ return self._data_shape() def random_features(self, batch: int = None, seed: Union[int, Iterable[int]] = None) -> np.ndarray: """Create random features that can be used to generate data. The features will be randomly sampled from the distribution the generator is was trained on. 
Notice that different generators may be trained on different distributions and hence using features from the correct distribution is crucial for obtaining best generation results. The default implementation will create feature vectors, sampled from a normal distribution. Subclasses may overwrite this method to realize other distributions. Arguments --------- batch: The batch size specifying how many feature vectors to create. If `None`, no batch but a single feature vector is returned. seed: Seed for the pseudo random number generator. If an `Iterable`, a batch of feature vectors is created, with each vector created after the random number generator was seeded with the next seed. Results ------- features: A numpy array, either a single feature vector of shape (FEATURE_DIMS,) if `batch` is `None` or a batch of feature vectors of shape (`batch`, FEATURE_DIMS) if `batch` is an integer. """ if isinstance(seed, AbstractIterable): result = [] for next_seed in seed: result.append(self.random_features(seed=next_seed)) if batch is not None and len(result) != batch: raise ValueError(f"Inconsistent batch size ({batch}) " f"and number of seeds ({len(result)}).") return np.asarray(result) shape = ((self._feature_dimensions,) if not batch else (batch, self._feature_dimensions)) return self._randn(shape, seed) def _randn(self, shape, seed=0): rnd = np.random.RandomState(seed) return rnd.randn(*shape) def _check_arguments(self, features: np.ndarray, seed: Union[int, Iterable[int]], batch: int) -> Tuple[np.ndarray, Union[int, Tuple[int]]]: if features is not None: if seed is not None: raise ValueError("Cannot access both, features and seed.") if batch is not None: if features.ndim == 1: raise ValueError("No batch size is accepted, when " "specifying a single feature vector.") if len(features) != batch: raise ValueError("Inconsistent batch size: " f"{batch} (batch) vs. 
" f"{len(features)} (features).") return features, None if seed is not None and not isinstance(seed, int): seed = tuple(seed) if batch is None: batch = len(seed) elif batch != len(seed): raise ValueError("Inconsistent batch size: " f"{batch} (batch) vs. {len(seed)} (seed).") return self.random_features(batch=batch, seed=seed), seed def generate(self, features: np.ndarray = None, seed: Union[int, Iterable[int]] = None, batch: int = None) -> Data: """Generate data by sampling from the distribution described by this :py:class:`Generator`. Arguments --------- features: A single feature vector with shape (FEATURE_DIMS,) or a batch of feature vectors with shape (BATCH_SIZE, FEATURE_DIMS) used for the generation. seed: Seed for creating a random feature vector. If iteratable, a batch of feature vectors will be created. batch: If not `None` this should be an integer specifying the batch size, that is the number of data points to be generated (sampled). If `features` or `seed` are also provided, the implied batch size should be compatible with the `batch` argument. Result ------ generatum: A :py:class:`Data` object containing the generated data as its `array` attribute. The features used for generation will be provided in the `features` attribute. If a seed was provided, it will be stored in the `seed` attribute. """ features, seed = self._check_arguments(features, seed, batch) array = self._generate_array(features) generatum = self.Data(array, batch=(features.ndim > 1)) generatum.add_attribute('features', value=features, batch=True) if seed is not None: generatum.add_attribute('seed', value=seed, batch=not isinstance(seed, int)) return generatum def generate_array(self, features: np.ndarray = None, seed: Union[int, Iterable[int]] = None, batch: int = None) -> np.ndarray: """Generate data as (numpy) array. 
""" features, _seed = self._check_arguments(features, seed, batch) return self._generate_array(features) def _generate_array(self, features: np.ndarray) -> np.ndarray: # check if we have a single feature vector (batch == False) # or a batch of feature vectors (batch == True) generate_batch = (features.ndim > 1) return (self._generate_batch(features) if generate_batch else self._generate_single(features)) def _generate_single(self, features: np.ndarray) -> np.ndarray: """ """ if type(self)._generate_batch is Generator._generate_batch: raise NotImplementedError("At least one of _generate_single " "or _generate_batch has to be " "implemented by subclass " f"{type(self)} of Generator.") # generate single by generating a batch of size 1 return self._generate_batch(features[np.newaxis, ...])[0] def _generate_batch(self, features: np.ndarray) -> np.ndarray: """Actual implementation of the generator method of this :py:class:`Generator`. Generate a batch of data from a batch of feature vectors. Arguments --------- features: A batch of feature vectors of shape (BATCH_SIZE, FEATURE_DIMS) used for the generation. Result ------ generatum: An array of shape (BATCH_SIZE, DATA_SHAPE) containing the generated data. """ if type(self)._generate_single is Generator._generate_single: raise NotImplementedError("At least one of _generate_single " "or _generate_batch has to be " "implemented by subclass " f"{type(self)} of Generator.") # generate batch by generating each single element first = self._generate_single(features[0]) batch = np.ndarray((len(features),) + first.shape, dtype=first.dtype) batch[0] = first for index, feature in enumerate(features[1:]): batch[index+1] = self._generate_single(feature) return batch def transition(self, seed1: int, seed2: int, steps: int = 100): """Generate a transition between to feature vectors. 
""" # 400,602 # seed1 = 400 # seed2 = 42 # STEPS = 100 features = self.random_features(seed=[seed1, seed2]) # Take the difference between to vectors and divide it by the # number of steps to obtain the transition transition = np.linspace(features[0], features[1], steps) generatum = self.generate(features=transition) generatum.add_attribute('transition', value=(seed1, seed2)) return generatum class ImageGenerator(Generator): """An :py:class:`ImageGenerator` is a :py:class:`Generator` specialized on creating images. """ Data: type = Image def store_images(self, seeds, directory): """Generate a series of images and store them in a directory. """ os.makedirs(directory, exist_ok=True) # For each seed, generate the respective image seeds = list(seeds) for seed_idx, seed in enumerate(seeds): LOG.debug("Generating image for seed %d/%d ...", seed_idx, len(seeds)) image = self.generate_image(seed) # Save image path = os.path.join(directory, f"image{seed_idx}.png") # FIXME[todo]: should by imsave import PIL.Image PIL.Image.fromarray(image, 'RGB').save(path) LOG.debug("Generation complete!") class ImageGeneratorWorker(ImageObservable, BusyObservable): # FIXME[todo]: batch generator """An :py:class:`ImageGeneratorWorker` uses an :py:class:`ImageGenerator` to (asynchronously) perform image generation operation. It will inform :py:class:`ImageObservers` whenever a new result was generated. """ _generator: ImageGenerator = None def __init__(self, generator: ImageGenerator = None, **kwargs) -> None: super().__init__(**kwargs) self._image = None self._next_features = None self.generator = generator @property def generator(self) -> ImageGenerator: """The generator applied by this Worker. """ return self._generator @generator.setter def generator(self, generator: Generator) -> None: """Set the :py:class:`Generator` to be used by this Worker. 
""" self._generator = generator self._data = None @property def image(self) -> Image: return self._image def generate(self, features: np.ndarray) -> None: """Generate data using the generator. """ self._next_features = features if not self.busy: self._generate() # FIXME[todo]: self._work() @busy("generating") # FIXME[hack/bug]: if queueing is enabled, we are not really busy ... # (that is we are busy, but nevertheless accepting more work) def _generate(self): while self._next_features is not None: features = self._next_features self._next_features = None self._image = self._generator.generate(features) self.change('image_changed') def random(self, seed: int = None) -> None: """Generate random data. """ self._image = Image(self._generator.random(seed)) self.change('data_changed') # FIXME[todo]: make some broader video concept def make_video(self): """Generate a sequence of images into a video stream. """ # Link the images into a video. # !ffmpeg -r 30 -i {config.result_dir}/image%d.png -vcodec mpeg4 -y movie.mp4 # FIXME[todo]: @property def old_batch(self) -> Image: """old? - if not used -> remove """ return self._image class GAN(Generator): """A generative adversarial network. """ @property def generator(self) -> Generator: pass @property def discriminator(self): # FIXME[todo] -> Network: pass class ImageGAN(GAN, ImageGenerator): """A generative adversarial network (GAN) for generating images. """ # FIXME[todo]: implementation mechanism for abstract classes implementations: dict = {}
{ "repo_name": "Petr-By/qtpyvis", "path": "dltb/tool/generator.py", "copies": "1", "size": "13795", "license": "mit", "hash": -6871520787043376000, "line_mean": 35.4947089947, "line_max": 85, "alpha_frac": 0.5924610366, "autogenerated": false, "ratio": 4.560330578512397, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5652791615112397, "avg_score": null, "num_lines": null }
"""Abstract interface for OID Storage objects""" from __future__ import generators from twistedsnmp import errors __metaclass__ = type class OIDStore: """Interface for the OID Storage mechanism The role of this mechanism is to store and retrieve OID: value pairs. Since most of the common queries involve finding, not a specific OID, but rather the next OID following a given OID, it is necessary for OID stores to use an ordered storage format with fast retrieval characteristics, such as a bisect list, or a BSDDB BTree database. """ def close( self ): """Close the OIDStore""" def getExactOID( self, base ): """Get the given OID,value pair for the given base This method is responsible for implementing the GET request, (or a GETBULK request which specifies inclusive operation). """ def firstOID( self ): """Retrieve the first OID,value pair for the storage Raises OIDNameError if there are no pairs available """ def nextOID( self, base ): """Get next OID,value pair after given base OID This method is responsible for implementing GETNEXT, and GETBULK requests. """ def validateSetValue( self, oid, value, request, address, implementation ): """Validate that given oid & value can be set returns 0 on success, returns errorID on failure This implementation just returns 0 in all cases """ return 0 def setValue( self, oid, value): """Set the given oid,value pair, returning old value This method is responsible for implementing the SET request. """ def update( self, valueSet ): """Given a valueSet, load given values into storage valueSet -- A set of OID:value mappings in these forms { rootOID : { oid : value }} OIDStore instance -- iterable as a (key,value) producing sequence [ (oid,value) ] # value can also be a dictionary or OIDStore (as seen above) [ dict, OIDStore, ... ] # i.e. 
        just a sequence of dicts or storages

        XXX Should allow for passing in another OIDStore, but that
        Will require a first() method for all OIDStores
        """
        # Normalise a mapping into a (key, value) sequence.
        if hasattr( valueSet, 'items' ):
            valueSet = valueSet.items()
        if not valueSet:
            return 0
        # okay, now should be list of tuples or an OIDStore
        # or a list of OIDStores/dictionaries
        # count tallies how many leaf (oid, value) pairs were stored,
        # recursing into nested dicts/OIDStores on either side.
        count = 0
        for item in valueSet:
            if isinstance( item, (dict,OIDStore)):
                count += self.update( item )
            else:
                key, value = item
                if isinstance( value, (dict, OIDStore) ):
                    count += self.update( value )
                else:
                    count += 1
                    self.setValue( key, value )
        return count
    def __iter__( self ):
        """Create an iterator object for this OIDStore"""
        # Walk the store in OID order; OIDNameError from firstOID/nextOID
        # marks exhaustion (Python 2 except syntax -- this module is py2).
        try:
            oid,value = self.firstOID()
            yield oid, value
            while oid:
                oid,value = self.nextOID( oid )
                yield oid, value
        except errors.OIDNameError, err:
            pass

def dumbPrefix( key, oid ):
    """Is the key == oid or a parent of OID?

    This function is used by sub-classes to do a simple
    check for oid inheritence.
    """
    # Prefix test on OID tuples: equal when len(key) == len(oid),
    # ancestor when key is a proper prefix.
    return oid[:len(key)] == key
{ "repo_name": "mmattice/TwistedSNMP", "path": "oidstore.py", "copies": "1", "size": "2963", "license": "bsd-3-clause", "hash": -1011472035455966300, "line_mean": 28.9292929293, "line_max": 79, "alpha_frac": 0.6962537968, "autogenerated": false, "ratio": 3.4858823529411764, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4682136149741176, "avg_score": null, "num_lines": null }
# Abstract I/O dispatcher. Defines standard dispatcher API
from pysnmp.carrier import error


class TimerCallable:
    """Invoke a wrapped callback no more often than once per interval.

    The rich comparison operators delegate to the wrapped callback so a
    TimerCallable can be located in a list by passing the raw callback
    (see AbstractTransportDispatcher.unregisterTimerCbFun).
    """

    def __init__(self, cbFun, callInterval):
        self.__cbFun = cbFun
        self.__callInterval = callInterval
        self.__nextCall = 0

    def __call__(self, timeNow):
        # Fire only once the scheduled time has been reached, then
        # schedule the next invocation one interval later.
        if self.__nextCall <= timeNow:
            self.__cbFun(timeNow)
            self.__nextCall = timeNow + self.__callInterval

    def __eq__(self, cbFun):
        return self.__cbFun == cbFun

    def __ne__(self, cbFun):
        return self.__cbFun != cbFun

    def __lt__(self, cbFun):
        return self.__cbFun < cbFun

    def __le__(self, cbFun):
        return self.__cbFun <= cbFun

    def __gt__(self, cbFun):
        return self.__cbFun > cbFun

    def __ge__(self, cbFun):
        return self.__cbFun >= cbFun


class AbstractTransportDispatcher:
    """Base class for transport dispatchers.

    Tracks registered transports, pending jobs and periodic timer
    callbacks.  Concrete subclasses implement runDispatcher() to drive
    the actual I/O loop.
    """

    def __init__(self):
        self.__transports = {}
        self.__jobs = {}
        self.__recvCbFun = None
        self.__timerCallables = []
        self.__ticks = 0
        self.__timerResolution = 0.5
        self.__nextTime = 0

    def _cbFun(self, incomingTransport, transportAddress, incomingMessage):
        """Route an incoming message to the registered receive callback.

        :raises error.CarrierError: if the transport is unknown or no
            receive callback has been registered
        """
        for name, transport in self.__transports.items():
            if transport is incomingTransport:
                transportDomain = name
                break
        else:
            raise error.CarrierError(
                'Unregistered transport %s' % (incomingTransport,)
            )
        if self.__recvCbFun:
            self.__recvCbFun(
                self, transportDomain, transportAddress, incomingMessage
            )
        else:
            raise error.CarrierError(
                'Receive callback not registered -- losing incoming event'
            )

    # Dispatcher API

    def registerRecvCbFun(self, recvCbFun):
        """Register the single receive callback; error if one exists."""
        if self.__recvCbFun:
            raise error.CarrierError(
                'Receive callback already registered'
            )
        self.__recvCbFun = recvCbFun

    def unregisterRecvCbFun(self):
        self.__recvCbFun = None

    def registerTimerCbFun(self, timerCbFun, tickInterval=None):
        """Register a periodic callback fired every 'tickInterval' seconds.

        A missing (or zero/falsy) tickInterval falls back to the current
        timer resolution.
        """
        if not tickInterval:
            tickInterval = self.__timerResolution
        self.__timerCallables.append(TimerCallable(timerCbFun, tickInterval))

    def unregisterTimerCbFun(self, timerCbFun=None):
        """Remove one timer callback, or all of them when None is given."""
        if timerCbFun:
            self.__timerCallables.remove(timerCbFun)
        else:
            self.__timerCallables = []

    def registerTransport(self, tDomain, transport):
        if tDomain in self.__transports:
            raise error.CarrierError(
                'Transport %s already registered' % (tDomain,)
            )
        transport.registerCbFun(self._cbFun)
        self.__transports[tDomain] = transport

    def unregisterTransport(self, tDomain):
        if tDomain not in self.__transports:
            raise error.CarrierError(
                'Transport %s not registered' % (tDomain,)
            )
        self.__transports[tDomain].unregisterCbFun()
        del self.__transports[tDomain]

    def getTransport(self, transportDomain):
        if transportDomain in self.__transports:
            return self.__transports[transportDomain]
        raise error.CarrierError(
            'Transport %s not registered' % (transportDomain,)
        )

    def sendMessage(self, outgoingMessage, transportDomain, transportAddress):
        """Send a message via the transport registered for the domain."""
        if transportDomain in self.__transports:
            self.__transports[transportDomain].sendMessage(
                outgoingMessage, transportAddress
            )
        else:
            raise error.CarrierError(
                'No suitable transport domain for %s' % (transportDomain,)
            )

    def getTimerResolution(self):
        return self.__timerResolution

    def setTimerResolution(self, timerResolution):
        # sanity bounds: between 10ms and 10s
        if timerResolution < 0.01 or timerResolution > 10:
            raise error.CarrierError('Impossible timer resolution')
        self.__timerResolution = timerResolution

    def getTimerTicks(self):
        return self.__ticks

    def handleTimerTick(self, timeNow):
        """Advance the timer wheel; fire due timer callbacks."""
        if self.__nextTime == 0:   # initial initialization
            self.__nextTime = timeNow + self.__timerResolution

        if self.__nextTime > timeNow:
            return

        self.__ticks += 1
        self.__nextTime = timeNow + self.__timerResolution

        for timerCallable in self.__timerCallables:
            timerCallable(timeNow)

    def jobStarted(self, jobId):
        # dict.get collapses the original two-branch increment
        self.__jobs[jobId] = self.__jobs.get(jobId, 0) + 1

    def jobFinished(self, jobId):
        self.__jobs[jobId] -= 1
        if self.__jobs[jobId] == 0:
            del self.__jobs[jobId]

    def jobsArePending(self):
        # bool is a subclass of int, so this remains compatible with the
        # historical 1/0 return values.
        return bool(self.__jobs)

    def runDispatcher(self, timeout=0.0):
        raise error.CarrierError('Method not implemented')

    def closeDispatcher(self):
        """Close and unregister all transports and callbacks."""
        # iterate over a copy since unregisterTransport mutates the dict
        for tDomain in list(self.__transports):
            self.__transports[tDomain].closeTransport()
            self.unregisterTransport(tDomain)
        self.unregisterRecvCbFun()
        self.unregisterTimerCbFun()
{ "repo_name": "xfguo/pysnmp", "path": "pysnmp/carrier/base.py", "copies": "1", "size": "5324", "license": "bsd-3-clause", "hash": 8024979927153124000, "line_mean": 33.1282051282, "line_max": 77, "alpha_frac": 0.5848985725, "autogenerated": false, "ratio": 4.462699077954736, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5547597650454736, "avg_score": null, "num_lines": null }
""" Abstract IO handler """ import os class IOHandler(object): """ Abstract IO handler class Readers/writers should handle point-cloud data with the following structure: {'log': ['Processed by module load', 'Processed by module filter using parameters(x,y,z)'], 'pointcloud': {'offset': {'type': 'double', 'data': 12.1}}, 'vertex': {'x': {'type': 'double', 'data': np.array([0.1, 0.2, 0.3])}, 'y': {'type': 'double', 'data': np.array([0.1, 0.2, 0.3])}, 'z': {'type': 'double', 'data': np.array([0.1, 0.2, 0.3])}, 'return': {'type': 'int', 'data': np.array([1, 1, 2])}}} """ path = None def __init__(self, path, mode, overwrite=False): """ Perform some checks on the path where we need to operate (read/write). :param path: path where IO needs to take place :param mode: 'r' for reading, 'w' for writing :param overwrite: if writing, overwrite path if it already exists """ self.path = path if mode == 'r': if not os.path.exists(path): raise FileNotFoundError('{} not found.'.format(path)) elif mode == 'w': path_directory = os.path.dirname(path) if path_directory and not os.path.exists(path_directory): raise FileNotFoundError('Output file path does not exist! --> {}'.format(path_directory)) if os.path.exists(path): if not overwrite: # Raise most specific subclass of FileExistsError (3.6) and IOError (2.7). raise FileExistsError('Output file already exists! --> {}'.format(path)) else: os.remove(path) def read(self): """ Read the point cloud from disk :return point_cloud: """ raise NotImplementedError( "Class %s doesn't implement read()" % self.__class__.__name__) def write(self, point_cloud): """ Write the point cloud to disk :param point_cloud: """ raise NotImplementedError( "Class %s doesn't implement write()" % self.__class__.__name__)
{ "repo_name": "eEcoLiDAR/eEcoLiDAR", "path": "laserchicken/io/base_io_handler.py", "copies": "1", "size": "2198", "license": "apache-2.0", "hash": 7095977226915249000, "line_mean": 35.6333333333, "line_max": 105, "alpha_frac": 0.5414012739, "autogenerated": false, "ratio": 4.062846580406655, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.000891255198411333, "num_lines": 60 }
"""Abstract I/O mode container. Mode strings are used in in `~fs.base.FS.open` and `~fs.base.FS.openbin`. """ from __future__ import print_function from __future__ import unicode_literals import typing import six from ._typing import Text if typing.TYPE_CHECKING: from typing import FrozenSet, Set, Union __all__ = ["Mode", "check_readable", "check_writable", "validate_openbin_mode"] # https://docs.python.org/3/library/functions.html#open @six.python_2_unicode_compatible class Mode(typing.Container[Text]): """An abstraction for I/O modes. A mode object provides properties that can be used to interrogate the `mode strings <https://docs.python.org/3/library/functions.html#open>`_ used when opening files. Example: >>> mode = Mode('rb') >>> mode.reading True >>> mode.writing False >>> mode.binary True >>> mode.text False """ def __init__(self, mode): # type: (Text) -> None """Create a new `Mode` instance. Arguments: mode (str): A *mode* string, as used by `io.open`. Raises: ValueError: If the mode string is invalid. """ self._mode = mode self.validate() def __repr__(self): # type: () -> Text return "Mode({!r})".format(self._mode) def __str__(self): # type: () -> Text return self._mode def __contains__(self, character): # type: (object) -> bool """Check if a mode contains a given character.""" assert isinstance(character, Text) return character in self._mode def to_platform(self): # type: () -> Text """Get a mode string for the current platform. Currently, this just removes the 'x' on PY2 because PY2 doesn't support exclusive mode. """ return self._mode.replace("x", "w") if six.PY2 else self._mode def to_platform_bin(self): # type: () -> Text """Get a *binary* mode string for the current platform. This removes the 't' and adds a 'b' if needed. """ _mode = self.to_platform().replace("t", "") return _mode if "b" in _mode else _mode + "b" def validate(self, _valid_chars=frozenset("rwxtab+")): # type: (Union[Set[Text], FrozenSet[Text]]) -> None """Validate the mode string. 
Raises: ValueError: if the mode contains invalid chars. """ mode = self._mode if not mode: raise ValueError("mode must not be empty") if not _valid_chars.issuperset(mode): raise ValueError("mode '{}' contains invalid characters".format(mode)) if mode[0] not in "rwxa": raise ValueError("mode must start with 'r', 'w', 'x', or 'a'") if "t" in mode and "b" in mode: raise ValueError("mode can't be binary ('b') and text ('t')") def validate_bin(self): # type: () -> None """Validate a mode for opening a binary file. Raises: ValueError: if the mode contains invalid chars. """ self.validate() if "t" in self: raise ValueError("mode must be binary") @property def create(self): # type: () -> bool """`bool`: `True` if the mode would create a file.""" return "a" in self or "w" in self or "x" in self @property def reading(self): # type: () -> bool """`bool`: `True` if the mode permits reading.""" return "r" in self or "+" in self @property def writing(self): # type: () -> bool """`bool`: `True` if the mode permits writing.""" return "w" in self or "a" in self or "+" in self or "x" in self @property def appending(self): # type: () -> bool """`bool`: `True` if the mode permits appending.""" return "a" in self @property def updating(self): # type: () -> bool """`bool`: `True` if the mode permits both reading and writing.""" return "+" in self @property def truncate(self): # type: () -> bool """`bool`: `True` if the mode would truncate an existing file.""" return "w" in self or "x" in self @property def exclusive(self): # type: () -> bool """`bool`: `True` if the mode require exclusive creation.""" return "x" in self @property def binary(self): # type: () -> bool """`bool`: `True` if a mode specifies binary.""" return "b" in self @property def text(self): # type: () -> bool """`bool`: `True` if a mode specifies text.""" return "t" in self or "b" not in self def check_readable(mode): # type: (Text) -> bool """Check a mode string allows reading. 
Arguments: mode (str): A mode string, e.g. ``"rt"`` Returns: bool: `True` if the mode allows reading. """ return Mode(mode).reading def check_writable(mode): # type: (Text) -> bool """Check a mode string allows writing. Arguments: mode (str): A mode string, e.g. ``"wt"`` Returns: bool: `True` if the mode allows writing. """ return Mode(mode).writing def validate_open_mode(mode): # type: (Text) -> None """Check ``mode`` parameter of `~fs.base.FS.open` is valid. Arguments: mode (str): Mode parameter. Raises: `ValueError` if mode is not valid. """ Mode(mode) def validate_openbin_mode(mode, _valid_chars=frozenset("rwxab+")): # type: (Text, Union[Set[Text], FrozenSet[Text]]) -> None """Check ``mode`` parameter of `~fs.base.FS.openbin` is valid. Arguments: mode (str): Mode parameter. Raises: `ValueError` if mode is not valid. """ if "t" in mode: raise ValueError("text mode not valid in openbin") if not mode: raise ValueError("mode must not be empty") if mode[0] not in "rwxa": raise ValueError("mode must start with 'r', 'w', 'a' or 'x'") if not _valid_chars.issuperset(mode): raise ValueError("mode '{}' contains invalid characters".format(mode))
{ "repo_name": "PyFilesystem/pyfilesystem2", "path": "fs/mode.py", "copies": "1", "size": "6243", "license": "mit", "hash": 5674202532995827000, "line_mean": 25.0125, "line_max": 82, "alpha_frac": 0.5567835976, "autogenerated": false, "ratio": 3.8872976338729766, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49440812314729765, "avg_score": null, "num_lines": null }
""" Abstraction around partitionable data Some data can be partitioned into disjoint pieces. Examples include lists, tuples, and arrays of various flavors. This module provides functions to abstract away the handling of such partitions. It proposes three operations, each of these operations deals with data (like a numpy array) and a *partition-key* which is some token to define a part of the data. In the case of numpy (and other array-like data) a natural partition-key might be a tuple of slices, e.g. (slice(0, 10), slice(0, 10)) :: partition-key Our three operations: partitions :: data -> [partition-keys] Get list or list-of-lists of partition-keys partition_get :: data, partition-key -> partition Get a particular partition from a dataset partition_set :: data, partition-key, value -> void Set the value of a particular partition in a dataset Using these three operations we should be able to write down very simple and abstract algorithms like "copy" def copy(in_data, out_data): for in_key, out_key in zip(partitions(in_data), partitions(out_data)): data = partition_get(in_data, in_key) partition_set(out_data, out_key, data) """ from .dispatch import dispatch import numpy as np from math import ceil import h5py import toolz import itertools import bcolz Array = (np.ndarray, h5py.Dataset, bcolz.ctable, bcolz.carray) @dispatch(Array, object) def partition_get(data, part, chunksize=None): return data[part] @dispatch(Array, object, object) def partition_set(data, part, value, chunksize=None): data[part] = value.squeeze() return data def slices1d(n, k): """ >>> slices1d(10, 5) [slice(0, 5, None), slice(5, 10, None)] >>> slices1d(10, 6) [slice(0, 6, None), slice(6, 10, None)] >>> slices1d(10, 1) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ if k == 1: return list(range(n)) return [slice(k*i, min(k*(i + 1), n)) for i in range(int(ceil(float(n)/k)))] def tuplepack(x): if isinstance(x, (tuple, list)): return tuple(x) else: return (x,) @dispatch(Array) def partitions(data, 
chunksize=None): per_dim = map(slices1d, data.shape, chunksize) return itertools.product(*per_dim) def flatten(x): """ >>> flatten([[1]]) [1] >>> flatten([[1, 2], [3, 4]]) [1, 2, 3, 4] >>> flatten([[[1], [2]], [[3], [4]]]) [1, 2, 3, 4] """ if isinstance(x[0], list): return list(toolz.concat(map(flatten, x))) else: return x
{ "repo_name": "vitan/blaze", "path": "blaze/partition.py", "copies": "1", "size": "2564", "license": "bsd-3-clause", "hash": 1701972865561021200, "line_mean": 24.64, "line_max": 80, "alpha_frac": 0.639625585, "autogenerated": false, "ratio": 3.3428943937418514, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9477124917013456, "avg_score": 0.0010790123456790124, "num_lines": 100 }
"""Abstraction between production pusher and local test.""" import json import logging import os from google.appengine.api import urlfetch import pusher from common import public_creds from pi import simple_pusher class PusherWrapper(object): """Wraps the pusher client library, """ def __init__(self, **kwargs): # Hack, but some people won't have this (when running locally) from common import creds self._pusher_client = pusher.Pusher( app_id=creds.PUSHER_APP_ID, key=public_creds.pusher_key, secret=creds.PUSHER_SECRET, **kwargs) def push(self, channel_id, batch): self._pusher_client[channel_id].trigger('events', batch) class SimplePusherWrapper(object): def __init__(self, encoder=None): self._encoder = encoder if encoder is not None else json.JSONEncoder def push(self, channel_id, batch): url = 'http://localhost:%d/%s' % (simple_pusher.HTTP_PORT, channel_id) payload = self._encoder().encode(batch) urlfetch.fetch(url=url, payload=payload, method=urlfetch.POST) def should_use_local(): return os.environ['APPLICATION_ID'].startswith('dev') def get_client(**kwargs): # If we are running in local mode, # we want to push events to our local, # hacked up server. if should_use_local(): logging.info('Using local simple pusher.') return SimplePusherWrapper(**kwargs) else: return PusherWrapper(**kwargs)
{ "repo_name": "tomwilkie/awesomation", "path": "src/appengine/pusher_client.py", "copies": "1", "size": "1413", "license": "mit", "hash": -4112861670076288000, "line_mean": 27.26, "line_max": 74, "alpha_frac": 0.7020523708, "autogenerated": false, "ratio": 3.5772151898734177, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9737605529185167, "avg_score": 0.0083324062976502, "num_lines": 50 }
"""Abstraction for Arcyd's conduit operations.""" # ============================================================================= # CONTENTS # ----------------------------------------------------------------------------- # abdt_conduitmock # # Public Classes: # ConduitMockData # .assert_is_user # .create_empty_revision_as_user # .assert_is_revision # .get_revision # .is_unchanged # .set_changed # .set_unchanged # .accept_the_only_review # .users # .revisions # ConduitMock # .describe # .create_comment # .create_empty_revision_as_user # .get_commit_message # .create_revision_as_user # .query_name_and_phid_from_email # .query_users_from_emails # .parse_commit_message # .is_review_accepted # .is_review_abandoned # .is_review_recently_updated # .update_revision # .set_requires_revision # .close_revision # .abandon_revision # .accept_revision_as_user # .commandeer_revision_as_user # # ----------------------------------------------------------------------------- # (this contents block is generated, edits will be lost) # ============================================================================= from __future__ import absolute_import from __future__ import division from __future__ import print_function import phlcon_differential import phldef_conduit import phlsys_tracedecorator import abdt_exception def _mock_to_str(mock): return "conduitmock" class _RevisionStates(object): abandoned = 'abandoned' accepted = 'accepted' closed = 'closed' in_review = 'in-review' needs_revision = 'needs-revision' class _Revision(object): def __init__(self, revisionid, author): super(_Revision, self).__init__() self.revisionid = revisionid self.author = author self._status = None self.set_in_review() def set_abandoned(self): self._status = _RevisionStates.abandoned def set_accepted(self): self._status = _RevisionStates.accepted def set_closed(self): self._status = _RevisionStates.closed def set_in_review(self): self._status = _RevisionStates.in_review def set_needs_revision(self): 
self._status = _RevisionStates.needs_revision def is_abandoned(self): return self._status == _RevisionStates.abandoned def is_accepted(self): return self._status == _RevisionStates.accepted def is_closed(self): return self._status == _RevisionStates.closed class _User(object): def __init__(self, username, email, phid): super(_User, self).__init__() self.username = username self.email = email self.phid = phid class ConduitMockData(object): def __init__(self): """Initialise a new ConduitMockData.""" self._revisions = [] self._users = [] self._users.append(_User( phldef_conduit.ALICE.user, phldef_conduit.ALICE.email, phldef_conduit.ALICE.phid)) self._users.append(_User( phldef_conduit.BOB.user, phldef_conduit.BOB.email, phldef_conduit.BOB.phid)) self._users.append(_User( phldef_conduit.PHAB.user, phldef_conduit.PHAB.email, phldef_conduit.PHAB.phid)) self._firstid = 101 self._nextid = self._firstid self._no_write_attempts = True def assert_is_user(self, username): for user in self._users: if user.username == username: return assert False def create_empty_revision_as_user(self, username): """Return the id of a newly created empty revision as 'username'. 
:username: username for the author of the revision :returns: id of created revision """ self.assert_is_user(username) revisionid = self._nextid self._nextid += 1 self._revisions.append(_Revision(revisionid, username)) self.set_changed() return revisionid def assert_is_revision(self, revisionid): revisionid = int(revisionid) assert revisionid >= self._firstid assert revisionid < self._nextid def get_revision(self, revisionid): revisionid = int(revisionid) self.assert_is_revision(revisionid) index = revisionid - self._firstid return self._revisions[index] def is_unchanged(self): """Return true if this conduit has not been written to.""" return self._no_write_attempts def set_changed(self): """Reset the unchanged status to the supplied 'value'.""" self._no_write_attempts = False def set_unchanged(self): """Reset the unchanged status to the supplied 'value'.""" self._no_write_attempts = True def accept_the_only_review(self): """Set the only review as accepted.""" assert len(self._revisions) == 1 self._revisions[0].set_accepted() @property def users(self): return self._users @property def revisions(self): return self._revisions class ConduitMock(object): def __init__(self, data=None): """Initialise a new ConduitMock.""" super(ConduitMock, self).__init__() self._data = data if self._data is None: self._data = ConduitMockData() phlsys_tracedecorator.decorate_object_methods(self, _mock_to_str) def describe(self): """Return a string description of this conduit for a human to read. :returns: a string """ return 'abdt_conduitmock.ConduitMock' def create_comment(self, revision, message, silent=False): """Make a comment on the specified 'revision'. 
:revision: id of the revision to comment on :message: the string message to leave as a comment, may be empty :silent: mail notifications won't be sent if False :returns: None """ # unused parameters _ = silent # NOQA self._data.assert_is_revision(revision) str(message) # test that message can be converted to string self._data.set_changed() def create_empty_revision_as_user(self, username): """Return the id of a newly created empty revision as 'username'. :username: username for the author of the revision :returns: id of created revision """ return self._data.create_empty_revision_as_user(username) def get_commit_message(self, revisionid): """Return the string commit message appropriate for supplied revision. :revisionid: the id of the revision to create a message for :returns: the string of the commit message """ self._data.assert_is_revision(revisionid) return 'DUMMY COMMIT MESSAGE' def create_revision_as_user( self, raw_diff, fields, username): """Return the id of a newly created revision based on specified args. See phlcon_differential.MessageFields for some examples of valid input for specified 'fields'. :raw_diff: raw output string from e.g. 'git diff master...' :fields: dict of string attributes, required: 'title' and 'testPlan' :username: username for the author of the revision :returns: id of created revision """ assert raw_diff assert fields return self.create_empty_revision_as_user(username) def query_name_and_phid_from_email(self, email): """Return a (username, phid) tuple based on the provided email. If an email does not correspond to a user then None is returned. :email: a strings of the user's email address :returns: a (username, phid) tuple """ result = None for u in self._data.users: if u.email == email: result = u.username, u.phid return result def query_users_from_emails(self, emails): """Return a list of username strings based on the provided emails. If an email does not correspond to a username then None is inserted in its place. 
:emails: a list of strings corresponding to user email addresses :returns: a list of strings corresponding to Phabricator usernames """ usernames = [] for e in emails: next_username = None for u in self._data.users: if u.email == e: next_username = u.username usernames.append(next_username) return usernames def parse_commit_message(self, message): """Return a ParseCommitMessageResponse based on 'message'. :message: a string message to parse :returns: a phlcon_differential.ParseCommitMessageResponse """ fields = {'title': 'title', 'testPlan': 'test plan'} errors = None assert message return phlcon_differential.ParseCommitMessageResponse( fields=fields, errors=errors) def is_review_accepted(self, revisionid): """Return True if the supplied 'revisionid' is in 'accepted' status. :revisionid: id of the Differential revision to query :returns: True if accepted """ revision = self._data.get_revision(revisionid) return revision.is_accepted() def is_review_abandoned(self, revisionid): """Return True if the supplied 'revisionid' is in 'abandoned' status. :revisionid: id of the Differential revision to query :returns: True if abandoned """ revision = self._data.get_revision(revisionid) return revision.is_abandoned() def is_review_recently_updated(self, revisionid): """Return True if the supplied 'revisionid' was updated recently. 'recently' is a subjective term, in the context of a review it seems reasonable that if it hasn't been updated for at least a day then it could be considered as not recently updated. :revisionid: id of the Differential revision to query :returns: True if recently updated """ return True def update_revision(self, revisionid, raw_diff, message): """Update an existing Differential revision with a new diff. :revisionid: id of the Differential revision to update :raw_diff: raw output string from e.g. 'git diff master...' 
:message: string message to annotate the update event with :returns: None """ revision = self._data.get_revision(revisionid) assert raw_diff assert message # match the behaviour asserted by phlcon_differential__t, # we can't update a closed review, that's an error if revision.is_closed(): raise abdt_exception.AbdUserException( "can't update a closed revision") # match the behaviour asserted by phlcon_differential__t, 'accepted' is # a sticky state as far as updating the review is concerned if not revision.is_accepted(): revision.set_in_review() self._data.set_changed() def set_requires_revision(self, revisionid): """Set an existing Differential revision to 'requires revision'. :revisionid: id of the Differential revision to update :returns: None """ revision = self._data.get_revision(revisionid) assert not revision.is_closed() revision.set_needs_revision() self._data.set_changed() def close_revision(self, revisionid): """Set an existing Differential revision to 'closed'. :revisionid: id of the Differential revision to close :returns: None """ revision = self._data.get_revision(revisionid) assert revision.is_accepted() revision.set_closed() self._data.set_changed() def abandon_revision(self, revisionid): """Set an existing Differential revision to 'abandoned'. :revisionid: id of the Differential revision to close :returns: None """ revision = self._data.get_revision(revisionid) assert not revision.is_closed() revision.set_abandoned() self._data.set_changed() def accept_revision_as_user(self, revisionid, username): """Set an existing Differential revision to 'accepted'. 
:revisionid: id of the Differential revision to accept :username: username for the reviewer of the revision :returns: None """ revision = self._data.get_revision(revisionid) self._data.assert_is_user(username) assert not revision.is_closed() assert revision.author != username revision.set_accepted() self._data.set_changed() def commandeer_revision_as_user(self, revisionid, username): """Change the author of a revision to the specified 'username'. :revisionid: id of the Differential revision to claim :username: username for the author of the revision :returns: None """ revision = self._data.get_revision(revisionid) self._data.assert_is_user(username) assert not revision.is_closed() assert revision.author != username revision.author = username self._data.set_changed() # ----------------------------------------------------------------------------- # Copyright (C) 2013-2014 Bloomberg Finance L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------ END-OF-FILE ----------------------------------
{ "repo_name": "kjedruczyk/phabricator-tools", "path": "py/abd/abdt_conduitmock.py", "copies": "4", "size": "14124", "license": "apache-2.0", "hash": 3132277596013437400, "line_mean": 30.7393258427, "line_max": 79, "alpha_frac": 0.6118663268, "autogenerated": false, "ratio": 4.1688311688311686, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6780697495631168, "avg_score": null, "num_lines": null }
"""Abstraction for Arcyd's git operations.""" # ============================================================================= # CONTENTS # ----------------------------------------------------------------------------- # abdt_git # # Public Classes: # Repo # .is_identical # .get_remote_branches # .checkout_forced_new_branch # .raw_diff_range # .get_range_hashes # .make_revisions_from_hashes # .squash_merge # .archive_to_landed # .push_landed # .archive_to_abandoned # .push_abandoned # .push_asymmetrical # .push # .push_delete # .checkout_master_fetch_prune # .hash_ref_pairs # .checkout_make_raw_diff # .get_remote # # Public Functions: # get_managed_branches # checkout_master_fetch_special_refs # # Public Assignments: # ARCYD_LANDED_REF # ARCYD_LANDED_BRANCH_FQ # ARCYD_ABANDONED_REF # ARCYD_ABANDONED_BRANCH_FQ # # ----------------------------------------------------------------------------- # (this contents block is generated, edits will be lost) # ============================================================================= from __future__ import absolute_import from __future__ import division from __future__ import print_function import phlgit_branch import phlgit_checkout import phlgit_commit import phlgit_diff import phlgit_fetch import phlgit_log import phlgit_merge import phlgit_push import phlgit_showref import phlgitu_ref import abdt_branch import abdt_lander import abdt_logging import abdt_naming _ARCYD_REFSPACE = 'refs/arcyd' _PRIVATE_ARCYD_BRANCHSPACE = '__private_arcyd' _LANDED_ARCHIVE_BRANCH_MESSAGE = """ Create an archive branch for landed branches Landed branches will be automatically merged here by Arcyd for your reference. 
This branch is useful for: o: cleaning up branches contained by the landed branch (see 'git branch --merged') o: finding the pre-landed version of a commit (see 'git log --grep' - you can search for the landed sha1) o: keeping track of Arcyd's landing activity (see 'git log --first-parent') """.strip() ARCYD_LANDED_REF = "{}/landed".format(_ARCYD_REFSPACE) _ARCYD_LANDED_BRANCH = "{}/landed".format(_PRIVATE_ARCYD_BRANCHSPACE) ARCYD_LANDED_BRANCH_FQ = "refs/heads/" + _ARCYD_LANDED_BRANCH _ABANDONED_ARCHIVE_BRANCH_MESSAGE = """ Create an archive branch for abandoned branches Abandoned branches will be automatically merged here by Arcyd for your reference. This branch is useful for: o: keeping track of Arcyd's abandoning activity (see 'git log --first-parent') o: recovering abandoned branches (use 'git branch <branch name> <commit hash>') """.strip() ARCYD_ABANDONED_REF = "{}/abandoned".format(_ARCYD_REFSPACE) _ARCYD_ABANDONED_BRANCH = "{}/abandoned".format(_PRIVATE_ARCYD_BRANCHSPACE) ARCYD_ABANDONED_BRANCH_FQ = "refs/heads/" + _ARCYD_ABANDONED_BRANCH class Repo(object): def __init__( self, refcache_repo, differ_cache, remote, description): """Initialise a new Repo. :repo: a callable supporting git commands, e.g. repo("status") :remote: name of the remote to use :description: short identification of the repo for humans :returns: None """ super(Repo, self).__init__() self._repo = refcache_repo self._remote = remote self._description = description self._is_landing_archive_enabled = None self._differ_cache = differ_cache def is_identical(self, branch1, branch2): """Return True if the branches point to the same commit. :branch1: string name of branch :branch2: string name of branch :returns: True if the branches point to the same commit """ return phlgit_branch.is_identical(self, branch1, branch2) def _is_ref(self, ref): """Return True if the specified ref exists, otherwise False. 
:ref: the string name of the ref to look up
        :return: True if the specified ref exists, otherwise False

        """
        ref_names = phlgit_showref.names(self)
        return ref in ref_names

    def get_remote_branches(self):
        """Return a list of string names of remote branches.

        :returns: list of string names

        """
        return phlgit_branch.get_remote(self, self._remote)

    def checkout_forced_new_branch(self, new_name, based_on):
        """Overwrite and checkout 'new_name' as a new branch from 'based_on'.

        :new_name: the string name of the branch to create and overwrite
        :based_on: the string name of the branch to copy
        :returns: None

        """
        phlgit_checkout.new_branch_force_based_on(
            self, new_name, based_on)

    # TODO: split this into more functions with varying context
    def raw_diff_range(self, base, to, context=None):
        """Return a string of the unified diff between 'base' and 'to'.

        Note that the output is based on 'git diff base...to', so the
        commits are diff'ed via their common ancestry.

        :base: the commit or branch name to start from
        :to: the commit or branch name to end with
        :context: integer amount of surrounding context to include
        :returns: string of the unified diff

        """
        return phlgit_diff.raw_diff_range(self, base, to, context)

    def get_range_hashes(self, start, end):
        """Return a list of strings of commit hashes from 'start' to 'end'.

        The list begins with the revision closest to but not including
        'start'.  Raise a ValueError if any of the returned values are not
        valid hexadecimal.

        :start: a reference that log will understand
        :end: a reference that log will understand
        :returns: a list of strings of commit hashes from 'start' to 'end'.

        """
        return phlgit_log.get_range_hashes(self, start, end)

    def make_revisions_from_hashes(self, hashes):
        """Return a list of 'phlgit_log__Revision' from 'hashes'.

        Raise an exception if the repo does not return a valid FullMessage
        from any of 'hashes'.

        :hashes: a list of commit hash strings
        :returns: a list of 'phlgit_log__Revision'

        """
        return phlgit_log.make_revisions_from_hashes(self, hashes)

    def squash_merge(self, branch, message, author_name, author_email):
        """Return output from Git performing a squash merge.

        :branch: string name of branch to merge into HEAD
        :message: string message for the merge commit
        :author_name: string name of author for the merge commit
        :author_email: string email of author for the merge commit
        :returns: string of Git output

        """
        # TODO: test that the author is set correctly
        return phlgit_merge.squash(
            self,
            branch,
            message,
            author_name + " <" + author_email + ">")

    def _checkout_archive_ref_branch(
            self, short_branch_name, fq_branch_name, initial_message):
        # Get onto the named archive branch; if it doesn't exist yet then
        # create it as a clean orphan with an initial empty commit so that
        # later merges have something to build on.
        if self._is_ref(fq_branch_name):
            phlgit_checkout.branch(self, short_branch_name)
        else:
            phlgit_checkout.orphan_clean(self, short_branch_name)
            phlgit_commit.allow_empty(self, initial_message)

    def archive_to_landed(
            self, review_hash, review_branch, base_branch, land_hash, message):
        """Merge the specified review branch to the 'landed' archive branch.

        :review_hash: the string of the commit hash to archive
        :review_branch: the string name of the branch to archive
        :base_branch: the string name of the branch the review is branched off
        :land_hash: the string of the commit hash the branch landed with
        :message: the string commit message the branch landed with
        :returns: None

        """
        self._checkout_archive_ref_branch(
            _ARCYD_LANDED_BRANCH,
            ARCYD_LANDED_BRANCH_FQ,
            _LANDED_ARCHIVE_BRANCH_MESSAGE)
        new_message = "landed {} on {} as {}\n\nwith message:\n{}".format(
            review_branch, base_branch, land_hash, message)
        # 'ours' merge records the review's history without taking its
        # content onto the archive branch.
        phlgit_merge.ours(self, review_hash, new_message)

    def push_landed(self):
        """Push the 'landed' archive branch to the remote.

        :returns: None

        """
        self.push_asymmetrical(ARCYD_LANDED_BRANCH_FQ, ARCYD_LANDED_REF)

    def archive_to_abandoned(
            self, review_hash, review_branch, base_branch):
        """Merge the specified review branch to the 'abandoned' archive branch.

        :review_hash: the string of the commit hash to archive
        :review_branch: the string name of the branch to archive
        :base_branch: the string name of the branch the review is branched off
        :returns: None

        """
        # get on the archive branch, create new orphan if necessary
        self._checkout_archive_ref_branch(
            _ARCYD_ABANDONED_BRANCH,
            ARCYD_ABANDONED_BRANCH_FQ,
            _ABANDONED_ARCHIVE_BRANCH_MESSAGE)
        new_message = "abandoned {}, branched from {}".format(
            review_branch, base_branch)
        phlgit_merge.ours(self, review_hash, new_message)

    def push_abandoned(self):
        """Push the 'abandoned' archive branch to the remote.

        :returns: None

        """
        self.push_asymmetrical(
            ARCYD_ABANDONED_BRANCH_FQ,
            ARCYD_ABANDONED_REF)

    def push_asymmetrical(self, local_branch, remote_branch):
        """Push 'local_branch' as 'remote_branch' to the remote.

        :local_branch: string name of the branch to push
        :remote_branch: string name of the branch on the remote
        :returns: None

        """
        phlgit_push.push_asymmetrical(
            self, local_branch, remote_branch, self._remote)

    def push(self, branch):
        """Push 'branch' to the remote.

        :branch: string name of the branch to push
        :returns: None

        """
        phlgit_push.push(self, branch, self._remote)

    def push_delete(self, branch, *args):
        """Delete 'branch' from the remote.

        :branch: string name of the branch
        :*args: (optional) more string names of branches
        :returns: None

        """
        phlgit_push.delete(self, self._remote, branch, *args)

    def checkout_master_fetch_prune(self):
        """Checkout master, fetch from the remote and prune branches.

        Please see checkout_master_fetch_special_refs() for why we must
        checkout master first.

        :returns: None

        """
        checkout_master_fetch_special_refs(self, self._remote)

    @property
    def hash_ref_pairs(self):
        """Return a list of (sha1, name) tuples from the repo's list of refs.

        :returns: a list of (sha1, name)

        """
        # Delegates to the wrapped repo object.
        return self._repo.hash_ref_pairs

    def checkout_make_raw_diff(
            self, from_branch, to_branch, max_diff_size_utf8_bytes):
        """Return an abdt_differ.DiffResult of the changes on the branch.

        If the diff would exceed the pre-specified max diff size then take
        measures to reduce the diff.

        Potentially checkout onto the 'to_branch' so that changes to
        .gitattributes files will be considered.

        :from_branch: string name of the merge-base of 'branch'
        :to_branch: string name of the branch to diff
        :max_diff_size_utf8_bytes: the maximum allowed size of the diff as utf8
        :returns: the string diff of the changes on the branch

        """
        return self._differ_cache.checkout_make_raw_diff(
            from_branch, to_branch, max_diff_size_utf8_bytes)

    def _log_read_call(self, args, kwargs):
        # Wrap a read-only git invocation in a remote-io read logging
        # context before delegating to the underlying repo callable.
        with abdt_logging.remote_io_read_event_context(
                'git-{}'.format(args[0]),
                '{}: {} {}'.format(
                    self._description, ' '.join(args), kwargs)):
            return self._repo(*args, **kwargs)

    def __call__(self, *args, **kwargs):
        # Dispatch on the git subcommand so that remote reads and writes
        # are logged appropriately; anything else passes straight through.
        if args:
            if args[0] == 'push':
                with abdt_logging.remote_io_write_event_context(
                        'git-push',
                        '{}: {} {}'.format(
                            self._description, ' '.join(args), kwargs)):
                    return self._repo(*args, **kwargs)
            elif args[0] in ('fetch', 'pull', 'ls-remote'):
                # N.B. git-archive may also read but we're not using it
                return self._log_read_call(args, kwargs)
            elif len(args) >= 2 and args[:2] == ('remote', 'prune'):
                return self._log_read_call(args, kwargs)
        return self._repo(*args, **kwargs)

    def get_remote(self):
        # Return the string name of the remote this repo pushes/fetches to.
        return self._remote


def _get_branch_to_hash(repo):
    # Map short local branch names to their sha1 for all branches that
    # live under this repo's remote.
    remote = repo.get_remote()
    hash_ref_list = repo.hash_ref_pairs

    def is_remote(ref):
        return phlgitu_ref.is_under_remote(ref, remote)

    # XXX: can't use dictionary comprehensions until the linters don't
    # complain
    full_to_short = phlgitu_ref.fq_remote_to_short_local
    branch_to_hash = dict([
        (full_to_short(r), h) for h, r in hash_ref_list if is_remote(r)
    ])

    return branch_to_hash


def get_managed_branches(repo, repo_desc, naming, branch_link_callable=None):
    # Build the list of abdt_branch.Branch objects that arcyd manages in
    # 'repo', pairing up review branches with their tracker branches.
    branch_to_hash = _get_branch_to_hash(repo)
    branch_pairs = abdt_naming.get_branch_pairs(
        branch_to_hash.keys(), naming)

    managed_branches = []
    lander = abdt_lander.squash

    for b in branch_pairs:
        branch_url = None
        review_branch = b.review
        tracker_branch = b.tracker

        # a pair must have at least one side, by construction
        assert review_branch is not None or tracker_branch is not None

        review_hash = None
        tracker_hash = None

        if review_branch is not None:
            review_hash = branch_to_hash[review_branch.branch]
            if branch_link_callable:
                branch_url = branch_link_callable(review_branch.branch)

        if tracker_branch is not None:
            tracker_hash = branch_to_hash[tracker_branch.branch]

        managed_branches.append(
            abdt_branch.Branch(
                repo,
                review_branch,
                review_hash,
                tracker_branch,
                tracker_hash,
                lander,
                repo_desc,
                branch_url))

    return managed_branches


def checkout_master_fetch_special_refs(repo, remote):
    # fetch the 'landed' and 'abandoned' refs, if they exist

    # We must checkout master before fetching the special refs to the
    # local branches. Otherwise we might be attempting to overwrite the
    # current branch with fetch, which would fail.
    phlgit_checkout.branch(repo, 'master')

    branch_refspec = '+refs/heads/*:refs/remotes/origin/*'
    arcyd_refspec = '+{refspace}/*:refs/heads/{branchspace}/*'.format(
        refspace=_ARCYD_REFSPACE,
        branchspace=_PRIVATE_ARCYD_BRANCHSPACE)
    refspec_list = [branch_refspec, arcyd_refspec]
    phlgit_fetch.prune_safe(repo, remote, refspec_list)


# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
{ "repo_name": "cs-shadow/phabricator-tools", "path": "py/abd/abdt_git.py", "copies": "4", "size": "15801", "license": "apache-2.0", "hash": 2532396324153288700, "line_mean": 32.2652631579, "line_max": 79, "alpha_frac": 0.6164799696, "autogenerated": false, "ratio": 3.827761627906977, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 475 }
"""Abstraction for communicating with i2c servo cape by Sean Ketring """ import time import bot.lib.lib as lib import smbus class ServoCape(object): """Protocol: Cape expects to recieve a 6 byte array if the first byte is 0x00 next 5 bytes are servo angles. otherwise the number of a demo. """ def __init__(self, cape_config): """Initialize vars""" self.logger = lib.get_logger() self.bot_config = lib.get_config() self.bus = smbus.SMBus(cape_config["i2c_bus"]) self.addr = cape_config["i2c_addr"] #TODO(Ahmed): Figure out how to use regs self.reg = 0xFF if self.bot_config["test_mode"]["servo_cape"]: self.logger.debug("running in test mode") else: self.logger.debug("non test-mode, real hardware") @lib.api_call def transmit_block(self, array): try: self.bus.write_i2c_block_data(self.addr, self.reg, array) except IOError as err: self.logger.debug(err) return err
{ "repo_name": "IEEERobotics/bot", "path": "bot/hardware/servo_cape.py", "copies": "1", "size": "1152", "license": "bsd-2-clause", "hash": 6617001060802618000, "line_mean": 25.1818181818, "line_max": 68, "alpha_frac": 0.5546875, "autogenerated": false, "ratio": 3.7281553398058254, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.47828428398058254, "avg_score": null, "num_lines": null }
"""Abstraction for dealing with products and the files they contain.""" import os from xml.dom.minidom import parseString from .coda_aware import CODA_Aware class Product(CODA_Aware): """A CODA product, composed mainly of NetCDF files.""" def __init__(self, uuid, work_dir=""): self.files = [] self.work_dir = work_dir self.uuid = uuid uuid_query = "Products('{}')".format(self.uuid) nodes = self.query(uuid_query + "/Nodes") dom = parseString(nodes.text) self.root = dom.getElementsByTagName("title").item( 1).firstChild.nodeValue try: os.makedirs(os.path.join(work_dir, self.root)) except FileExistsError: pass manifest_file = self.get("xfdumanifest.xml") with open(manifest_file) as manifest: manifest_dom = parseString(manifest.read()) files = [x.attributes.get("href").nodeValue for x in manifest_dom.getElementsByTagName("fileLocation")] self.files = [f if not f.startswith("./") else f[2:] for f in files] def get(self, filename): """If needed, retrieve, and return absolute path to file.""" if self.files and not filename in self.files: raise FileNotFoundError("The requested file is not part of this " "product.") path = os.path.join(self.work_dir, self.root, filename) if not os.path.exists(path): # Get the file from the server and put it there result = self.query("Products('{}')/Nodes('{}')/Nodes('{" "}')/$value".format(self.uuid, self.root, filename)) with open(path, "wb") as f: for chunk in result.iter_content(chunk_size=1024): if chunk: f.write(chunk) return os.path.abspath(path)
{ "repo_name": "erget/Presence", "path": "codaPresence/product.py", "copies": "1", "size": "1964", "license": "mit", "hash": -52098398387407240, "line_mean": 37.5098039216, "line_max": 77, "alpha_frac": 0.5570264766, "autogenerated": false, "ratio": 4.278867102396514, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5335893578996515, "avg_score": null, "num_lines": null }
"""Abstraction for invoking a lambda function.""" import json from typing import Any, Optional, Dict, List, Union, Tuple # noqa from chalice.config import DeployedResources # noqa from chalice.awsclient import TypedAWSClient # noqa from chalice.utils import UI # noqa from chalice.compat import StringIO OptStr = Optional[str] _ERROR_KEY = 'FunctionError' _ERROR_VALUE = 'Unhandled' def _response_is_error(response): # type: (Dict[str, Any]) -> bool return response.get(_ERROR_KEY) == _ERROR_VALUE class UnhandledLambdaError(Exception): pass class LambdaInvokeHandler(object): """Handler class to coordinate making an invoke call to lambda. This class takes a LambdaInvoker, a LambdaResponseFormatter, and a UI object in order to make an invoke call against lambda, format the response and render it to the UI. """ def __init__(self, invoker, formatter, ui): # type: (LambdaInvoker, LambdaResponseFormatter, UI) -> None self._invoker = invoker self._formatter = formatter self._ui = ui def invoke(self, payload=None): # type: (OptStr) -> None response = self._invoker.invoke(payload) formatted_response = self._formatter.format_response(response) if _response_is_error(response): self._ui.error(formatted_response) raise UnhandledLambdaError() self._ui.write(formatted_response) class LambdaInvoker(object): def __init__(self, lambda_arn, client): # type: (str, TypedAWSClient) -> None self._lambda_arn = lambda_arn self._client = client def invoke(self, payload=None): # type: (OptStr) -> Dict[str, Any] return self._client.invoke_function( self._lambda_arn, payload=payload ) class LambdaResponseFormatter(object): _PAYLOAD_KEY = 'Payload' _TRACEBACK_HEADING = 'Traceback (most recent call last):\n' def format_response(self, response): # type: (Dict[str, Any]) -> str formatted = StringIO() payload = response[self._PAYLOAD_KEY].read() if _response_is_error(response): self._format_error(formatted, payload) else: self._format_success(formatted, payload) return str(formatted.getvalue()) def 
_format_error(self, formatted, payload): # type: (StringIO, bytes) -> None loaded_error = json.loads(payload) error_message = loaded_error['errorMessage'] error_type = loaded_error.get('errorType') stack_trace = loaded_error.get('stackTrace') if stack_trace is not None: self._format_stacktrace(formatted, stack_trace) if error_type is not None: formatted.write('{}: {}\n'.format(error_type, error_message)) else: formatted.write('{}\n'.format(error_message)) def _format_stacktrace(self, formatted, stack_trace): # type: (StringIO, List[List[Union[str, int]]]) -> None formatted.write(self._TRACEBACK_HEADING) for frame in stack_trace: self._format_frame(formatted, frame) def _format_frame(self, formatted, frame): # type: (StringIO, Union[str, List[Union[str, int]]]) -> None if isinstance(frame, list): # If the output is a list, it came from a 4-tuple as a result of # an extract_tb call. This is the behavior up to and including # python 3.6. path, lineno, function, code = frame formatted.write( ' File "{}", line {}, in {}\n'.format(path, lineno, function)) formatted.write( ' {}\n'.format(code)) else: # If it is not a list, its a string. This is because the 4-tuple # was replaced with a FrameSummary object which is serialized as # a string by Lambda. In this case we can just print it directly. formatted.write(frame) def _format_success(self, formatted, payload): # type: (StringIO, bytes) -> None formatted.write('{}\n'.format(str(payload.decode('utf-8'))))
{ "repo_name": "awslabs/chalice", "path": "chalice/invoke.py", "copies": "1", "size": "4144", "license": "apache-2.0", "hash": -2818401438951032000, "line_mean": 34.1186440678, "line_max": 79, "alpha_frac": 0.6213803089, "autogenerated": false, "ratio": 4.019398642095053, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5140778950995053, "avg_score": null, "num_lines": null }