import math
import sys
import re

valuePattern = re.compile(r'= (.+)$')

def extractValue(line):
    match = re.search(valuePattern, line)
    if match:
        return float.fromhex(match.group(1))
    else:
        return "ERROR"

intervalPattern = re.compile(r'= \[(.*?), (.*?)\]')

def extractInterval(line):
    match = re.search(intervalPattern, line)
    if match:
        lower = float.fromhex(match.group(1))
        upper = float.fromhex(match.group(2))
        return (lower, upper)
    else:
        return "ERROR"

def isInInterval(value, lower, upper):
    return lower <= value and value <= upper

# f1 - values, f2 - ranges
f1 = open(str(sys.argv[1]), 'r')
f2 = open(str(sys.argv[2]), 'r')
wide = 0
total = 0
result = 0
for line1, line2 in zip(f1.readlines(), f2.readlines()):
    total += 1
    value = extractValue(line1)
    lower, upper = extractInterval(line2)
    if math.isnan(value):
        # a NaN value is only acceptable if the interval is not finite
        if math.isfinite(lower) and math.isfinite(upper):
            print(line1)
            print(line2)
            result = 1
        continue
    if lower != upper:
        wide += 1
    if not isInInterval(value, lower, upper):
        print(line1)
        print(line2)
        result = 1
print(total, wide)
f1.close()
f2.close()
sys.exit(result)
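# Usage sketch (hedged; the file names and values below are hypothetical, only
# the line format is implied by the regexes above). The script expects C99
# hex-float literals, one "values" line paired with one "ranges" line:
#     x = 0x1.921fb54442d18p+1
#     x = [0x1.921fb54442d18p+1, 0x1.921fb54442d19p+1]
# Run as, e.g.:
#     python check_intervals.py values.txt ranges.txt
# It prints the total line count and the number of non-degenerate intervals,
# and exits non-zero on any mismatch.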
# create a Session object by sessionmaker
import os
import sys
import ConfigParser

import sqlalchemy.orm

# get path to taskmanager. it is assumed that this script is in the lib directory of
# the taskmanager package.
tmpath = os.path.normpath( os.path.join( os.path.dirname( os.path.realpath(__file__) ) + '/..' ) )
etcpath = '%s/etc' % tmpath  # for configuration files

# library is in the same folder
from hDatabase import Base

class hDBSessionMaker( object ):
    def __init__( self, configFileName=None, createTables=False, echo=False ):
        if not configFileName:
            # use default config file
            etcpath = os.path.normpath( os.path.join( os.path.dirname( os.path.realpath(__file__) ) + '/../etc' ) )
            # default config file for database connection
            configFileName = "{etcPath}/serversettings.cfg".format(etcPath=etcpath)

        # read config file
        if os.path.exists( configFileName ):
            config = ConfigParser.ConfigParser()
            config.read( configFileName )
        else:
            sys.stderr.write( "ERROR: Could not find Config file {c}!".format( c=configFileName) )
            sys.exit( -1 )

        databaseDialect = config.get( 'DATABASE', 'database_dialect' )
        databaseHost = config.get( 'DATABASE', 'database_host' )
        databasePort = config.get( 'DATABASE', 'database_port' )
        databaseName = config.get( 'DATABASE', 'database_name' )
        databaseUsername = config.get( 'DATABASE', 'database_username' )
        databasePassword = config.get( 'DATABASE', 'database_password' )

        ## @var engine
        # The engine that is connected to the database.
        # Use "echo=True" to print SQL statements to stdout.
        self.engine = sqlalchemy.create_engine(
            "{dialect}://{user}:{password}@{host}:{port}/{name}".format(
                dialect=databaseDialect,
                user=databaseUsername,
                password=databasePassword,
                host=databaseHost,
                port=databasePort,
                name=databaseName),
            pool_size=50,       # number of connections to keep open inside the connection pool
            max_overflow=100,   # number of connections allowed in the pool's "overflow", i.e. connections that can be opened above and beyond pool_size (which defaults to five)
            pool_recycle=3600,  # recycle connections after the given number of seconds has passed
            echo=False )

        # Create all tables in the engine. This is equivalent to "CREATE TABLE"
        # statements in raw SQL.
        Base.metadata.create_all( self.engine )

        ## @var DBSession
        # define a Session class which will serve as a factory for new Session objects
        #
        # http://docs.sqlalchemy.org/en/rel_0_9/orm/session.html:
        # Session is a regular Python class which can be directly instantiated. However, to standardize how sessions are
        # configured and acquired, the sessionmaker class is normally used to create a top-level Session configuration
        # which can then be used throughout an application without the need to repeat the configurational arguments.
        # sessionmaker() is a Session factory. A factory is just something that produces a new object when called.
        #
        # Thread-local factory for sessions. See http://docs.sqlalchemy.org/en/rel_0_9/orm/session.html#contextual-thread-local-sessions
        SessionFactory = sqlalchemy.orm.sessionmaker( bind = self.engine )
        self.DBSession = sqlalchemy.orm.scoped_session( SessionFactory )
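# Usage sketch (a minimal example, assuming a valid serversettings.cfg and a
# mapped object `job`, both hypothetical):
#
#     session_maker = hDBSessionMaker()
#     session = session_maker.DBSession()   # scoped_session: one session per thread
#     session.add(job)
#     session.commit()
#     session_maker.DBSession.remove()      # release the thread-local session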
                        break
                    else:
                        xlog.warn("entity header:%s", line)
                        break
                payload += self.rfile.read(chunk_size)
                get_crlf(self.rfile)

        gae_handler.handler(self.command, self.path, request_headers, payload, self.wfile)

    def do_CONNECT(self):
        touch_active()
        host, _, port = self.path.rpartition(':')
        if host in config.HOSTS_GAE:
            return self.do_CONNECT_AGENT()
        if host in config.HOSTS_DIRECT:
            return self.do_CONNECT_DIRECT()
        if host.endswith(config.HOSTS_GAE_ENDSWITH):
            return self.do_CONNECT_AGENT()
        if host.endswith(config.HOSTS_DIRECT_ENDSWITH):
            return self.do_CONNECT_DIRECT()
        return self.do_CONNECT_AGENT()

    def do_CONNECT_AGENT(self):
        """deploy fake cert to client"""
        # GAE supports the following HTTP methods: GET, POST, HEAD, PUT, DELETE, and PATCH
        host, _, port = self.path.rpartition(':')
        port = int(port)
        certfile = CertUtil.get_cert(host)
        xlog.info('GAE %s %s:%d ', self.command, host, port)
        self.__realconnection = None
        self.wfile.write(b'HTTP/1.1 200 OK\r\n\r\n')

        try:
            ssl_sock = ssl.wrap_socket(self.connection, keyfile=certfile, certfile=certfile, server_side=True)
        except ssl.SSLError as e:
            xlog.info('ssl error: %s, create full domain cert for host:%s', e, host)
            certfile = CertUtil.get_cert(host, full_name=True)
            return
        except Exception as e:
            if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET):
                xlog.exception('ssl.wrap_socket(self.connection=%r) failed: %s path:%s, errno:%s', self.connection, e, self.path, e.args[0])
            return

        self.__realconnection = self.connection
        self.__realwfile = self.wfile
        self.__realrfile = self.rfile
        self.connection = ssl_sock
        self.rfile = self.connection.makefile('rb', self.bufsize)
        self.wfile = self.connection.makefile('wb', 0)

        try:
            self.raw_requestline = self.rfile.readline(65537)
            if len(self.raw_requestline) > 65536:
                self.requestline = ''
                self.request_version = ''
                self.command = ''
                self.send_error(414)
                xlog.warn("read request line len:%d", len(self.raw_requestline))
                return
            if not self.raw_requestline:
                xlog.warn("read request line empty")
                return
            if not self.parse_request():
                xlog.warn("parse request fail:%s", self.raw_requestline)
                return
        except NetWorkIOError as e:
            if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.EPIPE):
                xlog.exception('ssl.wrap_socket(self.connection=%r) failed: %s path:%s, errno:%s', self.connection, e, self.path, e.args[0])
                raise

        if self.path[0] == '/' and host:
            self.path = 'https://%s%s' % (self.headers['Host'], self.path)

        if self.path == "https://www.twitter.com/xxnet":
            # for web_ui status page
            # auto-detect whether the browser proxy setting works
            xlog.debug("CONNECT %s %s", self.command, self.path)
            return self.wfile.write(self.self_check_response_data)

        xlog.debug('GAE CONNECT %s %s', self.command, self.path)
        if self.command not in self.gae_support_methods:
            if host.endswith(".google.com") or host.endswith(config.HOSTS_DIRECT_ENDSWITH) or host.endswith(config.HOSTS_GAE_ENDSWITH):
                if host in config.HOSTS_GAE:
                    gae_set = [s for s in config.HOSTS_GAE]
                    gae_set.remove(host)
                    config.HOSTS_GAE = tuple(gae_set)
                if host not in config.HOSTS_DIRECT:
                    fwd_set = [s for s in config.HOSTS_DIRECT]
                    fwd_set.append(host)
                    config.HOSTS_DIRECT = tuple(fwd_set)
                xlog.warn("Method %s not supported in GAE, redirect to DIRECT for %s", self.command, self.path)
                return self.wfile.write(('HTTP/1.1 301\r\nLocation: %s\r\n\r\n' % self.path).encode())
            else:
                xlog.warn("Method %s not supported in GAEProxy for %s", self.command, self.path)
                return self.wfile.write(('HTTP/1.1 404 Not Found\r\n\r\n').encode())

        try:
            if self.path[0] == '/' and host:
                self.path = 'http://%s%s' % (host, self.path)
            elif not host and '://' in self.path:
                host = urlparse.urlparse(self.path).netloc

            self.parsed_url = urlparse.urlparse(self.path)

            return self.do_AGENT()
        except NetWorkIOError as e:
            if e.args[0] not in (errno.ECONNABORTED, errno.ETIMEDOUT, errno.EPIPE):
                raise
        finally:
            if self.__realconnection:
                try:
                    self.__realconnection.shutdown(socket.SHUT_WR)
                    self.__realconnection.close()
                except NetWorkIOError:
                    pass
                finally:
                    self.__realconnection = None

    def do_CONNECT_DIRECT(self):
        """deploy fake cert to client"""
        host, _, port = self.path.rpartition(':')
        port = int(port)
        if port != 443:
            xlog.warn("CONNECT %s port:%d not supported", host, port)
            return

        certfile = CertUtil.get_cert(host)
        xlog.info('GAE %s %s:%d ', self.command, host, port)
        self.__realconnection = None
        self.wfile.write(b'HTTP/1.1 200 OK\r\n\r\n')

        try:
            ssl_sock = ssl.wrap_socket(self.connection, keyfile=certfile, certfile=certfile, server_side=True)
        except ssl.SSLError as e:
            xlog.info('ssl error: %s, create full domain cert for host:%s', e, host)
            certfile = CertUtil.get_cert(host, full_name=True)
            return
        except Exception as e:
            if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET):
                xlog.exception('ssl.wrap_socket(self.connection=%r) failed: %s path:%s, errno:%s', self.connection, e, self.path, e.args[0])
            return

        self.__realconnection = self.connection
        self.__realwfile = self.wfile
        self.__realrfile = self.rfile
        self.connection = ssl_sock
        self.rfile = self.connection.makefile('rb', self.bufsize)
        self.wfile = self.connection.makefile('wb', 0)

        try:
            self.raw_requestline = self.rfile.readline(65537)
            if len(self.raw_requestline) > 65536:
                self.requestline = ''
                self.request_version = ''
                self.command = ''
                self.send_error(414)
                return
            if not self.raw_requestline:
                self.close_connection = 1
                return
            if not self.parse_request():
                return
        except NetWorkIOError as e:
            if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.EPIPE):
                raise

        if self.path[0] == '/' and host:
            self.path = 'https://%s%s' % (self.headers['Host'], self.path)

        xlog.debug('GAE CONNECT Direct %s %s', self.command, self.path)

        try:
            if self.path[0] == '/' and host:
                self.path = 'http://%s%s' % (host, self.path)
            elif not host and '://' in self.path:
                host = urlparse.urlparse(self.path).netloc

            self.parsed_url = urlparse.urlparse(self.path)
            if len(self.parsed_url[4]):
                path = '?'.join([self.parsed_url[2], self.parsed_url[4]])
            else:
                path = self.parsed_url[2]

            request_headers = dict((k.title(), v) for k, v in self.headers.items())

            payload = b''
            if 'Content-Length' in request_headers:
                try:
                    payload_len = int(request_headers.get('Content
def func(bar):
    """
    \\some comment

    @param bar: The parameter value.
    @type bar: Its type."""
    pass
def _dataset_for_stmt(ds, extra_test, body, init_state):
  """Overload of for_stmt that iterates over TF Datasets."""
  if extra_test is not None:
    raise NotImplementedError(
        'break and return statements are not yet supported in '
        'for/Dataset loops.')

  def reduce_body(state, iterate):
    new_state = body(iterate, *state)
    return new_state

  if init_state:
    return ds.reduce(init_state, reduce_body)

  # Workaround for Dataset.reduce not allowing empty state tensors - create
  # a dummy state variable that remains unused.
  def reduce_body_with_dummy_state(state, iterate):
    reduce_body((), iterate)
    return state
  ds.reduce((constant_op.constant(0),), reduce_body_with_dummy_state)
  return ()


def while_stmt(test, body, init_state, opts=None):
  """Functional form of a while statement.

  The loop operates on a so-called state, which includes all symbols that are
  variant across loop iterations. In what follows we refer to state as either
  a tuple of entities that represent an actual state, or a list of arguments
  of the corresponding types.

  Args:
    test: Callable with the state as arguments, and boolean return type. The
        loop condition.
    body: Callable with the state as arguments, and state as return type. The
        actual loop body.
    init_state: Tuple containing the initial state.
    opts: Optional dict of extra loop parameters.

  Returns:
    Tuple containing the final state.
  """
  # Evaluate the initial test once in order to do the dispatch. The evaluation
  # is isolated to minimize unwanted side effects.
  # TODO(mdan): Do a full iteration - some state types might lower to Tensor.
  with func_graph.FuncGraph('tmp').as_default():
    init_test = test(*init_state)

  # TensorFlow: Multiple evaluations are acceptable in this case, so we're fine
  # with the re-evaluation of `test` that `_tf_while_stmt` will make.
  if tensor_util.is_tensor(init_test):
    return _tf_while_stmt(test, body, init_state, opts)

  # Normal Python: We already consumed one evaluation of `test`; consistently,
  # unroll one iteration before dispatching to a normal loop.
  # TODO(mdan): Push the "init_test" value via opts into _py_while_stmt?
  if not init_test:
    return init_state
  init_state = body(*init_state)

  return _py_while_stmt(test, body, init_state, opts)


def _tf_while_stmt(test, body, init_state, opts):
  """Overload of while_stmt that stages a TF while_stmt."""
  if opts is None:
    opts = {}

  undefined = tuple(filter(special_values.is_undefined, init_state))
  if undefined:
    raise ValueError(
        'TensorFlow requires that the following symbols must be initialized '
        'to a Tensor, Variable or TensorArray before the loop: {}'.format(
            tuple(s.symbol_name for s in undefined)))

  # Non-v2 while_loop unpacks the results when there is only one return value.
  # This enforces consistency across versions.
  opts['return_same_structure'] = True

  retval = control_flow_ops.while_loop(test, body, init_state, **opts)
  return retval


class _PythonLoopChecker(object):
  """Verifies Python loops for TF-specific limits."""

  def __init__(self):
    self.iterations = 0
    self.check_inefficient_unroll = WARN_INEFFICIENT_UNROLL

    # Triggered when we decided to test the op counts.
    self.check_op_count_after_iteration = False

  def _get_ops(self):
    return ops.get_default_graph().get_operations()

  def _check_unroll_limits(self):
    if LIMIT_PYTHON_ITERATIONS and self.iterations > PYTHON_MAX_ITERATIONS:
      raise errors.ExecutionError('Python', 'iteration limit exceeded')

  def _stop_checking_inefficient_unroll(self):
    self.check_inefficient_unroll = False
    self.ops_before_iteration = None

  def _verify_inefficient_unroll(self):
    """Checks for possibly-inefficient creation of ops in a Python loop."""
    assert self.ops_before_iteration is not None
    ops_after_iteration = self._get_ops()
    new_ops = tuple(
        op for op in ops_after_iteration if op not in self.ops_before_iteration)

    if len(new_ops) < INEFFICIENT_UNROLL_MIN_OPS:
      return False

    # TODO(mdan): Add location information.
    ag_logging.warn(
        'TensorFlow ops are being created in a Python loop with a large number'
        ' of iterations. This can lead to slow startup. Did you mean to use a'
        ' TensorFlow loop? For example, `while True:` is a Python loop, and'
        ' `while tf.constant(True):` is a TensorFlow loop. The following'
        ' ops were created after iteration %s: %s', self.iterations, new_ops)
    return True

  def before_iteration(self):
    """Called before each iteration in a Python loop."""
    if (self.check_inefficient_unroll
        and self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS):
      self.ops_before_iteration = self._get_ops()
      self.check_op_count_after_iteration = True

  def after_iteration(self):
    """Called after each iteration in a Python loop."""
    self.iterations += 1

    self._check_unroll_limits()

    if self.check_inefficient_unroll and self.check_op_count_after_iteration:
      did_warn = self._verify_inefficient_unroll()
      if did_warn:
        self._stop_checking_inefficient_unroll()  # Only warn once.
      elif self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS + 3:
        # Once deciding to check the op counts, only do it for a few iterations.
        self._stop_checking_inefficient_unroll()


def _py_while_stmt(test, body, init_state, opts):
  """Overload of while_stmt that executes a Python while loop."""
  del opts

  if __debug__:
    checker = _PythonLoopChecker()

  state = init_state
  while test(*state):
    if __debug__:
      checker.before_iteration()

    state = body(*state)

    if __debug__:
      checker.after_iteration()
  return state


def if_stmt(cond, body, orelse, get_state, set_state):
  """Functional form of an if statement.

  Args:
    cond: Boolean.
    body: Callable with no arguments, and outputs of the positive (if) branch
        as return type.
    orelse: Callable with no arguments, and outputs of the negative (else)
        branch as return type.
    get_state: Function that returns a tuple containing the values of all
        composite symbols modified within the conditional. This allows access
        to state that branches may mutate through side effects. This function
        is not needed and should not be called when dispatching to code
        matching Python's default semantics. This is useful for checkpointing
        to avoid unintended side effects when staging requires evaluating all
        code paths.
    set_state: Function to set the values of all composite symbols modified
        within the conditional. This is the complement to get_state, used to
        restore checkpointed values. The single argument is a tuple containing
        values for each composite symbol that may be modified in a branch of
        the conditional. This is usually the result of a call to get_state.

  Returns:
    Tuple containing the statement outputs.
  """
  if tensor_util.is_tensor(cond):
    return tf_if_stmt(cond, body, orelse, get_state, set_state)
  else:
    return _py_if_stmt(cond, body, orelse)


def tf_if_stmt(cond, body, orelse, get_state, set_state):
  """Overload of if_stmt that stages a TF cond."""
  body = _disallow_undefs(body, branch_name='if')
  orelse = _disallow_undefs(orelse, branch_name='else')

  body = _isolate_state(body, get_state, set_state)
  orelse = _isolate_state(orelse, get_state, set_state)

  # `state` currently includes the values of any composite symbols (e.g. `a.b`)
  # composites modified by the loop. `outputs` includes the values of basic
  # symbols (e.g. `a`) which cannot be passed by reference and must be returned.
  # See _isolate_state.
  # TODO(mdan): We should minimize calls to get/set_state.
  outputs, final_state = control_flow_ops.cond(cond, body, orelse)
  set_state(final_state)

  return outputs


def _isolate_state(func, get_state, set_state):
  """Wraps func to (best-effort) isolate state mutations that func may do.

  The simplest example of state mutation is mutation of variables (via e.g.
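# Illustration (an aside, not part of the original module): the dispatch in
# while_stmt hinges on whether the loop condition lowers to a Tensor. In user
# code the two cases look like:
#
#     i = 0
#     while i < 10:      # Python value -> _py_while_stmt, loop is unrolled
#         i += 1
#
#     i = tf.constant(0)
#     while i < 10:      # Tensor condition -> _tf_while_stmt, staged as one tf.while_loop
#         i += 1
#
# In the unrolled case, _PythonLoopChecker warns when each iteration keeps
# creating new graph ops, which is the slow-startup pattern the warning above
# describes.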
del,), ),
        migrations.CreateModel(
            name='Ddjj',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ano', models.IntegerField()),
                ('tipo_ddjj_id', models.IntegerField(choices=[(0, 'alta'), (1, 'baja'), (2, 'inicial'), (3, 'anual')])),
                ('funcionario', models.CharField(help_text='Este campo lo completa el sistema.', max_length=255, blank=True)),
                ('url', models.CharField(help_text='Url DocumentCloud', max_length=255, blank=True)),
                ('key', models.IntegerField(help_text='Este campo lo completa el sistema.', null=True, blank=True)),
                ('clave', models.CharField(help_text='Este campo lo completa el sistema.', max_length=255, blank=True)),
                ('flag_presenta', models.IntegerField(default=1, choices=[(0, 'Si'), (1, 'No')], blank=True, help_text="<strong style='color:blue'>'Solo el PDF'</strong> si solo se muestra el pdf, ej: cartas donde declaran que la ddjj es igual a la del a\xf1o anterior", null=True, verbose_name='Carta de DDJJ')),
                ('obs', models.TextField(blank=True)),
                ('flag_search', models.CharField(help_text='Este campo lo completa el sistema.', max_length=255, blank=True)),
                ('visitas', models.DecimalField(null=True, max_digits=10, decimal_places=0, blank=True)),
                ('status', models.IntegerField(default=0, help_text='Indica si puede ser publicada', choices=[(0, 'Deshabilitado'), (1, 'Habilitado')])),
                ('poder_id', models.IntegerField(choices=[(0, 'Ejecutivo'), (1, 'Legislativo'), (2, 'Judicial')])),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['persona'],
                'db_table': 'ddjjs',
                'verbose_name': 'Declaraci\xf3n Jurada',
                'verbose_name_plural': 'Declaraciones Juradas',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Jurisdiccion',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nombre', models.CharField(max_length=255)),
                ('poder_id', models.IntegerField(blank=True, null=True, choices=[(0, 'Ejecutivo'), (1, 'Legislativo'), (2, 'Judicial')])),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['nombre'],
                'db_table': 'jurisdiccions',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='NombreBien',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nombre', models.CharField(max_length=255, blank=True)),
                ('tipo_bien_id', models.IntegerField(null=True, blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['nombre'],
                'db_table': 'nombre_biens',
                'verbose_name_plural': 'Nombre Bienes',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Persona',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('apellido', models.CharField(max_length=255)),
                ('nombre', models.CharField(max_length=255)),
                ('legajo', models.CharField(max_length=255, blank=True)),
                ('tipo_documento_id', models.IntegerField(blank=True, null=True, choices=[(0, 'dni'), (1, 'le'), (2, 'lc'), (3, 'pasaporte')])),
                ('documento', models.IntegerField(null=True, blank=True)),
                ('cuit_cuil', models.CharField(max_length=255, blank=True)),
                ('nacimento', models.DateField(null=True, blank=True)),
                ('sexo_id', models.IntegerField(blank=True, null=True, choices=[(0, 'M'), (1, 'F')])),
                ('estado_civil_id', models.IntegerField(blank=True, null=True, choices=[(0, 'Casado/a'), (1, 'C\xf3nyugue'), (2, 'Divorciado/a'), (3, 'Separado'), (4, 'Soltero/a'), (5, 'U. Hecho'), (6, 'Viudo/a')])),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('tag_id', models.CharField(help_text='ID del tag en el diario La Naci\xf3n', max_length=255, blank=True)),
                ('tag_img_id', models.CharField(help_text='ID de la img del tag', max_length=255, blank=True)),
                ('tag_descripcion', models.CharField(help_text='Descripcion del tag Nacion', max_length=255, blank=True)),
                ('ficha_d_l', models.CharField(help_text='Url ficha de Directorio Legislativo', max_length=255, blank=True)),
            ],
            options={
                'ordering': ['apellido', 'nombre'],
                'db_table': 'personas',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='PersonaCargo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('flag_ingreso', models.IntegerField(null=True, blank=True)),
                ('ingreso', models.DateField(null=True, blank=True)),
                ('egreso', models.DateField(null=True, blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('cargo', models.ForeignKey(to='admin_ddjj_app.Cargo')),
                ('jurisdiccion', models.ForeignKey(blank=True, to='admin_ddjj_app.Jurisdiccion', null=True)),
                ('persona', models.ForeignKey(to='admin_ddjj_app.Persona')),
            ],
            options={
                'ordering': ['cargo'],
                'db_table': 'persona_cargos',
                'verbose_name_plural': 'Persona Cargos',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TiempoControls',
            fields=[
                ('id', models.IntegerField(serialize=False, primary_key=True)),
                ('dias', models.CharField(max_length=255, blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'tiempo_controls',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TipoBien',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nombre', models.CharField(max_length=255, blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['nombre'],
                'db_table': 'tipo_biens',
                'verbose_name_plural': 'Tipo Bienes',
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='ddjj',
            name='persona',
            field=models.ForeignKey(related_name='ddjjs', to='admin_ddjj_app.Persona'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='ddjj',
            name='persona_cargo',
            field=models.ForeignKey(related_name='ddjjs', to='admin_ddjj_app.PersonaCargo', help_text='Indique e
import os
import json
import urllib
import socket
import subprocess as sub
import string
import sys

MPCONF = "/etc/magicpool.conf"
MPURL = "https://magicpool.org/main/download_config/${U}/${W}/${G}"
SGCONF = "/home/crypto/.sgminer/sgminer.conf"

def niceprint(data):
    return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': ')).__str__()

def getURL(url):
    try:
        u = urllib.urlopen(url)
        data = u.read()
    except:
        print("ERROR: cannot fetch url %s" % url)
        sys.exit(1)
    return data

def saveConf(conf):
    os.system("cp %s %s" % (SGCONF, SGCONF + ".old"))
    c = open(SGCONF, "w")
    c.write(niceprint(conf))
    c.close()

def restart():
    os.system("md5sum %s | awk '{print $1}' > /tmp/get-pool.md5.1" % SGCONF)
    os.system("md5sum %s | awk '{print $1}' > /tmp/get-pool.md5.2" % (SGCONF + ".old"))
    md51 = open("/tmp/get-pool.md5.1", "r")
    md52 = open("/tmp/get-pool.md5.2", "r")
    if md51.read() == md52.read():
        print "No changes in configuration"
    else:
        print "Found changes in configuration, restarting sgminer"
        #os.system('echo "quit|1" | nc 127.0.0.1 4028')
        os.system('killall -USR1 sgminer')
    md51.close()
    md52.close()

def getMPconf():
    try:
        mpconf = open(MPCONF, "r")
        mp = json.loads(mpconf.read())
        user = mp['username']
        worker = mp['workeralias']
    except:
        user = "generic"
        worker = "generic"
    return {"user": user, "worker": worker}

def getMPremote():
    url = MPURL
    mpconf = getMPconf()
    gpu = getGPU()
    s = string.Template(MPURL)
    mpurl = s.substitute(U=mpconf["user"], W=mpconf["worker"], G=gpu)
    print("Requesting URL %s" % mpurl)
    print(getURL(mpurl))
    try:
        data = json.loads(getURL(mpurl))
    except:
        print("ERROR: Cannot decode the magicpool json response")
        sys.exit(1)
    if 'ERROR' in data:
        print("ERROR: Some error in magicpool web server")
        sys.exit(1)
    if 'REBOOT' in data:
        os.system("sudo reboot")  # os.execute does not exist; os.system runs the command
        sys.exit(2)
    return data

def getSGconf():
    try:
        fd_conf = open(SGCONF, "r")
        data = json.loads(fd_conf.read())
        fd_conf.close()
    except:
        print("WARNING: cannot read current sgminer config file")
        data = {}
    return data

def getGPU():
    vcards = []
    p = sub.Popen('lspci', stdout=sub.PIPE, stderr=sub.PIPE)
    output, errors = p.communicate()
    for pci in string.split(output, '\n'):
        if string.find(pci, 'VGA') > 0:
            try:
                vcards.append(string.split(pci, ':')[2])
            except:
                print("Card not recognized")
    cards = ""
    for v in vcards:
        # URL-encode the card description (note: keeps only the last card)
        cards = v.replace(',', '').replace('\'', '').replace(' ', '%20').replace('[', '%5B').replace(']', '%5D')
    return cards

remoteconf = getMPremote()
saveConf(remoteconf)
restart()

#return json.loads(getURL(MPURL))
#print(niceprint(getSGconf()))
#conf["pools"] = remote["pools"]
#i=0
##while i < len(conf["pools"]):
#    new_u = conf["pools"][i]["user"].replace("USER",USER)
#    new_p = conf["pools"][i]["pass"].replace("PASS",PASS)
#    conf["pools"][i]["user"] = new_u
#    conf["pools"][i]["pass"] = new_p
#    i=i+1
#
#print niceprint(conf)
#fd_conf.close()
#saveConf()
#restart()
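# Example /etc/magicpool.conf (the two keys are what getMPconf() reads; the
# values are hypothetical, and anything unreadable falls back to "generic"):
#
#     {
#         "username": "alice",
#         "workeralias": "rig01"
#     }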
""" RUN RUN RUN ! """ from buttersalt import create_app from f
lask_script import Manager, Shell app = create_app('default') manager = Manager(app) def make_shell_context(): return dict(app=app) manager.add_command("shell", Shell(make_context=make_shell_context)) @manager.command def test(): """Run the unit tests.""" import unittest tests =
unittest.TestLoader().discover('tests') unittest.TextTestRunner(verbosity=2).run(tests) if __name__ == "__main__": manager.run()
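# Usage sketch (assuming this file is saved as manage.py, the usual
# Flask-Script convention):
#     python manage.py shell    # interactive shell with `app` preloaded
#     python manage.py test     # discover and run the tests in tests/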
#!/usr/bin/env python3
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import os
import random
import time
import typing
import subprocess
import argparse
import http.server
from urllib.parse import urlparse, parse_qs

TEST_NAMESPACE = 'automtls'
ISTIO_DEPLOY = 'svc-0-back-istio'
LEGACY_DEPLOY = 'svc-0-back-legacy'


class testHTTPServer_RequestHandler(http.server.BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        query = parse_qs(urlparse(self.path).query)
        istio_percent = random.random()
        if 'istio' in query:
            istio_percent = float(query['istio'][0])
        message = simulate_sidecar_rollout(istio_percent)
        self.wfile.write(bytes(message, "utf8"))
        return


def get_deployment_replicas(namespace, deployment: str):
    cmd = 'kubectl get deployment {dep} -n{ns} {jsonpath}'.format(
        ns=namespace, dep=deployment,
        jsonpath='''-ojsonpath={.status.replicas}''')
    p = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE)
    output = p.communicate()[0]
    if len(output) == 0:
        return 0
    return int(output)


def wait_deployment(namespace, deployment: str):
    cmd = ('kubectl rollout status deployments/{dep} -n{ns}').format(
        dep=deployment, ns=namespace
    )
    print(cmd)
    p = subprocess.Popen(cmd.split(' '))
    p.wait()


def scale_deployment(namespace, deployment: str, replica: int):
    cmd = 'kubectl scale deployment {dep} -n{ns} --replicas {replica}'.format(
        dep=deployment, ns=namespace, replica=replica
    )
    print(cmd)
    p = subprocess.Popen(cmd.split(' '))
    p.wait()


def simulate_sidecar_rollout(istio_percent: float):
    '''
    Updates deployments with or without Envoy sidecar.
    '''
    output = 'Namespace {}, sidecar deployment: {}, nosidecar deployment: {}'.format(
        TEST_NAMESPACE, ISTIO_DEPLOY, LEGACY_DEPLOY)
    # Wait to be stabilized before attempting to scale.
    wait_deployment(TEST_NAMESPACE, ISTIO_DEPLOY)
    wait_deployment(TEST_NAMESPACE, LEGACY_DEPLOY)
    istio_count = get_deployment_replicas(TEST_NAMESPACE, ISTIO_DEPLOY)
    legacy_count = get_deployment_replicas(TEST_NAMESPACE, LEGACY_DEPLOY)
    total = istio_count + legacy_count
    output = 'sidecar replica {}, legacy replica {}\n\n'.format(
        istio_count, legacy_count)
    istio_count = int(istio_percent * total)
    legacy_count = total - istio_count
    output += ('======================================\n'
               'Scale Istio count {sc}, legacy count {nsc}\n\n').format(
                   sc=istio_count, nsc=legacy_count
               )
    scale_deployment(TEST_NAMESPACE, ISTIO_DEPLOY, istio_count)
    scale_deployment(TEST_NAMESPACE, LEGACY_DEPLOY, legacy_count)
    print(output)
    return output


def continuous_rollout():
    '''
    Simulate long running rollout, used for large performance cluster.
    '''
    iteration = 1
    while True:
        print('Start rollout iteration {}'.format(iteration))
        message = simulate_sidecar_rollout(random.random())
        iteration += 1
        time.sleep(660)


parser = argparse.ArgumentParser(description='Auto mTLS test runner')
parser.add_argument('-m', '--mode', default='ci', type=str,
                    help='mode, http | ci')
args = parser.parse_args()

if __name__ == '__main__':
    if args.mode == 'http':
        print('starting the rollout server simulation...')
        server_address = ('127.0.0.1', 8000)
        httpd = http.server.HTTPServer(server_address, testHTTPServer_RequestHandler)
        httpd.serve_forever()
    else:
        continuous_rollout()
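# Usage sketch (the script filename is hypothetical):
#     ./rollout.py --mode http                    # serve on 127.0.0.1:8000
#     curl 'http://127.0.0.1:8000/?istio=0.75'    # move ~75% of replicas behind the sidecar
#     ./rollout.py --mode ci                      # loop forever, random split every 11 minutes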
from sklearn.feature_extraction.text import TfidfVectorizer
import xgboost as xgb
import cPickle as pickle
from string import punctuation
from nltk import word_tokenize
from nltk.stem import snowball
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from nltk.tokenize import PunktSentenceTokenizer
import time

stemmer = snowball.SnowballStemmer("english")

###############################################################################
# OHS tokenization code

def load_data(filename):
    '''
    Load data into a data frame for use in running the model.
    '''
    return pickle.load(open(filename, 'rb'))

def stem_tokens(tokens, stemmer):
    '''Stem the tokens.'''
    stemmed = []
    for item in tokens:
        stemmed.append(stemmer.stem(item))
    return stemmed

def OHStokenize(text):
    '''Tokenize & stem. Stems automatically for now. Leaving "stemmer" out of
    the function call so it works with TfidfVectorizer.'''
    tokens = word_tokenize(text)
    stems = stem_tokens(tokens, stemmer)
    return stems

###########################################################################
# tokenization code

def seperatePunct(incomingString):
    '''
    Input: str; Output: str with all punctuation separated by spaces
    '''
    outstr = ''
    characters = set(['!','@','#','$',"%","^","&","*",":","\\",
                      "(",")","+","=","?","\'","\"",";","/",
                      "{","}","[","]","<",">","~","`","|"])
    for char in incomingString:
        if char in characters:
            outstr = outstr + ' ' + char + ' '
        else:
            outstr = outstr + char
    return outstr

def hasNumbers(inputString):
    '''
    Input: str; Output: True if the string contains a digit
    '''
    return any(char.isdigit() for char in inputString)

def text_cleaner(wordList):
    '''
    INPUT: list of words to be tokenized
    OUTPUT: list of tokenized words
    '''
    tokenziedList = []
    for word in wordList:
        # remove these substrings from the word
        word = word.replace('[deleted]', '')
        word = word.replace('&gt', '')

        # if link, replace with link tag
        if 'http' in word:
            tokenziedList.append('LINK_TAG')
            continue
        # if reference to subreddit, replace with subreddit tag
        if '/r/' in word:
            tokenziedList.append('SUBREDDIT_TAG')
            continue
        # if reference to reddit user, replace with user tag
        if '/u/' in word:
            tokenziedList.append('USER_TAG')
            continue
        # if reference to twitter user, replace with user tag
        if '@' in word:
            tokenziedList.append('USER_TAG')
            continue
        # if number, replace with num tag
        # m8 is a word; 5'10" and 54-59, 56:48 are numbers
        if hasNumbers(word) and not any(char.isalpha() for char in word):
            tokenziedList.append('NUM_TAG')
            continue
        # separate punctuation and add to the tokenized list
        newwords = seperatePunct(word).split(" ")
        tokenziedList.extend(newwords)
    return tokenziedList

def mytokenize(comment):
    '''
    Input: a reddit comment as str or unicode
    Output: a tokenized list
    '''
    tokenizer = PunktSentenceTokenizer()
    sentenceList = tokenizer.tokenize(comment)
    wordList = []
    for sentence in sentenceList:
        wordList.extend(sentence.split(" "))
    return text_cleaner(wordList)

##############################################################################
# main

def main():
    print "entering main..."
    path = 'labeledRedditComments2.p'
    cvpath = 'twitter_cross_val.csv'

    load_tstart = time.time()
    print 'loading data...'
    df = load_data(path)
    dfcv = pd.read_csv(cvpath)
    load_tstop = time.time()

    # take a subset of the data for testing this code
    # randNums = np.random.randint(low=0, high=len(df.index), size=(200,1))
    # rowList = [int(row) for row in randNums]
    # dfsmall = df.ix[rowList,:]
    nf = df

    # create training set and labels
    X = nf.body
    y = nf.label
    Xcv = dfcv['tweet_text'].values
    ycv = dfcv['label'].values

    vect_tstart = time.time()
    print "creating vectorizer..."
    vect = TfidfVectorizer(stop_words='english',
                           decode_error='ignore',
                           tokenizer=OHStokenize)
    print "vectorizing..."
    # fit & transform comments matrix
    tfidf_X = vect.fit_transform(X)
    print "pickling vectorizer..."
    pickle.dump(vect, open('vect.p', 'wb'))
    tfidf_Xcv = vect.transform(Xcv)
    vect_tstop = time.time()

    print "converting data..."
    # convert to dense so that DMatrix doesn't drop cols with all zeros
    tfidf_Xcvd = tfidf_Xcv.todense()
    # data conversion to DMatrix
    xg_train = xgb.DMatrix(tfidf_X, label=y)
    xg_cv = xgb.DMatrix(tfidf_Xcvd, label=ycv)

    # print "loading vectorizer..."
    # vect = pickle.load(open('vect.p', 'rb'))
    #
    # cvpath = 'twitter_cross_val.csv'
    # dfcv = pd.read_csv(cvpath)
    # Xcv = dfcv['tweet_text'].values
    # ycv = dfcv['label'].values
    #
    # print "transforming cross val data..."
    # tfidf_Xcv = vect.transform(Xcv)
    # tfidf_Xcvd = tfidf_Xcv.todense()
    #
    # xg_cv = xgb.DMatrix(tfidf_Xcvd, label=ycv)

    # print "loading training data..."
    # xg_train = xgb.DMatrix('xg_train2.buffer')
    # xg_cv = xgb.DMatrix('xg_cv2.buffer')

    train_tstart = time.time()
    print 'training...'
    # parameters
    param = {'max_depth': 4,
             'eta': 0.3,
             'silent': 1,
             'objective': 'binary:logistic',
             'eval_metric': 'auc'}
    # number of boosted rounds
    num_round = 163
    # what to apply the eval metric to as you train
    watchlist = [(xg_train, 'train'), (xg_cv, 'eval')]
    # dict with the results of the model on the eval_metric
    results = dict()
    # train model
    model = xgb.train(param, xg_train, num_round, watchlist,
                      evals_result=results,  # store eval results in the results dict
                      verbose_eval=True)     # print eval output to screen
    train_tstop = time.time()

    print "saving model..."
    model.save_model('xgbfinal4.model')
    # # dump model
    # model.dump_model('dump2.raw.txt')
    # # dump model with feature map
    # model.dump_model('dump2.nice.txt')

    # save dmatrix into binary buffer
    xg_train.save_binary('xg_train4.buffer')
    # xg_cv.save_binary('xg_cv2.buffer')

    # print "load data: {}".format(load_tstop - load_tstart)
    # print "tfidf: {}".format(vect_tstop - vect_tstart)
    # print "train: {}".format(train_tstop - train_tstart)

    # To load saved model:
    # model = xgb.Booster(model_file='../../xgb_models/xgb.model')

if __name__ == '__main__':
    '''This script trains a TFIDF model using xgboost on the reddit corpus'''
    main()
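# Inference sketch (hedged; assumes the artifacts written by main() exist):
#
#     import cPickle as pickle
#     import xgboost as xgb
#
#     vect = pickle.load(open('vect.p', 'rb'))            # the pickled TfidfVectorizer
#     model = xgb.Booster(model_file='xgbfinal4.model')   # the saved booster
#     features = vect.transform(["example comment"]).todense()
#     scores = model.predict(xgb.DMatrix(features))       # probabilities from binary:logistic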
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# **       PTS -- Python Toolkit for working with SKIRT          **
# **       © Astronomical Observatory, Ghent University          **
# *****************************************************************

## \package pts.core.prep.sphconvert Converting SPH output data to SKIRT input format.
#
# The functions in this module allow converting SPH data files in text column
# format to the SKIRT input format. Currently supported are:
#  - EAGLE old column text format (compatible with SKIRT5)
#  - AWAT column text format
#  - DOLAG column text format
#
# There is a separate function for star and gas particles, for each format.
# The arguments for each function are:
#  - infile: the name of the input file in foreign format
#  - outfile: the name of the output file in SKIRT6 format (file is overwritten)

# -----------------------------------------------------------------

# Import standard modules
import math as math
import numpy as np

# -----------------------------------------------------------------
#  EAGLE column text format
# -----------------------------------------------------------------

## EAGLE star particles:
#  - incoming: x(kpc) y(kpc) z(kpc) t(yr) h(kpc) Z(0-1) M(Msun)
#  - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr)
def convert_stars_EAGLE(infile, outfile):
    x,y,z,t,h,Z,M = np.loadtxt(infile, unpack=True)
    fid = open(outfile, 'w')
    fid.write('# SPH Star Particles\n')
    fid.write('# Converted from EAGLE SKIRT5 output format into SKIRT6 format\n')
    fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr)\n')
    np.savetxt(fid, np.transpose((x*1e3,y*1e3,z*1e3,h*1e3,M,Z,t)), fmt="%1.9g")
    fid.close()

## EAGLE gas particles:
#  - incoming: x(kpc) y(kpc) z(kpc) SFR(?) h(kpc) Z(0-1) M(Msun)
#  - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1)
def convert_gas_EAGLE(infile, outfile):
    x,y,z,SFR,h,Z,M = np.loadtxt(infile, unpack=True)
    fid = open(outfile, 'w')
    fid.write('# SPH Gas Particles\n')
    fid.write('# Converted from EAGLE SKIRT5 output format into SKIRT6 format\n')
    fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1)\n')
    np.savetxt(fid, np.transpose((x*1e3,y*1e3,z*1e3,h*1e3,M,Z)), fmt="%1.9g")
    fid.close()

# -----------------------------------------------------------------
#  AWAT column text format
# -----------------------------------------------------------------

## AWAT star particles:
#  - incoming: x y z vx vy vz M ms0 mzHe mzC mzN mzO mzNe mzMg mzSi mzFe mzZ Z ts id flagfd rho h ...
#  - units: x,y,z,h (100kpc); M (1e12 Msun); ts(0.471Gyr) with t = (1Gyr-ts)
#  - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr)
def convert_stars_AWAT(infile, outfile):
    x,y,z,M,Z,ts,h = np.loadtxt(infile, usecols=(0,1,2,6,17,18,22), unpack=True)
    fid = open(outfile, 'w')
    fid.write('# SPH Star Particles\n')
    fid.write('# Converted from AWAT output format into SKIRT6 format\n')
    fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr)\n')
    np.savetxt(fid, np.transpose((x*1e5,y*1e5,z*1e5,h*1e5,M*1e12,Z,1e9-ts*0.471e9)), fmt="%1.9g")
    fid.close()

## AWAT gas particles:
#  - incoming: x y z vx vy vz M rho u mzHe mzC mzN mzO mzNe mzMg mzSi mzFe mzZ id flagfd h myu nhp Temp ...
#  - units: x,y,z,h (100kpc); M (1e12 Msun); mzZ (Msun) so that Z=mzZ/(M*1e12)
#  - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1)
def convert_gas_AWAT(infile, outfile):
    x,y,z,M,mzZ,h = np.loadtxt(infile, usecols=(0,1,2,6,17,20), unpack=True)
    fid = open(outfile, 'w')
    fid.write('# SPH Gas Particles\n')
    fid.write('# Converted from AWAT output format into SKIRT6 format\n')
    fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1)\n')
    np.savetxt(fid, np.transpose((x*1e5,y*1e5,z*1e5,h*1e5,M*1e12,mzZ/(M*1e12))), fmt="%1.9g")
    fid.close()

# -----------------------------------------------------------------
#  DOLAG column text format
# -----------------------------------------------------------------

# return the age of a star (in yr) given the universe expansion factor when the star was born (in range 0-1)
@np.vectorize
def age(R):
    H0 = 2.3e-18
    OmegaM0 = 0.27
    yr = 365.25 * 24 * 3600
    T0 = 13.7e9
    return T0 - (2./3./H0/np.sqrt(1-OmegaM0)) * np.arcsinh(np.sqrt( (1/OmegaM0-1)*R**3 )) / yr

# return the radius of a particle (in kpc) given its mass (in Msun) and density (in Msun/kpc3)
@np.vectorize
def radius(M, rho):
    return (M/rho*3/4/math.pi*64)**(1./3.)

## DOLAG star particles:
#  - incoming: id x y z vx vy vz M R
#  - units: x,y,z (kpc); M (Msun); R (0-1); assume Z=0.02 & h=1kpc; calculate t(R)
#  - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr)
def convert_stars_DOLAG(infile, outfile):
    x,y,z,M,R = np.loadtxt(infile, usecols=(1,2,3,7,8), unpack=True)
    fid = open(outfile, 'w')
    fid.write('# SPH Star Particles\n')
    fid.write('# Converted from DOLAG output format into SKIRT6 format\n')
    fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr)\n')
    np.savetxt(fid, np.transpose((x*1e3,y*1e3,z*1e3,np.ones_like(x)*1e3,M,np.ones_like(x)*0.02,age(R))), fmt="%1.9g")
    fid.close()

## DOLAG gas particles:
#  - incoming: id x y z vx vy vz M rho T cf u sfr
#  - units: x,y,z (kpc); M (Msun); assume Z=0.02; calculate h(M,rho)
#  - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1)
def convert_gas_DOLAG(infile, outfile):
    x,y,z,M,rho = np.loadtxt(infile, usecols=(1,2,3,7,8), unpack=True)
    fid = open(outfile, 'w')
    fid.write('# SPH Gas Particles\n')
    fid.write('# Converted from DOLAG output format into SKIRT6 format\n')
    fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1)\n')
    np.savetxt(fid, np.transpose((x*1e3,y*1e3,z*1e3,radius(M,rho)*1e3,M,np.ones_like(x)*0.02)), fmt="%1.9g")
    fid.close()

# -----------------------------------------------------------------
#  ULB column text format
# -----------------------------------------------------------------

## ULB gas particles:
#  - incoming: x y z M h rho vx vy vz ...
#  - units: x,y,z,h (100AU); M (Msun)
#  - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1)
def convert_gas_ULB(infile, outfile):
    PARSEC = 3.08568e16   # 1 parsec (in m)
    AU = 1.496e11         # 1 AU (in m)
    CONV = (100. * AU) / PARSEC
    x,y,z,M,h = np.loadtxt(infile, usecols=(0,1,2,3,4), unpack=True)
    fid = open(outfile, 'w')
    fid.write('# SPH Gas Particles\n')
    fid.write('# Converted from ULB output format into SKIRT6 format\n')
    fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1)\n')
    np.savetxt(fid, np.transpose((x*CONV,y*CONV,z*CONV,5*h*CONV,M,np.zeros_like(M)+0.02)), fmt="%1.9g")  # inflated h!
    fid.close()

# -----------------------------------------------------------------
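# Sanity check for age() (an aside, not part of the original module): with
# H0 = 2.3e-18 s^-1 and OmegaM0 = 0.27, a star born today (R = 1) gives
#     (2 / (3 * H0 * sqrt(1 - 0.27))) * arcsinh(sqrt(1/0.27 - 1)) / yr ≈ 13.7e9,
# so age(1) ≈ T0 - 13.7e9 ≈ 0 yr, while R -> 0 recovers age -> T0, the assumed
# age of the universe.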
from OpenGLCffi.GLES1 import params


@params(api='gles1', prms=['target', 'numAttachments', 'attachments'])
def glDiscardFramebufferEXT(target, numAttachments, attachments):
    pass
""" :Copyright: 2006-2021 Jochen Kupperschmidt :License: Revised BSD (see `LICENSE` file for details) """ import pytest from tests.helpers import login_user def test_view_for_brand(jobs_admin_client): url = '/admin/jobs/' response = jobs_admin_client.get(url) assert response.status_code == 200 @pytest.fixture(scope='packag
e') def jobs_admin(make_admin): permission_ids = { 'admin.access', 'jobs.view', } admin = make_admin('JobsAdmin', permission_ids) login_user(admin.id) return admin @pytest.fixture(s
cope='package') def jobs_admin_client(make_client, admin_app, jobs_admin): return make_client(admin_app, user_id=jobs_admin.id)
DEPS = [
  'chromium',
  'chromium_android',
  'gsutil',
  'recipe_engine/json',
  'recipe_engine/path',
  'recipe_engine/properties',
  'recipe_engine/python',
  'recipe_engine/step',
]
from core_tests_base import CoreTestsBase, FakeTessagon, FakeTileSubClass


class TestTile(CoreTestsBase):
    # Note: these tests are highly dependent on the behavior of
    # FakeTessagon and FakeAdaptor

    def test_add_vert(self):
        tessagon = FakeTessagon()
        tile = FakeTileSubClass(tessagon, u_range=[0.5, 1.0],
                                v_range=[2.5, 3.0])
        tile.add_vert(['top', 'left'], 0.25, 0.75)
        assert tile.blend(0.25, 0.75) == [0.625, 2.875]
        # One vert added
        assert tile.verts['top']['left'] == tile.f(0.625, 2.875)
        assert tile.verts['top']['right'] is None
        assert tile.verts['bottom']['left'] is None
        assert tile.verts['bottom']['right'] is None

    def test_add_vert_u_symmetric(self):
        tessagon = FakeTessagon()
        tile = FakeTileSubClass(tessagon, u_range=[0.5, 1.0],
                                v_range=[2.5, 3.0], u_symmetric=True)
        tile.add_vert(['top', 'left'], 0.25, 0.75)
        # [0.75, 0.75] is reflection of [0.25, 0.75] in U direction
        assert tile.blend(0.75, 0.75) == [0.875, 2.875]
        # Two verts added
        assert tile.verts['top']['left'] == tile.f(0.625, 2.875)
        assert tile.verts['top']['right'] == tile.f(0.875, 2.875)
        assert tile.verts['bottom']['left'] is None
        assert tile.verts['bottom']['right'] is None

    def test_add_vert_v_symmetric(self):
        tessagon = FakeTessagon()
        tile = FakeTileSubClass(tessagon, u_range=[0.5, 1.0],
                                v_range=[2.5, 3.0], v_symmetric=True)
        tile.add_vert(['top', 'left'], 0.25, 0.75)
        # [0.25, 0.25] is reflection of [0.25, 0.75] in V direction
        assert tile.blend(0.25, 0.25) == [0.625, 2.625]
        # Two verts added
        assert tile.verts['top']['left'] == tile.f(0.625, 2.875)
        assert tile.verts['top']['right'] is None
        assert tile.verts['bottom']['left'] == tile.f(0.625, 2.625)
        assert tile.verts['bottom']['right'] is None

    def test_add_vert_u_v_symmetric(self):
        tessagon = FakeTessagon()
        tile = FakeTileSubClass(tessagon, u_range=[0.5, 1.0],
                                v_range=[2.5, 3.0], u_symmetric=True,
                                v_symmetric=True)
        tile.add_vert(['top', 'left'], 0.25, 0.75)
        # [0.75, 0.25] is reflection of [0.25, 0.75] in U and V directions
        assert tile.blend(0.75, 0.25) == [0.875, 2.625]
        # Four verts added
        assert tile.verts['top']['left'] == tile.f(0.625, 2.875)
        assert tile.verts['top']['right'] == tile.f(0.875, 2.875)
        assert tile.verts['bottom']['left'] == tile.f(0.625, 2.625)
        assert tile.verts['bottom']['right'] == tile.f(0.875, 2.625)
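# Note on blend() (inferred from the assertions above, so treat as a sketch):
# it appears to map unit-square coordinates into the tile's ranges,
#     blend(u, v) == [u_range[0] + u * (u_range[1] - u_range[0]),
#                     v_range[0] + v * (v_range[1] - v_range[0])]
# hence blend(0.25, 0.75) == [0.625, 2.875] for u_range=[0.5, 1.0],
# v_range=[2.5, 3.0], and the symmetric variants mirror u -> 1 - u and/or
# v -> 1 - v before blending.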
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------------------------------------
# Program Name:           Lychee
# Program Description:    MEI document manager for formalized document control
#
# Filename:               lychee/__init__.py
# Purpose:                Initialize Lychee.
#
# Copyright (C) 2016, 2017 Christopher Antila
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------------------------------------
"""
Initialize Lychee.
"""

__all__ = [
    'converters',
    'document',
    'exceptions',
    'logs',
    'namespaces',
    'signals',
    'tui',
    'workflow',
    'vcs',
    'views',
]

from lychee import *

from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
import unittest

from artifice.models import usage, tenants, resources, Session
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from sqlalchemy.orm.exc import FlushError
import os

from artifice.models.usage import Usage
from artifice.models.tenants import Tenant
from artifice.models.resources import Resource

from datetime import datetime, timedelta

TENANT_ID = "test tenant"
RESOURCE_ID = "test resource"
RESOURCE_ID_TWO = "A DIFFERENT RESOURCE"
USAGE_ID = 12345


class SessionBase(unittest.TestCase):

    def setUp(self):
        engine = create_engine(os.environ["DATABASE_URL"])
        Session.configure(bind=engine)
        self.session = Session()
        self.objects = []
        self.session.rollback()

    def tearDown(self):
        self.session.rollback()
        for t in self.objects:
            try:
                self.session.delete(t)
            except InvalidRequestError:
                # This is fine
                pass
        self.session.commit()
        self.session = None


class TestTenant(SessionBase):

    def test_create_tenant(self):
        t = tenants.Tenant()
        self.objects.append(t)
        t.id = TENANT_ID
        self.session.add(t)
        self.session.flush()
        self.session.commit()

        t2 = self.session.query(tenants.Tenant)\
            .filter(tenants.Tenant.id == TENANT_ID)[0]
        self.assertTrue(t2 is not None)
        self.assertEqual(t2.id, TENANT_ID)

    def test_create_identical_tenant_fails(self):
        # First pass
        self.test_create_tenant()
        try:
            self.test_create_tenant()
        except (IntegrityError, FlushError) as e:
            self.assertTrue(True)
        except Exception as e:
            # self.fail(e.__class__)
            self.fail(e)


class TestResource(SessionBase):

    def test_create_resource(self):
        r = resources.Resource()
        t = tenants.Tenant()
        t.id = TENANT_ID
        r.tenant = t
        r.id = RESOURCE_ID
        self.session.add(r)
        self.session.add(t)
        self.objects.extend((r, t))
        self.session.flush()
        self.session.commit()

        r2 = self.session.query(resources.Resource)\
            .filter(resources.Resource.id == RESOURCE_ID)[0]
        self.assertEqual(r2.id, r.id)
        self.assertEqual(r2.tenant.id, t.id)

    def test_create_resource_with_bad_tenant_fails(self):
        r = resources.Resource()
        t = tenants.Tenant()
        r.tenant = t
        self.objects.extend((r, t))
        self.session.add(r)
        self.session.add(t)
        try:
            self.session.commit()
        except IntegrityError:
            self.assertTrue(True)
        except Exception as e:
            self.fail(e)

    def test_create_resource_without_tenant_fails(self):
        r = resources.Resource()
        r.id = RESOURCE_ID
        self.session.add(r)
        self.objects.append(r)
        try:
            self.session.commit()
        except IntegrityError:
            self.assertTrue(True)
        except Exception as e:
            self.fail(e)


class TestUsage(SessionBase):
    """Tests various states of the Usage objects."""

    # def setUp(self):
    #     super(TestUsage, self).setUp()
    #     self.resource

    # def tearDown(self):
    #     pass

    def test_save_usage_to_database(self):
        r = Resource()
        r.id = RESOURCE_ID
        t = Tenant()
        t.id = TENANT_ID
        r.tenant = t
        self.objects.extend((r, t))

        start = datetime.now() - timedelta(days=30)
        end = datetime.now()
        u = Usage(r, t, 1, start, end)
        u.id = USAGE_ID
        self.objects.append(u)

        self.session.add(u)
        self.session.add(r)
        self.session.add(t)
        self.session.commit()

        u2 = self.session.query(Usage)[0]
        self.assertTrue(u2.resource.id == r.id)
        self.assertTrue(u2.tenant.tenant.id == t.id)
        self.assertTrue(u2.created == u.created)
        print u2.time

    def test_overlap_throws_exception(self):
        self.test_save_usage_to_database()
        r = self.session.query(Resource).filter(Resource.id == RESOURCE_ID)[0]
        t = self.session.query(Tenant).filter(Tenant.id == TENANT_ID)[0]
        start = datetime.now() - timedelta(days=15)
        end = datetime.now()
        u2 = Usage(r, t, 2, start, end)
        self.session.add(u2)
        try:
            self.session.commit()
        except IntegrityError:
            self.assertTrue(True)
        except Exception as e:
            self.fail(e)

    def test_overlap_with_different_resource_succeeds(self):
        self.test_save_usage_to_database()
        t = self.session.query(Tenant).filter(Tenant.id == TENANT_ID)[0]
        r = Resource()
        r.id = RESOURCE_ID_TWO
        r.tenant = t
        start = datetime.now() - timedelta(days=30)
        end = datetime.now()
        u = Usage(r, t, 2, start, end)
        self.objects.extend((r, u))
        self.session.add(u)
        self.session.add(r)
        try:
            self.session.commit()
        except IntegrityError as e:
            self.fail("Integrity violation: %s" % e)
        except Exception as e:
            self.fail("Major exception: %s" % e)

    def test_non_overlap_succeeds(self):
        self.test_save_usage_to_database()
        r = self.session.query(Resource).filter(Resource.id == RESOURCE_ID)[0]
        t = self.session.query(Tenant).filter(Tenant.id == TENANT_ID)[0]
        start = datetime.now()
        end = datetime.now() + timedelta(days=30)
        u = Usage(r, t, 1, start, end)
        self.session.add(u)
        try:
            self.session.commit()
            self.objects.append(u)
        except IntegrityError as e:
            self.fail("Integrity violation: %s" % e)
        except Exception as e:
            self.fail("Fail: %s" % e)

    def test_tenant_does_not_exist_fails(self):
        pass

    def test_resource_does_not_exist_fails(self):
        pass

    def test_resource_belongs_to_different_tenant_fails(self):
        self.test_save_usage_to_database()
        t = Tenant()
        t.id = "TENANT TWO"
        r = self.session.query(Resource).filter(Resource.id == RESOURCE_ID)[0]
        start = datetime.now()
        end = datetime.now() + timedelta(days=30)
        self.session.add(t)
        self.objects.append(t)
        try:
            u = Usage(r, t, 1, start, end)
            self.session.commit()
            self.objects.append(u)
            self.fail("Should not have saved!")
        except (IntegrityError, AssertionError) as e:
            self.assertTrue(True)  # Pass
        except Exception as e:
            self.fail(e.__class__)
) ), (0, 'Other'), (5, _('translated')), ) c = models.IntegerField(choices=CHOICES, null=True) class WhizDelayed(models.Model): c = models.IntegerField(choices=(), null=True) # Contrived way of adding choices later. WhizDelayed._meta.get_field('c').choices = Whiz.CHOICES class WhizIter(models.Model): c = models.IntegerField(choices=iter(Whiz.CHOICES), null=True) class WhizIterEmpty(models.Model): c = models.CharField(choices=iter(()), blank=True, max_length=1) class Choiceful(models.Model): no_choices = models.IntegerField(null=True) empty_choices = models.IntegerField(choices=(), null=True) with_choices = models.IntegerField(choices=[(1, 'A')], null=True) empty_choices_bool = models.BooleanField(choices=()) empty_choices_text = models.TextField(choices=()) class BigD(models.Model): d = models.DecimalField(max_digits=32, decimal_places=30) class FloatModel(models.Model): size = models.FloatField() class BigS(models.Model): s = models.SlugField(max_length=255) class UnicodeSlugField(models.Model): s = models.SlugField(max_length=255, allow_unicode=True) class SmallIntegerModel(models.Model): value = models.SmallIntegerField() class IntegerModel(models.Model): value = models.IntegerField() class BigIntegerModel(models.Model): value = models.BigIntegerField() null_value = models.BigIntegerField(null=True, blank=True) class PositiveSmallIntegerModel(models.Model): value = models.PositiveSmallIntegerField() class PositiveIntegerModel(models.Model): value = models.PositiveIntegerField() class Post(models.Model): title = models.CharField(max_length=100) body = models.TextField() class NullBooleanModel(models.Model): nbfield = models.BooleanField(null=True, blank=True) nbfield_old = models.NullBooleanField() class BooleanModel(models.Model): bfield = models.BooleanField() string = models.CharField(max_length=10, default='abc') class DateTimeModel(models.Model): d = models.DateField() dt = models.DateTimeField() t = models.TimeField() class DurationModel(models.Model): field = models.DurationField() class NullDurationModel(models.Model): field = models.DurationField(null=True) class PrimaryKeyCharModel(models.Model): string = models.CharField(max_length=10, primary_key=True) class FksToBooleans(models.Model): """Model with FKs to models with {Null,}BooleanField's, #15040""" bf = models.ForeignKey(BooleanModel, models.CASCADE) nbf = models.ForeignKey(NullBooleanModel, models.CASCADE) class FkToChar(models.Model): """Model with FK to a model with a CharField primary key, #19299""" out = models.ForeignKey(PrimaryKeyCharModel, models.CASCADE) class RenamedField(models.Model): modelname = models.IntegerField(name="fieldname", choices=((1, 'One'),)) class VerboseNameField(models.Model): id = models.AutoField("verbose pk", primary_key=True) field1 = models.BigIntegerField("verbose field1") field2 = models.BooleanField("verbose field2", default=False) field3 = models.CharField("verbose field3", max_length=10) field4 = models.DateField("verbose field4") field5 = models.DateTimeField("verbose field5") field6 = models.DecimalField("verbose field6", max_digits=6, decimal_places=1) field7 = models.EmailField("verbose field7") field8 = models.FileField("verbose field8", upload_to="unused") field9 = models.FilePathField("verbose field9") field10 = models.FloatField("verbose field10") # Don't want to depend on Pillow in this test # field_image = models.ImageField("verbose field") field11 = models.IntegerField("verbose field11") field12 = models.GenericIPAddressField("verbose field12", protocol="ipv4") field13 = 
models.NullBooleanField("verbose field13")
    field14 = models.PositiveIntegerField("verbose field14")
    field15 = models.PositiveSmallIntegerField("verbose field15")
    field16 = models.SlugField("verbose field16")
    field17 = models.SmallIntegerField("verbose field17")
    field18 = models.TextField("verbose field18")
    field19 = models.TimeField("verbose field19")
    field20 = models.URLField("verbose field20")
    field21 = models.UUIDField("verbose field21")
    field22 = models.DurationField("verbose field22")


class GenericIPAddress(models.Model):
    ip = models.GenericIPAddressField(null=True, protocol='ipv4')


###############################################################################
# These models aren't used in any test, just here to ensure they validate
# successfully.

# See ticket #16570.
class DecimalLessThanOne(models.Model):
    d = models.DecimalField(max_digits=3, decimal_places=3)


# See ticket #18389.
class FieldClassAttributeModel(models.Model):
    field_class = models.CharField


###############################################################################


class DataModel(models.Model):
    short_data = models.BinaryField(max_length=10, default=b'\x08')
    data = models.BinaryField()


###############################################################################
# FileField

class Document(models.Model):
    myfile = models.FileField(upload_to='unused', unique=True)


###############################################################################
# ImageField

# If Pillow is available, do these tests.
if Image:
    class TestImageFieldFile(ImageFieldFile):
        """
        Custom Field File class that records whether or not the underlying
        file was opened.
        """
        def __init__(self, *args, **kwargs):
            self.was_opened = False
            super().__init__(*args, **kwargs)

        def open(self):
            self.was_opened = True
            super().open()

    class TestImageField(ImageField):
        attr_class = TestImageFieldFile

    # Set up a temp directory for file storage.
    temp_storage_dir = tempfile.mkdtemp()
    temp_storage = FileSystemStorage(temp_storage_dir)
    temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')

    class Person(models.Model):
        """
        Model that defines an ImageField with no dimension fields.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests')

    class AbstractPersonWithHeight(models.Model):
        """
        Abstract model that defines an ImageField with only one dimension
        field to make sure the dimension update is correctly run on
        concrete subclass instance post-initialization.
        """
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height')
        mugshot_height = models.PositiveSmallIntegerField()

        class Meta:
            abstract = True

    class PersonWithHeight(AbstractPersonWithHeight):
        """
        Concrete model that subclasses an abstract one with only one
        dimension field.
        """
        name = models.CharField(max_length=50)

    class PersonWithHeightAndWidth(models.Model):
        """
        Model that defines height and width fields after the ImageField.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_
to='tests', height_field='mugshot_height', width_field='mugshot_width') mugshot_height = models.PositiveSmallIntegerField() mugshot_width = models.PositiveSmallIntegerField() class PersonDimensionsFirst(models.Model): """ Model that defines height and width fields before the ImageField. """ name = models.CharField(max_length=50) mugshot_height = models.PositiveSmallIntegerField() mugshot_width = models.PositiveSmallIntegerField() mugshot = TestImageField(storage=temp_storage, upload_to='tests', height_field='mugshot_height', width_field='mugshot_width') class PersonTwoImages(models.Model): """ Model
# -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import from petl.compat import next, string_types from petl.util.base import iterpeek, ValuesView, Table from petl.util.materialise import columns def infer_dtype(table): import numpy as np # get numpy to infer dtype it = iter(table) hdr = next(it) flds = list(map(str, hdr)) rows = tuple(it) dtype = np.rec.array(rows).dtype dtype.names = flds return dtype def construct_dtype(flds, peek, dtype): import numpy as np if dtype is None: dtype = infer_dtype(peek) elif isinstance(dtype, string_types): # insert field names from source table typestrings = [s.strip() for s in dtype.split(',')] dtype = [(f, t) for f, t in zip(flds, typestrings)] elif (isinstance(dtype, dict) and ('names' not in dtype or 'formats' not in dtype)): # allow for partial specification of dtype cols = columns(peek) newdtype = {'names': [], 'formats': []} for f in flds: newdtype['names'].append(f) if f in dtype and isinstance(dtype[f], tuple): # assume fully specified newdtype['formats'].append(dtype[f][0]) elif f not in dtype: # not specified at all a = np.array(cols[f]) newdtype['formats'].append(a.dtype) else: # assume directly specified, just need to add offset newdtype['formats'].append(dtype[f]) dtype = newdtype return dtype def toarray(table, dtype=None, count=-1, sample=1000): """ Load data from the given `table` into a `numpy <http://www.numpy.org/>`_ structured array. E.g.:: >>> import petl as etl >>> table = [('foo', 'bar', 'baz'), ... ('apples', 1, 2.5), ... ('oranges', 3, 4.4), ... ('pears', 7, .1)] >>> a = etl.toarray(table) >>> a array([('apples', 1, 2.5), ('oranges', 3, 4.4), ('pears', 7, 0.1)], dtype=(numpy.record, [('foo', '<U7'), ('bar', '<i8'), ('baz', '<f8')])) >>> # the dtype can be specified as a string ... a = etl.toarray(table, dtype='a4, i2, f4') >>> a array([(b'appl', 1, 2.5), (b'oran', 3, 4.4), (b'pear', 7, 0.1)], dtype=[('foo', 'S4'), ('bar', '<i2'), ('baz', '<f4')]) >>> # the dtype can also be partially specified ... a = etl.toarray(table, dtype={'foo': 'a4'}) >>> a array([(b'appl', 1, 2.5), (b'oran', 3, 4.4), (b'pear', 7, 0.1)], dtype=[('foo', 'S4'), ('bar', '<i8'), ('baz', '<f8')]) If the dtype is not completely specified, `sample` rows will be examined to infer an appropriate dtype. """ import numpy as np it = iter(table) peek, it = iterpeek(it, sample) hdr = next(it) flds = list(map(str, hdr)) dtype = construct_dtype(flds, peek, dtype) # numpy is fussy about having tuples, need to make sure it = (tuple(row) for row in it) sa = np.fromiter(it, dtype=dtype, count=count) return sa Table.toarray = toarray def torecarray(*args, **kwargs): """ Convenient shorthand for ``toarray(*args, **kwargs).view(np.recarray)``. """ import numpy
as np return toarray(*args, **kwargs).view(np.recarray) Table.torecarray = torecarray def fromarray(a): """ Extract a table from a `numpy <http://www.numpy.org/>`_ structured array, e.g
.:: >>> import petl as etl >>> import numpy as np >>> a = np.array([('apples', 1, 2.5), ... ('oranges', 3, 4.4), ... ('pears', 7, 0.1)], ... dtype='U8, i4,f4') >>> table = etl.fromarray(a) >>> table +-----------+----+-----+ | f0 | f1 | f2 | +===========+====+=====+ | 'apples' | 1 | 2.5 | +-----------+----+-----+ | 'oranges' | 3 | 4.4 | +-----------+----+-----+ | 'pears' | 7 | 0.1 | +-----------+----+-----+ """ return ArrayView(a) class ArrayView(Table): def __init__(self, a): self.a = a def __iter__(self): yield tuple(self.a.dtype.names) for row in self.a: yield tuple(row) def valuestoarray(vals, dtype=None, count=-1, sample=1000): """ Load values from a table column into a `numpy <http://www.numpy.org/>`_ array, e.g.:: >>> import petl as etl >>> table = [('foo', 'bar', 'baz'), ... ('apples', 1, 2.5), ... ('oranges', 3, 4.4), ... ('pears', 7, .1)] >>> table = etl.wrap(table) >>> table.values('bar').array() array([1, 3, 7]) >>> # specify dtype ... table.values('bar').array(dtype='i4') array([1, 3, 7], dtype=int32) """ import numpy as np it = iter(vals) if dtype is None: peek, it = iterpeek(it, sample) dtype = np.array(peek).dtype a = np.fromiter(it, dtype=dtype, count=count) return a ValuesView.toarray = valuestoarray ValuesView.array = valuestoarray
# -*- coding: utf-8 -*- from nose.tools import ( eq_, raises, ) from py3oauth2.utils import ( normalize_netloc, normalize_path, normalize_query, normalize_url, ) def test_normalize_url(): eq_(normalize_url('http://a/b/c/%7Bfoo%7D'), normalize_url('hTTP://a/./b/../b/%63/%7bfoo%7d')) @raises(ValueError) def test_normalize_url_unknown_scheme(): normalize_url('example://example.com/') @raises(ValueError) def test_normalize_url_fragment(): normalize_url('http://example.com/#foo') @raises(ValueError) def test_normalize_url_invalid_port(): normalize_url('https://example.com:1bb/#foo') def test_normalize_netloc(): eq_(normalize_netloc('eXamPLe.com', 80), 'example.com') eq_(normalize_netloc('user:pass@example.com', 80), 'user:pass@example.com') eq_(normalize_netloc('user:@example.com', 80), 'user@example.com') eq_(normalize_netloc(':pass@example.com', 80), ':pass@example.com') eq_(normalize_netloc('example.com:443', 80), 'example.com:443') eq_(normalize_netloc('example.com:80', 80), 'example.com') eq_(normalize_netloc('example.com:', 80), 'example.com') def test_normalize_query(): eq_(normalize_query(''), '') eq_(normalize_query('b=c&a=b'), 'a=b&b=c') eq_(normalize_query('b&a=b'), 'a=b') eq_(normalize_query('b=&a=b'), 'a=b') eq_(normalize_query('b=%e3%81%84&a=%e3%81%82'), 'a=%E3%81%82&b=%E3%81%84') def test_normalize_path(): eq_(normalize_path(''), '/') eq_(normalize_path('//'), '/')
eq_(normalize_path('/a//b'), '/a/b/') eq_(normalize_path('/a/./b'), '/a/b/') eq_(normalize_path('/a/foo/../b'), '/a/b/') eq_(normalize_path('/%e3%81%82%a%e3%81%84'), '/%E3%81%82%a%E3%81%84/') eq_(normalize_path('/%e3%81%82a%e3%81%84'), '/%E3%81%82a%
E3%81%84/')
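
# --- illustrative usage sketch (not part of the test suite) ------------------
# The normalizers exercised above exist so that two syntactically different
# URLs can be compared for equivalence, e.g. when matching an OAuth redirect
# URI against a registered one. `same_endpoint` below is a hypothetical
# helper, added for exposition only.


def same_endpoint(registered, presented):
    """Return True if both URLs normalize to the same form."""
    try:
        return normalize_url(registered) == normalize_url(presented)
    except ValueError:  # unknown scheme, fragment, or invalid port
        return False


assert same_endpoint('http://a/b/c/%7Bfoo%7D', 'hTTP://a/./b/../b/%63/%7bfoo%7d')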
import os from PyQt4 import QtCore, QtGui from Extensions.Global import sizeformat class SearchWidget(QtGui.QLabel): def __init__(self, parent): QtGui.QLabel.__init__(self, parent) self._parent = parent self.setStyleSheet("""background: rgba(0, 0, 0, 50); border-radius: 0px;""") self.setFixedSize(300, 28) self.setPixmap(QtGui.QPixmap("Icons\\line")) self.setScaledContents(True) self.searchTimer = QtCore.QTimer() self.searchTimer.setSingleShot(True) self.searchTimer.setInterval(200) self.searchTimer.timeout.connect(self.gotoText) self.textFindLine = QtGui.QLineEdit(self) self.textFindLine.setStyleSheet("background: white; border-radius: 0px;") self.textFindLine.setGeometry(3, 2, 270, 23) self.textFindLine.grabKeyboard() self.textFindLine.setTextMargins(2, 1, 22, 1) self.textFindLine.textChanged.connect(self.show) self.textFindLine.textChanged.connect(self.searchTimer.start) self.clearTextFindLineButton = QtGui.QPushButton(self.textFindLine) self.clearTextFindLineButton.setGeometry(250, 2, 15, 15) self.clearTextFindLineButton.setFlat(True) self.clearTextFindLineButton.setIcon(QtGui.QIcon("Icons\\clearLeft")) self.clearTextFindLineButton.setStyleSheet("background: white; border: none;") self.clearTextFindLineButton.clicked.connect(self.textFindLine.clear) self.finderCloseButton = QtGui.QToolButton(self) self.finderCloseButton.setStyleSheet("background: none;") self.finderCloseButton.setGeometry(278, 6, 15, 15) self.finderCloseButton.setAutoRaise(True) self.finderCloseButton.setIconSize(QtCore.QSize(25, 25)) self.finderCloseButton.setIcon(QtGui.QIcon("Icons\\Cross")) self.finderCloseButton.clicked.connect(self.hide) def gotoText(self): text = self.textFindLine.text() self._parent.gotoText(text) class VaultManager(QtGui.QListWidget): def __init__(self, va
ultItemCountLabel, sizeLabel, busyIndicatorWidget, parent): QtGui.QListWidget.__init__(self, parent) self.redCenter = parent self.setLayoutMode(1) self.setBatchSize(1) self.setUniformItemSizes(True) self.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.setAlternatingRowColors(True) self.setIconSize(QtCore.QSize(30, 30)) self.itemSelectionChanged.connect(self.selectionMade) searchWidget = SearchWidget(self) searchWidget.move(80, 0) searchWidget.hide() self.vaultItemCountLabel = vaultItemCountLabel self.sizeLabel = sizeLabel self.busyIndicatorWidget = busyIndicatorWidget self.vaultZeroContentLabel = QtGui.QLabel("Empty", self) self.vaultZeroContentLabel.setGeometry(150, 20, 100, 50) self.vaultZeroContentLabel.setAlignment(QtCore.Qt.AlignCenter) self.vaultZeroContentLabel.setStyleSheet("background: none; font: 20px; color: lightgrey;") self.vaultZeroContentLabel.hide() self.vaultCleanUp() def gotoText(self, text): for i in self.vaultKeyList: if self.logDict[i].split('|')[0].startswith(text): index = self.vaultKeyList.index(i) self.setCurrentRow(index) break def loadVault(self): try: logList = [] self.vaultKeyList = [] file = open("Vault\\LOG","r") for i in file.readlines(): if i.strip() == '': pass else: logList.append(tuple(i.strip().split('||'))) file.close() self.logDict = dict(logList) self.vaultContentsSize = 0 self.clear() size = QtCore.QSize() size.setHeight(40) for key, property in self.logDict.items(): self.vaultKeyList.append(key) ## extract attributes attrib = self.logDict[key].split('|') # get locking time time_split = key.split('=')[0].split('-') date = QtCore.QDate(int(time_split[0]), int(time_split[1]), int(time_split[3])).toString() item = QtGui.QListWidgetItem(attrib[0]) item.setToolTip('Original Location: ' + attrib[2] + '\nModified: ' + date) item.setSizeHint(size) # assign icon if attrib[1] == "exec": item.setIcon(QtGui.QIcon("Icons\\executable")) else: item.setIcon(QtGui.QIcon("Icons\\unknown")) self.addItem(item) self.vaultContentsSize += int(attrib[3]) self.vaultItemCountLabel.setText("Items: " + str(len(self.logDict))) # display size of total files self.sizeLabel.setText(sizeformat(self.vaultContentsSize)) self.showVaultEmptyLabel() except: self.redCenter.showMessage("Problem loading items in the vault.") self.redCenter.hideMessage() def showVaultEmptyLabel(self): if self.count() > 0: self.vaultZeroContentLabel.hide() else: self.vaultZeroContentLabel.show() def selectionMade(self): self.selected = self.selectedItems() if len(self.selected) > 0: self.redCenter.unlockButton.setEnabled(True) self.redCenter.deleteButton.setEnabled(True) else: self.redCenter.unlockButton.setEnabled(False) self.redCenter.deleteButton.setEnabled(False) def vaultCleanUp(self): logList = [] file = open("Vault\\LOG","r") for i in file.readlines(): if i.strip() == '': pass else: logList.append(tuple(i.strip().split('||'))) file.close() logDict = dict(logList) filesList = os.listdir("Vault\\Files") bookedFilesList = [] for i, v in logDict.items(): bookedFilesList.append(i) for i in filesList: if i not in bookedFilesList: path = os.path.join("Vault\\Files", i) try: os.remove(path) except: pass
# -*- coding: utf-8 -*-
"""
This is part of HashBruteStation software
Docs EN: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station_en
Docs RU: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station
License: MIT
Copyright (c) Anton Kuzmin
<http://anton-kuzmin.ru> (ru)
<http://anton-kuzmin.pro> (en)

Integration tests for HashlistsByAlgLoaderThread
"""

import sys
import time

import pytest

sys.path.append('../../')

from libs.common import md5
from classes.HashlistsByAlgLoaderThread import HashlistsByAlgLoaderThread
from classes.HashlistsLoaderThread import HashlistsLoaderThread
from CommonIntegration import CommonIntegration


class Test_HashlistsByAlgLoaderThread(CommonIntegration):
    """ Class of integration tests - HashlistsByAlgLoaderThread """
    thrd = None
    loader_thrd = None

    def setup(self):
        """ Tests setup """
self._clean_db() self.thrd = HashlistsByAlgLoaderThread() self.thrd.delay_per_check = 1 self.thrd.catch_exceptions = False self.loader_thrd = HashlistsLoaderThread() self.loader_thrd.delay_per_check = 1 self.loader_thrd.catch_
exceptions = False def teardown(self): """ Tests teardown """ if isinstance(self.thrd, HashlistsByAlgLoaderThread): self.thrd.available = False time.sleep(1) del self.thrd if isinstance(self.loader_thrd, HashlistsLoaderThread): self.loader_thrd.available = False time.sleep(1) del self.loader_thrd self._clean_db() test_data = [ ( [ {'hash': 'a', 'salt': '\\ta\'1\\', 'summ': md5('a:\\ta\'1\\')}, {'hash': 'b', 'salt': '\\nb"2\\', 'summ': md5('b:\\nb"2\\')} ], 1 ), ( [ {'hash': 'a', 'salt': '1', 'summ': md5('a:1')}, {'hash': 'b', 'salt': '2', 'summ': md5('b:2')} ], 1 ), ( [ {'hash': 'a', 'salt': '', 'summ': md5('a')}, {'hash': 'b', 'salt': '', 'summ': md5('b')} ], 0 ), ] @pytest.mark.parametrize("hashes,have_salt", test_data) def test_simple_build(self, hashes, have_salt): """ Simple common hashlist build :param hashes: hashes rows :param have_salt: Does alg has salt? :return: """ self._add_hashlist(have_salts=have_salt) for _hash in hashes: self._add_hash(hash=_hash['hash'], salt=_hash['salt'], summ=_hash['summ']) assert self.db.fetch_one("SELECT id FROM hashlists WHERE common_by_alg") is None self.thrd.start() self.loader_thrd.start() time.sleep(5) test_hashlist_data = {'id': 2, 'name': 'All-MD4', 'have_salts': have_salt, 'delimiter': self.thrd.DELIMITER, 'cracked': 0, 'uncracked': 2, 'errors': '', 'parsed': 1, 'status': 'ready', 'common_by_alg': 3} hashlist_data = self.db.fetch_row("SELECT * FROM hashlists WHERE common_by_alg") assert int(self.db.fetch_one("SELECT when_loaded FROM hashlists WHERE common_by_alg")) > 0 for field in test_hashlist_data: assert hashlist_data[field] == test_hashlist_data[field] for _hash in hashes: assert self.db.fetch_one( "SELECT COUNT(id) FROM hashes WHERE hash = {0} AND salt={1} AND summ = {2} AND hashlist_id = 2". format(self.db.quote(_hash['hash']), self.db.quote(_hash['salt']), self.db.quote(_hash['summ'])) ) == 1 test_data = [ ( [ {'hash': 'a', 'salt': '1', 'summ': md5('a:1'), 'cracked': 0}, {'hash': 'b', 'salt': '2', 'summ': md5('b:2'), 'cracked': 1}, {'hash': 'c', 'salt': '3', 'summ': md5('c:3'), 'cracked': 0}, {'hash': 'd', 'salt': '4', 'summ': md5('d:4'), 'cracked': 0}, ], [ {'hash': 'a', 'salt': '1', 'summ': md5('a:1'), 'cracked': 0}, {'hash': 'b', 'salt': '2', 'summ': md5('b:2'), 'cracked': 0}, ], 1 ), ( [ {'hash': 'a', 'salt': '', 'summ': md5('a'), 'cracked': 0}, {'hash': 'b', 'salt': '', 'summ': md5('b'), 'cracked': 1}, {'hash': 'c', 'salt': '', 'summ': md5('c'), 'cracked': 0}, {'hash': 'd', 'salt': '', 'summ': md5('d'), 'cracked': 0}, ], [ {'hash': 'a', 'salt': '', 'summ': md5('a'), 'cracked': 0}, {'hash': 'b', 'salt': '', 'summ': md5('b'), 'cracked': 0}, ], 0 ), ] @pytest.mark.parametrize("hashes_in_self,hashes_in_common,have_salt", test_data) def test_update_exists_list(self, hashes_in_self, hashes_in_common, have_salt): """ Updating exists common hashlist :param hashes_in_self: Hashes in usual hashlist :param hashes_in_common: Hashes in common hashlist :param have_salt: Does alg has salt? 
:return: """ self._add_hashlist(have_salts=have_salt) for _hash in hashes_in_self: self._add_hash(hash=_hash['hash'], salt=_hash['salt'], summ=_hash['summ'], cracked=_hash['cracked']) self._add_hashlist(id=2, alg_id=3, common_by_alg=3, have_salts=have_salt) for _hash in hashes_in_common: self._add_hash( hashlist_id=2, hash=_hash['hash'], salt=_hash['salt'], summ=_hash['summ'], cracked=_hash['cracked'] ) assert self.db.fetch_one("SELECT COUNT(id) FROM hashes WHERE hash='b'") == 2 assert self.db.fetch_one("SELECT COUNT(id) FROM hashes WHERE hash='c'") == 1 assert self.db.fetch_one("SELECT COUNT(id) FROM hashes WHERE hash='d'") == 1 self.thrd.start() self.loader_thrd.start() time.sleep(5) assert self.db.fetch_one("SELECT COUNT(id) FROM hashes WHERE hash='b'") == 1 assert self.db.fetch_one("SELECT COUNT(id) FROM hashes WHERE hash='c'") == 2 assert self.db.fetch_one("SELECT COUNT(id) FROM hashes WHERE hash='d'") == 2 assert [{'hash': 'a'}, {'hash': 'c'}, {'hash': 'd'}] \ == self.db.fetch_all("SELECT hash FROM hashes WHERE hashlist_id = 2") test_data = [('outparsing'), ('waitoutparse')] @pytest.mark.parametrize("status", test_data) def test_build_with_parsing_alg(self, status): """ Try build no ready hashlist :param status: :return: """ self._add_hashlist() self._add_hash(hash='a', summ='111') self._add_hash(hash='b', summ='222') self._add_hashlist(id=2, alg_id=3, common_by_alg=0) self._add_work_task(hashlist_id=2, status=status) assert self.db.fetch_one("SELECT id FROM hashlists WHERE common_by_alg") is None self.thrd.start() self.loader_thrd.start() time.sleep(5) test_hashlist_data = {'id': 3, 'name': 'All-MD4', 'have_salts': 0, 'delimiter': self.thrd.DELIMITER, 'cracked': 0, 'uncracked': 0, 'errors': '', 'parsed': 0, 'status': 'ready', 'common_by_alg': 3} hashlist_data = self.db.fetch_row("SELECT * FROM hashlists WHERE common_by_alg") for field in test_hashlist_data: assert hashlist_data[field] == test_hashlist_data[field] self.db.update("task_works", {'status': 'wait'}, 'id=1') time.sleep(5) test_hashlist_data = {'id': 3, 'name': 'All-MD4', 'have_salts': 0, 'delimiter': self.thrd.DELIMITER, 'cracked': 0, 'uncracked': 2, 'errors': '', 'parsed': 1, 'status': 'ready', 'common_by_alg': 3} hashlist_data = self.db.fetch_row("SELECT * FROM hashlists WHERE common_by_alg") for field in test_hashlist_data: assert hashlist_data[field] == test_hashlist_data[field] assert self.db.fetch_all("SELECT hash FROM hashes WHERE has
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION
FOR EXAMPLES from swgpy.object import * def create(kernel): result = Weapon() result.template = "object/weapon/melee/polearm/crafted_saber/shared_sword_lightsaber_polearm_s1_gen2.iff" result.attribute_template_id = 10 result.stfName("weapon_name","sword
_lightsaber_lance_type1") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
############################################################################### # # Copyright (c) 2007, 2008 OpenHex SPRL. (http://openhex.com) All Rights # Reserved. # # This program is free software; you can redistribute it and/or
modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 3 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. S
ee the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### __metaclass__ = type import genshi.core from genshi.template import NewTextTemplate, MarkupTemplate from relatorio.reporting import MIMETemplateLoader class RelatorioStream(genshi.core.Stream): "Base class for the relatorio streams." def render(self, method=None, encoding='utf-8', out=None, **kwargs): "calls the serializer to render the template" return self.serializer(self.events) def serialize(self, method='xml', **kwargs): "generates the bitstream corresponding to the template" return self.render(method, **kwargs) def __or__(self, function): "Support for the bitwise operator" return RelatorioStream(self.events | function, self.serializer) MIMETemplateLoader.add_factory('text', NewTextTemplate) MIMETemplateLoader.add_factory('xml', MarkupTemplate)
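
# --- illustrative sketch (added for exposition; assumes genshi is installed) --
# The two factories registered above mean that a 'text' report is rendered
# with genshi's NewTextTemplate ($variable syntax) and an 'xml' report with
# MarkupTemplate. A minimal text-template round trip looks like this:

if __name__ == '__main__':
    template = NewTextTemplate('Hello $name')
    print(template.generate(name='World').render('text'))  # -> Hello World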
from gerencianet import Gerencianet from credentials import CREDENTIALS gn = Gerencianet(CREDENTIALS) params = { 'txid': '' } body = {
'calendario': { 'expiracao': 3600 }, 'devedor': { 'cpf': '', 'nome': '' }, 'valor': { 'original': '0.50' }, 'chave': '', 'solicitacaoPagador': 'Cobrança dos ser
viços prestados.'  # i.e. "Payment for services rendered."
}

response = gn.pix_create_charge(params=params, body=body)
print(response)
from alphatwirl.selection.factories.AllFactory import AllFactory from alphatwirl.selection.modules.basic import All from alphatwirl.selection.modules.basic import Any from alphatwirl.selection.modules.LambdaStr import LambdaS
tr import unittest ##__________________________________________________________________|| class Te
st_AllFactory(unittest.TestCase):

    def test_obj(self):
        path_cfg_list = ("ev : ev.nJet[0] >= 2", "ev : ev.nMET[0] >= 200")
        kargs = dict(arg1 = 10, arg2 = 20, AllClass = All, LambdaStrClass = LambdaStr)
        obj = AllFactory(path_cfg_list, name = 'test_all', **kargs)
        # the factory was given AllClass = All, so it should build an All
        self.assertIsInstance(obj, All)

##__________________________________________________________________||
erModes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with FiberModes. If not, see <http://www.gnu.org/licenses/>. """Scalar, list, range or code object. This is a convenient object used to encapsulate a parameter that can be either a scalar (float), a list of floats, a range, or a function (code). """ import math import logging import numpy class SLRC(object): """Scalar, list, range or code object. Args: value(mixed): Initial value. Values are assumed to be always sorted. If the value is a `list` or a `numpy.ndarray`, it uses the value inside the list. If the value is a `dict`, it assumes keys `start`, `end`, and `num` to be set, and it creates a range of num values from start to end (included), just like `numpy.linspace`. If the value is a `str`, if assumes this is Python code to be evaluated. This code is evaluated in a restricted environment, where builtins are listed in `rglobals`. `math` module is also available. The code is assumed called inside a function definition, and must return a scalar value. Otherwise, the value is assumed to be a scalar (float or int). """ logger = logging.getLogger(__name__) #: Allowed builtins for code. It includes the math module. rglobals = { '__builtins__': { 'abs': abs, 'all': all, 'any': any, 'bool': bool, 'complex': complex, 'dict': dict, 'divmod': divmod, 'enumerate': enumerate, 'filter': filter, 'float': float, 'frozenset': frozenset, 'int': int, 'iter': iter, 'len': len, 'list': list, 'map': map, 'max': max, 'min': min, 'next': next, 'pow': pow, 'range': range, 'reversed': reversed, 'round': round, 'set': set, 'slice': slice, 'sorted': sorted, 'str': str, 'sum': sum, 'tuple': tuple, 'zip': zip }, 'math': math } def __init__(self, value=0): self.codeParams = None SLRC.value.fset(self, value) @property def value(self): """Return evaluated value of object. Warning: When set, does not check the assigned value. Returns: The return value can be a float, a list, or a function. Use the type attribute if you need to know what kind it is. 
""" k = self.kind if k == 'range': low = self._value['start'] high = self._value['end'] n = self._value['num'] if n > 1: return [low + index*(high-low)/(n-1) for index in range(n)] elif n == 1: return [low] else: return [] elif k == 'code': cp = ", ".join(self.codeParams) + ", " if self.codeParams else "" code = "def f({}*args, **kwargs):\n".format(cp) for line in self._value.splitlines(): code += " {}\n".format(line) loc = {} exec(code, self.rglobals, loc) return loc['f'] else: return self._value @value.setter def value(self, value): if isinstance(value, SLRC): self._value = value._value else: self._value = value if self.kind == 'list': self._value = sorted(value) self.logger.debug("Value set to {}".format(self._value)) def __iter__(self): k = self.kind if k == 'list': yield from iter(self._value) elif k == 'range': yield from iter(self.value) else: yield self.value def __len__(self): k = self.kind if k == 'list': return len(self._value) elif k == 'range': return self._value['num'] else: return 1 def __getitem__(self, index): if index >= len(self): raise IndexError k = self.kind if k == 'list': return self._value[index] elif k == 'range': low = self._value['start'] high = self._value['end'] n = self._value['num'] return low + index*(high-low)/(n-1) if n > 1 else low else: return self.value @property def kind(self): """Find what is the kind of value. When read, the property returns a string identifying the kind of value contained. When set, the property converts the actual value to a new kind. Conversion is performed as described in the following table. Cases in **bold** are converted without loss of information. Case in *italic* is converted with possible loss of information. Other cases are converted with systematic loss of information. ========== ========== ====== From To Result ========== ========== ====== **scalar** **scalar** No change **scalar** **list** List with one item **scalar** **range** Range with one item **scalar** **code** Return the value list scalar First item of the list **list** **list** No change *list* *range* Range from first item to last item with same number of elements (but intermediate values could be different) list code Return value of the first item range scalar First item of the range **range** **list** List with items of the range **range** **range** No change range code Return first item of the range code scalar 0 code list [0] code range {'start': 0, 'end': 1, 'num': 2} **code** **code** No change ========== ========== ====== Returns: string. It can be 'scalar', 'list', 'range', or 'code'. 
""" if isinstance(self._value, list): return 'list' elif isinstance(self._value, numpy.ndarray): return 'list' elif isinstance(self._value, str): return 'code' elif isinstance(self._value, dict): return 'range' else: return 'scalar' @kind.setter def kind(self, value): k = self.kind if k == value: return self.logger.debug("Converted from '{}': {}".format(k, self._value)) if value == 'code': if k == 'scalar': self._value = "return {}".format(self._value) elif k == 'list': self._value = "return {}".format(self._value[0]) elif k == 'range': self._value = "return {}".format(self._value['start']) elif value == 'range': if k == 'scalar': self._value = {'start': self._value, 'end': self._value, 'num': 1} elif k == 'list': self._value = {'start': min(self._value), 'end': max(self._value), 'num': len(self._value)} else: self._value = {'start': 0, 'end': 1, 'num': 2} elif value == 'list': if k == 'scalar': self._value = [self._value] elif k == 'range': low = self._value['start'] high = self._value['end'] n = self._value['num'] if n == 1: self.
_value = [
low]
                else:
                    self._value = [low + index*(high-low)/(n-1)
                                   for index in range(n)]
            # (the remaining branches are reconstructed from the
            # conversion table in the docstring above)
            else:  # code -> list
                self._value = [0]
        else:  # value == 'scalar'
            if k == 'list':
                self._value = self._value[0]
            elif k == 'range':
                self._value = self._value['start']
            else:  # code -> scalar
                self._value = 0
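
if __name__ == '__main__':
    # Illustrative checks based on the behaviour documented above
    # (added for exposition; not part of the FiberModes API).
    s = SLRC(5)                                      # scalar
    assert list(s) == [5]

    s = SLRC({'start': 0.0, 'end': 1.0, 'num': 3})   # range
    assert list(s) == [0.0, 0.5, 1.0]

    s = SLRC("return math.sqrt(2)")                  # code
    f = s.value                                      # value builds a function
    assert abs(f() - math.sqrt(2)) < 1e-12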
# Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for bubble.py.""" from google.appengine.api import users from bubble import HospitalValueInfoExtractor, ValueInfoExtractor from utils import db, HIDDEN_ATTRIBUTE_NAMES import django.utils.translation import bubble import datetime import logging import model import os import unittest import utils def fake_get_message(ns, n, locale=''): message = model.Message(ns=ns, name=n) if ns == 'attribute_value' and n == 'fake_to_localize': message.en = 'fake_localized' else: message.en = 'foo' django_locale = 'en' return message and getattr(message, django_locale) or n class BubbleTest(unittest.TestCase): def setUp(self): self.real_auth_domain = os.environ.get('AUTH_DOMAIN', '') os.environ['AUTH_DOMAIN'] = 'test' self.real_get_message = bu
bble.get_message bubble.get_message = fake_get_message utils.get_message = fake_get_message def tearDown(self): utils.get_message = self.real_get_message bubble.get_message = self.real_get_message os.environ['AUTH_DOMAIN'] = self.real_auth_domain def test_value_info_extractor(self): s = model.Subject(key_
name='haiti:example.org/123', type='hospital') s.set_attribute('title', 'title_foo', datetime.datetime.now(), users.User('test@example.com'), 'nickname_foo', 'affiliation_foo', 'comment_foo') s.set_attribute('attribute_value', 'fake_to_localize', datetime.datetime.now(), users.User('test@example.com'), 'nickname_foo', 'affiliation_foo', 'comment_foo') vai = ValueInfoExtractor(['title'], ['attribute_value']) (special, general, details) = vai.extract(s, ['title']) assert special['title'].raw == 'title_foo' assert general == [] assert details[0].raw == 'title_foo' (special, general, details) = vai.extract(s, ['attribute_value']) assert general[0].raw == 'fake_to_localize' assert general[0].value == 'fake_localized' assert general[0].label == 'foo' def test_hospital_value_info_extractor(self): user = users.User('test@example.com') now = datetime.datetime(2010, 6, 11, 14, 26, 52, 906773) nickname = 'nickname_foo' affiliation = 'affiliation_foo' comment = 'comment_foo' s = model.Subject(key_name='haiti:example.org/123', type='hospital') s.set_attribute('title', 'title_foo', now, user, nickname, affiliation, comment) s.set_attribute(HIDDEN_ATTRIBUTE_NAMES[0], 'hidden_value_foo', now, user, nickname, affiliation, comment) s.set_attribute('organization_name', 'value_foo', now, user, nickname, affiliation, comment) attrs = ['title', 'organization_name', HIDDEN_ATTRIBUTE_NAMES[0]] vai = HospitalValueInfoExtractor() (special, general, details) = vai.extract(s, attrs) assert special['title'].date == '2010-06-11 09:26:52 -05:00' assert special['title'].raw == 'title_foo' assert HIDDEN_ATTRIBUTE_NAMES[0] not in special assert sorted(special) == sorted(vai.special_attribute_names) assert len(general) == 1 assert len(details) == 2 assert general[0].value == 'value_foo' for detail in details: assert detail.value == 'title_foo' or detail.value == 'value_foo' assert detail.value != 'hidden_value_foo' def test_vai_get_value_info(self): s = model.Subject(key_name='example.org/123', type='hospital') s.set_attribute('title', 'title_foo', datetime.datetime(2010, 06, 01), users.User('test@example.com'), 'nickname_foo', 'affiliation_foo\n', 'comment_\nfoo') s.set_attribute('attribute_value', 'fake_to_localize', datetime.datetime(2010, 06, 01), users.User('test@example.com'), 'nickname_foo', '\naffiliation_foo', 'comment_foo') vai = ValueInfoExtractor(['title'], ['attribute_value']) vi = vai.get_value_info(s, 'title') assert vi.label == 'foo' assert vi.raw == 'title_foo' assert vi.author == 'nickname_foo' assert vi.affiliation == 'affiliation_foo ' assert vi.comment == 'comment_ foo' assert vi.date == '2010-05-31 19:00:00 -05:00' vi = vai.get_value_info(s, 'attribute_value') assert vi.label == 'foo' assert vi.raw == 'fake_to_localize' assert vi.value == 'fake_localized' assert vi.author == 'nickname_foo' assert vi.affiliation == ' affiliation_foo' assert vi.comment == 'comment_foo' assert vi.date == '2010-05-31 19:00:00 -05:00'
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest import uuid from airflow.providers.amazon.aws.hooks.kinesis import AwsFirehoseHook try: from moto import mock_kinesis except ImportError: mock_kinesis = None class TestAwsFirehoseHook(unittest.TestCase): @unittest.skipIf(mock_kinesis is None, 'mock_kinesis package not present') @mock_kinesis def test_get_conn_returns_a_boto3_connection(self): hook = AwsFirehoseHook( aws_conn_id='aws_default', delivery_stream="test_airflow", region_name="us-east-1" ) self.assertIsNotNone(hook.get_conn()) @unittest.skipIf(mock_kinesis is None, 'mock_kinesis package not present') @mock_kinesis def test_insert_batch_records_kinesis_firehose(self): hook = AwsFirehoseHook( aws_conn_id='aws_default', delivery_stream="test_airflow", region_name="us-east-1" ) response = hook.get_conn().create_delivery_stream( DeliveryStreamName="test_airflow", S3DestinationConfiguration={ 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', 'BucketARN': 'arn:aws:s3:::kinesis-test', 'Prefix': 'airflow/', 'BufferingHints': {'SizeInMBs': 123, 'IntervalInSeconds': 124}, 'Compr
essionFormat': 'UNCOMPRESSED', }, ) stream_arn = response['DeliveryStreamARN'] self.assertEqual(stream_arn, "arn:aws:firehose:us-east-1:123456789012:deliverystream/test_airflow") records = [{"Data": str(uuid.uuid4())} for _ in range(100)] response = hook.put_records(records) s
elf.assertEqual(response['FailedPutCount'], 0) self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
from myhdl import always, always_seq, block, delay, enum, instance, intbv, ResetSignal, Signal, StopSimulation @block def uart_tx(tx_bit, tx_valid, tx_byte, tx_clk, tx_rst): index = Signal(intbv(0, min=0, max=8)) st = enum('IDLE', 'START', 'DATA') state = Signal(st.IDLE) @always(tx_clk.posedge, tx_rst.negedge) def fsm(): if tx_rst == 0: tx_bit.next = 1 index.next = 0 state.next = st.IDLE else: if state == st.IDLE: tx_bit.next = 1 if tx_valid: # a pulse state.next = st.START elif state == st.START: tx_bit.next = 0 index.next = 7 state.next = st.DATA elif state == st.DATA: tx_bit.next = tx_byte[index] if index == 0: state.next = st.IDLE else: index.next = index - 1 return fsm @block def uart_tx_2(tx_bit, tx_valid, tx_byte, tx_clk, tx_rst): index = Signal(intbv(0, min=0, max=8)) st = enum('IDLE', 'START', 'DATA') state = Signal(st.IDLE) @always_seq(tx_clk.posedge, reset=tx_rst) def fsm(): if state == st.IDLE: tx_bit.next = 1 if tx_valid: # a pulse state.next = st.START elif state == st.START: tx_bit.next = 0 index.next = 7 state.next = st.DATA elif state == st.DATA: tx_bit.next = tx_byte[index] if index == 0: state.next = st.IDLE else: index.next = index - 1 return fsm @block def tb(uart_tx): tx_bit = Signal(bool(1)) tx_valid = Signal(bool(0)) tx_byte = Signal(intbv(0)[8:]) tx_clk = Signal(bool(0)) # tx_rst = Signal(bool(1)) tx_rst = ResetSignal(1, active=0, isasync=True) uart_tx_inst = uart_tx(tx_bit, tx_valid, tx_byte, tx_clk, tx_rst) # toVerilog(uart_tx, tx_bit, tx_valid, tx_byte, tx_clk, tx_rst) @always(delay(10)) def clk_gen(): tx_clk.next = not tx_clk @instance def stimulus(): tx_rst.next = 1 yield delay(100) tx_rst.next = 0 yield delay(100) tx_rst.next = 1 yiel
d delay(100) for v in (0x00, 0xff, 0x55, 0xaa): yield tx_clk.negedge tx_byte.next = v tx_valid.next = 1 yield tx_clk.negedge tx_valid.next = 0 yield delay(16 * 20) raise StopSimulation return clk_gen, stimulus, uart_tx_inst dut = uart_tx_2 inst = tb(dut) inst.config_sim(trace=True) inst.run_sim(10000)
import os from setuptools import setup, fin
d_packages with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme: README = readme.read() # allow setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name='poloniex', version='0.1', packages=[ 'poloniex', 'poloniex.wamp', 'poloniex.api' ], incl
ude_package_data=True, description='Python Poloniex API', long_description=README, url='https://github.com/absortium/poloniex.git', author='Andrey Samokhvalov', license='MIT', author_email='andrew.shvv@gmail.com', install_requires=[ 'asyncio', 'aiohttp', 'autobahn', 'pp-ez', 'requests' ], classifiers=[ 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.5', ], )
the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import IECore import Gaffer import GafferUI import GafferOSL import imath import functools _channelNamesOptions = { "RGB" : IECore.Color3fData( imath.Color3f( 1 ) ), "RGBA" : IECore.Color4fData( imath.Color4f( 1 ) ), "R" : IECore.FloatData( 1 ), "G" : IECore.FloatData( 1 ), "B" : IECore.FloatData( 1 ), "A" : IECore.FloatData( 1 ), "customChannel" : IECore.FloatData( 1 ), "customLayer" : IECore.Color3fData( imath.Color3f( 1 ) ), "customLayerRGBA" : IECore.Color4fData( imath.Color4f( 1 ) ), "closure" : None, } ########################################################################## # _ChannelsFooter ########################################################################## class _ChannelsFooter( GafferUI.PlugValueWidget ) : def __init__( self, plug ) : row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal ) GafferUI.PlugValueWidget.__init__( self, row, plug ) with row : GafferUI.Spacer( imath.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) ) menuButton = GafferUI.MenuButton( image = "plus.png", hasFrame = False, menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ), title = "Add Input" ), toolTip = "Add Input" ) menuButton.setEnabled( not Gaffer.MetadataAlgo.readOnly( plug ) ) GafferUI.Spacer( imath.V2i( 1 ), imath.V2i( 999999, 1 ), parenting = { "expand" : True } ) def _updateFromPlug( self ) : self.setEnabled( self._editable() ) def __menuDefinition( self ) : result = IECore.MenuDefinition() usedNames = set() for p in self.getPlug().children(): # TODO - this method for checking if a plug variesWithContext should probably live in PlugAlgo # ( it's based on Switch::variesWithContext ) sourcePlug = p["name"].source() variesWithContext = sourcePlug.direction() == Gaffer.Plug.Direction.Out and isinstance( ComputeNode, sourcePlug.node() ) if not variesWithContext: usedNames.add( p["name"].getValue() ) # Use a fixed order for some standard options that we want to list in a specific order sortedOptions = [] for label in ["RGB", "RGBA", "R", "G", "B", "A" ]: sortedOptions.append( (label, _channelNamesOptions[label] ) ) for label, defaultData in sorted( _channelNamesOptions.items() ): if not label in [ i[0] for i in 
sortedOptions ]: sortedOptions.append( (label, defaultData) ) categories = { "Standard" : [], "Custom" : [], "Advanced" : [] } for label, defaultData in sortedOptions: if label == "closure": categories["Advanced"].append( ( label, label, defaultData ) ) else: bareLabel = label.replace( "RGBA", "" ).replace( "RGB", "" ) channelName = bareLabel if label.startswith( "custom" ): if channelName in usedNames: suffix = 2 while True: channelName = bareLabel + str( suffix ) if not channelName in usedNames: break suffix += 1 categories["Custom"].append( ( label, channelName, defaultData ) ) else: if channelName in usedNames: continue categories["Standard"].append( ( label, channelName, defaultData ) ) for category in [ "Standard", "Custom", "Advanced" ]: for ( menuLabel, channelName, defaultData ) in categories[category]: result.append( "/" + category + "/" + menuLabel, { "command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), channelName, defaultData ), } ) return result def __addPlug( self, name, defaultData ) : alphaValue = None if isinstance( defaultData, IECore.Color4fData ): alphaValue = Gaffer.FloatPlug( "value", Gaffer.Plug.Direction.In, defaultData.value.a ) defaultData = IECore.Color3fData( imath.Color3f( defaultData.value.r, defaultData.value.g, defaultData.value.b ) ) if defaultData == None: plugName = "closure" name = "" valuePlug = GafferOSL.ClosurePlug( "value" ) else: plugName = "channel" valuePlug = Gaffer.PlugAlgo.createPlugFromData( "value", Gaffer.Plug.Direction.In, Gaffer.Plug.Flags.Default, defaultData ) with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) : self.getPlug().addChild( Gaffer.NameValuePlug( name, valuePlug, True, plugName, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) ) if alphaValue: self.getPlug().addChild( Gaffer.NameValuePlug( name + ".A" if name else "A", alphaValue, True, plugName, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) ) def __channelLabelFromPlug( plug ): if plug.typeId() == GafferOSL.ClosurePlug.staticTypeId(): return plug.parent().getName() elif plug.typeId() == Gaffer.Color3fPlug.staticTypeId() and plug.parent()["name"].getValue() == "": return "[RGB]" else: return plug.parent()["name"].getValue() ########################################################################## # Metadata ########################################################################## Gaffer.Metadata.registerNode( GafferOSL.OSLImage, "description", """ Executes OSL shaders to perform image processing. Use the shaders from the OSL/ImageProcessing menu to read values from the input image and then write values back to it. """, "plugAdderOptions", IECore.CompoundData( _channelNamesOptions ), "layout:activator:defaultFormatActive", lambda node : not node["in"].getInput(), plugs = { "defaultFormat" : [ "description", """ The resolution and aspect ratio to output when there is no input image provided. """, "layout:activator", "defaultFormatActive", ], "channels" : [ "description", """ Define image channels to output by adding child plugs and connecting corresponding OSL shaders. You
can drive RGB layers with a color, or connect individual channels to a float. If you want to add multiple channels at once, you can also add a closure plug, which can accept a connection from an OSLCode with a combined output closure. """, "layout:customWidget:footer:widgetType", "GafferOSLUI.OSLImageUI._ChannelsFooter", "layout:customWidget:footer:index", -1, "nodule:type", "GafferUI::CompoundNo
dule", "noduleLayout:section", "left", "noduleLayout:spacing", 0.2, "plugValueWidget:type", "GafferUI.LayoutPlugValueWidget", # Add + button for showing and hiding parameters in the GraphEditor "noduleLayout:customGadget:addButton:gadgetType", "GafferOSLUI.OSLImageUI.PlugAdder", ], "channels.*" : [ # Although the parameters plug is positioned # as we want above, we must also register # appropriate values for each individual parameter, # for the case where they get promoted to a box # individually. "noduleLayout:section", "left", "nodule:type", "GafferUI::CompoundNodule", "nameValuePlugPlugVa
class Zone
: def __init__(self, id_zone, name, region, description): self.id = id_zone self.name = name self.region = region self.description = descriptio
n
#!/usr/bin/env python # Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import glob import os import shutil import subprocess import sys netlog_viewer_root_path = os.path.abspath( os.path.join(os.path.dirname(__file__), '..')) sys.path.append(netlog_viewer_root_path) import netlog_viewer_project project = netlog_viewer_project.NetlogViewerProject() src_dir = project.netlog_viewer_src_path out_dir = os.path.join(netlog_viewer_root_path, "appengine", "static") components_dir = os.path.join(project.catapult_third_party_path, "polymer", "components") if os.path.exists(out_dir): shutil.rmtree(out_dir) os.mkdir(out_dir) in_html = os.path.join(src_dir, 'index.html') out_html = os.path.join(out_dir, 'vulcanized.html') try: subprocess.check_call(['vulcanize', in_html, '--inline-scripts', '--inline-css', '--strip-comments', '--redirect', '/components|' + components_dir, '--out-html', out_html]) except OSError: sys.stderr.write(''' ERROR: Could not execute "vulcanize". To install vulcanize on Linux: sudo ap
t-get install npm sudo npm install -g vulcanize '''[1:]) sys.exit(1) for fn in glob.glob
(os.path.join(src_dir, "*.png")): shutil.copyfile(fn, os.path.join(out_dir, os.path.split(fn)[1]))
# Copyright (C) 2014, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import db from nova import objects from nova.objects import base from nova.objects import fields # TODO(berrange): Remove NovaObjectDictCompat class DNSDomain(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' fields = { 'domain': fields.StringField(), 'scope': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), } @staticmethod def _from_db_object(context, vif, db_vif): for field in vif.fields: vif[field] = db_vif[field] vif._context = context vif.obj_reset_change
s() return vif @base.remotable_classmethod def get_by_domain(cls, context, domain): db_dnsd = db.dnsdomain_get(context, d
omain) if db_dnsd: return cls._from_db_object(context, cls(), db_dnsd) @base.remotable_classmethod def register_for_zone(cls, context, domain, zone): db.dnsdomain_register_for_zone(context, domain, zone) @base.remotable_classmethod def register_for_project(cls, context, domain, project): db.dnsdomain_register_for_project(context, domain, project) @base.remotable_classmethod def delete_by_domain(cls, context, domain): db.dnsdomain_unregister(context, domain) class DNSDomainList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('DNSDomain'), } child_versions = { '1.0': '1.0', } @base.remotable_classmethod def get_all(cls, context): db_domains = db.dnsdomain_get_all(context) return base.obj_make_list(context, cls(context), objects.DNSDomain, db_domains)
# D
efinition for a binary tree node. class TreeNode: def __init__(self, x): self.val = x self.left = None self.right = None class Solution: def increasingBST(self, root: TreeNode) -> TreeNode: def dfs(node): if not node.left and not node.right: return node head = None if node.right: node.right = dfs(node.right)
if node.left:
                head = dfs(node.left)
                cur = head
                # walk to the rightmost node of the flattened left
                # subtree, splice this node there, and clear the left
                # pointer so the result has right children only
                while cur.right:
                    cur = cur.right
                cur.right = node
                node.left = None
                return head
            else:
                return node

        return dfs(root)


t = TreeNode(2)
t.left = TreeNode(1)
tt = Solution().increasingBST(t)
print(tt.val, tt.right.val)
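
# --- illustrative sanity check (added for exposition) -------------------------
# The right spine of the rearranged tree should spell out the original
# inorder traversal, and no node should keep a left child.

def _inorder(node):
    return _inorder(node.left) + [node.val] + _inorder(node.right) if node else []


root = TreeNode(5)
root.left = TreeNode(3)
root.left.left = TreeNode(2)
root.left.right = TreeNode(4)
root.right = TreeNode(6)

expected = _inorder(root)                  # [2, 3, 4, 5, 6]
head = Solution().increasingBST(root)
spine = []
while head:
    assert head.left is None               # dfs clears every left pointer
    spine.append(head.val)
    head = head.right
assert spine == expected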
ht: This module has been placed in the public domain. """ Tests for docutils.transforms.references.Substitutions. """ from __init__ import DocutilsTestSupport from docutils.transforms.references import Substitutions from docutils.parsers.rst import Parser def suite(): parser = Parser() s = DocutilsTestSupport.TransformTestSuite(parser) s.generateTests(totest) return s totest = {} totest['substitutions'] = ((Substitutions,), [ ["""\ The |biohazard| symbol is deservedly scary-looking. .. |biohazard| image:: biohazard.png """, """\ <document source="test data"> <paragraph> The \n\ <image alt="biohazard" uri="biohazard.png"> symbol is deservedly scary-looking. <substitution_definition names="biohazard"> <image alt="biohazard" uri="biohazard.png"> """], ["""\ Here's an |unknown| substitution. """, """\ <document source="test data"> <paragraph> Here's an \n\ <problematic ids="id2" refid="id1"> |unknown| substitution. <system_message backrefs="id2" ids="id1" level="3" line="1" source="test data" type="ERROR"> <paragraph> Undefined substitution referenced: "unknown". """], [u"""\ Substitutions support case differences: .. |eacute| replace:: \u00E9 .. |Eacute| replace:: \u00C9 |Eacute|\\t\\ |eacute|, and even |EACUTE|. """, u"""\ <document source="test data"> <paragraph> Substitutions support case differences: <substitution_definition names="eacute"> \u00E9 <substitution_definition names="Eacute"> \u00C9 <paragraph> \u00C9 t \u00E9 , and even \n\ \u00C9 . """], [u"""\ Indirect substitution definitions with multiple references: |substitute| my coke for gin |substitute| you for my mum at least I'll get my washing done .. |substitute| replace:: |replace| .. |replace| replace:: swap """, u"""\ <document source="test data"> <paragraph> Indirect substitution definitions with multiple references: <paragraph> swap my coke for gin swap you for my mum at least I'll get my washing done <substitution_definition names="substitute"> swap <substitution_definition names="replace"> swap """], ["""\ .. |l| unicode:: U+00AB .. left chevron .. |r| unicode:: U+00BB .. right chevron .. |.| replace:: |l|\ ``.``\ |r| .. Delete either of the following lines, and there is no error. Regular expression |.| will match any character .. Note:: Note that |.| matches *exactly* one character """, u"""\ <document source="test data"> <substitution_definition names="l"> \xab <substitution_definition names="r"> \xbb <substitution_definition names="."> \xab <literal> . \xbb <comment xml:space="preserve"> Delete either of the following lines, and there is no error. <paragraph> Regular expression \n\ \xab <literal> . \xbb will match any character <note> <paragraph> Note that \n\ \xab <literal> . \xbb matches \n\ <emphasis> exactly one character """], ["""\ .. |sub| replace:: |sub| """, """\ <document source="test data"> <system_message level="3" line="1" names="sub" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |sub| replace:: |sub| """], ["""\ .. |sub| replace:: |indirect1| .. |indirect1| replace:: |indirect2| .. |indirect2| replace:: |Sub| """, """\ <document source="test data"> <system_message level="3" line="1" names="sub" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |sub| replace:: |indirect1| <system_message level="3" line="2" names="indirect1" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. 
|indirect1| replace:: |indirect2| <system_message level="3" line="3" names="indirect2" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |indirect2| replace:: |Sub| """], ["""\ .. |indirect1| replace:: |indirect2| .. |indirect2| replace:: |Sub| .. |sub| replace:: |indirect1| Use |sub| and |indirect1| and |sub| again (and |sub| one more time). """, """\ <document source="test data"> <system_message level="3" line="1" names="indirect1" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |indirect1| replace:: |indirect2| <system_message level="3" line="2" names="indirect2" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |indirect2| replace:: |Sub| <system_message level="3" line="3" names="sub" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |sub| replace:: |indirect1| <paragraph> Use \n\ <problematic ids="id8" refid="id7"> and \n\ <problematic ids="id2" refid="id1"> |indirect1| and \n\ <problematic ids="id4" refid="id3"> |sub| again (and \n\ <problematic ids="id6" refid="id5"> |sub| one more time). <system_message backrefs="id2" ids="id1" level="3" line="5" source="test data" type="ERROR"> <paragraph> Circular substitution definition referenced: "indirect1". <system_message backrefs="id4" ids="id3" level="3" line="5" source="test data" type="ERROR"> <paragraph> Circular substitution definition referenced: "sub". <system_message backrefs="id6" ids="id5" level="3" line="5" source="test data" type="ERROR"> <paragraph> Circular substitution definition referenced: "sub". <system_message backrefs="id8" ids="id7" level="3" source="test data" type="ERROR"> <paragraph> Circular substitution definition referenced: "Sub". """], ]) totest['unicode'] = ((Substitutions,),
[ ["""\ Insert an em-dash (|mdash|), a copyright symbol (|copy|), a non-breaking space (|nbsp|), a backwards-not-equals (|bne|), and a captial omega (|Omega|). ..
|mdash| unicode:: 0x02014 .. |copy| unicode:: \\u00A9 .. |nbsp| unicode:: &#x000A0; .. |bne| unicode:: U0003D U020E5 .. |Omega| unicode:: U+003A9 """, u"""\ <document source="test data"> <paragraph> Insert an em-dash ( \u2014 ), a copyright symbol ( \u00a9 ), a non-breaking space ( \u00a0 ), a backwards-not-equals ( = \u20e5 ), and a captial omega ( \u03a9 ). <substitution_definition names="mdash"> \u2014 <substitution_definition names="copy"> \u00a9 <substitution_definition names="nbsp"> \u00a0 <substitution_definition names="bne"> = \u20e5 <substitution_definition names="Omega"> \u03a9 """], [""" Testing comments and extra text. Copyright |copy| 2003, |BogusMegaCorp (TM)|. .. |copy| unicode:: 0xA9 .. copyright sign .. |BogusMegaCorp (TM)| unicode:: BogusMegaCorp U+2122 .. with trademark sign """, u"""\ <document source="test data"> <paragraph> Testing comments and extra text. <paragraph> Copyright \n\ \u00a9 2003, \n\ BogusMegaCorp \u2122 . <substitution_definition names="copy"> \u00a9 <substitution_definition names="BogusMegaCorp\ (TM)"> BogusMegaCorp \u2122 """], ["""\ Inser
from django.conf.urls import patterns, include, url

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from django.conf.urls.i18n import i18n_patterns

admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'votainteligente.views.home', name='home'),
    # url(r'^votainteligente/', include('votainteligente.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    url(r'^i18n/', include('django.conf.urls.i18n')),
    #url(r'^', include('elections.urls')),
    #('^pages/', include('flatpages_i18n.urls')),
    #('^pages/', include('django.contrib.flatpages.urls')),
    #(r'^tinymce/', include('tinymce.urls')),
)

urlpatterns += i18n_patterns('',
    url(r'^', include('elections.urls')),
    url(r'^page', include('flatpages_i18n.urls')),
    (r'^tinymce/', include('tinymce.urls')),
)
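# Illustrative sketch (not part of this project): i18n_patterns serves each of
# the routes above under a language prefix, so the same view answers at
# /en/..., /es/..., and so on. Assuming a configured project with
# USE_I18N = True and a url named 'home' registered through i18n_patterns
# (a hypothetical name; 'home' is commented out above):
#
#     from django.core.urlresolvers import reverse
#     from django.utils import translation
#
#     translation.activate('es')
#     reverse('home')   # -> '/es/', prefix taken from the active language
#     translation.activate('en')
#     reverse('home')   # -> '/en/'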
'''
Given n non-negative integers a1, a2, ..., an, where each represents a point
at coordinate (i, ai), n vertical lines are drawn such that the two endpoints
of line i are at (i, ai) and (i, 0). Find two lines which, together with the
x-axis, form a container such that the container holds the most water.

Note: You may not slant the container.
'''


class Solution(object):
    def maxArea(self, height):
        """
        :type height: List[int]
        :rtype: int
        """
        # Two pointers from both ends: always move the shorter side inward,
        # since keeping it fixed can never produce a larger container.
        lo, hi = 0, len(height) - 1
        theBest = 0
        while lo < hi:
            theBest = max(theBest, (hi - lo) * min(height[lo], height[hi]))
            if height[lo] < height[hi]:
                lo += 1
            else:
                hi -= 1
        return theBest

    def maxArea_TLE2(self, height):
        maxlen = len(height)

        def _max_area_as_short_side(i):
            left = right = 0
            for j in range(i):
                if height[j] >= height[i]:
                    left = height[i] * (i - j)
                    break
            for j in range(maxlen - 1, i, -1):
                if height[j] >= height[i]:
                    right = height[i] * (j - i)
                    break
            return max(left, right)

        theBest = maxHeight = 0
        for i in range(maxlen >> 1):
            if height[i] < maxHeight:
                continue
            maxHeight = height[i]
            theBest = max(theBest, _max_area_as_short_side(i))
        left = theBest

        theBest = maxHeight = 0
        # scan from the right end down to the middle element (inclusive)
        for i in range(maxlen - 1, (maxlen >> 1) - 1, -1):
            if height[i] < maxHeight:
                continue
            maxHeight = height[i]
            theBest = max(theBest, _max_area_as_short_side(i))
        return max(left, theBest)

    def maxArea_TLE(self, height):
        maxlen = len(height)

        def _max_area_as_short_side(i):
            left = right = 0
            for j in range(i):
                if height[j] >= height[i]:
                    left = height[i] * (i - j)
                    break
            for j in range(maxlen - 1, i, -1):
                if height[j] >= height[i]:
                    right = height[i] * (j - i)
                    break
            return max(left, right)

        return max([_max_area_as_short_side(i) for i in range(maxlen)])


if __name__ == '__main__':
    assert Solution().maxArea([2, 1]) == 1
    # large monotone input, used as a smoke/performance check, so only the
    # truthiness of the (positive) result is asserted
    assert Solution().maxArea(range(15001))
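# --- Sanity-check sketch (an addition, not part of the original solution) ---
# Cross-checks the O(n) two-pointer answer against an O(n^2) brute force on
# random inputs.
import random


def _brute_force_max_area(height):
    # try every pair of lines and keep the best container
    return max((hi - lo) * min(height[lo], height[hi])
               for lo in range(len(height))
               for hi in range(lo + 1, len(height)))


if __name__ == '__main__':
    for _ in range(100):
        heights = [random.randint(0, 50) for _ in range(random.randint(2, 40))]
        assert Solution().maxArea(heights) == _brute_force_max_area(heights)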
import os

import discord
import requests

from utils import functions

description = "Show bot log"
perm = 2


async def ex(message, client):
    if not os.path.isfile("screenlog.0"):
        await client.send_message(message.channel,
                                  embed=discord.Embed(colour=discord.Color.red(),
                                                      description="File `screenlog.0` does not exist!"))
    else:
        with open("screenlog.0") as f:
            lines = f.readlines()
        log_full = "".join(lines)
        # only the last ten lines go into the chat message itself
        if len(lines) > 10:
            lines = lines[-10:]
        log = "".join(lines)
        message_send = await client.send_message(message.channel, embed=discord.Embed(
            description="Uploading log to pastebin.com ..."))
        params = {"api_option": "paste",
                  "api_dev_key": functions.get_settings()["secrets"]["pastebin"],
                  "api_paste_code": log_full,
                  "api_paste_private": "1",
                  "api_paste_expire_date": "10M"}
        # pastebin returns the paste URL; point it at the raw view instead
        paste = requests.post("https://pastebin.com/api/api_post.php", data=params).text.replace(
            "https://pastebin.com/", "https://pastebin.com/raw/")
        await client.delete_message(message_send)
        await client.send_message(message.channel,
                                  "**Log of `screenlog.0`**\n*Full log file here: " + paste + "*\n\n" +
                                  "```" + log + "```")
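# Side note (an addition, not part of the original command): when only the
# last few lines of a file are needed, collections.deque collects them in a
# single pass, discarding older lines as it iterates the file object.
from collections import deque


def tail(path, n=10):
    with open(path) as f:
        return "".join(deque(f, maxlen=n))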
#!/usr/bin/env python3
import calendar

if __name__ == "__main__":
    for num in range(1, 13):
        month = calendar.month_name[num]
        print(f"{num:02} - {month}")
import theano
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn import cross_validation, metrics, datasets
from neupy import algorithms, layers, environment

environment.reproducible()
theano.config.floatX = 'float32'

mnist = datasets.fetch_mldata('MNIST original')

target_scaler = OneHotEncoder()
target = mnist.target.reshape((-1, 1))
target = target_scaler.fit_transform(target).todense()

data = mnist.data / 255.
data = data - data.mean(axis=0)

x_train, x_test, y_train, y_test = cross_validation.train_test_split(
    data.astype(np.float32),
    target.astype(np.float32),
    train_size=(6 / 7.)
)

network = algorithms.Momentum(
    [
        layers.Relu(784),
        layers.Relu(500),
        layers.Softmax(300),
        layers.ArgmaxOutput(10),
    ],
    error='categorical_crossentropy',
    step=0.01,
    verbose=True,
    shuffle_data=True,
    momentum=0.99,
    nesterov=True,
)
network.train(x_train, y_train, x_test, y_test, epochs=20)

y_predicted = network.predict(x_test)
y_test = np.asarray(y_test.argmax(axis=1)).reshape(len(y_test))

print(metrics.classification_report(y_test, y_predicted))
score = metrics.accuracy_score(y_test, y_predicted)
print("Validation accuracy: {:.2f}%".format(100 * score))
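# Optional follow-up (an addition, not in the original example): a confusion
# matrix breaks the same predictions down per digit.
print(metrics.confusion_matrix(y_test, y_predicted))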
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 OpenStack Foundation # # L
icensed under the Apache L
icense, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.contrib.oauth1.core import * # flake8: noqa
self.time_now.mjd - predictive_mjd > auto_max_age): raise ValueError(INTERPOLATE_ERROR.format(auto_max_age)) def _refresh_table_as_needed(self, mjd): """Potentially update the IERS table in place depending on the requested time values in ``mjd`` and the time span of the table. For IERS_Auto the behavior is that the table is refreshed from the IERS server if both the following apply: - Any of the requested IERS values are predictive. The IERS-A table contains predictive data out for a year after the available definitive values. - The first predictive values are at least ``conf.auto_max_age days`` old. In other words the IERS-A table was created by IERS long enough ago that it can be considered stale for predictions. """ max_input_mjd = np.max(mjd) now_mjd = self.time_now.mjd # IERS-A table contains predictive data out for a year after # the available definitive values. fpi = self.meta['predictive_index'] predictive_mjd = self.meta['predictive_mjd'] # Update table in place if necessary auto_max_age = _none_to_float(conf.auto_max_age) # If auto_max_age is smaller than IERS update time then repeated downloads may # occur without getting updated values (giving a IERSStaleWarning). if auto_max_age < 10: raise ValueError('IERS auto_max_age configuration value must be larger than 10 days') if (max_input_mjd > predictive_mjd and (now_mjd - predictive_mjd) > auto_max_age): all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror) # Get the latest version try: filename = download_file( all_urls[0], sources=all_urls, cache="update") except Exception as err: # Issue a warning here, perhaps user is offline. An exception # will be raised downstream when actually trying to interpolate # predictive values. warn(AstropyWarning( f'failed to download {" and ".join(all_urls)}: {err}.\n' 'A coordinate or time-related ' 'calculation might be compromised or fail because the dates are ' 'not covered by the available IERS file. See the ' '"IERS data access" section of the astropy documentation ' 'for additional information on working offline.')) return new_table = self.__class__.read(file=filename) new_table.meta['data_url'] = str(all_urls[0]) # New table has new values? if new_table['MJD'][-1] > self['MJD'][-1]: # Replace *replace* current values from the first predictive index through # the end of the current table. This replacement is much faster than just # deleting all rows and then using add_row for the whole duration. new_fpi = np.searchsorted(new_table['MJD'].value, predictive_mjd, side='right') n_replace = len(self) - fpi self[fpi:] = new_table[new_fpi:new_fpi + n_replace] # Sanity check for continuity if new_table['MJD'][new_fpi + n_replace] - self['MJD'][-1] != 1.0 * u.d: raise ValueError('unexpected gap in MJD when refreshing IERS table') # Now add new rows in place for row in new_table[new_fpi + n_replace:]: self.add_row(row) self.meta.update(new_table.meta) else: warn(IERSStaleWarning( 'IERS_Auto predictive values are older than {} days but downloading ' 'the latest table did not find newer values'.format(conf.auto_max_age))) @classmethod def _substitute_iers_b(cls, table): """Substitute IERS B values with those from a real IERS B table. IERS-A has IERS-B values included, but for reasons unknown these do not match the latest IERS-B values (see comments in #4436). Here, we use the bundled astropy IERS-B table to overwrite the values in the downloaded IERS-A table. 
""" iers_b = IERS_B.open() # Substitute IERS-B values for existing B values in IERS-A table mjd_b = table['MJD'][np.isfinite(table['UT1_UTC_B'])] i0 = np.searchsorted(iers_b['MJD'], mjd_b[0], side='left') i1 = np.searchsorted(iers_b['MJD'], mjd_b[-1], side='right') iers_b = iers_b[i0:i1] n_iers_b = len(iers_b) # If there is overlap then replace IERS-A values from available IERS-B if n_iers_b > 0: # Sanity check that we are overwriting the correct values if not u.allclose(table['MJD'][:n_iers_b], iers_b['MJD']): raise ValueError('unexpected mismatch when copying ' 'IERS-B values into IERS-A table.') # Finally do the overwrite table['UT1_UTC_B'][:n_iers_b] = iers_b['UT1_UTC'] table['PM_X_B'][:n_iers_b] = iers_b['PM_x'] table['PM_Y_B'][:n_iers_b] = iers_b['PM_y'] table['dX_2000A_B'][:n_iers_b] = iers_b['dX_2000A'] table['dY_2000A_B'][:n_iers_b] = iers_b['dY_2000A'] return table class earth_orientation_table(ScienceState): """Default IERS table for Earth rotation and reference systems service. These tables are used to calculate the offsets between ``UT1`` and ``UTC`` and for conversion to Earth-based coordinate
systems. The state itself is an IERS table, as an instance of one of the `~astropy.utils.iers.IERS` classes. The default, the auto-updating `~astropy.utils.iers.IERS_Auto` class, should suffice for most purposes. Examples -------- To temporarily use the IERS-B file packaged with astropy:: >>> from astropy.utils import iers >>> from astropy.time import Time >>> iers_b = iers.IERS_B.o
pen(iers.IERS_B_FILE) >>> with iers.earth_orientation_table.set(iers_b): ... print(Time('2000-01-01').ut1.isot) 2000-01-01T00:00:00.355 To use the most recent IERS-A file for the whole session:: >>> iers_a = iers.IERS_A.open(iers.IERS_A_URL) # doctest: +SKIP >>> iers.earth_orientation_table.set(iers_a) # doctest: +SKIP <ScienceState earth_orientation_table: <IERS_A length=17463>...> To go back to the default (of `~astropy.utils.iers.IERS_Auto`):: >>> iers.earth_orientation_table.set(None) # doctest: +SKIP <ScienceState earth_orientation_table: <IERS_Auto length=17428>...> """ _value = None @classmethod def validate(cls, value): if value is None: value = IERS_Auto.open() if not isinstance(value, IERS): raise ValueError("earth_orientation_table requires an IERS Table.") return value class LeapSeconds(QTable): """Leap seconds class, holding TAI-UTC differences. The table should hold columns 'year', 'month', 'tai_utc'. Methods are provided to initialize the table from IERS ``Leap_Second.dat``, IETF/ntp ``leap-seconds.list``, or built-in ERFA/SOFA, and to update the list used by ERFA. Notes ----- Astropy has a built-in ``iers.IERS_LEAP_SECONDS_FILE``. Up to date versions can be downloaded from ``iers.IERS_LEAP_SECONDS_URL`` or ``iers.LEAP_SECONDS_LIST_URL``. Many systems also store a version of ``leap-seconds.list`` for use with ``ntp`` (e.g., on Debian/Ubuntu systems, ``/usr/share/zoneinfo/leap-seconds.list``). To prevent querying internet resources if the available local leap second file(s) are out of date, set ``iers.conf.auto_download = False``. This must be done prior to performing any ``Time`` scale transformations related to UTC (e.g. converting from UTC to TAI). """ # Note: Time instances in this class should use scale='tai' to avoid # needing leap seconds in their creation or inter
req + source_triple[-1] norm_source_freq = float(source_triple_freq) / float(patterns_freq) norm_target_freq = float(target_triple_freq) / float(patterns_freq) norm_source_freqs.append(norm_source_freq) norm_target_freqs.append(norm_target_freq) triples.append((source_triple, norm_source_freq)) self.norm_source_freq = sum(norm_source_freqs) self.norm_target_freq = sum(norm_target_freqs) self.triples = triples self.triples.sort(key=lambda triple: -triple[1]) class PatternSearchQuery(object): def __init__(self, key_term, seed_triple): self.seed_triple = seed_triple self.rel_type = seed_triple[0] self.arg_list = [] self.key_term = key_term for i in range(1, len(seed_triple) - 1): if seed_triple[i] != key_term and seed_triple[i] >= 0: self.arg_list.append((seed_triple[i], i)) else: self.key_term_i = i self.len_constraint_flt = lambda triple: len(triple) == len(self.seed_triple) self.self_filter = lambda triple: triple[self.key_term_i] != self.key_term def exact_pattern_match(self, triple): if len(self.seed_triple) != len(triple): return False for i in xrange(len(self.seed_triple)): if i != self.key_term_i and self.seed_triple[i] != triple[i]: return False return True def find_triples(self, engine, strict=True): triples = engine.search(rel_type=self.rel_type, arg_query=self.arg_list) triples = filter(self.self_filter, triples) if strict: triples = filter(self.len_constraint_flt, triples) triples = filter(self.exact_pattern_match, triples) return triples class TripleStoreExplorer(object): def __init__(self, search_engine, stop_terms=(), concept_net=()): self.engine = search_engine self.rel_id_map = REL_ID_MAP self.id_rel_map = ID_REL_MAP self.stop_terms = self.map_stop_terms(stop_terms) self.concept_net = self.map_concept_net(concept_net) def calc_term_triples_freq(self, term_id, threshold=0.0): triples_count = 0.0 triples_freq = 0.0 triples = self.engine.search(arg_query=(term_id,)) triples = filter(lambda tr: not self.is_light_triple(tr), triples) for triple in triples: triples_freq = triple[-1] if triples_freq > threshold: triples_count += 1 triples_freq += triple[-1] return triples_count, triples_freq def is_light_triple(self, triple): pos_tags = REL_POS_MAP[triple[0]] not_light = 0 for i in range(1, len(triple) - 1): if triple[i] not in self.stop_terms and pos_tags[i - 1] is not POS.PREP: not_light += 1 if not_light == 2: return False return True def find_triples_by_patterns(self, term_id, target_triples): siblings_dict = dict() siblings_num = 0 for target_triple in target_triples: query = PatternSearchQuery(term_id, target_triple) siblings = query.find_triples(self.engine, strict=False) siblings = filter(lambda tr: not self.is_light_triple(tr), siblings) siblings_num += len(siblings) pattern_freq = sum([triple[-1] for triple in siblings]) for sibling in siblings: source_id = sibling[query.key_term_i] if source_id >= 0: if source_id in siblings_dict: siblings_dict[source_id].append((target_triple, sibling, pattern_freq)) else: siblings_dict[source_id] = [(target_triple, sibling, pattern_freq)] return siblings_dict, siblings_num def map_stop_terms(self, stop_list_obj): stop_terms_ids = set() for term in stop_list_obj.stop_words: term_id = self.engine.term_id_map.get(term, -1) if term_id != -1: stop_terms_ids.add(term_id) logging.info("MAPPED %d/%d STOP TERMS" % (len(stop_terms_ids), len(stop_list_obj.stop_words))) for term in stop_list_obj.stop_words: term_id = self.engine.term_id_map.get(term, -1) # if term_id == -1: # logging.info("TERM NOT FOUND IN INDEX: %s" % term) 
stop_terms_ids.add(-1) return stop_terms_ids def map_concept_net(self, concept_net_obj): concept_net = dict() mapped = 0 for rel_type, arg1, arg2, pos in concept_net_obj.relations: arg_1_id = self.engine.term_id_map.get(arg1) arg_2_id = self.engine.term_id_map.get(arg2) if arg_1_id is not None and arg_2_id is not None: mapped += 1 if arg_1_id in concept_net: concept_net[arg_1_id].add(arg_2_id) else: concept_net[arg_1_id] = {arg_2_id} logging.info("USING %d RELATIONS FROM CONCEPT NET" % mapped) return concept_net def find_potential_sources(self, term, threshold=0): """ Find all potential sources for given target term and calculate their frequencies. """ target_term_id = self.engine.term_id_map.get(term) print "%r" % target_term_id, term if target_term_id is None: return None target_triples = self.engine.search(arg_query=(target_term_id,)) target_triples_num = len(target_triples) target_triples_freq = sum([target[-1] for target in target_triples]) print "\tTARGET: triples %d, frequency %d" % (target_triples_num, target_triples_freq) print "\tFOUND TARGET TRIPLES FOR %s: %d" % (term, len(target_triples)) target_triples = filter(lambda s: s[-1] >= threshold, target_triples) print "\tAFTER FILTERING (f>=%f): %d" % (threshold, len(target_triples)) target_triple
s = filter(lambda tr: not self.is_light_triple(tr), target_triples) print "\tAFTER IGNORING LIGHT TRIPLES: %d" % len(target_triples) source_triples, source_triple_num = self.find_triples_by_patterns(target_term_id, target_triples) print "\tFOUND SOURCE TRIPLES FOR %s: %d" % (term, source_triple_num) potential_sources = [] stops_ignored = 0 cnect_ignored = 0 for source_term_id, triples in source_triples.iteritems():
if source_term_id in self.stop_terms: stops_ignored += 1 continue if target_term_id in self.concept_net and source_term_id in self.concept_net[target_term_id]: cnect_ignored += 1 continue if source_term_id in self.concept_net and target_term_id in self.concept_net[source_term_id]: cnect_ignored += 1 continue new_source = PotentialSource(source_term_id, triples) new_source.calculate_freqs() potential_sources.append(new_source) print "\tSTOPS IGNORED: %d" % stops_ignored print "\tCONCEPT NET IGNORED: %d" % cnect_ignored # Other sorting options: # * triples_count # * total_pattern_source_triple_freq # * total_pattern_target_triple_freq # * norm_source_freq # * norm_target_freq potential_sources.sort(key=lambda source: -source.norm_source_freq) return potential_sources def format_source_output_line(self, potential_source): triples = potential_source.triples triples_str = "" for triple, norm_freq in triples: if triple[1] >= 0: triples_str += "{%s" % self.engine.id_term_map[triple[1]] else: triples_str += "{NONE" for term_id in triple[2:(len(triple) - 1)]: if term_id >= 0: triples_str += ";" + self.engine.id_term_map[term_id] else: triples_str += "NONE" triples_str += ";%.6f} " % norm_freq
# Copyright (c) 2012 Intel # Copyright (c) 2012 OpenStack, LLC. # Copyright (c) 2015 EMC Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math from oslo_log import log from manila.i18n import _LE from manila.i18n import _LW from manila.openstack.common.scheduler import filters LOG = log.getLogger(__name__) class CapacityFilter(filters.BaseHostFilter): """CapacityFilter filters based on share host's capacity utilization.""" def host_passes(self, host_state, filter_properties): """Return True if host has sufficient capacity.""" share_size = filter_properties.get('size') if host_state.free_capacity_gb is None: # Fail Safe LOG.error(_LE("Free capacity not set: " "share node info collection broken.")) return False free_space = host_state.free_capacity_gb total_space = host_state.total_capacity_gb reserved = float(host_state.reserved_percentage) / 100 if free_space in ('infinite', 'unknown'): # NOTE(zhiteng) for those back-ends cannot report actual # available capacity, we assume it is able to serve the # request. Even if it was not, the retry mechanism is # able to handle the failure by rescheduling return True elif total_space in ('infinite', 'unknown'): # NOTE(xyang): If total_space is 'infinite' or 'unknown' and # reserved is 0, we assume the back-ends can serve the request. # If total_space is 'infinite' or 'unknown' and reserved # is not 0, we cannot calculate the reserved space. # float(total_space) will throw an exception. total*reserved # also won't work. So the back-ends cannot serve the request. return reserved == 0 total = float(total_space) if total <= 0: LOG.warning(_LW("Insufficient free space for share creation. " "Total capacity is %(total).2f on host %(host)s."), {"total": total, "host": host_state.host}) return False # NOTE(xyang): Calculate how much free space is left after taking # into account the reserved space. free = math.floor(free_space - total * reserved) msg_args = {"host": host_state.host, "requested": share_size, "available": free} LOG.debug("Space information for share creation " "on host %(host)s (requested / avail): " "%(requested)s/%(available)s", msg_args) # NOTE(xyang): Only evaluate using max_over_subscription_ratio # if thin_provisioning_support is True. Check if the ratio of # provisioned capacity over total capacity would exceed # subscription ratio. # If max_over_subscription_ratio = 1, the provisioned_ratio # should still be limited by the max_over_subscription_ratio; # otherwise, it could result in infinite provisioning. if (host_state.thin_provisioning_support and host_state.max_over_subscription_ratio >= 1): provisioned_ratio = ((host_state.provisioned_capacity_gb + share_size) / total) if provisioned_ratio > host_state.max_over_subscription_ratio: LOG.warning(_LW( "Insufficient free space for thin provisioning. " "The ratio of provisioned capacity over total capacity " "%(provisioned_ratio).2f would exceed the maximum over " "subscription ratio %(oversub_ratio).2f on host " "%(host)s."), {"provisi
oned_ratio": provisioned_ratio, "oversub_ratio": host_state.max_over_subscription_ratio, "host": host_state.host}) return False else: # NOTE(xyang): Adjust free_virtual calculation based on # free and max_over_subscription_ratio. adjusted_free_virtual = (
free * host_state.max_over_subscription_ratio) return adjusted_free_virtual >= share_size elif host_state.thin_provisioning_support: LOG.error(_LE("Invalid max_over_subscription_ratio: %(ratio)s. " "Valid value should be >= 1."), {"ratio": host_state.max_over_subscription_ratio}) return False if free < share_size: LOG.warning(_LW("Insufficient free space for share creation " "on host %(host)s (requested / avail): " "%(requested)s/%(available)s"), msg_args) return False return True
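# Worked example (hypothetical numbers, not from any real back end) tracing the
# thin-provisioning branch above, with total=100 GB, free_space=40 GB,
# reserved=10%, provisioned_capacity_gb=180 and max_over_subscription_ratio=2.0:
#
#   free = floor(40 - 100 * 0.10) = 30          # usable space after the reserve
#   30 GB request: provisioned_ratio = (180 + 30) / 100 = 2.1 > 2.0 -> rejected
#   10 GB request: provisioned_ratio = (180 + 10) / 100 = 1.9 <= 2.0, then
#                  adjusted_free_virtual = 30 * 2.0 = 60 >= 10     -> accepted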
""" altgraph.ObjectGraph - Graph of objects with an identifier ========================================================== A graph of objects that have a "graphident" attribute. graphident is the key for the object in the graph """ from __future__ import print_function from __future__ import absolute_import from altgraph import GraphError from altgraph.Graph import Graph from altgraph.GraphUtil import filter_stack from six.moves import map class ObjectGraph(object): """ A graph of objects that have a "graphident" attribute. graphident is the key for the object in the graph """ def __init__(self, graph=None, debug=0): if graph is None: graph = Graph() self.graphident = self self.graph = graph self.debug = debug self.indent = 0 graph.add_node(self, None) def __repr__(self): return '<%s>' % (type(self).__name__,) def flatten(self, condition=None, start=None): """ Iterate over the subgraph that is entirely reachable by condition starting from the given start node or the ObjectGraph root """ if start is None: start = self start = self.getRawIdent(start) return self.graph.iterdata(start=start, condition=condition) def nodes(self): for ident in self.graph: node = self.graph.node_data(ident) if node is not None: yield self.graph.node_data(ident) def get_edges(self, node): start = self.getRawIdent(node) _, _, outraw, incraw = self.graph.describe_node(start) def iter_edges(lst, n): seen = set() for tpl in (self.graph.describe_edge(e) for e in lst): ident = tpl[n] if ident not in seen: yield self.findNode(ident) seen.add(ident) return iter_edges(outraw, 3), iter_edges(incraw, 2) def edgeData(self, fromNode, toNode): start = self.getRawIdent(fromNode) stop = self.getRawIdent(toNode) edge = self.graph.edge_by_node(start, stop) return self.graph.edge_data(edge) def updateEdgeData(self, fromNode, toNode, edgeData): start = self.getRawIdent(fromNode) stop = self.getRawIdent(toNode) edge = self.graph.edge_by_node(start, stop) self.graph.update_edge_data(edge, edgeData) def filterStack(self, filters): """ Filter the ObjectGraph in-place by removing all edges to nodes that do not match every filter in the given filter list Returns a tuple containing the number of: (nodes_visited, nodes_removed, nodes_orphaned) """ visited, removes, orphans = filter_stack(self.graph, self, filters) for last_good, tail in orphans: self.graph.add_edge(last_good, tail, edge_data='orphan') for node in removes: self.graph.hide_node(node) return len(visited)-1, len(removes), len(orphans) def removeNode(self, node): """ Remove the given node from the graph if it exists """ ident = self.getIdent(node) if ident is not None: self.graph.hide_node(ident) def removeReference(self, fromnode, tonode): """ Remove all edges from fromnode to tonode """ if fromnode is None: fromnode = self fromident = self.getIdent(fromnode) toident = self.getIdent(tonode) if fromident is not None and toident is not None: while True: edge = self.graph.edge_by_node(fromident, toident) if edge is None: break self.graph.hide_edge(edge) def getIdent(self, node): """ Get the graph identifier for a node """ ident = self.getRawIdent(node) if ident is not None: return ident node = self.findNode(node) if node is None: return None return node.graphident def getRawIdent
(self, node): """ Get the identifier for a node obje
ct """ if node is self: return node ident = getattr(node, 'graphident', None) return ident def __contains__(self, node): return self.findNode(node) is not None def findNode(self, node): """ Find the node on the graph """ ident = self.getRawIdent(node) if ident is None: ident = node try: return self.graph.node_data(ident) except KeyError: return None def addNode(self, node): """ Add a node to the graph referenced by the root """ self.msg(4, "addNode", node) try: self.graph.restore_node(node.graphident) except GraphError: self.graph.add_node(node.graphident, node) def createReference(self, fromnode, tonode, edge_data=None): """ Create a reference from fromnode to tonode """ if fromnode is None: fromnode = self fromident, toident = self.getIdent(fromnode), self.getIdent(tonode) if fromident is None or toident is None: return self.msg(4, "createReference", fromnode, tonode, edge_data) self.graph.add_edge(fromident, toident, edge_data=edge_data) def createNode(self, cls, name, *args, **kw): """ Add a node of type cls to the graph if it does not already exist by the given name """ m = self.findNode(name) if m is None: m = cls(name, *args, **kw) self.addNode(m) return m def msg(self, level, s, *args): """ Print a debug message with the given level """ if s and level <= self.debug: print("%s%s %s" % (" " * self.indent, s, ' '.join(map(repr, args)))) def msgin(self, level, s, *args): """ Print a debug message and indent """ if level <= self.debug: self.msg(level, s, *args) self.indent = self.indent + 1 def msgout(self, level, s, *args): """ Dedent and print a debug message """ if level <= self.debug: self.indent = self.indent - 1 self.msg(level, s, *args)
"""Base test suite for extension arrays. These tests are intended for third-party libraries to subclass to validate that their extension arrays and dtypes satisfy the interface. Moving or renami
ng the tests
should not be done lightly.

Libraries are expected to implement a few pytest fixtures to provide data
for the tests. The fixtures may be located in either

* The same module as your test class.
* A ``conftest.py`` in the same directory as your test class.

The full list of fixtures may be found in the ``conftest.py`` next to this
file.

.. code-block:: python

   import pytest
   from pandas.tests.extension.base import BaseDtypeTests


   @pytest.fixture
   def dtype():
       return MyDtype()


   class TestMyDtype(BaseDtypeTests):
       pass

Your class ``TestMyDtype`` will inherit all the tests defined on
``BaseDtypeTests``. pytest's fixture discovery will supply your ``dtype``
wherever the test requires it. You're free to implement additional tests.

All the tests in these modules use ``self.assert_frame_equal`` or
``self.assert_series_equal`` for dataframe or series comparisons. By default,
they use the usual ``pandas.testing.assert_frame_equal`` and
``pandas.testing.assert_series_equal``. You can override the checks used by
defining the staticmethods ``assert_frame_equal`` and ``assert_series_equal``
on your base test class.

"""
from .casting import BaseCastingTests  # noqa
from .constructors import BaseConstructorsTests  # noqa
from .dtype import BaseDtypeTests  # noqa
from .getitem import BaseGetitemTests  # noqa
from .groupby import BaseGroupbyTests  # noqa
from .interface import BaseInterfaceTests  # noqa
from .methods import BaseMethodsTests  # noqa
from .ops import BaseArithmeticOpsTests, BaseComparisonOpsTests, BaseOpsUtil  # noqa
from .reduce import BaseNoReduceTests, BaseNumericReduceTests, BaseBooleanReduceTests  # noqa
from .missing import BaseMissingTests  # noqa
from .reshaping import BaseReshapingTests  # noqa
from .setitem import BaseSetitemTests  # noqa
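# A hypothetical conftest.py sketch for a third-party extension array
# ("MyDtype" and "MyArray" are placeholders, not real pandas names):
#
#     import pytest
#
#     @pytest.fixture
#     def dtype():
#         return MyDtype()
#
#     @pytest.fixture
#     def data():
#         # length-100 array; most base tests assume data[0] != data[1]
#         return MyArray._from_sequence(range(100))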
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # This module copyright (C) 2015 BarraDev Consulting (<http://www.barradev.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GN
U Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licen
ses/>. # ############################################################################## from . import http_session_redis
# fallback if the requested version is 2 and there is no # 'volumev2' endpoint. if api_version['version'] == 2: try: cinder_url = base.url_for(request, 'volumev2') except exceptions.ServiceCatalogException: LOG.warning("Cinder v2 requested but no 'volumev2' service " "type available in Keystone catalog. Falling back " "to 'volume'.") if cinder_url == "": cinder_url = base.url_for(request, 'volume') except exceptions.ServiceCatalogException: LOG.debug('no volume service configured.') raise LOG.debug('cinderclient connection created using token "%s" and url "%s"' % (request.user.token.id, cinder_url)) c = api_version['client'].Client(request.user.username, request.user.token.id, project_id=request.user.tenant_id, auth_url=cinder_url, insecure=insecure, cacert=cacert, http_log_debug=settings.DEBUG) c.client.auth_token = request.user.token.id c.client.management_url = cinder_url return c def _replace_v2_parameters(data): if VERSIONS.active < 2: data['display_name'] = data['name'] data['display_description'] = data['description'] del data['name'] del data['description'] return data def volume_list(request, search_opts=None): """To see all volumes in the cloud as an admin you can pass in a special search option: {'all_tenants': 1} """ c_client = cinderclient(request) if c_client is Non
e: return [] return [Volume(v) for v in c_client.volumes.list(search_opts=search_opts)] def volume_get(request, volume_id): volume_data = cinderclient(request).volumes.get(volume_id) for attachment in volu
me_data.attachments: if "server_id" in attachment: instance = nova.server_get(request, attachment['server_id']) attachment['instance_name'] = instance.name else: # Nova volume can occasionally send back error'd attachments # the lack a server_id property; to work around that we'll # give the attached instance a generic name. attachment['instance_name'] = _("Unknown instance") return Volume(volume_data) def volume_create(request, size, name, description, volume_type, snapshot_id=None, metadata=None, image_id=None, availability_zone=None, source_volid=None): data = {'name': name, 'description': description, 'volume_type': volume_type, 'snapshot_id': snapshot_id, 'metadata': metadata, 'imageRef': image_id, 'availability_zone': availability_zone, 'source_volid': source_volid} data = _replace_v2_parameters(data) volume = cinderclient(request).volumes.create(size, **data) return Volume(volume) def volume_extend(request, volume_id, new_size): return cinderclient(request).volumes.extend(volume_id, new_size) def volume_delete(request, volume_id): return cinderclient(request).volumes.delete(volume_id) def volume_update(request, volume_id, name, description): vol_data = {'name': name, 'description': description} vol_data = _replace_v2_parameters(vol_data) return cinderclient(request).volumes.update(volume_id, **vol_data) def volume_reset_state(request, volume_id, state): return cinderclient(request).volumes.reset_state(volume_id, state) def volume_snapshot_get(request, snapshot_id): snapshot = cinderclient(request).volume_snapshots.get(snapshot_id) return VolumeSnapshot(snapshot) def volume_snapshot_list(request, search_opts=None): c_client = cinderclient(request) if c_client is None: return [] return [VolumeSnapshot(s) for s in c_client.volume_snapshots.list( search_opts=search_opts)] def volume_snapshot_create(request, volume_id, name, description=None, force=False): data = {'name': name, 'description': description, 'force': force} data = _replace_v2_parameters(data) return VolumeSnapshot(cinderclient(request).volume_snapshots.create( volume_id, **data)) def volume_snapshot_delete(request, snapshot_id): return cinderclient(request).volume_snapshots.delete(snapshot_id) def volume_snapshot_update(request, snapshot_id, name, description): snapshot_data = {'name': name, 'description': description} snapshot_data = _replace_v2_parameters(snapshot_data) return cinderclient(request).volume_snapshots.update(snapshot_id, **snapshot_data) def volume_snapshot_reset_state(request, snapshot_id, state): return cinderclient(request).volume_snapshots.reset_state( snapshot_id, state) @memoized def volume_backup_supported(request): """This method will determine if cinder supports backup. """ # TODO(lcheng) Cinder does not expose the information if cinder # backup is configured yet. This is a workaround until that # capability is available. 
# https://bugs.launchpad.net/cinder/+bug/1334856 cinder_config = getattr(settings, 'OPENSTACK_CINDER_FEATURES', {}) return cinder_config.get('enable_backup', False) def volume_backup_get(request, backup_id): backup = cinderclient(request).backups.get(backup_id) return VolumeBackup(backup) def volume_backup_list(request): c_client = cinderclient(request) if c_client is None: return [] return [VolumeBackup(b) for b in c_client.backups.list()] def volume_backup_create(request, volume_id, container_name, name, description): backup = cinderclient(request).backups.create( volume_id, container=container_name, name=name, description=description) return VolumeBackup(backup) def volume_backup_delete(request, backup_id): return cinderclient(request).backups.delete(backup_id) def volume_backup_restore(request, backup_id, volume_id): return cinderclient(request).restores.restore(backup_id=backup_id, volume_id=volume_id) def tenant_quota_get(request, tenant_id): c_client = cinderclient(request) if c_client is None: return base.QuotaSet() return base.QuotaSet(c_client.quotas.get(tenant_id)) def tenant_quota_update(request, tenant_id, **kwargs): return cinderclient(request).quotas.update(tenant_id, **kwargs) def default_quota_get(request, tenant_id): return base.QuotaSet(cinderclient(request).quotas.defaults(tenant_id)) def volume_type_list(request): return cinderclient(request).volume_types.list() def volume_type_create(request, name): return cinderclient(request).volume_types.create(name) def volume_type_delete(request, volume_type_id): return cinderclient(request).volume_types.delete(volume_type_id) def volume_type_get(request, volume_type_id): return cinderclient(request).volume_types.get(volume_type_id) def volume_type_extra_get(request, type_id, raw=False): vol_type = volume_type_get(request, type_id) extras = vol_type.get_keys() if raw: return extras return [VolTypeExtraSpec(type_id, key, value) for key, value in extras.items()] def volume_type_extra_set(request, type_id, metadata): vol_type = volume_type_get(request, type_id) if not metadata: return None return vol_type.set_keys(metadata) def volume_type_extra_delete(request, type_id, keys): vol_type = volume_type_get(request, type_id) return vol_type.unset_keys([keys]) @memoized def tenant_absolute_limits(request): limits = cinderclient(request).limits.g
ght = ssd_config.negative_class_weight anchor_generator = anchor_generator_builder.build( ssd_config.anchor_generator) if feature_extractor.is_keras_model: ssd_box_predictor = box_predictor_builder.build_keras( conv_hyperparams_fn=hyperparams_builder.KerasLayerHyperparams, freeze_batchnorm=ssd_config.freeze_batchnorm, inplace_batchnorm_update=False, num_predictions_per_location_list=anchor_generator .num_anchors_per_location(), box_predictor_config=ssd_config.box_predictor, is_training=is_training, num_classes=num_classes, add_background_class=ssd_config.add_background_class) else: ssd_box_predictor = box_predictor_builder.build( hyperparams_builder.build, ssd_config.box_predictor, is_training, num_classes, ssd_config.add_background_class) image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer) non_max_suppression_fn, score_conversion_fn = post_processing_builder.build( ssd_config.post_processing) (classification_loss, localization_loss, classification_weight, localization_weight, hard_example_miner, random_example_sampler, expected_loss_weights_fn) = losses_builder.build(ssd_config.loss) normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize equalization_loss_config = ops.EqualizationLossConfig( weight=ssd_config.loss.equalization_loss.weight, exclude_prefixes=ssd_config.loss.equalization_loss.exclude_prefixes) target_assigner_instance = target_assigner.TargetAssigner( region_similarity_calculator, matcher, box_coder, negative_class_weight=negative_class_weight) ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch kwargs = {} return ssd_meta_arch_fn( is_training=is_training, anchor_generator=anchor_generator, box_predictor=ssd_box_predictor, box_coder=box_coder, feature_extractor=feature_extractor, encode_background_as_zeros=encode_background_as_zeros, image_resizer_fn=image_resizer_fn, non_max_suppression_fn=non_max_suppression_fn, score_conversion_fn=score_conversion_fn, classification_loss=classification_loss, localization_loss=localization_loss, classification_loss_weight=classification_weight, localization_loss_weight=localization_weight, normalize_loss_by_num_matches=normalize_loss_by_num_matches, hard_example_miner=hard_example_miner, target_assigner_instance=target_assigner_instance, add_summaries=add_summaries, normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize, freeze_batchnorm=ssd_config.freeze_batchnorm, inplace_batchnorm_update=ssd_config.inplace_batchnorm_update, add_background_class=ssd_config.add_background_class, explicit_background_class=ssd_config.explicit_background_class, random_example_sampler=random_example_sampler, expected_loss_weights_fn=expected_loss_weights_fn, use_confidences_as_targets=ssd_config.use_confidences_as_targets, implicit_example_weight=ssd_config.implicit_example_weight, equalization_loss_config=equalization_loss_config, **kwargs) def _build_faster_rcnn_feature_extractor( feature_extractor_config, is_training, reuse_weights=None, inplace_batchnorm_update=False): """Builds a faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config. Args: feature_extractor_config: A FasterRcnnFeatureExtractor proto config from faster_rcnn.proto. is_training: True if this feature extractor is being built for training. reuse_weights: if the feature extractor should reuse weights. inplace_batchnorm_update: Whether to update batch_norm inplace during training. This is required for batch norm to work correctly on TPUs. 
When this is false, user must add a control dependency on tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch norm moving average parameters. Returns: faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config. Raises: ValueError: On invalid feature extractor type. """ if inplace_batchnorm_update: raise ValueError('inplace batchnorm updates not supported.') feature_type = feature_extractor_config.type first_stage_features_stride = ( feature_extractor_config.first_stage_features_stride) batch_norm_trainable = feature_extractor_config.batch_norm_trainable if feature_type not in FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP: raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format( feature_type)) feature_extractor_class = FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP[ feature_type] return feature_extractor_class( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights) def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries): """Builds a Faster R-CNN or R-FCN detection model based on the model config. Builds R-FCN model if the second_stage_box_predictor in the config is of type `rfcn_box_predictor` else builds a Faster R-CNN model. Args: frcnn_config: A faster_rcnn.proto object containing the config for the desired FasterRCNNMetaArch or RFCNMetaArch. is_training: True if t
his model is being
built for training purposes. add_summaries: Whether to add tf summaries in the model. Returns: FasterRCNNMetaArch based on the config. Raises: ValueError: If frcnn_config.type is not recognized (i.e. not registered in model_class_map). """ num_classes = frcnn_config.num_classes image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer) feature_extractor = _build_faster_rcnn_feature_extractor( frcnn_config.feature_extractor, is_training, inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update) number_of_stages = frcnn_config.number_of_stages first_stage_anchor_generator = anchor_generator_builder.build( frcnn_config.first_stage_anchor_generator) first_stage_target_assigner = target_assigner.create_target_assigner( 'FasterRCNN', 'proposal', use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher) first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build( frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training) first_stage_box_predictor_kernel_size = ( frcnn_config.first_stage_box_predictor_kernel_size) first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size use_static_shapes = frcnn_config.use_static_shapes and ( frcnn_config.use_static_shapes_for_eval or is_training) first_stage_sampler = sampler.BalancedPositiveNegativeSampler( positive_fraction=frcnn_config.first_stage_positive_balance_fraction, is_static=(frcnn_config.use_static_balanced_label_sampler and use_static_shapes)) first_stage_max_proposals = frcnn_config.first_stage_max_proposals if (frcnn_config.first_stage_nms_iou_threshold < 0 or frcnn_config.first_stage_nms_iou_threshold > 1.0): raise ValueError('iou_threshold not in [0, 1.0].') if (is_training and frcnn_config.second_stage_batch_size > first_stage_max_proposals): raise ValueError('second_stage_batch_size should be no greater than ' 'first_stage_max_proposals.') first_stage_non_max_suppression_fn = functools.partial( post_processing.batch_multiclass_non_max_suppression, score_thresh=frcnn_config.first_stage_nms_score_threshold, iou_thresh=frcnn_config.first_stage_nms_iou_threshold, max_size_per_class=frcnn_config.first_stage_max_proposals, max_total_size=frcnn_config.first_stage_max_proposals, use_static_shapes=use_static_shapes) first_stage_loc_loss_weight = ( frcnn_config.first_stage_localization_loss_weight) first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2011 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not u
se this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, e
ither express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.contrib import messages from django import shortcuts import openstackx import openstack class User(object): def __init__(self, token=None, user=None, tenant_id=None, admin=None, service_catalog=None, tenant_name=None): self.token = token self.username = user self.tenant_id = tenant_id self.tenant_name = tenant_name self.admin = admin self.service_catalog = service_catalog def is_authenticated(self): # TODO: deal with token expiration return self.token def is_admin(self): return self.admin def get_user_from_request(request): if 'user' not in request.session: return User() return User(token=request.session['token'], user=request.session['user'], tenant_id=request.session['tenant_id'], tenant_name=request.session['tenant'], admin=request.session['admin'], service_catalog=request.session['serviceCatalog']) class LazyUser(object): def __get__(self, request, obj_type=None): if not hasattr(request, '_cached_user'): request._cached_user = get_user_from_request(request) return request._cached_user class AuthenticationMiddleware(object): def process_request(self, request): request.__class__.user = LazyUser() def process_exception(self, request, exception): if type(exception) in [openstack.compute.exceptions.Forbidden, openstackx.api.exceptions.Forbidden]: # flush other error messages, which are collateral damage # when our token expires for message in messages.get_messages(request): pass messages.error(request, 'Your token has expired.\ Please log in again') return shortcuts.redirect('/auth/logout')
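# The LazyUser descriptor above is one instance of a general lazy-attribute
# caching pattern; a standalone sketch (names are illustrative, not Horizon's):
class LazyAttribute(object):
    """Descriptor that computes a value on first access, then caches it."""

    def __init__(self, factory, cache_name):
        self.factory = factory
        self.cache_name = cache_name

    def __get__(self, obj, obj_type=None):
        if not hasattr(obj, self.cache_name):
            setattr(obj, self.cache_name, self.factory(obj))
        return getattr(obj, self.cache_name)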
e'] for o in orders]
        existing_order_ids = sale_obj.search(cr, uid, [('name', 'in', submitted_references)], context=context)
        existing_orders = sale_obj.read(cr, uid, existing_order_ids, ['name'], context=context)
        existing_references = set([o['name'] for o in existing_orders])
        orders_to_save = [o for o in orders if o['sale_code'] not in existing_references]
        return orders_to_save

    def search_import_orders(self, cr, uid, ids, status='WAIT_SELLER_SEND_GOODS', date_start=None, date_end=None, context=None):
        """ Search orders and import them in batch """
        port = 80
        shop = self.browse(cr, uid, ids[0], context=context)
        setDefaultAppInfo(shop.appkey, shop.appsecret)
        req = TradesSoldIncrementGetRequest(shop.apiurl, port)
        req.fields = "seller_nick,buyer_nick,created,sid,tid,status,buyer_memo,seller_memo,payment,discount_fee,adjust_fee,post_fee,total_fee, pay_time,end_time,modified,received_payment,price,alipay_id,receiver_name,receiver_state,receiver_city,receiver_district,receiver_address, receiver_zip,receiver_mobile,receiver_phone,orders.price,orders.num,orders.iid,orders.num_iid,orders.sku_id,orders.refund_status,orders.status,orders.oid, orders.total_fee,orders.payment,orders.discount_fee,orders.adjust_fee,orders.sku_properties_name,orders.outer_iid,orders.outer_sku_id"
        req.status = status
        if date_start:
            date_start = (datetime.strptime(str(date_start), '%Y-%m-%d %H:%M:%S',) + timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')
            req.start_modified = date_start
        if date_end:
            date_end = (datetime.strptime(str(date_end), '%Y-%m-%d %H:%M:%S',) + timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')
            req.end_modified = date_end
        res = []
        req.page_no = 1
        req.page_size = 100
        # The Taobao sandbox environment does not support the use_has_next parameter
        # req.use_has_next = True
        # has_next = True
        # while has_next:
        #     resp = req.getResponse(shop.sessionkey)
        #     trades = resp.get('trades_sold_get_response').get('trades', False)
        #     if trades:
        #         res += trades.get('trade')
        #     req.page_no += 1
        #     has_next = resp.get('trades_sold_get_response').get('has_next', False)
        total_get = 0
        total_results = 100
        while total_get < total_results:
            resp = req.getResponse(shop.sessionkey)
            trades = resp.get('trades_sold_increment_get_response').get('trades', False)
            total_results = resp.get('trades_sold_increment_get_response').get('total_results')
            if total_results > 0:
                res += trades.get('trade')
            total_get += req.page_size
            req.page_no = req.page_no + 1
        # Timestamps come back in UTC+8, so subtract 8 hours,
        # and prefix order numbers with the shop code
        order_ids = []
        for trade in res:
            trade['created'] = (datetime.strptime(trade['created'], '%Y-%m-%d %H:%M:%S',) - timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')
            trade['pay_time'] = (datetime.strptime(trade['pay_time'], '%Y-%m-%d %H:%M:%S',) - timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')
            trade['sale_code'] = '%s_%s' % (shop.code, trade['tid'])
        orders = self.remove_duplicate_orders(cr, uid, res, context=context)
        for trade in orders:
            try:
                # Create the partner and address
                partner_id, address_id = self.create_partner_address(cr, uid, shop.code, trade, context=context)
                # Create the order and its detail lines
                order_id = self.create_order(cr, uid, shop, partner_id, address_id, trade, context=context)
                order_ids.append(order_id)
            except Exception, e:
                # Log the synchronization error
                syncerr = u"Shop [%s] order [%s] sync error: %s" % (shop.name, trade['tid'], e)
                self.pool.get('ebiz.syncerr').create(cr, uid, {'name': syncerr, 'shop_id': shop.id, 'type': 'order', 'state': 'draft'}, context=context)
                continue
        return order_ids

    def _order_offline_send(self, cr, uid, shop, tid, logistics_company, logistics_id, context=None):
        setDefaultAppInfo(shop.appkey, shop.appsecret)
        req = LogisticsOfflineSendRequest(shop.apiurl, 80)
        req.tid = tid
        req.out_sid = logistics_id
        req.company_code = logistics_company
        try:
            resp = req.getResponse(shop.sessionkey)
        except Exception, e:
            # Log the synchronization error
            syncerr = u"Shop [%s] order [%s] logistics shipment sync error: %s" % (shop.name, tid, e)
            self.pool.get('ebiz.syncerr').create(cr, uid, {'name': syncerr, 'shop_id': shop.id, 'type': 'delivery', 'state': 'draft'}, context=context)
            return False
        return True

    def orders_offline_send(self, cr, uid, ids, order_ids, context=None):
        """ Push order shipment information to the e-commerce platform """
        order_obj = self.pool.get('sale.order')
        picking_obj = self.pool.get('stock.picking')
        for order in order_obj.browse(cr, uid, order_ids, context=context):
            if not order.shop_id or not order.picking_ids or not order.shipped:
                continue
            shop = order.shop_id
            picking = order.picking_ids[0]
            delivery_code = picking.carrier_tracking_ref
            partner_ref = picking.carrier_id and picking.carrier_id.partner_id.ref
            if not delivery_code or not partner_ref:
                syncerr = u"Shop [%s] order [%s] logistics shipment sync error: the delivery order has no tracking number, or no carrier, or the carrier's logistics company (Partner) has no 'logistics company code' (the Ref field) filled in!" % (shop.name, order.name)
                self.pool.get('ebiz.syncerr').create(cr, uid, {'name': syncerr, 'shop_id': shop.id, 'type': 'delivery', 'state': 'draft'}, context=context)
                continue
            # tid format: <shop prefix>_<platform order number>; merged orders
            # use <shop prefix>mg_<sequence number>
            i = order.name.find('_')
            if i <= 0:
                continue
            tid = order.name[i + 1:]
            if order.name[:i].endswith('mg'):
                # Handle merged orders
                if not order.origin:
                    syncerr = u"Shop [%s] order [%s] logistics shipment sync error: the merged order's source document has no original order numbers!" % (shop.name, order.name)
                    self.pool.get('ebiz.syncerr').create(cr, uid, {'name': syncerr, 'shop_id': shop.id, 'type': 'delivery', 'state': 'draft'}, context=context)
                    continue
                tids = order.origin.split(',')
                for t in tids:
                    i = t.find('_')
                    if i <= 0:
                        continue
                    tid = t[i + 1:]
                    self._order_offline_send(cr, uid, shop, tid, partner_ref, delivery_code, context=context)
            else:
                self._order_offline_send(cr, uid, shop, tid, partner_ref, delivery_code, context=context)
        return True

    def _order_signed(self, cr, uid, shop, order, context=None):
        # tid format: <shop prefix>_<platform order number>; merged orders
        # use <shop prefix>mg_<sequence number>
        signed = True
        setDefaultAppInfo(shop.appkey, shop.appsecret)
        req = TradeGetRequest(shop.apiurl)
        req.fields = "tid, modified, consign_time, status"
        i = order.name.find('_')
        if i <= 0:
            signed = False
        tid = order.name[i + 1:]
        if order.name[:i].endswith('mg'):
            # Handle merged orders
            if not order.origin:
                syncerr = u"Shop [%s] order [%s] buyer sign-off sync error: the merged order's source document has no original order numbers!" % (shop.name, order.name)
                self.pool.get('ebiz.syncerr').create(cr, uid, {'name': syncerr, 'shop_id': shop.id, 'type': 'invoice', 'state': 'draft'}, context=context)
                signed = False
            tids = order.origin.split(',')
            for t in tids:
                i = t.find('_')
                if i <= 0:
                    signed = False
                    continue
                tid = t[i + 1:]
                req.tid = long(tid)
                resp = req.getResponse(shop.sessionkey)
                trade = resp.get('trade_get_response') and resp.get('trade_get_response').get('trade')
                if not trade or trade['status'] != 'TRADE_FINISHED':
                    signed = False
                    continue
        else:
            req.tid = long(tid)
            resp = req.getResponse(shop.sessionkey)
"""JSON implementation of pystorm serializer""" from __future__ import absolute_import, print_function, unicode_literals import io import logging import simplejson as json from six import PY2 from ..exceptions import StormWentAwayError from .serializer import Serializer log = logging.getLogger(__name__) class JSONSerializer(Serializer): def __init__(self, input_stream, output_stream, reader_lock, writer_lock): super(JSONSerializer, self).__init__( input_stream, output_stream, reader_lock, writer_lock ) self.input_stream = self._wrap_stream(input_stream) self.output_stream = self._wrap_stream(output_stream) @staticmethod def _wrap_stream(stream): """Returns a TextIOWrapper around the given stream that handles UTF-8 encoding/decoding. """ if hasattr(stream, "buffer"): return io.TextIOWrapper(stream.buffer, encoding="utf-8") elif hasattr(stream, "readable"): return io.TextIOWrapper(stream, encoding="utf-8") # Python 2.x stdin and stdout are just files
else: return io.open(stream.fileno(), mode=stream.mode, encoding="utf-8") def read_message(self): """The Storm multilang protocol consists of JSON messages followed by a newline and "end\n". All of Storm's messages (for either bolts or spouts) should be of the form:: '<command or task_id form prior emit>\\nend\\n' Command example, an incoming Tuple to a bolt:: '{ "id": "-6955786
537413359385", "comp": "1", "stream": "1", "task": 9, "tuple": ["snow white and the seven dwarfs", "field2", 3]}\\nend\\n' Command example for a spout to emit its next Tuple:: '{"command": "next"}\\nend\\n' Example, the task IDs a prior emit was sent to:: '[12, 22, 24]\\nend\\n' The edge case of where we read ``''`` from ``input_stream`` indicating EOF, usually means that communication with the supervisor has been severed. """ msg = "" num_blank_lines = 0 while True: # readline will return trailing \n so that output is unambigious, we # should only have line == '' if we're at EOF with self._reader_lock: line = self.input_stream.readline() if line == "end\n": break elif line == "": raise StormWentAwayError() elif line == "\n": num_blank_lines += 1 if num_blank_lines % 1000 == 0: log.warn( "While trying to read a command or pending task " "ID, Storm has instead sent %s '\\n' messages.", num_blank_lines, ) continue msg = "{}{}\n".format(msg, line[0:-1]) try: return json.loads(msg) except Exception: log.error("JSON decode error for message: %r", msg, exc_info=True) raise def serialize_dict(self, msg_dict): """Serialize to JSON a message dictionary.""" serialized = json.dumps(msg_dict, namedtuple_as_object=False) if PY2: serialized = serialized.decode("utf-8") serialized = "{}\nend\n".format(serialized) return serialized
import tempf
ile import shutil import sys from unittest impor
t mock import pytest from tools.wpt import run from tools import localpaths # noqa: F401 from wptrunner.browsers import product_list @pytest.fixture(scope="module") def venv(): from tools.wpt import virtualenv class Virtualenv(virtualenv.Virtualenv): def __init__(self): self.path = tempfile.mkdtemp() self.skip_virtualenv_setup = False def create(self): return def activate(self): return def start(self): return def install(self, *requirements): return def install_requirements(self, requirements_path): return venv = Virtualenv() yield venv shutil.rmtree(venv.path) @pytest.fixture(scope="module") def logger(): run.setup_logging({}) @pytest.mark.parametrize("platform", ["Windows", "Linux", "Darwin"]) def test_check_environ_fail(platform): m_open = mock.mock_open(read_data=b"") with mock.patch.object(run, "open", m_open): with mock.patch.object(run.platform, "uname", return_value=(platform, "", "", "", "", "")): with pytest.raises(run.WptrunError) as excinfo: run.check_environ("foo") assert "wpt make-hosts-file" in str(excinfo.value) @pytest.mark.parametrize("product", product_list) def test_setup_wptrunner(venv, logger, product): if product == "firefox_android": pytest.skip("Android emulator doesn't work on docker") parser = run.create_parser() kwargs = vars(parser.parse_args(["--channel=nightly", product])) kwargs["prompt"] = False # Hack to get a real existing path kwargs["binary"] = sys.argv[0] kwargs["webdriver_binary"] = sys.argv[0] if kwargs["product"] == "sauce": kwargs["sauce_browser"] = "firefox" kwargs["sauce_version"] = "63" run.setup_wptrunner(venv, **kwargs)
import logging import sqlite3 from pyfcm import FCMNotification def insert_token(token): # initialise both handles so the finally block cannot raise NameError if connect() fails con = None cur = None try: con = sqlite3.connect('fcm.db') cur = con.cursor() cur.execute('CREATE TABLE IF NOT EXISTS tokens(token TEXT)') cur.execute('INSERT INTO tokens VALUES (?)', (token, )) con.commit() finally: if cur: cur.close() if con: con.close
() def notify_all(message_title=None, message_body=None): con = sqlite3.connect('fcm.db') con.row_factory = lambda cursor, row: row[0] cur = con.cursor() cur.execute('CREATE TABLE IF NOT EXISTS tokens(token TEXT)') cur.execute('SELECT * FROM tokens') registration_ids = [row for row in cur.fetchall()] # release the database before the (potentially slow) network call cur.close() con.close() if len(registration_ids) > 0: noti = FCMNotification('API-KEY') result = noti.notify_multiple_devices(registration_ids=registration_ids, message_title=message_title, message_body=message_body) return result
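# Illustrative usage (the token below is made up, and 'API-KEY' above is a
# placeholder: a valid FCM server key is required for notify_all to succeed):
if __name__ == '__main__':
    insert_token('example-registration-token')
    print(notify_all(message_title='Hello', message_body='Broadcast test'))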
# -*- encoding: utf-8 -*- ############################################################################## # # Copyright (C) 2015 Compassion CH (http://www.compassion.ch) # Releasi
ng children from poverty in Jesus' name # @author: David Coninckx <david@coninckx.com> # # The licence is in the file __openerp__.py # ############################################################################## from . i
mport contracts from . import project_compassion from . import ir_ui_menu
# Copyright 2013,2014 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Dunya # # Dunya is free software: you can redistribute it and/or modify it under the # terms of the GNU Affero General Public License as published by the Free Software # Foundation (FSF), either version 3 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see http://www.gnu.org/licenses/ from django.conf.urls import url from rest_framework.urlpatterns import format_suffix_patterns import andalusian.api mbid_match = r'(?P<mbid>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})' uuid_match = r'(?P<uuid>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})' urlpatterns = [ url(r'^orchestra$', andalusian.api.OrchestraList.as_view(), name='api-andalusian-orchestra-list'), url(r'^orchestra/%s$' % mbid_match, andalusian.api.OrchestraDetail.as_view(), name='api-andalusian-orchestra-detail'), url(r'^artist$', andalusian.api.ArtistList.as_view(), name='api-andalusian-artist-list'), url(r'^artist/%s$' % mbid_match, andalusian.api.ArtistDetail.as_view(), name='api-andalusian-artist-detail'), url(r'^album$', andalusian.api.AlbumList.as_view(), name='api-andalusian-album-list'), url(r'^album/%s$' % mbid_match, andalusian.api.AlbumDetail.as_view(), name='api-andalusian-album-detail'), url(r'^work$', andalusian.api.WorkList.as_view(), name='api-andalusian-work-list'), url(r'^work/%s$' % mbid_match, andalusian.api.WorkDetail.as_view(), name='api-andalusian-work-detail'), url(r'^genre$', andalusian.api.GenreList.as_view(), name='api-andalusian-genre-list'), url(r'^genre/(?P<pk>\d+)$', andalusian.api.GenreDetail.as_view(), name='api-andalusian-genre-detail'), url(r'^recording$', andalusian.api.RecordingList.as_view(), name='api-andalusian-recording-list'), url(r'^recording/%s$' % mbid_match, andalusian.api.RecordingDetail.as_view(), name='api-andalusian-recording-detail'), url(r'^recording/%s/lyric$' % mbid_match, andalusian.api.LyricDetail.as_view(), name='api-andalusian-lyric-detail'), url(r'^instrument$', andalusian.api.InstrumentList.as_view(), name='api-andalusian-instrument-list'), url(r'^instrument/%s$' % mbid_match, andalusian.api.InstrumentDetail.as_view
(), name='api-andalusian-instrument-detail'), url(r'^tab$', andalus
ian.api.TabList.as_view(), name='api-andalusian-tab-list'), url(r'^tab/%s$' % uuid_match, andalusian.api.TabDetail.as_view(), name='api-andalusian-tab-detail'), url(r'^mizan$', andalusian.api.MizanList.as_view(), name='api-andalusian-mizan-list'), url(r'^mizan/%s$' % uuid_match, andalusian.api.MizanDetail.as_view(), name='api-andalusian-mizan-detail'), url(r'^nawba$', andalusian.api.NawbaList.as_view(), name='api-andalusian-nawba-list'), url(r'^nawba/%s$' % uuid_match, andalusian.api.NawbaDetail.as_view(), name='api-andalusian-nawba-detail'), url(r'^form$', andalusian.api.FormList.as_view(), name='api-andalusian-form-list'), url(r'^form/%s$' % uuid_match, andalusian.api.FormDetail.as_view(), name='api-andalusian-form-detail'), url(r'^sanaa$', andalusian.api.SanaaList.as_view(), name='api-andalusian-sanaa-list'), url(r'^sanaa/(?P<pk>\d+)$', andalusian.api.SanaaDetail.as_view(), name='api-andalusian-sanaa-detail'), url(r'^poem$', andalusian.api.PoemList.as_view(), name='api-andalusian-poem-list'), url(r'^poem/(?P<pk>\d+)$', andalusian.api.PoemDetail.as_view(), name='api-andalusian-poem-detail'), ] urlpatterns = format_suffix_patterns(urlpatterns, allowed=['json', 'api'])
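# Illustrative note (not from Dunya): because every pattern above is named,
# views can build links with reverse(); the mbid below is a made-up example.
# from django.urls import reverse  # django.core.urlresolvers on older Django
# reverse('api-andalusian-recording-detail',
#         kwargs={'mbid': '01234567-89ab-cdef-0123-456789abcdef'})
# -> the recording URL, relative to wherever this urlconf is included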
import _plotly_utils.basevalidators class FontValidator(_plotly_utils.basevalidators.CompoundValidator): def __init__(self, plotly_name="font", parent_name="box.hoverlabel", **kwargs): super(FontValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, data_class_str=kwargs.pop("data_class_str", "Font"), data_docs=kwargs.pop( "data_docs", """ color colorsrc Sets the source reference on Chart Studio Cloud for color . family HTML font family - the typeface that will be applie
d by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multip
le font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". familysrc Sets the source reference on Chart Studio Cloud for family . size sizesrc Sets the source reference on Chart Studio Cloud for size . """, ), **kwargs )
'''OpenGL extension OES.read_format This module customises the behaviour of the OpenGL.raw.GLES1.OES.read_format to provide a more Python-friendly API The official definition of
this extension is available here: http://www.opengl.org/registry/specs/OES/read_format.txt ''' from OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GLES1 import _types, _glgets from OpenGL.raw.GLES1.OES.read_format import * from OpenGL.raw.GLES1.
OES.read_format import _EXTENSION_NAME def glInitReadFormatOES(): '''Return boolean indicating whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ### END AUTOGENERATED SECTION
import logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) from . import * # Signals from .signal import (Signal, EpicsSignal, EpicsSignalRO, DerivedSignal) # Po
sitioners from .positioner import (PositionerBase, SoftPositioner) from .epics_motor import EpicsMotor from .pv_positioner import (PVPositioner, PVPositionerPC) from .pseudopos import (PseudoPositioner, PseudoSingle) # Devices from .scaler import EpicsScaler from .device import (Device, Component, FormattedComponent, Dyna
micDeviceComponent) from .status import StatusBase from .mca import EpicsMCA, EpicsDXP # Areadetector-related from .areadetector import * from ._version import get_versions from .commands import (mov, movr, set_pos, wh_pos, set_lm, log_pos, log_pos_diff, log_pos_mov) from .utils.startup import setup as setup_ophyd __version__ = get_versions()['version'] del get_versions
t(0,0,0,0) # load the images mur = pygame.image.load(SOURCE_IMG + 'mur.jpg').convert() mur50 = pygame.image.load(SOURCE_IMG + 'mur50.jpg').convert() caisse = pygame.image.load(SOURCE_IMG + 'caisse.jpg').convert() caisse50 = pygame.image.load(SOURCE_IMG + 'caisse50.jpg').convert() caisse_ok = pygame.image.load(SOURCE_IMG + 'caisse_ok.jpg').convert() caisse_ok50 = pygame.image.load(SOURCE_IMG + 'caisse_ok50.jpg').convert() objectif = pygame.image.load(SOURCE_IMG + 'objectif.png').convert_alpha() objectif50 = pygame.image.load(SOURCE_IMG + 'objectif50.png').convert_alpha() mario = pygame.image.load(SOURCE_IMG + 'mario_bas.gif').convert_alpha() mario50 = pygame.image.load(SOURCE_IMG + 'mario_bas50.gif').convert_alpha() quadrillage = pygame.image.load(SOURCE_IMG + 'quadrillage.png').convert_alpha() # default object objet = MUR # load map chargeCarte(carte, levelNumber) # search mario for i in range(NB_BLOCS_LARGEUR): for j in range(NB_BLOC
S_HAUTEUR): if carte[i][j] ==MARIO: alsoMario += 1 # white Bar whiteBar = pygame.Surface((screen.get_width(), 60), screen.get_flags()) whiteBar
.fill(WHITE) # font police = pygame.font.Font('angelina.ttf', 20) # define the default source file pathFile = printLang(lang) # 'fr' or 'en' sourceFile = SOURCE_FILE + pathFile + '/edit.lvl' # './files/' + 'fr' or 'en' + '/edit.lvl' # H: Help Level: Saved ESC: Exit or H: Aide Niveau: Sauve ESC: Quitter # number of lines lignes = compteLignes(sourceFile) tableau = [Text() for i in range(lignes)] # initialise the table in fr or en initialiseEditTable(sourceFile,lignes,tableau) levelWord = tableau[1].data tableau[1].data = levelWord + ' ' + str(levelNumber) tableau[1].partie = police.render(tableau[1].data, True, BLUE) # event continuer = True while(continuer): # check if there is mario on the map if not initialize the boolean if(objet == MARIO and alsoMario != 0): for i in range(NB_BLOCS_LARGEUR): for j in range(NB_BLOCS_HAUTEUR): if carte[i][j] == MARIO: restMario += 1 if restMario == 0: alsoMario = 0 restMario = 0 for event in pygame.event.get(): if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): continuer = False # leave the loop if event.type == KEYDOWN: if event.key == K_ESCAPE: continuer = False elif event.key == K_1 or event.key == K_KP1: objet = MUR elif event.key == K_2 or event.key == K_KP2: objet = CAISSE elif event.key == K_3 or event.key == K_KP3: objet = OBJECTIF elif event.key == K_4 or event.key == K_KP4: objet = MARIO elif event.key == K_5 or event.key == K_KP5: objet = CAISSE_OK elif event.key == K_h and lang == EN: aide(screen,mode,lang,langu) elif event.key == K_a and lang == FR: aide(screen,mode,lang,langu) elif event.key == K_s: saved = True sauveCarte(carte,levelNumber) elif event.key == K_PAGEUP: if levelNumber <= levelFinal: levelNumber += 1 if levelNumber == levelFinal + 1: carte = [[MUR for lgn in range(NB_BLOCS_HAUTEUR)] for col in range(NB_BLOCS_LARGEUR)] tableau[1].data = levelWord + ' ' + str(levelNumber) tableau[1].partie = police.render(tableau[1].data, True, BLUE) break else: # add level number to tableau[1] tableau[1].data = levelWord + ' ' + str(levelNumber) tableau[1].partie = police.render(tableau[1].data, True, BLUE) chargeCarte(carte, levelNumber) elif event.key == K_PAGEDOWN: if levelNumber > 1: levelNumber -= 1 # add level number to tableau[1] tableau[1].data = levelWord + ' ' + str(levelNumber) tableau[1].partie = police.render(tableau[1].data, True, BLUE) chargeCarte(carte, levelNumber) if event.type == MOUSEBUTTONDOWN: motionY, motionX = event.pos if motionX <= 408 and motionY <= 408: if event.button == RIGHT: clicDroitEnCours = True carte[motionX // TAILLE_BLOC][motionY // TAILLE_BLOC] = VIDE if event.button == LEFT: clicGaucheEnCours = True if objet == MARIO and alsoMario != 0: # mario can be put only once.
continue else: carte[motionX // TAILLE_BLOC][motionY // TAILLE_BLOC] = objet if objet == MARIO: alsoMario += 1 if event.type == MOUSEBUTTONUP: if event.button == LEFT: clicGaucheEnCours = False elif event.button == RIGHT: clicDroitEnCours = False if event.type == MOUSEMOTION: motionX, motionY = event.pos exemplePos.x = motionX + 20 exemplePos.y = motionY + 20 # screen screen.fill(BLACK) # all-black screen # draw the map for lgn in range (NB_BLOCS_HAUTEUR): for col in range (NB_BLOCS_LARGEUR): objectPos.x = col * TAILLE_BLOC objectPos.y = lgn * TAILLE_BLOC if carte[lgn][col] == MUR: screen.blit(mur, objectPos) elif carte[lgn][col] == CAISSE: screen.blit(caisse,objectPos) elif carte[lgn][col] == CAISSE_OK: screen.blit(caisse_ok,objectPos) elif carte[lgn][col] == OBJECTIF: screen.blit(objectif,objectPos) elif carte[lgn][col] == MARIO: screen.blit(mario, objectPos) screen.blit(quadrillage, (0, 0)) # whiteBar objectPos.x = 0 objectPos.y = screen.get_height() - whiteBar.get_height() screen.blit(whiteBar,objectPos) # text objectPos.x = 10 objectPos.y = (screen.get_height() - whiteBar.get_height()) + 5 screen.blit(tableau[0].partie,objectPos) objectPos.x = 100 screen.blit(tableau[1].partie,objectPos) if saved: objectPos.x = 200 screen.blit(tableau[2].partie,objectPos) objectPos.x = (screen.get_width() - tableau[3].partie.get_width()) - 10 screen.blit(tableau[3].partie,objectPos) # blit the preview sprite if objet == MUR: screen.blit(mur50, exemplePos) elif objet == CAISSE: screen.blit(caisse50, exemplePos) elif objet == CAISSE_OK: screen.blit(caisse_ok50, exemplePos) elif objet == OBJECTIF: screen.blit(objectif50, exemplePos) elif objet == MARIO: screen.blit(mario50, exemplePos) # refresh the screen display --------------------- pygame.display.flip() if saved: pygame.time.delay(2000) objectPos.x = 10 objectPos.y = (screen.get_height() - whiteBar.get_height()) + 5 screen.blit(tableau[0].partie, objectPos) objectPos.x = 100 screen.blit(tableau[1].partie, objectPos) objectPos.x = (screen.get_width()
# -*- coding: utf-8 -*- from distu
tils.core import setup setup( name='popy', description='Parser for GNU Po files', long_description=open('README.rst').read(), version='0.3.0',
packages=['popy'], author='Murat Aydos', author_email='murataydos@yandex.com', url='https://github.com/murataydos/popy', license='MIT', zip_safe=False, include_package_data=True )
#!/usr/bin/env python import jumeg import os.path raw_fname = "109925_CAU01A_100715_0842_2_c,rfDC-raw.fif" if not os.path.isfile(raw_fname): print "Please find the test file at the below location on the meg_store2 network drive - \ cp /data/meg_store2/fif_data/jumeg_test_data/109925_CAU01A_100715_0842_2_c,rfDC-raw.fif ." # Function to check and explain the file naming standards #jumeg.jumeg_utils.check_jumeg_standards(raw_fname) # Function to apply noise reducer jumeg.jumeg_noise_reducer.noise_reducer(raw_fname, verbose=True) # Filter functions #jumeg.jumeg_preprocessing.apply_filter(raw_fname) fclean = raw_fname[:raw_fname.rfind('-raw.fif')] + ',bp1-45Hz-raw.fif' # Evoked functions #jumeg.jumeg_preprocessing.apply_average(fclean) # ICA functions #jumeg.jumeg_preprocessing.apply_ica(fclean) fica_name = fclean[:fclean.rfind('-raw.fif')] + '-ica.fif' # Perform ECG/EOG rejection using ICA #jumeg.jumeg_preprocessing.apply_ica_cleaning(fica_name) #jumeg.jumeg_preprocessing.apply_ica_cleaning(fica_name, unfiltered=True) # OCARTA cleaning from jumeg.decompose import ocarta ocarta_obj
= ocarta.JuMEG_ocarta() ocarta_obj.fit(fclean, unfiltered=False, verbose=True) # CTPS functions #jumeg.jumeg_preprocessing.apply_ctps(fica_nam
e) fctps_name = '109925_CAU01A_100715_0842_2_c,rfDC,bp1-45Hz,ctps-trigger.npy' #jumeg.jumeg_preprocessing.apply_ctps_select_ic(fctps_name) # Function recompose brain response components only fname_ctps_ics = '109925_CAU01A_100715_0842_2_c,rfDC,bp1-45Hz,ctps-trigger-ic_selection.txt' #jumeg.jumeg_preprocessing.apply_ica_select_brain_response(fname_ctps_ics) # Function to process empty file empty_fname = '109925_CAU01A_100715_0844_2_c,rfDC-empty.fif' #jumeg.jumeg_preprocessing.apply_create_noise_covariance(empty_fname, verbose=True)
from django.views.generic import DetailView, ListView from django.views.generic.edit import CreateView, UpdateView from .mixins import GenerateActionMixin class DetailViewWithActionStream(GenerateActionMixin, DetailView): def dispatch(self, request, *args, **kwargs): if not self.request.user.is_anonymous(): self.generate_action() return super(DetailViewWithActionStream, self).dispatch(request, *args, **kwargs) def get_action_actor(self, *args, **kwargs): return self.request.user def get_action_verb(self, *args, **kwargs): return 'viewed' def get_action_action_object(self, *args, **kwargs): return self.get_object() class CreateViewWithActionStream(GenerateActionMixin, CreateView): def form_valid(self, form): to_return = super(CreateViewWithActionStream, self).form_valid(form) if not self.request.user.is_anonymous(): self.generate_action() return to_return def get_action_actor(self, *args, **kwargs): return self.request.user def get_action_verb(self, *args, **kwargs): return 'added' def get_action_action_object(self, *args, **kwargs): return self.object class UpdateViewWithActionStream(GenerateActionMixin, UpdateView): def form_val
id(self, form): to_return = super(UpdateViewWithActionStream, self).form_valid(form) if not self.request.user.is_anonymous(): self.generate_action() return to_return def get_action_actor(self, *args, **kwargs): return self.request.user def get_action_verb(self, *
args, **kwargs): return 'updated' def get_action_action_object(self, *args, **kwargs): return self.get_object()
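# Illustrative usage sketch (the Article model and template path are
# hypothetical; GenerateActionMixin.generate_action is assumed to consume the
# three get_action_* hooks defined above):
# class ArticleDetailView(DetailViewWithActionStream):
#     model = Article
#     template_name = 'articles/detail.html'
# Each authenticated GET then records: request.user 'viewed' <article instance>.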
def get_duplicates(self, ids, tolerance, fast=False): dupes_dict = {} dupes_list = [] values = {} for i in ids: values[i] = self.value(i) selection = self.ids_sorted(ids) print('Searching duplicates in %d structures' % len(selection)) for i in range(len(selection) - 1): entry_id = selection[i] value_i = values[entry_id] for j in range(i + 1, len(selection)): entry_jd = selection[j] if fast and entry_jd in dupes_list: continue value_j = values[entry_jd] if abs(value_i - value_j) < self.value_tol: distance = self.distance(entry_id, entry_jd) if distance < tolerance: if ent
ry_id in dupes_dict: dupes_dict[entry_id].append(entry_jd) else: dupes_dict[entry_id] = [entry_jd]
dupes_list.append(entry_jd) return dupes_dict, [x for x in selection if x in dupes_list] def cleaned_from_duplicates(self, ids): selection = self.ids_sorted(ids) duplicates_dict = self.check_duplicates(selection) return [x for x in selection if x not in duplicates_dict.keys()] def diff_values_matrix(self): members = self.members ret = np.zeros((len(members), len(members))) for i in range(len(members)): for j in range(i, len(members)): if self.value(members[i]) is not None and self.value(members[j]) is not None: ret[i, j] = np.abs(self.value(members[i]) - self.value(members[j])) else: ret[i, j] = float('nan') ret[j, i] = ret[i, j] return ret def distance(self, entry_id, entry_jd, rcut=50): ids_pair = [entry_id, entry_jd] ids_pair.sort() distance_entry = self.pcdb.db.distances.find_one({'pair': ids_pair}, {'distance': 1}) self.pcdb.db.distances.create_index([("pair", ASCENDING)]) if distance_entry is None: print('Distance not in DB') fingerprints = {} for entry_ijd in [entry_id, entry_jd]: if self.pcdb.db.fingerprints.find_one({'_id': entry_ijd}) is None: structure = self.get_structure(entry_ijd) analysis = StructureAnalysis(structure, radius=rcut) x, ys = analysis.fp_oganov() fingerprint = {'_id': entry_ijd} for k in ys: atomic_number1 = atomic_number(structure.species[k[0]]) atomic_number2 = atomic_number(structure.species[k[1]]) pair = '%06d' % min(atomic_number1 * 1000 + atomic_number2, atomic_number2 * 1000 + atomic_number1) fingerprint[pair] = list(ys[k]) if self.pcdb.db.fingerprints.find_one({'_id': entry_ijd}) is None: self.pcdb.db.fingerprints.insert(fingerprint) else: self.pcdb.db.fingerprints.update({'_id': entry_ijd}, fingerprint) fingerprints[entry_ijd] = fingerprint else: fingerprints[entry_ijd] = self.pcdb.db.fingerprints.find_one({'_id': entry_ijd}) dij = [] for pair in fingerprints[entry_id]: if pair in fingerprints[entry_jd] and pair != '_id': uvect1 = unit_vector(fingerprints[entry_id][pair]) uvect2 = unit_vector(fingerprints[entry_jd][pair]) dij.append(0.5 * (1.0 - np.dot(uvect1, uvect2))) distance = float(np.mean(dij)) self.pcdb.db.distances.insert({'pair': ids_pair, 'distance': distance}) else: distance = distance_entry['distance'] return distance def add_from_db(self, db_settings, sizemax=1): if self.composition is None: raise ValueError('No composition associated to this population') comp = Composition(self.composition) readdb = get_database(db_settings) index = 0 for entry in readdb.entries.find({'structure.formula': comp.formula, 'structure.natom': {'$gte': self.min_comp_mult * comp.natom, '$lte': self.max_comp_mult * comp.natom}}): if index < sizemax: print('Adding entry ' + str(entry['_id']) + ' from ' + readdb.name) self.new_entry(readdb.get_structure(entry['_id'])) index += 1 def move_random(self, entry_id, factor=0.2, in_place=False, kind='move'): structure = self.get_structure(entry_id) changer = StructureChanger(structure=structure) if kind == 'move': changer.random_move_many_atoms(epsilon=factor) else: # change changer.random_change(factor) if in_place: return self.set_structure(entry_id, changer.new_structure) else: return self.new_entry(changer.new_structure, active=False) def move(self, entry_id, entry_jd, factor=0.2, in_place=False): """ Moves entry_id in the direction of entry_jd If in_place is True the movement occurs on the same address as entry_id :param factor: :param entry_id: :param entry_jd: :param in_place: :return: """ structure_mobile = self.get_structure(entry_id) structure_target = self.get_structure(entry_jd) if structure_mobile.natom != 
structure_target.natom: # Moving structures with different number of atoms is only implemented for smaller structures moving # towards bigger ones by making a super-cell and only if their size is smaller that 'max_comp_mult' mult1 = structure_mobile.get_composition().gcd mult2 = structure_target.get_composition().gcd lcd = mult1 * mult2 / gcd(mult1, mult2) if lcd > self.max_comp_mult: # The resulting structure is bigger than the limit # cannot move if not in_place: return self.new_entry(structure_mobile) else: return entry_id # We will move structure1 in the direction of structure2 match = StructureMatch(structure_target, structure_mobile) match.match_size() match.match_shape() match.match_atoms() displacements = match.reduced_displacement() new_reduced = match.structure2.reduced + factor * displacements new_cell = match.structure2.cell new_symbols = match.structure2.symbols new_structure = Structure(reduced=new_reduced, symbols=new_symbols, cell=new_cell) if in_place: return self.set_structure(entry_id, new_structure) else: return self.new_entry(new_structure, active=False) def __str__(self): ret = '\n' ret += '[%s] Population type: %s\n' % (self.tag, 'Relax Structures') ret += '[%s] Database: %s\n' % (self.tag, self.name) ret += '[%s] Tag: %s\n' % (self.tag, self.tag) ret += '[%s] Target-Forces: %7.2E\n' % (self.tag, self.target_forces) ret += '[%s] Value tolerance: %7.2E\n' % (self.tag, self.value_tol) ret += '[%s] Distance tolerance: %7.2E\n\n' % (self.tag, self.distance_tolerance) if self.composition is not None: ret += '[%s] Composition: %s\n' % (self.tag, self.composition.formula) ret += '[%s] Minimal composition multiplier: %d\n' % (self.tag, self.min_comp_mult) ret += '[%s] Maximal composition multiplier: %d\n' % (self.tag, self.max_comp_mult) ret += '[%s] Members: %d\n' % (self.tag, len(self.members)) ret += '[%s] Ac
#!/usr/bin/python # openvpn.py: library to handle starting and stopping openvpn instances import subprocess import threading import time class OpenVPN(): def __init__(self, config_file=None, auth_file=None, timeout=10): self.started = False self.stopped = False self.error = False self.notifications = "" self.auth_file = auth_file self.config_file = config_file self.thread = threading.Thread(target=self._invoke_openvpn) self.thread.setDaemon(1) self.timeout = timeout def _invoke_openvpn(self): if self.auth_file is None: cmd = ['sudo', 'openvpn', '--script-security', '2', '--config', self.config_file] else: cmd = ['sudo', 'openvpn', '--script-security', '2', '--config', self.config_file, '--auth-user-pass', self.auth_file] self.process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) self.kill_switch = self.process.terminate self.starting = True while True:
line = self.process.stdout.readline().strip() if not line: break self.output_callback(line, self.process.terminate) def output_callback(self, line, kill_switch): """Set status of openvpn according to what we process""" self.notifications += line + "\n" if "Initialization Sequence Completed" in line: self.started = True if "ERROR:" in line: self.error = Tru
e if "process exiting" in line: self.stopped = True def start(self, timeout=None): """Start openvpn and block until the connection is opened or there is an error """ if not timeout: timeout = self.timeout self.thread.start() start_time = time.time() while start_time + timeout > time.time(): self.thread.join(1) if self.error or self.started: break if self.started: print "openvpn started" else: print "openvpn not started" print self.notifications def stop(self, timeout=None): """Stop openvpn""" if not timeout: timeout = self.timeout self.kill_switch() self.thread.join(timeout) if self.stopped: print "stopped" else: print "not stopped" print self.notifications
#!/usr/bin/python from __fu
ture__ import absolute_import from __future__ import print_function import sys if l
en(sys.argv) != 3: sys.stderr.write("Usage: %s 'Host Name' 'Service Description'\n" % (sys.argv[0])) sys.exit(2) ## This is for the custom nagios module sys.path.insert(1, '../') from pynag.Parsers import config ## Create the plugin option nc = config('/etc/nagios/nagios.cfg') nc.parse() service = nc.get_service(sys.argv[1],sys.argv[2]) print(nc.print_conf(service))
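# Illustrative invocation (host and service description are examples and must
# match objects defined in /etc/nagios/nagios.cfg):
# $ python this_script.py 'localhost' 'Disk Usage'
# ...prints the matching 'define service { ... }' block from the parsed config.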
while True: if self.evt_stop.is_set(): break self.evt_enabled.wait() if not self.sync.tunnel: break if
self.sync.tunnel.is_up(): self.poll() time.sleep(TIMER_PERIOD) def poll(self): msg = self.sync.tunnel.po
ll() if msg: batch = [cmd.strip() for cmd in msg.split('\n') if cmd] if batch: gdb.post_event(Runner(batch)) else: gdb.post_event(Runner(['syncoff'])) self.stop() def enable(self): self.evt_enabled.set() def disable(self): self.evt_enabled.clear() def stop(self): self.evt_stop.set() class Sync(gdb.Command): def __init__(self): gdb.Command.__init__(self, "sync", gdb.COMMAND_OBSCURE, gdb.COMPLETE_NONE) self.pid = None self.maps = None self.base = None self.offset = None self.tunnel = None self.poller = None gdb.events.exited.connect(self.exit_handler) gdb.events.cont.connect(self.cont_handler) gdb.events.stop.connect(self.stop_handler) gdb.events.new_objfile.connect(self.newobj_handler) print("[sync] commands added") def identity(self): f = tempfile.NamedTemporaryFile() gdb.execute("shell uname -svm > %s" % f.name) id = open(f.name, 'r').read() f.close() return id.strip() def mod_info(self, addr): if not self.maps: self.maps = get_maps() if not self.maps: print("[sync] failed to get maps") return None return get_mod_by_addr(self.maps, addr) def locate(self): offset = get_pc() if not offset: print("<not running>") return if not self.pid: self.pid = get_pid() if not self.pid: print("[sync] failed to get pid") return else: print("[sync] pid: %s" % self.pid) self.offset = offset mod = self.mod_info(self.offset) if mod: if VERBOSE >= 2: print("[sync] mod found") print(mod) base, sym = mod if self.base != base: self.tunnel.send("[notice]{\"type\":\"module\",\"path\":\"%s\"}\n" % sym) self.base = base self.tunnel.send("[sync]{\"type\":\"loc\",\"base\":%d,\"offset\":%d}\n" % (self.base, self.offset)) else: print("[sync] unknown module at 0x%x" % self.offset) self.base = None self.offset = None def create_poll_timer(self): if not self.poller: self.poller = Poller(self) self.poller.start() def release_poll_timer(self): if self.poller: self.poller.stop() self.poller = None def newobj_handler(self, event): # force a new capture self.maps = None def cont_handler(self, event): if self.tunnel: self.poller.disable() return '' def stop_handler(self, event): if self.tunnel: self.locate() self.poller.enable() return '' def exit_handler(self, event): self.reset_state() print("[sync] exit, sync finished") def reset_state(self): try: self.release_poll_timer() if self.tunnel: self.tunnel.close() self.tunnel = None self.pid = None self.maps = None self.base = None self.offset = None except Exception as e: print(e) def invoke(self, arg, from_tty): if self.tunnel and not self.tunnel.is_up(): self.tunnel = None if not self.tunnel: if arg == "": arg = HOST self.tunnel = Tunnel(arg) if not self.tunnel.is_up(): print("[sync] sync failed") return id = self.identity() self.tunnel.send("[notice]{\"type\":\"new_dbg\",\"msg\":\"dbg connect - %s\",\"dialect\":\"gdb\"}\n" % id) print("[sync] sync is now enabled with host %s" % str(arg)) self.create_poll_timer() else: print('(update)') self.locate() self.poller.enable() class Syncoff(gdb.Command): def __init__(self, sync): gdb.Command.__init__(self, "syncoff", gdb.COMMAND_RUNNING, gdb.COMPLETE_NONE) self.sync = sync def invoke(self, arg, from_tty): self.sync.reset_state() print("[sync] sync is now disabled") class Cmt(gdb.Command): def __init__(self, sync): gdb.Command.__init__(self, "cmt", gdb.COMMAND_OBSCURE, gdb.COMPLETE_NONE) self.sync = sync def invoke(self, arg, from_tty): if not self.sync.base: print("[sync] process not synced, command is dropped") return if arg == "": print("[sync] usage: cmt [-a 0xBADF00D] <cmt to add>") return 
self.sync.tunnel.send("[sync]{\"type\":\"cmt\",\"msg\":\"%s\",\"base\":%d,\"offset\":%d}\n" % (arg, self.sync.base, self.sync.offset)) class Fcmt(gdb.Command): def __init__(self, sync): gdb.Command.__init__(self, "fcmt", gdb.COMMAND_OBSCURE, gdb.COMPLETE_NONE) self.sync = sync def invoke(self, arg, from_tty): if not self.sync.base: print("[sync] process not synced, command is dropped") return self.sync.tunnel.send("[sync]{\"type\":\"fcmt\",\"msg\":\"%s\",\"base\":%d,\"offset\":%d}\n" % (arg, self.sync.base, self.sync.offset)) class Rcmt(gdb.Command): def __init__(self, sync): gdb.Command.__init__(self, "rcmt", gdb.COMMAND_OBSCURE, gdb.COMPLETE_NONE) self.sync = sync def invoke(self, arg, from_tty): if not self.sync.base: print("[sync] process not synced, command is dropped") return self.sync.tunnel.send("[sync]{\"type\":\"rcmt\",\"msg\":\"%s\",\"base\":%d,\"offset\":%d}\n" % (arg, self.sync.base, self.sync.offset)) class Translate(gdb.Command): def __init__(self, sync): gdb.Command.__init__(self, "translate", gdb.COMMAND_OBSCURE, gdb.COMPLETE_NONE) self.sync = sync def invoke(self, arg, from_tty): if not self.sync.base: print("[sync] process not synced, command is dropped") return base, address, module = [a.strip() for a in arg.split(" ")] maps = get_maps() if not maps: print("[sync] failed to get maps") return None mod = get_mod_by_name(maps, module) if not mod: print("[sync] failed to locate module %s" % module) return None mod_base, mod_sym = mod rebased = int(address, 16) - int(base, 16) + mod_base print("[sync] module %s based at 0x%x, rebased address: 0x%x\n" % (mod_sym, mod_base, rebased)) class Bc(gdb.Command): def __init__(self, sync): gdb.Command.__init__(self, "bc", gdb.COMMAND_OBSCURE, gdb.COMPLETE_NONE) self.sync = sync def invoke(self, arg, from_tty): if not self.sync.base: print("[sync] process not synced, command is dropped") return if arg == "": arg = "oneshot" if not (arg in ["on", "off", "oneshot"]): print("[sync] usage: bc <on|off|oneshot>") return self.sync.tunnel.send("[notice]{\"type\":\"bc\",\"msg\":\"%s\",\"base\":%d,\"offset\":%d}\n" % (arg, self.sync.base, self.sync.offset)) class Cmd(gdb.Command): def __init__(self, sync): gdb.Command.__init__(self, "cmd", gdb.COMMAND_OBSCURE, gdb.COMPLETE_NONE) self.sync = sync def invoke(self, arg, from_tty): if not self.sync.base: print("[sync] process not synced, command is dropped") return if arg == "": print("[
i
mport time def start(): ret
urn time.time()
# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes for displaying peer review analytics.""" __author__ = 'Sean Lip (sll@google.com)' import os from common import safe_dom from controllers.utils import ApplicationHandler from controllers.utils import HUMAN_READABLE_TIME_FORMAT import jinja2 from models import courses from models import jobs from models import transforms from models import utils from modules.review import peer class ReviewStatsAggregator(object): """Aggregates peer review statistics.""" def __init__(self): # This dict records, for each unit, how many submissions have a given # number of completed reviews. The format of each key-value pair is # unit_id: {num_reviews: count_of_submissions} self.counts_by_completed_reviews = {} def visit(self, review_summary): unit_id = review_summary.unit_id if unit_id not in self.counts_by_completed_reviews: self.counts_by_completed_reviews[unit_id] = {} count = review_summary.completed_count if count not in self.counts_by_completed_reviews[unit_id]: self.counts_by_completed_reviews[unit_id][count] = 1 else: self.counts_by_completed_reviews[unit_id][count] += 1 class ComputeReviewStats(jobs.DurableJob): """A job for computing peer review statistics.""" def run(self): """Computes peer review statistics.""" stats = ReviewStatsAggregator() mapper = utils.QueryMapper( peer.ReviewSummary.all(), batch_size=500, report_every=1000) mapper.run(stats.visit) completed_arrays_by_unit = {} for unit_id in stats.counts_by_completed_reviews: max_completed_reviews = max( stats.counts_by_completed_reviews[unit_id].keys()) completed_reviews_array = [] for i in range(max_completed_reviews + 1): if i in stats.counts_by_completed_reviews[unit_id]: completed_reviews_array.append( stats.counts_by_completed_reviews[unit_id][i]) else: completed_reviews_array.append(0) completed_arrays_by_unit[unit_id] = completed_reviews_array return {'counts_by_completed_reviews': completed_arrays_by_unit} class PeerReviewStatsHandler(ApplicationHandler): """Shows peer review analytics on the dashboard.""" # The key used in the statistics dict that generates the dashboard page. # Must be unique. name = 'peer_review_stats' # The class that generates the data to be displayed. 
stats_computer = ComputeReviewStats def get_markup(self, job): """Returns Jinja markup for peer review statistics.""" errors = [] stats_calculated = False update_message = safe_dom.Text('') course = courses.Course(self) serialized_units = [] if not job: update_message = safe_dom.Text( 'Peer review statistics have not been calculated yet.') else: if job.status_code == jobs.STATUS_CODE_COMPLETED: stats = transforms.loads(job.output) stats_calculated = True for unit in course.get_peer_reviewed_units(): if unit.unit_id in stats['counts_by_completed_reviews']: unit_stats = ( stats['counts_by_completed_reviews'][unit.unit_id]) serialized_units.append({ 'stats': unit_stats, 'title': unit.title, 'unit_id': unit.unit_id, }) update_message = safe_dom.Text(""" Peer review statistics were last updated at %s in about %s second(s).""" % ( job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT), job.execution_time_sec)) elif job.status_code == jobs.STATUS_CODE_FAILED: update_message = safe_dom.NodeList().append( safe_dom.Text(""" There was an error updating peer review statistics. Here is the message:""") ).append( safe_dom.Element('br') ).append( safe_dom.Element('blockquote').add_child( safe_dom.Element('pre').add_text('\n%s' % job.output))) else: update_message = safe_dom.Text(""" Peer review statistics update started at %s and is running now. Please come back shortly.""" % job.updated_on.strftime( HUMAN_READABLE_TIME_FORMAT)) return jinja2.utils.Markup(self.get_template( 'stats.html', [os.path.dirname(__file__)] ).render({ 'erro
rs': errors, 'serialized_un
its': serialized_units, 'serialized_units_json': transforms.dumps(serialized_units), 'stats_calculated': stats_calculated, 'update_message': update_message, }, autoescape=True))
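# Illustrative walk-through (FakeSummary is ours; real ReviewSummary entities
# come from modules.review.peer): visiting three summaries for one unit yields
# counts keyed by completed-review count.
# class FakeSummary(object):
#     def __init__(self, unit_id, completed_count):
#         self.unit_id = unit_id
#         self.completed_count = completed_count
# agg = ReviewStatsAggregator()
# for s in (FakeSummary('u1', 0), FakeSummary('u1', 2), FakeSummary('u1', 2)):
#     agg.visit(s)
# agg.counts_by_completed_reviews == {'u1': {0: 1, 2: 2}}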
ce position in kpc xyz3 = xyz + dxyz # back to the lab, deg (ra3,dec3) = xyztoradec(xyz3) # adjusted angular displacement, deg dra = ra3 - ra # tedious RA wrapping dra += 360. * (dra < -180) dra -= 360. * (dra > 180) # convert back to proper motions return ((dra * cos(deg2rad(dec3)) / dyr) * 3.6e6, ((dec3 - dec) / dyr) * 3.6e6) def axis_angle_rotation_matrix(axis, angle): ''' axis: 3-vector about which to rotate angle: angle about which to rotate, in degrees. Returns: 3x3 rotation matrix ''' theta = np.deg2rad(angle) ct = np.cos(theta) st = np.sin(theta) ux,uy,uz = axis / np.sqrt(np.sum(axis**2)) R = np.array([ [ct + ux**2*(1-ct), ux*uy*(1-ct) - uz*st, ux*uz*(1-ct) + uy*st], [uy*ux*(1-ct) + uz*st, ct + uy**2*(1-ct), uy*uz*(1-ct)-ux*st], [uz*ux*(1-ct)-uy*st, uz*uy*(1-ct)+ux*st, ct+uz**2*(1-ct)], ]) return R # the north galactic pole, (RA,Dec), in degrees, from Bovy. # This matches Schlegel's email of 2015-02-19 citing the # Hipparcos explanatory supplement. galactic_pole = (192.85948, 27.12825) # vs Wikipedia's (192.859508, 27.128336) # This puts (RA,DEC) = (1,1) at (l,b) = (98.941031, -59.643798). # returns (xhat, yhat, zhat), unit vectors in the RA,Dec unit sphere # of the galactic coordinates. def galactic_unit_vectors(): # Galactic longitude of celestial equator lomega = 32.93192 # direction to Galactic Pole zhat = radectoxyz(*galactic_pole).T # where the galactic plane crosses the equatorial plane X = np.cross(zhat.T, np.array([[0,0,-1],])) X /= np.sqrt(np.sum(X**2)) # Rotate X by lomega around zhat. Rx = axis_angle_rotation_matrix(zhat[:,0], -lomega) Ry = axis_angle_rotation_matrix(zhat[:,0], 90.-lomega) xhat = np.dot(Rx, X.T) yhat = -np.cross(zhat.T, xhat.T).T return (xhat, yhat, zhat) def mjdtodate(mjd): jd = mjdtojd(mjd) return jdtodate(jd) def jdtodate(jd): unixtime = (jd - 2440587.5) * 86400. # in seconds return datetime.datetime.utcfromtimestamp(unixtime) def mjdtojd(mjd): return mjd + 2400000.5 def jdtomjd(jd): return jd - 2400000.5 def timedeltatodays(dt): return dt.days + (dt.seconds + dt.microseconds/1e6)/86400. def datetomjd(d): d0 = datetime.datetime(1858, 11, 17, 0, 0, 0) dt = d - d0 # dt is a timedelta object. return timedeltatodays(dt) def datetojd(d): return mjdtojd(datetomjd(d)) # UTC for 2000 January 1.5 J2000 = datetime.datetime(2000,1,1,12,0,0,0,tzinfo=None) # -> jd 2451545.0 def ecliptic_basis(eclipticangle = 23.439281): Equinox= array([1,0,0]) CelestialPole = array([0,0,1]) YPole = cross(CelestialPole, Equinox) EclipticAngle= deg2rad(eclipticangle) EclipticPole= (CelestialPole * cos(EclipticAngle) - YPole * sin(EclipticAngle)) Ydir = cross(EclipticPole, Equinox) return (Equinox, Ydir, EclipticPole) meters_per_au = 1.4959e11 # thanks, Google speed_of_light = 2.99792458e8 # m/s seconds_per_day = 86400. days_per_year = 365.25 def days_to_years(d): return d / days_per_year def au_to_meters(au): return au * meters_per_au def seconds_to_days(s): return s / seconds_per_day # Returns the light travel time for the given distance (in AU), in days. def au_light_travel_time_days(au): return seconds_to_days(au_to_meters(au) / speed_of_light) def hms2ra(h, m, s): return 15. * (h + (m + s/60.)/60.) 
def tokenize_hms(s): s = s.strip() tokens = s.split() tokens = reduce(list.__add__, [t.split(':') for t in tokens]) h = len(tokens) >= 1 and float(tokens[0]) or 0 m = len(tokens) >= 2 and float(tokens[1]) or 0 s = len(tokens) >= 3 and float(tokens[2]) or 0 return (h,m,s) def hmsstring2ra(st): ''' >>> st = "00 44 02.08" >>> hmsstring2ra(st) 11.008666666666667 >>> ra2hmsstring(hmsstring2ra(st), sec_digits=2) == st True ''' (h,m,s) = tokenize_hms(st) return hms2ra(h, m, s) def dms2dec(sign, d, m, s): return sign * (d + (m + s/60.)/60.) def dmsstring2dec(s): sign = (s[0] == '-') and -1.0 or 1.0 if s[0] == '-' or s[0] == '+': s = s[1:] (d,m,s) = tokenize_hms(s) return dms2dec(sign, d, m, s) # RA in degrees def ra2hms(ra): ra = ra_normalize(ra) h = ra * 24. / 360. hh = int(floor(h)) m = (h - hh) * 60. mm = int(floor(m)) s = (m - mm) * 60. return (hh, mm, s) # Dec in degrees def dec2dms(dec): sgn =
(dec >= 0) and 1. or -1. d = dec * sgn dd = int(floor(d)) m = (d - dd) * 60. mm = int(floor(m)) s = (m - mm) * 60. if s >= 60.:
m += 1. s -= 60. # don't just return sgn*d because values between 0 and 1 deg will get you! return (sgn, d, m, s) # RA in degrees def ra2hmsstring(ra, separator=' ', sec_digits=3): (h,m,s) = ra2hms(ra) #print 'hms', h,m,s ss = int(floor(s)) #ds = int(round((s - ss) * 1000.0)) # fractional seconds fs = s - ss #print 'ss,fs', ss, fs fracstr = '%.*f' % (sec_digits, fs) #print 'fracstr', fracstr if fs >= 1.: ss += 1 fs -= 1. if sec_digits > 0: fracstr = '%.*f' % (sec_digits, fs) if fracstr[0] == '1': ss += 1 fs -= 1. if ss >= 60: ss -= 60 m += 1 if m >= 60: m -= 60 h += 1 if sec_digits == 0: sstr = '%0.2i' % (ss) else: #sfmt = '%%0.2i.%%0.%ii' % (sec_digits) #sstr = sfmt % (ss, ds) sstr = '%0.2i' % ss # fractional seconds string -- 0.XXX fracstr = '%.*f' % (sec_digits, fs) #print 'fracstr', fracstr if fracstr[0] == '-': fracstr = fracstr[1:] assert(fracstr[0] == '0') sstr += fracstr[1:] return separator.join(['%0.2i' % h, '%0.2i' % m, sstr]) # Dec in degrees def dec2dmsstring(dec, separator=' ', sec_digits=3): ''' >>> dec2dmsstring(41.5955538864, sec_digits=3) '+41 35 43.994' >>> dec2dmsstring(41.5955538864, sec_digits=2) '+41 35 43.99' >>> dec2dmsstring(41.5955538864, sec_digits=1) '+41 35 44.0' ''' (sgn, d,m,s) = dec2dms(dec) ss = int(floor(s)) fs = s - ss if sgn > 0: signc = '+' else: signc = '-' if sec_digits == 0: sstr = '%0.2i' % (ss) else: # fractional seconds string -- 0.XXX fracstr = '%.*f' % (sec_digits, fs) # but it can be 1.00 ... #print 'dec fracstr', fracstr if fracstr[0] == '1': ss += 1 if ss >= 60: ss -= 60 m += 1 if m >= 60: m -= 60 d += 1 sstr = '%0.2i' % ss + fracstr[1:] return separator.join(['%c%0.2i' % (signc, d), '%0.2i' % m, sstr]) def xyzarrtoradec(xyz): return (degrees(xy2ra(xyz[0], xyz[1])), degrees(z2dec(xyz[2]))) def deg2rad(d): return d*pi/180.0 def deg2arcmin(d): return d * 60. def deg2arcsec(d): return d * 3600. def rad2arcmin(r): return 10800.0*r/pi def arcmin2rad(a): return a*pi/10800.0 def arcmin2deg(a): return a/60. def radec2x(r,d): return cos(d)*cos(r) # r,d in radians def radec2y(r,d): return cos(d)*sin(r) # r,d in radians def radec2z(r,d): return sin(d) # r,d in radians def z2dec(z): return asin(z) # result in radians def xy2ra(x,y): "Convert x,y to ra in radians" r = atan2(y,x) r += 2*pi*(r<0.) return r def rad2distsq(rad): return 2. * (1. - cos(rad)) def arcsec2distsq(arcsec): return rad2distsq(arcsec2rad(arcsec)) def arcsec2dist(arcsec): return sqrt(arcsec2distsq(arcsec)) def arcmin2distsq(arcmin): return rad2distsq(arcmin2rad(arcmin)) def arcmin2dist(arcmin): return sqrt(arcmin2distsq(arcmin)) def dist2arcsec(dist): return distsq2arcsec(dist**2) def dist2deg(dist): return distsq2deg(dist**2) if __name__ == '__main__': import doctest doctest.testmod() assert(ra_ranges_overlap(359, 1, 0.5, 1.5) == True) assert(ra_range
""" A "mirroring" ``stdout`` context manager. While active, the context manager reverses text output to ``stdout``:: # BEGIN MIRROR_GEN_DEMO_1 >>> from mirror_gen import looking_glass >>> with looking_glass() as what: # <1> ... print('Alice, Kitty and Snowdrop') ... print(what) ... pordwonS dna yttiK ,ecilA YKCOWREBBAJ >>> what 'JABBERWOCKY' # END MIRROR_GEN_DEMO_1 This exposes the context manager operation:: # BEGIN MIRROR_GEN_DEMO_2 >>> from mirror_gen import looking_glass >>> manager = looking_glass() # <1> >>> manager # doctest: +ELLIPSIS <contextlib._GeneratorContextManager object at 0x...> >>> monster = manager.__enter__() # <2> >>> monster == 'JABBERWOCKY' # <3> eurT >>> monster 'YKCOWREBBAJ' >>> manager # doctest: +ELLIPSIS >...x0 ta tcejbo reganaMtxetnoCrotareneG_.biltxetnoc< >>> manager.__exit__(None, None, None) # <4> >>> monster 'JABBERWOCKY' # END MIRROR_GEN_DEMO_2
""" # BEGIN MIRROR_GEN_EX import contextlib @contextlib.con
textmanager # <1> def looking_glass(): import sys original_write = sys.stdout.write # <2> def reverse_write(text): # <3> original_write(text[::-1]) sys.stdout.write = reverse_write # <4> yield 'JABBERWOCKY' # <5> sys.stdout.write = original_write # <6> # END MIRROR_GEN_EX
import numpy as np import pytest from pandas._libs.tslib import iNaT from pandas.core.dtypes.dtypes import PeriodDtype import pandas as pd from pandas.core.arrays import PeriodArray from pandas.tests.extension import base @pytest.fixture def dtype(): return PeriodDtype(freq='D') @pytest.fixture def data(dtype): return PeriodArray(np.arange(1970, 2070), freq=dtype.freq) @pytest.fixture def data_for_sorting(dtype): return PeriodArray([2018, 2019, 2017], freq=dtype.freq) @pytest.fixture def data_missing(dtype): return PeriodArray([iNaT, 2017], freq=dtype.freq) @pytest.fixture def data_missing_for_sorting(dtype): return Per
iodArray([2018, iNaT, 2017], freq=dtype.freq) @pytest.fixture def data_for_grouping(dtype): B = 2018 NA = iNaT A = 2017 C = 2019 return P
eriodArray([B, B, NA, NA, A, A, B, C], freq=dtype.freq) @pytest.fixture def na_value(): return pd.NaT class BasePeriodTests(object): pass class TestPeriodDtype(BasePeriodTests, base.BaseDtypeTests): pass class TestConstructors(BasePeriodTests, base.BaseConstructorsTests): pass class TestGetitem(BasePeriodTests, base.BaseGetitemTests): pass class TestMethods(BasePeriodTests, base.BaseMethodsTests): def test_combine_add(self, data_repeated): # Period + Period is not defined. pass class TestInterface(BasePeriodTests, base.BaseInterfaceTests): pass class TestArithmeticOps(BasePeriodTests, base.BaseArithmeticOpsTests): implements = {'__sub__', '__rsub__'} def test_arith_series_with_scalar(self, data, all_arithmetic_operators): # we implement subtraction... if all_arithmetic_operators in self.implements: s = pd.Series(data) self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None) else: # ... but not the rest. super(TestArithmeticOps, self).test_arith_series_with_scalar( data, all_arithmetic_operators ) def test_arith_series_with_array(self, data, all_arithmetic_operators): if all_arithmetic_operators in self.implements: s = pd.Series(data) self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None) else: # ... but not the rest. super(TestArithmeticOps, self).test_arith_series_with_scalar( data, all_arithmetic_operators ) def _check_divmod_op(self, s, op, other, exc=NotImplementedError): super(TestArithmeticOps, self)._check_divmod_op( s, op, other, exc=TypeError ) def test_add_series_with_extension_array(self, data): # we don't implement + for Period s = pd.Series(data) msg = (r"unsupported operand type\(s\) for \+: " r"\'PeriodArray\' and \'PeriodArray\'") with pytest.raises(TypeError, match=msg): s + data def test_error(self): pass def test_direct_arith_with_series_returns_not_implemented(self, data): # Override to use __sub__ instead of __add__ other = pd.Series(data) result = data.__sub__(other) assert result is NotImplemented class TestCasting(BasePeriodTests, base.BaseCastingTests): pass class TestComparisonOps(BasePeriodTests, base.BaseComparisonOpsTests): def _compare_other(self, s, data, op_name, other): # the base test is not appropriate for us. We raise on comparison # with (some) integers, depending on the value. pass class TestMissing(BasePeriodTests, base.BaseMissingTests): pass class TestReshaping(BasePeriodTests, base.BaseReshapingTests): pass class TestSetitem(BasePeriodTests, base.BaseSetitemTests): pass class TestGroupby(BasePeriodTests, base.BaseGroupbyTests): pass class TestPrinting(BasePeriodTests, base.BasePrintingTests): pass class TestParsing(BasePeriodTests, base.BaseParsingTests): @pytest.mark.parametrize('engine', ['c', 'python']) def test_EA_types(self, engine, data): expected_msg = r'.*must implement _from_sequence_of_strings.*' with pytest.raises(NotImplementedError, match=expected_msg): super(TestParsing, self).test_EA_types(engine, data)
# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from nova.compute import api as compute_api from nova.compute import manager as compute_manager from nova.servicegroup import api as service_group_api from nova.tests.integrated.v3 import test_servers class EvacuateJsonTest(test_servers.ServersSampleBase): extension_name = "os-evacuate" def _test_evacuate(self, req_subs, server_req, server_resp, expected_resp_code): self.uuid = self._post_server() def fake_service_is_up(self, service): """Simulate validation of instance host is down.""" return False def fake_service_get_by_compute_host(self, context, host): """Simulate that given host is a valid host.""" return { 'host_name': host, 'service': 'compute', 'zone': 'nova' } def fake_check_instance_exists(self, context, instance): """Simulate validation of instance does not exist.""" return False self.stubs.Set(service_group_api.API, 'service_is_up', fake_service_is_up) self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host', fake_service_get_by_compute_host) self.stubs.Set(compute_manager.ComputeManager, '_check_instance_exists', fake_check_instance_exists) response = self._do_post('servers/%s/action' % self.uuid, server_req, req_subs) subs = self._get_regexes() self._verify_response(server_resp, subs, response, expected_resp_code) @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance') def test_server_evacuate(self, rebuild_mock): # Note (wingwj): The host can't be the same one req_subs = { 'host': '
testHost', "adminPass": "MySecretPass", "onSharedStorage": 'False' } self._test_evacuate(req_subs, 'server-evacuate-req', 'server-evacuate-resp', 202) rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY, orig_
image_ref=mock.ANY, image_ref=mock.ANY, injected_files=mock.ANY, new_pass="MySecretPass", orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY, on_shared_storage=False, preserve_ephemeral=mock.ANY, host='testHost') @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance') def test_server_evacuate_find_host(self, rebuild_mock): req_subs = { "adminPass": "MySecretPass", "onSharedStorage": 'False' } self._test_evacuate(req_subs, 'server-evacuate-find-host-req', 'server-evacuate-find-host-resp', 202) rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY, orig_image_ref=mock.ANY, image_ref=mock.ANY, injected_files=mock.ANY, new_pass="MySecretPass", orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY, on_shared_storage=False, preserve_ephemeral=mock.ANY, host=None)
#!/usr/bin/python2.7 # # This file is part of drizzle-ci # # Copyright (c) 2013 Sharan Kumar M # # drizzle-ci is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # drizzle-ci is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with drizzle-ci. If not, see <http://www.gnu.org/licenses/>. # # # ========================== # Test script for drizzle-ci # ========================== # imports import logging import os import re import signal import subprocess import sys # configuring paths path = {} path['root'] = os.getcwd() path['state'] = '/srv/salt' path['pillar'] = '/srv/pillar' # configuring variables logging.basicConfig(format='%(levelname)s:%(message)s',level=logging.INFO) log = logging.getLogger(__name__) copy = 'sudo cp -r {0} {1}' top_file = '''base: '*': - {0} ''' # functions def process_command_line(): ''' A function to return the command line arguments as a dictionary of items ''' opt = {} argv = sys.argv[1:] if len(argv) == 0: opt['minion'] = ['*'] opt['state'] = ['drizzle-dbd','drizzle','jenkins','nova','salt','sysbench','users'] return opt for arg in argv: key = arg.split('=')[0][2:] opt[key] = arg.split('=')[1].split(',') return opt def keyboard_interrupt(signal_type,handler): ''' This function handles the keyboard interrupt ''' log.info('\t\tPressed CTRL+C') log.info('\t\texiting...') exit(0) # processing the command line and kick start! opt = process_command_line() signal.signal(signal.SIGINT,keyboard_interrupt) log.info('\t\tsetting up the environment') # setting up the environment cmd = copy.format(path['state']+'/top.sls',path['state']+'/top.sls.bak') os.system(cmd) cmd = copy.format(path['root']+'/salt',path['state']) os.system(cmd) cmd = copy.format(path['root']+'/pillar', path['pillar']) os.system(cmd) # refreshing pillar data log.info('\t\tsetting up pillar data') for minion in opt['minion']: subprocess.Popen(['sudo','salt',minion,'saltutil.refresh_pillar'],stdout=subprocess.PIPE) # processing each state log.inf
o('\n\t\t==================================================') log.i
nfo('\t\tstate minion status ') log.info('\t\t==================================================') for state in opt['state']: top_data = top_file.format(state) with open(path['state']+'/top.sls', 'w') as top_sls: top_sls.write(top_data) for minion in opt['minion']: output = subprocess.Popen(['sudo', 'salt', minion, 'state.highstate'], stdout=subprocess.PIPE) result, error = output.communicate() if error is not None: logging.info('ERROR') logging.info(error) failure = re.search(r'Result:\s+False',result) if failure is not None: status = 'FAILURE' else: status = 'OK' log.info('\t\t'+state.ljust(20)+minion.ljust(20)+status.ljust(10)) # restoring the original top.sls and cleaning up.. log.info('\t\t==================================================') log.info('\n\t\tcleaning up...') cmd = 'sudo mv {0} {1}'.format(path['state']+'/top.sls.bak', path['state']+'/top.sls') os.system(cmd) log.info('\t\tsuccessfully executed')
import curses import functools from stem.control import EventType, Controller from stem.util import str_tools # colors that curses can handle COLOR_LIST = { "red": curses.COLOR_RED, "green": curses.COLOR_GREEN, "yellow": curses.COLOR_YELLOW, "blue": curses.COLOR_BLUE, "cyan": curses.COLOR_CYAN, "magenta": curses.COLOR_MAGENTA, "black": curses.COLOR_BLACK, "white": curses.COLOR_WHITE, } GRAPH_WIDTH = 40 GRAPH_HEIGHT = 8 DOWNLOAD_COLOR = "green" UPLOAD_COLOR = "blue" def main(): with Controller.from_port(port = 9051) as controller: controller.authenticate() try: # This makes curses initialize and call draw_bandwidth_graph() with a # reference to the screen, followed by additional arguments (in this # case just the controller). curses.wrapper(draw_bandwidth_graph, controller) except KeyboardInterrupt: pass # the user hit ctrl+c def draw_bandwidth_graph(stdscr, controller): window = Window(stdscr) # (downloaded, uploaded) tuples for the last 40 seconds bandwidth_rates = [(0, 0)] * GRAPH_WIDTH # Making a partial that wraps the window and bandwidth_rates with a function # for Tor to call when it gets a BW event. This causes the 'window' and # 'bandwidth_rates' to be provided as the first two arguments whenever # 'bw_event_handler()' is called. bw_event_handler = functools.partial(_handle_bandwidth_event, window, bandwidth_rates) # Registering this listener with Tor. Tor reports a BW event each second. controller.add_event_listener(bw_event_handler, EventType.BW) # Pause the main thread until the user hits any key... and no, don't you dare # ask where the 'any' key is. :P stdscr.getch() def _handle_bandwidth_event(window, bandwidth_rates, event): # callback for when tor provides us with a BW event bandwidth_rates.insert(0, (event.read, event.written)) bandwidth_rates = bandwidth_rates[:GRAPH_WIDTH] # truncate old values _render_graph(window, bandwidth_rates) def _render_graph(window, bandwidth_rates): window.erase() download_rates = [entry[0] for entry in bandwidth_rates] upload_rates = [entry[1] for entry in bandwidth_rates] # show the latest values at the top l
abel = "Downloaded (%s/s):" % str_tools.size_label(download_rates[0], 1) window.addstr(0, 1, label, DOWNLOAD_COLOR, curses.A_BOLD) label = "Uploaded (%s/s):" % str_tools.size_label(upload_rates[0], 1) window.addstr(0, GRAPH_WIDTH + 7, label, UPLOAD_COLOR, curses.A_BOLD) # draw the graph bounds in KB max_download_rate = max(download_rates) max_upload_rate = max(upload_rates) window.addstr(1, 1, "%4i"
% (max_download_rate / 1024), DOWNLOAD_COLOR) window.addstr(GRAPH_HEIGHT, 1, " 0", DOWNLOAD_COLOR) window.addstr(1, GRAPH_WIDTH + 7, "%4i" % (max_upload_rate / 1024), UPLOAD_COLOR) window.addstr(GRAPH_HEIGHT, GRAPH_WIDTH + 7, " 0", UPLOAD_COLOR) # draw the graph for col in range(GRAPH_WIDTH): col_height = GRAPH_HEIGHT * download_rates[col] / max(max_download_rate, 1) for row in range(col_height): window.addstr(GRAPH_HEIGHT - row, col + 6, " ", DOWNLOAD_COLOR, curses.A_STANDOUT) col_height = GRAPH_HEIGHT * upload_rates[col] / max(max_upload_rate, 1) for row in range(col_height): window.addstr(GRAPH_HEIGHT - row, col + GRAPH_WIDTH + 12, " ", UPLOAD_COLOR, curses.A_STANDOUT) window.refresh() class Window(object): """ Simple wrapper for the curses standard screen object. """ def __init__(self, stdscr): self._stdscr = stdscr # Mappings of names to the curses color attribute. Initially these all # reference black text, but if the terminal can handle color then # they're set with that foreground color. self._colors = dict([(color, 0) for color in COLOR_LIST]) # allows for background transparency try: curses.use_default_colors() except curses.error: pass # makes the cursor invisible try: curses.curs_set(0) except curses.error: pass # initializes colors if the terminal can handle them try: if curses.has_colors(): color_pair = 1 for name, foreground in COLOR_LIST.items(): background = -1 # allows for default (possibly transparent) background curses.init_pair(color_pair, foreground, background) self._colors[name] = curses.color_pair(color_pair) color_pair += 1 except curses.error: pass def addstr(self, y, x, msg, color = None, attr = curses.A_NORMAL): # Curses throws an error if we try to draw a message that spans out of the # window's bounds (... seriously?), so doing our best to avoid that. if color is not None: if color not in self._colors: recognized_colors = ", ".join(self._colors.keys()) raise ValueError("The '%s' color isn't recognized: %s" % (color, recognized_colors)) attr |= self._colors[color] max_y, max_x = self._stdscr.getmaxyx() if max_x > x and max_y > y: try: self._stdscr.addstr(y, x, msg[:max_x - x], attr) except: pass # maybe an edge case while resizing the window def erase(self): self._stdscr.erase() def refresh(self): self._stdscr.refresh() if __name__ == '__main__': main()
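# ---------------------------------------------------------------------
# Hedged note (not part of the original example): functools.partial
# pre-binds the first two positional arguments of the handler, so
#
#   bw_event_handler = functools.partial(_handle_bandwidth_event,
#                                        window, bandwidth_rates)
#   bw_event_handler(event)
#
# is equivalent to
#
#   _handle_bandwidth_event(window, bandwidth_rates, event)
#
# which is why Tor can invoke the listener with nothing but the BW
# event itself.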
#/u/GoldenSights import praw import time import datetime import sqlite3 '''USER CONFIGURATION''' APP_ID = "" APP_SECRET = "" APP_URI = "" APP_REFRESH = "" # https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/ USERAGENT = "" #This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter Bot". SUBREDDIT = "GoldTesting" #This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..." MAXPOSTS = 60 #This is how many posts you want to retrieve all at once. PRAW can download 100 at a time. WAIT = 30 #This is how many seconds you will wait between cycles. The bot is completely inactive during th
is time. DELAY = 86400 #This is the time, IN SECONDS, which the post will hold the active flair IGNOREMODS = False #Do you want the bot to ignore posts made by moderators? Use True or False (With capitals! No quotations!) IGNORESELFPOST = False #Do you want the bot to ignore selfposts? IGNORELINK = True #
Do you want the bot to ignore linkposts? FLAIRACTIVE = "Active" CSSACTIVE = "active" #The flair text and css class assigned to unsolved posts. TITLEREQS = ['[',']'] #Every part of this list must be included in the title '''All done!''' WAITS = str(WAIT) try: import bot USERAGENT = bot.getaG() except ImportError: pass sql = sqlite3.connect('sql.db') print('Loaded SQL Database') cur = sql.cursor() cur.execute('CREATE TABLE IF NOT EXISTS oldposts(id TEXT)') print('Loaded Oldposts') sql.commit() r = praw.Reddit(USERAGENT) r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI) r.refresh_access_information(APP_REFRESH) def getTime(bool): timeNow = datetime.datetime.now(datetime.timezone.utc) timeUnix = timeNow.timestamp() if bool == False: return timeNow else: return timeUnix def scan(): print('Scanning ' + SUBREDDIT) subreddit = r.get_subreddit(SUBREDDIT) moderators = subreddit.get_moderators() mods = [] for moderator in moderators: mods.append(moderator.name) posts = subreddit.get_new(limit=MAXPOSTS) for post in posts: ctimes = [] pid = post.id ptitle = post.title.lower() try: pauthor = post.author.name except AttributeError: pauthor = '[deleted]' ptime = post.created_utc cur.execute('SELECT * FROM oldposts WHERE id=?', [pid]) if not cur.fetchone(): if (post.is_self == True and IGNORESELFPOST == False) or (post.is_self == False and IGNORELINK == False): if pauthor not in mods or IGNOREMODS == False: if all(char.lower() in ptitle for char in TITLEREQS): try: flair = post.link_flair_text.lower() except AttributeError: flair = '' if flair == '': print(pid + ': No Flair') now = getTime(True) if (now - ptime) > DELAY: print('\tOld. Ignoring') cur.execute('INSERT INTO oldposts VALUES(?)', [pid]) else: print('\tAssigning Active Flair') post.set_flair(flair_text=FLAIRACTIVE,flair_css_class=CSSACTIVE) elif flair == FLAIRACTIVE.lower(): print(pid + ': Active') now = getTime(True) if (now-ptime) > DELAY: print('\tOld. Removing Flair') post.set_flair(flair_text="",flair_css_class="") cur.execute('INSERT INTO oldposts VALUES(?)', [pid]) else: print('\tActive for ' + ('%.0f' % (DELAY-(now-ptime))) + ' more seconds') else: print(pid + ': Does not contain titlereq') cur.execute('INSERT INTO oldposts VALUES(?)', [pid]) if pauthor in mods and IGNOREMODS == True: print(pid + ', ' + pauthor + ': Ignoring Moderator') cur.execute('INSERT INTO oldposts VALUES(?)', [pid]) else: print(pid + ', ' + pauthor + ': Ignoring post') cur.execute('INSERT INTO oldposts VALUES(?)', [pid]) sql.commit() while True: try: scan() except Exception as e: print('An error has occured:', str(e)) sql.commit() print('Running again in ' + WAITS + ' seconds.\n') time.sleep(WAIT)
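# ---------------------------------------------------------------------
# Hedged worked example (illustrative titles, not from the subreddit):
# TITLEREQS is ['[', ']'], and the check
#
#   all(char.lower() in ptitle for char in TITLEREQS)
#
# passes only when every listed substring occurs in the lowered title.
# So "[Request] new logo" qualifies for flairing, while
# "Request: new logo" is skipped and written to oldposts.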
r/bin/env python # -*- coding: ascii -*- r""" LazGUI helps to create Lazarus Pascal GUI project. LazGUI will place all of the required files for the Lazarus project into a subdirectory by project name. The project can be built using "lazbuild" that comes with a Lazarus install, or by opening the <project_name>.lpi file with the Lazarus IDE. LazGUI Copyright (C) 2016 Charlie Taylor This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ----------------------- """ import os, sys import shutil here = os.path.abspath(os.path.dirname(__file__)) ref_proj_files = os.path.join( here, 'ref_proj_files' ) #print 'ref_proj_files =',ref_proj_files from lpi_wrapper import LPI_File from lps_wrapper import LPS_File from lpr_wrapper import LPR_File # for multi-file projects see LICENSE file for authorship info # for single file projects, insert following information __author__ = 'Charlie Taylor' __copyright__ = 'Copyright (c) 2016 Charlie Taylor' __license__ = 'GPL-3' exec( open(os.path.join( here,'_version.py' )).read() ) # creates local __version__ variable __email__ = "cet@appliedpython.com" __status__ = "3 - Alpha" # "3 - Alpha", "4 - Beta", "5 - Production/Stable" # # import statements here. (built-in first, then 3rd party, then yours) # # Code goes below. # Adjust docstrings to suite your taste/requirements. # class LazarusGUI(object): """LazGUI helps to create Lazarus Pascal GUI project.""" def __init__(self, project_name='project1', form1_obj=None, data_file_ext='proj_dat'): """Inits LazarusGUI""" self.project_name = str(project_name) self.data_file_ext= data_file_ext self.form_name_set = set() # save set of form names in lower case self.formL = [] if form1_obj is not None: self.add_form( form1_obj ) def add_form(self, form_obj): form_name = form_obj.form_name form_obj.set_laz_gui_obj( self ) # Don't allow duplicate form names while form_name.lower() in self.form_name_set: form_name = form_name + str( (len(self.formL) + 1) ) self.form_name_set.add( form_name.lower() ) self.formL.append( form_obj ) def save_project_files(self, path_name='', over_write_OK=False): if len(self.formL)==0: print 'Can NOT create project... No Forms have been added.' return targ_abs_path = os.path.abspath( path_name ) if os.path.isfile( targ_abs_path ): print 'Can NOT create project... The provided path_name is an existing file.' print 'Need to provide a directory name.' print 'Existing file =',targ_abs_path return if os.path.isdir( targ_abs_path ): if over_write_OK: print 'Using existing directory for Lazarus project.' print 'path_name =',targ_abs_path else: print 'Can NOT create project... The provided directory already exists.' print 'Enter a new directory name OR set parameter "over_write_OK=True".' 
print 'Existing directory =',targ_abs_path return else: os.mkdir( targ_abs_path ) print "created new Lazarus project directory:",targ_abs_path form1 = self.formL[0] lpi_obj = LPI_File( project_name=self.project_name, form1_name=form1.form_name ) lps_obj = LPS_File( project_name=self.project_name, form1_name=form1.form_name ) lpr_obj = LPR_File( project_name=self.project_name, form1_name=form1.form_name ) for f in self.formL[1:]: lpi_obj.add_form( new_form_name=f.form_name ) lps_obj.add_form( new_form_name=f.form_name ) lpr_obj.add_form( new_form_name=f.form_name ) # copy I/O Variable Get/Set, and required menu History files for copy_fname in ['get_set_io_var.pas', 'HistoryFiles.pas', 'HistoryLazarus.lrs']: src_fname = os.path.join( ref_proj_files, copy_fname ) targ_fname = os.path.join( targ_abs_path, copy_fname ) print 'Copying',src_fname,' --> ',targ_fname shutil.copy(src_fname, targ_fname) # Create Resource File src_fname = os.path.join( ref_proj_files, 'project1.res' )
targ_fname = os.path.join( targ_abs_path, '%s.res'%self.project_name ) print 'Copying',src_fname,' --> ',targ_fname shutil.copy(src_fname, targ_fname) # Create Icon src_fname = os.path.join( ref_proj_files, 'project1.ico' ) targ_fname = os.path.join( targ_abs_path, '%s.ico'%self.project_name ) print 'Copying',src_fname,' --> ',targ_fname shutil.copy(src_fname, targ_fname) # Create *.lpi file (i.e. ProjectOptions, Units, CompilerOptions, Debugging) targ_fname = os.path.join( targ_abs_path, '%s.lpi'%self.project_name ) print 'Saving --> ',targ_fname with open(targ_fname, 'w') as f: f.write( lpi_obj.file_contents() ) # Create *.lps file (i.e. ProjectSession, Units, PathDelim) targ_fname = os.path.join( targ_abs_path, '%s.lps'%self.project_name ) print 'Saving --> ',targ_fname with open(targ_fname, 'w') as f: f.write( lps_obj.file_contents() ) # Create *.lpr file (i.e. Pascal source for overall project) targ_fname = os.path.join( targ_abs_path, '%s.lpr'%self.project_name ) print 'Saving --> ',targ_fname with open(targ_fname, 'w') as f: f.write( lpr_obj.file_contents() ) # Create *.pas and *.lfm for each of the Form units for form in self.formL: targ_fname = os.path.join( targ_abs_path, '%s.pas'%form.unit_name.lower() ) print 'Saving --> ',targ_fname with open(targ_fname, 'w') as f: f.write( form.pas_file_contents() ) targ_fname = os.path.join( targ_abs_path, '%s.lfm'%form.unit_name.lower() ) print 'Saving --> ',targ_fname with open(targ_fname, 'w') as f: f.write( form.lfm_file_contents() ) # Create *.bat file to compile and run project targ_fname = os.path.join( targ_abs_path, '%s.bat'%self.project_name ) print 'Saving --> ',targ_fname with open(targ_fname, 'w') as f: f.write( BAT_FILE_TEMPLATE.format( **self.__dict__ ) ) BAT_FILE_TEMPLATE = """rem delete any existing EXE file del {project_name}.exe lazbuild {project_name}.lpi rem Now try to run the EXE file {project_name}.exe """ if __name__ == '__main__': from form import Form from button import Button from labeled_edit import LabeledEdit from layout import Layout from layout import VStackPanel, HStackPanel Lay = VStackPanel(Left=10, Height=0, Top=10, Width=0, TopMargin=10, RightMargin=10, BottomMargin=10, LeftMargin=10) for i in xrange(3): B = Lay.add_widget( Button( widget_name='DoSompin_%i'%i, Left=41+i*5, Height=25, Top=42+i*5, Width=75+i*5, Caption=None, has_OnClick=True) ) print '#%i) bbox ='%i, B.BBox Lay.add_widget(LabeledEdit( label_text='Enter Diameter', widget_name='GetDiam', initial_value='4.56789012345678905678901234567890',
# -*- coding: utf-8 -*-

import re

from pyload.plugin.Account import Account


class StahnuTo(Account):
    __name = "StahnuTo"
    __type = "account"
    __version = "0.05"

    __description = """StahnuTo account plugin"""
    __license = "GPLv3"
    __authors = [("zoidberg", "zoidberg@mujmail.cz")]


    def loadAccountInfo(self, user, req):
        html = req.load("http://www.stahnu.to/")

        m = re.search(r'>VIP: (\d+.*)<', html)
        trafficleft = self.parseTraffic(m.group(1)) if m else 0

        return {"premium": trafficleft > 512, "trafficleft": trafficleft, "validuntil": -1}


    def login(self, user, data, req):
        html = req.load("http://www.stahnu.to/login.php",
                        post={"username": user,
                              "password": data['password'],
                              "submit": "Login"},
                        decode=True)

        if '<a href="logout.php">' not in html:
            self.wrongPassword()
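# ---------------------------------------------------------------------
# Hedged worked example (illustrative HTML, not captured from the real
# site): the pattern r'>VIP: (\d+.*)<' grabs the traffic string between
# ">VIP: " and the next "<":
#
#   re.search(r'>VIP: (\d+.*)<', '<b>VIP: 25 GB</b>').group(1)  # '25 GB'
#
# parseTraffic() (inherited from Account) converts that string to a
# number, and the account is reported premium when the result exceeds
# 512.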
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Super-resolution Training model. This an import only file to provide training helpers. """ import functools import os import numpy as np import tensorflow as tf from absl import flags from libml import utils, layers from libml.data import as_iterator from libml.train import Model, FLAGS, ModelPro flags.DEFINE_integer('scale', 4, 'Scale by which to increase resolution.') flags.DEFINE_string('downscaler', 'average', 'Downscaling method [average, bicubic].') class EvalSessionPro: def __init__(self, model, checkpoint_dir, **params): self.graph = tf.Graph() with self.graph.as_default(): self.global_step = tf.train.get_or_create_global_step() self.ops = model(**params) ckpt = utils.find_latest_checkpoint(checkpoint_dir, 'stage*/model.ckpt-*.meta') self.sess = tf.train.SingularMonitoredSession(checkpoint_filename_with_path=ckpt) class SRES(Model): """Super-Resolution base class.""" def __init__(self, train_dir, scale, downscaler, **kwargs): self.scale = scale self.downscaler = downscaler Model.__init__(self, train_dir, scale=scale, downscaler=downscaler, **kwargs) def experiment_name(self, **kwargs): args = [x + str(y) for x, y in sorted(kwargs.items()) if x not in {'scale', 'downscaler'}] return os.path.join('%s%dX' % (self.downscaler, self.scale), '_'.join([self.__class__.__name__] + args)) @property def log_scale(self): return utils.ilog2(self.scale) def downscale(self, x, scale=None, order=layers.NCHW): scale = scale or self.scale if scale <= 1: return x if self.downscaler == 'average': return layers.downscale2d(x, scale, order) elif self.downscaler == 'bicubic': return layers.bicubic_downscale2d(x, scale, order) else: raise ValueError('Unknown downscaler "%s"' % self.downscaler) def train_step(self, data, ops): x = next(data) self.sess.run(ops.train_op, feed_dict={ops.x: x['x']}) def make_samples(self, dataset, input_op, sres_op, batch=1, width=8, height=16, feed_extra=None): if 'test_hires' not in self.tmp: with dataset.graph.as_default(): it = iter(as_iterator(dataset.test.batch(width * height).take(1).repeat(), dataset.sess)) self.tmp.test_hires = next(it)['x'] hires = self.tmp.test_hires.copy() with tf.Graph().as_default(), tf.Session() as sess_new: lores = sess_new.run(self.downscale(hires)) pixelated = sess_new.run(layers.upscale2d(lores, self.scale))
images = np.concatenate( [ self.tf_sess.run(sres_op, feed_dict={ input_op:
lores[x:x + batch], **(feed_extra or {})}) for x in range(0, lores.shape[0], batch) ], axis=0) images = images.clip(-1, 1) images = np.concatenate([hires, pixelated, images], axis=3) images = utils.images_to_grid(images.reshape((height, width) + images.shape[1:])) return images def add_summaries(self, dataset, ops, feed_extra=None, **kwargs): del kwargs feed_extra = feed_extra.copy() if feed_extra else {} if 'noise' in ops: feed_extra[ops.noise] = 0 def gen_images(): samples = self.make_samples(dataset, ops.y, ops.sres_op, FLAGS.batch, feed_extra=feed_extra) # Prevent summary scaling, force offset/ratio = 0/1 samples[-1, -1] = (-1, 0, 1) return samples samples = tf.py_func(gen_images, [], [tf.float32]) tf.summary.image('samples', samples) def model(self, latent, **kwargs): raise NotImplementedError class SRESPro(ModelPro, SRES): """Progressive Super-Resolution Setup.""" def eval_mode(self, dataset): assert self.eval is None log_scale = utils.ilog2(self.scale) model = functools.partial(self.model, dataset=dataset, total_steps=1, lod_start=log_scale, lod_stop=log_scale, lod_max=log_scale) self.eval = EvalSessionPro(model, self.checkpoint_dir, **self.params) print('Eval model %s at global_step %d' % (self.__class__.__name__, self.eval.sess.run(self.eval.global_step))) return self.eval def train_step(self, data, lod, ops): x = next(data) self.sess.run(ops.train_op, feed_dict={ops.x: x['x'], ops.lod: lod}) def add_summaries(self, dataset, ops, lod_fn, **kwargs): del kwargs def gen_images(): feed_extra = {ops.lod: lod_fn()} if 'noise' in ops: feed_extra[ops.noise] = 0 samples = self.make_samples(dataset, ops.y, ops.sres_op, FLAGS.batch, feed_extra=feed_extra) # Prevent summary scaling, force offset/ratio = 0/1 samples[-1, -1] = (-1, 0, 1) return samples samples = tf.py_func(gen_images, [], [tf.float32]) tf.summary.image('samples', samples)
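# ---------------------------------------------------------------------
# Hedged sketch (utils.ilog2 is imported from the project's libml above;
# the stand-in below only shows the assumed behavior, not the library
# code): the progressive pipeline works in powers of two, so the scale
# factor is converted to an integer log2, e.g. scale=4 -> log_scale=2.
def _ilog2_sketch(x):
    """Integer log base 2 of a power-of-two scale (assumed behavior)."""
    assert x > 0 and (x & (x - 1)) == 0, 'scale must be a power of two'
    return x.bit_length() - 1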
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Simple agent which chooses a random label.

Chooses from the label candidates if they are available. If candidates are not
available, it repeats the label.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
import random

from parlai.core.agents import Agent


class RandomCandidateAgent(Agent):
    """
    Agent returns random candidate if available or repeats the label.
    """

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """
        Add command line arguments for this agent.
        """
        parser = parser.add_argument_group('RandomCandidateAgent Arguments')
        parser.add_argument(
            '--label_candidates_file',
            type=str,
            default=None,
            help='file of candidate responses to choose from',
        )
        return parser

    def __init__(self, opt, shared=None):
        """
        Initialize this agent.
        """
        super().__init__(opt)
        self.id = 'RandomCandidateAgent'
        random.seed(42)
        if opt.get('label_candidates_file'):
            with open(opt.get('label_candidates_file')) as f:
                self.label_candidates = f.read().split('\n')

    def act(self):
        """
        Generate response to last seen observation.

        Replies with a randomly selected candidate if label_candidates or a
        candidate file are available. Otherwise, replies with the label if they
        are available. Failing that, replies with generic hardcoded responses
        if the agent has not observed any messages or if there are no replies
        to suggest.

        :returns: message dict with reply
        """
        obs = self.observation
        if obs is None:
            return {'text': 'Nothing to reply to yet.'}
        reply = {}
        reply['id'] = self.getID()
        label_candidates = obs.get('label_candidates')
        if hasattr(self, 'label_candidates'):
            # override label candidates with candidate file if set
            label_candidates = self.label_candidates
        if label_candidates:
            label_candidates = list(label_candidates)
            random.shuffle(label_candidates)
            reply['text_candidates'] = label_candidates
            reply['text'] = label_candidates[0]
        else:
            # reply with I don't know.
            reply['text'] = "I don't know."
        return reply
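# ---------------------------------------------------------------------
# Hedged usage sketch (not from ParlAI's test suite; assumes the base
# Agent stores the message passed to observe(), as in ParlAI):
if __name__ == '__main__':
    agent = RandomCandidateAgent({'label_candidates_file': None})
    agent.observe({'text': 'hi', 'label_candidates': ['hello', 'hey']})
    print(agent.act()['text'])  # one of 'hello' / 'hey' (RNG seeded above)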
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.shortcuts import redirect
from main.models import Link
from main.models import Tag

# Create your views here.

def index(request):
    context = RequestContext(request)
    links = Link.objects.all()
    return render_to_response('main/index.html', {'links': links}, context)

def tags(request):
    context = RequestContext(request)
    tags = Tag.objects.all()
    return render_to_response('main/tags.html', {'tags': tags}, context)

def tag(request, tag_name):
    context = RequestContext(request)
    the_tag = Tag.objects.get(name=tag_name)
    links = the_tag.link_set.all()
    return render_to_response('main/index.html',
                              {'links': links, 'tag_name': '#' + tag_name},
                              context)

def add_link(request):
    context = RequestContext(request)
    if request.method == 'POST':
        url = request.POST.get("url", "")
        tags = request.POST.get("tags", "")
        title = request.POST.get("title", "")
        tags = tags.split(',')
        l = Link.objects.get_or_create(title=title, url=url)[0]
        for x in tags:
            l.tags.add(Tag.objects.get_or_create(name=x)[0])
    # non-POST requests (and successful submissions) land on the index
    return redirect(index)
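# ---------------------------------------------------------------------
# Hedged sketch (hypothetical urls.py, not part of this file): one way
# the views above could be wired up; names and patterns are illustrative.
#
#   from django.conf.urls import url
#   from main import views
#
#   urlpatterns = [
#       url(r'^$', views.index, name='index'),
#       url(r'^tags/$', views.tags, name='tags'),
#       url(r'^tag/(?P<tag_name>[\w-]+)/$', views.tag, name='tag'),
#       url(r'^add/$', views.add_link, name='add_link'),
#   ]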
""" Written by Harry Liu (yliu17) and Tyler Nickerson (tjnickerson) """ import sys import os.path import pprint from classes.bag import Bag from classes.item import Item from classes.constraint import Constraint from classes.csp import CSP from classes.solver import Solver def main(): # Read command line arguments args = sys.argv[1:] # More than 1 argument supplied if len(args) > 1: # Get data inputfilename inputfilename = args[0] # Bags bags = {}
# Items items = {} # Section tracker current_section = 0 # Read each lin
e and add to the examples and output lists if os.path.isfile(inputfilename): with open(inputfilename, "r") as infile: for line in infile: # If the line is a comment, increment the section counter if line[:5].strip() == "#####": current_section += 1 else: # Split the line and remove all tabs, newlines, etc. s = [x.strip() for x in line.split(" ")] if current_section == 1: # Items name = s[0] weight = s[1] items[name] = Item(name, weight) elif current_section == 2: # Bags name = s[0] capacity = s[1] bags[name] = Bag(name, capacity) elif current_section == 3: # Fitting limits lower_bound = s[0] upper_bound = s[1] for b in bags: constraint = Constraint( Constraint.BAG_FIT_LIMIT, bags=[bags[b]], min_items=lower_bound, max_items=upper_bound) bags[b].constraints.append(constraint) elif current_section == 4: # Unary inclusive name = s[0] require_bags = [bags[k] for k in s[1:]] constraint = Constraint(Constraint.UNARY_CONSTRAINT_IN_BAGS, items=[ items[name]], bags=require_bags) items[name].constraints.append(constraint) elif current_section == 5: # Unary exclusive name = s[0] reject_bags = [bags[k] for k in s[1:]] constraint = Constraint(Constraint.UNARY_CONSTRAINT_NOT_IN_BAGS, items=[ items[name]], bags=reject_bags) items[name].constraints.append(constraint) elif current_section == 6: # Binary equals item1 = s[0] item2 = s[1] constraint = Constraint(Constraint.BINARY_CONSTRAINT_EQUALITY, items=[ items[item1], items[item2]]) for i in [item1, item2]: items[i].constraints.append(constraint) elif current_section == 7: # Binary not equals item1 = s[0] item2 = s[1] constraint = Constraint(Constraint.BINARY_CONSTRAINT_INEQUALITY, items=[ items[item1], items[item2]]) for i in [item1, item2]: items[i].constraints.append(constraint) elif current_section == 8: # Binary inclusive item1 = s[0] item2 = s[1] value1 = s[2] value2 = s[3] constraint = Constraint(Constraint.BINARY_CONSTRAINT_INCLUSIVITY, items=[ items[item1], items[item2]], bags=[bags[value1], bags[value2]]) items[item1].constraints.append(constraint) items[item2].constraints.append(constraint) csp = CSP(items, bags) solver = Solver() solution = solver.solve(csp) # Output the solution outputfilename = args[1] with open(outputfilename, 'w') as infile: if solution is not None: keys = list(solution.keys()) keys.sort() for bag in keys: total_weight = sum(items[x].weight for x in solution[bag]) infile.write(bag + " " + " ".join(solution[bag]) + "\n") infile.write ("number of items: " + str(len(solution[bag])) + "\n") infile.write ("total weight " + str(total_weight) + "/" + str(bags[bag].capacity) + "\n") infile.write ("wasted capacity: " + str(bags[bag].capacity - total_weight) + "\n") else: infile.write ("No solution!\n") else: # Throw error when cannot open file print("Input file does not exist.") else: # Show usage when not providing enough argument print("Usage: python main.py <inputfilename> <outputfilename") if __name__ == "__main__": main()
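# ---------------------------------------------------------------------
# Hedged illustration (hypothetical input, not a file shipped with the
# code): the parser above counts "#####" comment lines, so the eight
# sections must appear in order -- items, bags, fitting limits, unary
# inclusive, unary exclusive, binary equals, binary not equals, binary
# inclusive -- e.g.:
#
#   ##### items: <name> <weight>
#   A 10
#   ##### bags: <name> <capacity>
#   b1 25
#   ##### fitting limits: <lower> <upper>
#   0 2
#   ##### unary inclusive: <item> <allowed bags...>
#   A b1
#   (the remaining sections follow the same one-line-per-entry layout)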
#!/usr/bin/env python """Dump instances for bunny, in Promela and SlugsIn.""" import argparse import itertools import pprint import logging import re from tugs import utils log = logging.getLogger(__name__) INPUT_FILE = 'bunny.pml' PROMELA_PATH = 'pml/bunny_many_goals_{i}.txt' SLUGSIN_PATH = 'slugsin/bunny_many_goals_{i}.txt' def dump_promela(n, m): """Dump instances of Promela.""" for i in xrange(n, m): code = make_promela(i) promela_file = PROMELA_PATH.format(i=i) with open(promela_file, 'w') as f: f.write(code) log.info('dumped Promela for {i} masters'.format(i=i)) def dump_slugsin(n, m): for i in xrange(n, m):
promela_file = PROMELA_PATH.format(i=i) with open(promela_file, 'r') as f: pml_code = f.read() slugsin_code = utils.translate_promela_to_slugsin(pml_code) slugsin_file = SLUGSIN_PATH.format(i=i) with open(slugsin_file, 'w') as f: f
.write(slugsin_code) log.info('dumped SlugsIn for {i} masters'.format(i=i)) def make_promela(n): """Return Promela code for instance with size `n`.""" fname = INPUT_FILE with open(fname, 'r') as f: s = f.read() # set number of cells newline = '#define H {n}'.format(n=n) code = re.sub('#define H.*', newline, s) newline = '#define W {m}'.format(m=n-1) code = re.sub('#define W.*', newline, code) # add multiple weak fairness assumptions code += form_progress(n) return code def form_progress(n): """Return conjunction of LTL formulae for progress.""" g0 = ('[]<>((x == 0) && (y == {k}))'.format(k=k) for k in xrange(n)) g1 = ('[]<>((x == {n}) && (y == {k}))'.format(k=k, n=n) for k in xrange(n)) c = itertools.chain(g0, g1) prog = ' && '.join(c) return 'assert ltl { ' + prog + ' }' def main(): # log fh = logging.FileHandler('code_generator_log.txt', mode='w') log.addHandler(fh) log.setLevel(logging.DEBUG) # tugs log log1 = logging.getLogger('tugs.utils') log1.addHandler(fh) log1.setLevel(logging.DEBUG) # record env versions = utils.snapshot_versions() log.info(pprint.pformat(versions)) # args p = argparse.ArgumentParser() p.add_argument('--min', type=int, help='from this # of masters') p.add_argument('--max', type=int, help='to this # of masters') args = p.parse_args() n = args.min m = args.max + 1 dump_promela(n, m) dump_slugsin(n, m) if __name__ == '__main__': main()
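# ---------------------------------------------------------------------
# Worked example (derived directly from form_progress above, n = 1):
# g0 yields one goal per row at x == 0 and g1 one per row at x == n,
# so the returned assertion is
#
#   assert ltl { []<>((x == 0) && (y == 0)) && []<>((x == 1) && (y == 0)) }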
tr
y: from setuptools import setup except ImportError: from distutils.core import setup config = { 'description':'End to end solution for bitcoin data gathering, backtesting, and live trading', 'author': 'ross palmer', 'url':'http://rosspalmer.github.io/bitQuant/', 'license':'MIT', 'version': '0.2.10', 'install_requires': ['SQLAlchemy','pandas','numpy','scipy','PyMySQL'], 'packages': ['bitquant'], 'scripts': [], 'name':'bitquant' } setup(**config)
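# ---------------------------------------------------------------------
# Hedged usage note (not part of setup.py itself): with this file in
# the package root, "pip install ." (or "python setup.py install")
# builds and installs the 'bitquant' package together with the listed
# install_requires dependencies.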
# -*- encoding: utf-8 -*-
import os
import subprocess


def graph(
    expr,
    image_format='pdf',
    layout='dot',
    graph_attributes=None,
    node_attributes=None,
    edge_attributes=None,
    **kwargs
    ):
    r'''Graphs `expr` with graphviz and opens resulting image in
    the default image viewer.

    ::

        >>> rtm_syntax = '(3 ((2 (2 1)) 2))'
        >>> rhythm_tree = rhythmtreetools.RhythmTreeParser()(rtm_syntax)[0]
        >>> print(rhythm_tree.pretty_rtm_format)
        (3 (
            (2 (
                2
                1))
            2))

    ::

        >>> topleveltools.graph(rhythm_tree) # doctest: +SKIP

    Returns none.
    '''
    from abjad import abjad_configuration
    from abjad.tools import systemtools
    if isinstance(expr, str):
        graphviz_format = expr
    else:
        assert hasattr(expr, '__graph__')
        graphviz_graph = expr.__graph__(**kwargs)
        if graph_attributes:
            graphviz_graph.attributes.update(graph_attributes)
        if node_attributes:
            graphviz_graph.node_attributes.update(node_attributes)
        if edge_attributes:
            graphviz_graph.edge_attributes.update(edge_attributes)
        graphviz_format = str(graphviz_graph)
    assert image_format in ('pdf', 'png')
    valid_layouts = (
        'circo',
        'dot',
        'fdp',
        'neato',
        'osage',
        'sfdp',
        'twopi',
        )
    assert layout in valid_layouts
    message = 'cannot find `{}` command-line tool.'
    message = message.format(layout)
    message += ' Please download Graphviz from graphviz.org.'
    assert systemtools.IOManager.find_executable(layout), message
    ABJADOUTPUT = abjad_configuration['abjad_output_directory']
    systemtools.IOManager._ensure_directory_existence(ABJADOUTPUT)
    dot_path = os.path.join(
        ABJADOUTPUT,
        systemtools.IOManager.get_next_output_file_name(file_extension='dot'),
        )
    img_path = os.path.join(ABJADOUTPUT, dot_path.replace('dot', image_format))
    with open(dot_path, 'w') as f:
        f.write(graphviz_format)
    command = '{} -v -T{} {} -o {}'
    command = command.format(layout, image_format, dot_path, img_path)
    subprocess.call(command, shell=True)
    pdf_viewer = abjad_configuration['pdf_viewer']
    ABJADOUTPUT = abjad_configuration['abjad_output_directory']
    systemtools.IOManager.open_file(img_path, pdf_viewer)
"""NDG XACML ElementTree based reader for subject match type NERC DataGrid """ __author__ = "P J Kershaw" __date__ = "16/03/10" __copyright__ = "(C) 2010 Science and Technology Facilities Council" __contact__ = "Philip.Kershaw@stfc.ac.uk" __license__ = "BSD - see LICENSE file in top-level directory" __contact__ = "Philip.Kershaw@stfc.ac.uk" __revision__ = "$Id$" from ndg.xacml.core.match import SubjectMatch from ndg.xacml.core.attributedesignator import SubjectAttributeDesignator from ndg.xacml.parsers.etree.matchreader import MatchReaderBase class SubjectMatchReader(Match
ReaderBase): """ElementTree based parser for XACML SubjectMatch @cvar TYPE: XACM
L class type that this reader will read values into @type TYPE: abc.ABCMeta @cvar ATTRIBUTE_DESIGNATOR_TYPE: type for attribute designator sub-elements @type ATTRIBUTE_DESIGNATOR_TYPE: abc.ABCMeta """ TYPE = SubjectMatch ATTRIBUTE_DESIGNATOR_TYPE = SubjectAttributeDesignator
self.assertTrue(zone.domain.startswith('auroradns')) def test_create_zone(self): zone = self.driver.create_zone('example.com') self.assertEqual(zone.domain, 'example.com') def test_get_zone(self): zone = self.driver.get_zone('example.com') self.assertEqual(zone.domain, 'example.com') self.assertEqual(zone.id, 'ffb62570-8414-4578-a346-526b44e320b7') def test_delete_zone(self): zone = self.driver.get_zone('example.com') self.assertTrue(self.driver.delete_zone(zone)) def test_create_record(self): zone = self.driver.get_zone('example.com') record = zone.create_record(name='localhost', type=RecordType.A, data='127.0.0.1', extra={'ttl': 900}) self.assertEqual(record.id, '5592f1ff') self.assertEqual(record.name, 'localhost') self.assertEqual(record.data, '127.0.0.1') self.assertEqual(record.type, RecordType.A) self.assertEqual(record.extra['ttl'], 900) def test_get_record(self): zone = self.driver.get_zone('example.com') record = self.driver.get_record(zone.id, '5592f1ff') self.assertEqual(record.id, '5592f1ff') self.assertEqual(record.name, 'localhost') self.assertEqual(record.data, '127.0.0.1') self.assertEqual(record.type, RecordType.A) self.assertEqual(record.extra['ttl'], 900) self.assertEqual(record.extra['priority'], None) def test_update_record(self): ttl = 900 zone = self.driver.get_zone('example.com') record = self.driver.get_record(zone.id, '5592f1ff') record = record.update(extra={'ttl': ttl}) self.assertEqual(record.extra['ttl'], ttl) def test_delete_record(self): zone = self.driver.get_zone('example.com') record = self.driver.get_record(zone.id, '5592f1ff') self.assertTrue(record.delete()) def test_list_records(self): zone = self.driver.get_zone('example.com') for record in zone.list_records(): self.assertEqual(record.extra['ttl'], 3600) self.assertEqual(record.extra['disabled'], False) def test_get_zone_non_exist(self): try: self.driver.get_zone('nonexists.example.com') self.fail('expected a ZoneDoesNotExistError') except ZoneDoesNotExistError: pass except Exception: raise def test_delete_zone_non_exist(self): try: self.driver.delete_zone(Zone(id=1, domain='nonexists.example.com', type='NATIVE', driver=AuroraDNSDriver, ttl=3600)) self.fail('expected a ZoneDoesNotExistError') except ZoneDoesNotExistError: pass except Exception: raise def test_create_zone_already_exist(self): try: self.driver.create_zone('exists.example.com') self.fail('expected a ZoneAlreadyExistsError') except ZoneAlreadyExistsError: pass except Exception: raise def test_list_records_non_exist(self): try: self.driver.list_records(Zone(id=1, domain='nonexists.example.com', type='NATIVE', driver=AuroraDNSDriver, ttl=3600)) self.fail('expected a ZoneDoesNotExistError') except ZoneDoesNotExistError: pass except Exception: raise def test_get_record_non_exist(self): try: self.driver.get_record(1, 1) self.fail('expected a RecordDoesNotExistError') except RecordDoesNotExistError: pass except Exception: raise def test_creat
e_health_check(self): zone = self.dri
ver.get_zone('example.com') type = AuroraDNSHealthCheckType.HTTP hostname = "www.pcextreme.nl" ipaddress = "109.72.87.252" port = 8080 interval = 10 threshold = 3 check = self.driver.ex_create_healthcheck(zone=zone, type=type, hostname=hostname, port=port, path=None, interval=interval, threshold=threshold, ipaddress=ipaddress) self.assertEqual(check.interval, interval) self.assertEqual(check.threshold, threshold) self.assertEqual(check.port, port) self.assertEqual(check.type, type) self.assertEqual(check.hostname, hostname) self.assertEqual(check.path, "/") self.assertEqual(check.ipaddress, ipaddress) def test_list_health_checks(self): zone = self.driver.get_zone('example.com') checks = self.driver.ex_list_healthchecks(zone) self.assertEqual(len(checks), 3) for check in checks: self.assertEqual(check.interval, 60) self.assertEqual(check.type, AuroraDNSHealthCheckType.HTTP) class AuroraDNSDriverMockHttp(MockHttp): fixtures = DNSFileFixtures('auroradns') def _zones(self, method, url, body, headers): if method == 'POST': body_json = json.loads(body) if body_json['name'] == 'exists.example.com': return (httplib.CONFLICT, body, {}, httplib.responses[httplib.CONFLICT]) body = self.fixtures.load('zone_example_com.json') else: body = self.fixtures.load('zone_list.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _zones_HTTP_FORBIDDEN(self, method, url, body, headers): body = "{}" return (httplib.FORBIDDEN, body, {}, httplib.responses[httplib.FORBIDDEN]) def _zones_example_com(self, method, url, body, headers): body = None if method == 'GET': body = self.fixtures.load('zone_example_com.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _zones_nonexists_example_com(self, method, url, body, headers): return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) def _zones_ffb62570_8414_4578_a346_526b44e320b7(self, method, url, body, headers): body = self.fixtures.load('zone_example_com.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _zones_ffb62570_8414_4578_a346_526b44e320b7_records(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('zone_example_com_record_localhost.json') else: body = self.fixtures.load('zone_example_com_records.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _zones_ffb62570_8414_4578_a346_526b44e320b7_health_checks(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('zone_example_com_health_check.json') else: body = self.fixtures.load('zone_example_com_health_checks.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _zones_1(self, method, url, body, headers): return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) def _zones_1_records(self, method, url, body, headers): return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) def _zones_1_records_1(self, method, url, body, headers): return (httplib.NOT_FOUND, body, {}, ht
# -*- coding: utf-8 -*- # This file is part of Shoop. # # Copyright (c) 2012-2015, Shoop Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. from django.utils import translation import pytest from shoop.front.apps.simple_search.views import get_search_product_ids, SearchView from shoop.testing.fac
tories import get_default_product, get_default_shop, create_product from shoop.testing.utils import appl
y_request_middleware UNLIKELY_STRING = "TJiCrQWaGChYNathovfViXPWO" NO_RESULTS_FOUND_STRING = "No results found" @pytest.mark.django_db def test_simple_search_get_ids_works(rf): prod = get_default_product() bit = prod.name[:5] request = rf.get("/") assert prod.pk in get_search_product_ids(request, bit) assert prod.pk in get_search_product_ids(request, bit) # Should use cache @pytest.mark.django_db def test_simple_search_view_works(rf): view = SearchView.as_view() prod = create_product(sku=UNLIKELY_STRING, shop=get_default_shop()) query = prod.name[:8] # This test is pretty cruddy. TODO: Un-cruddify this test. resp = view(apply_request_middleware(rf.get("/"))) assert query not in resp.rendered_content resp = view(apply_request_middleware(rf.get("/", {"q": query}))) assert query in resp.rendered_content @pytest.mark.django_db def test_simple_search_no_results(rf): with translation.override("xx"): # use built-in translation get_default_shop() view = SearchView.as_view() resp = view(apply_request_middleware(rf.get("/", {"q": UNLIKELY_STRING}))) assert NO_RESULTS_FOUND_STRING in resp.rendered_content resp = view(apply_request_middleware(rf.get("/"))) assert NO_RESULTS_FOUND_STRING not in resp.rendered_content
import sys import numpy import math from foldyFloatList import foldyFloatList class OOBError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) from KMCLib.PluginInterfaces.KMCAnalysisPlugin import KMCAnalysisPlugin from KMCLib.Utilities.CheckUtilities import checkSequenceOfPositiveIntegers from KMCLib.Utilities.CheckUtilities import checkPositiveFloat from KMCLib.Utilities.CheckUtilities import checkPositiveInteger from KMCLib.Exceptions.Error import Error from KMCLib.Backend.Backend import MPICommons class DensHist(KMCAnalysisPlugin): def __init__(self, spec=None, inProc=None, outProc=None): self.__spec = spec msg = "The 'inProc' parameter must be given as a list of relevant input processes." self.__inProc = checkSequenceOfPositiveIntegers(inProc, msg) msg = "The 'outProc' parameter must be given as a list of relevant output processes." self.__outProc = checkSequenceOfPositiveIntegers(outProc, msg) self.__initTime = 0.0 self.__lastTime = 0.0 self.__currentTime = 0.0 def setup(self, step, time, configuration): self.__initTime = time typeList = configuration.types() self.__histSize = len(typeList) self.__histogram = [] for i in range(0, self.__histSize): self.__histogram.append(foldyFloatList()) total = 0 for i in typeList: if i in self.__spec: total += 1 self.__currTot = total self.__lastTime = time self.__currentTime = time def registerStep(self, step, time, configuration): self.__currentTime = time if configuration.latestEventProcess() in self.__inProc: self.__currTot += 1 if configuration.latestEventProcess() in self.__outProc: self.__currTot -= 1 if self.__currTot < 0 or self.__currTot > self.__histSize: raise OOBError(0)
self.__histogram[self.__currTot].addValue(self.__currentTime
- self.__lastTime) self.__lastTime = time def finalize(self): self.__lastTime = self.__currentTime self.__finalHist = [] totalWeight = foldyFloatList() for data in self.__histogram: temp = data.extractSum() totalWeight.addValue(temp) self.__finalHist.append(temp) ovTot = totalWeight.extractSum() for index in range(0, self.__histSize): self.__finalHist[index] = self.__finalHist[index]/ovTot def printResults(self, stream=sys.stdout): if MPICommons.isMaster(): for index in range(0, self.__histSize): stream.write(str(index)+" "+"{:.6E}".format(self.__finalHist[index])+"\n")
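# ---------------------------------------------------------------------
# Hedged sketch (foldyFloatList is imported at the top of this module;
# the stand-in below only illustrates the interface the plugin relies
# on -- addValue() accumulates and extractSum() returns the total --
# and assumes the class exists to sum many small floats accurately):
class _FoldyFloatListSketch(object):

    def __init__(self):
        self._values = []

    def addValue(self, value):
        self._values.append(value)

    def extractSum(self):
        # math.fsum returns an accurately rounded sum, which matters
        # when accumulating many tiny time increments as done above.
        return math.fsum(self._values)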
lic License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see # <http://www.gnu.org/licenses/>. # ######################################################################## import argparse import json import logging from operator import attrgetter import os import re import sys import platform import ctypes import pefile import config from ThreadedCommand import ThreadedCommand LOG = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format="%(asctime)-15s %(message)s") def get_system_info(): platform_info = platform.uname() version_info = sys.getwindowsversion() if sys.version > '3': os_name = "%s %s %s (%s)" % (platform_info.system, platform_info.release, version_info.service_pack, platform_info.machine) os_version = platform_info.version else: os_name = "%s %s %s (%s)" % (platform_info[0], platform_info[2], version_info[4], platform_info[4]) os_version = platform_info[3] return os_name, os_version # courtesy of http://stackoverflow.com/a/16076661 def loword(dword): return dword & 0x0000ffff def hiword(dword): return dword >> 16 def get_product_version(pe): try: ms = pe.VS_FIXEDFILEINFO.ProductVersionMS ls = pe.VS_FIXEDFILEINFO.ProductVersionLS return "%d.%d.%d.%d" % (hiword(ms), loword(ms), hiword(ls), loword(ls)) except AttributeError: return "0.0.0.0" def check_aslr(): # first check for a potentially rebased user32.dll from ctypes import windll from ctypes import wintypes check_dlls = ["user32.dll", "kernel32.dll", "ntdll.dll"] offsets = [] is_aslr = False windll.kernel32.GetModuleHandleW.restype = wintypes.HMODULE windll.kernel32.GetModuleHandleW.argtypes = [wintypes.LPCWSTR] windll.kernel32.GetModuleFileNameW.restype = wintypes.DWORD windll.kernel32.GetModuleFileNameW.argtypes = [wintypes.HANDLE, wintypes.LPWSTR, wintypes.DWORD] for dll_name in check_dlls: h_module_base = windll.kernel32.GetModuleHandleW(dll_name) # next get
the module's file path module_p
ath = ctypes.create_unicode_buffer(255) windll.kernel32.GetModuleFileNameW(h_module_base, module_path, 255) # then the ImageBase from python.exe file pe = pefile.PE(module_path.value) pe_header_base_addr = pe.OPTIONAL_HEADER.ImageBase offsets.append(pe_header_base_addr - h_module_base) for dll_name, offset in zip(check_dlls, offsets): LOG.debug("Memory vs. File ImageBase offset (%s): 0x%x", dll_name, offset) is_aslr |= offset != 0 return is_aslr class DatabaseBuilder(object): def _extractPeExports(self, filepath): try: pe = pefile.PE(filepath) if hasattr(pe, "DIRECTORY_ENTRY_EXPORT"): dll_entry = {} dll_entry["base_address"] = pe.OPTIONAL_HEADER.ImageBase dll_entry["bitness"] = 32 if pe.FILE_HEADER.Machine == 0x14c else 64 dll_entry["version"] = get_product_version(pe) dll_entry["filepath"] = filepath dll_entry["aslr_offset"] = 0 dll_entry["exports"] = [] min_addr = sys.maxsize max_addr = 0 for exp in sorted(pe.DIRECTORY_ENTRY_EXPORT.symbols, key=attrgetter("address")): export_info = {} min_addr = min(pe.OPTIONAL_HEADER.ImageBase + exp.address, min_addr) max_addr = max(pe.OPTIONAL_HEADER.ImageBase + exp.address, max_addr) export_info["address"] = exp.address if exp.name == None: export_info["name"] = "None" else: export_info["name"] = exp.name.decode("utf-8") export_info["ordinal"] = exp.ordinal dll_entry["exports"].append(export_info) return dll_entry except Exception as exc: return None def _buildDllKey(self, dll_info): filename = os.path.basename(dll_info["filepath"]) return "{}_{}_{}_0x{:x}".format(dll_info["bitness"], dll_info["version"], filename, dll_info["base_address"]) def _isInFilter(self, target_dll, filter_dlls): # since we want to maintain compatibility with Python 2.7, we can't casefold - upper+lower should suffice though. for check_dll in filter_dlls: if target_dll.upper().lower() == check_dll.upper().lower(): return True return False def extractRecursively(self, paths, filter_dlls=False): api_count = 0 pe_count = 0 duplicate_count = 0 skipped_count = 0 num_hit_dlls = 0 api_db = {"dlls": {}} if paths is None: paths = config.DEFAULT_FOLDERS for base in paths: if not os.path.isdir(base): LOG.warn("%s is not a directory, skipping...", base) continue for root, _, files in os.walk(base): for fn in files: if filter_dlls and not self._isInFilter(fn, config.DLL_FILTER): skipped_count += 1 continue elif not (fn.lower().endswith(".dll") or fn.lower().endswith(".drv") or fn.lower().endswith(".mui")): continue pe_count += 1 LOG.info("processing: %s %s", root, fn) dll_summary = self._extractPeExports(root + os.sep + fn) if dll_summary is not None: dll_key = self._buildDllKey(dll_summary) if dll_key not in api_db["dlls"]: api_db["dlls"][dll_key] = dll_summary num_hit_dlls += 1 api_count += len(dll_summary["exports"]) LOG.info("APIs: %d", len(dll_summary["exports"])) else: duplicate_count += 1 LOG.info("PEs examined: %d (%d duplicates, %d skipped)", pe_count, duplicate_count, skipped_count) LOG.info("Successfully evaluated %d DLLs with %d APIs", num_hit_dlls, api_count) api_db["os_name"], api_db["os_version"] = get_system_info() api_db["aslr_offsets"] = False api_db["num_dlls"] = num_hit_dlls api_db["num_apis"] = api_count api_db["crawled_paths"] = paths api_db["filtered"] = filter_dlls return api_db def extractAslrOffsets(self, api_db): LOG.info("Now check for ASLR...") if check_aslr(): LOG.info(" looks like ASLR is active, let's extract some offsets!") num_offsets = {32: 0, 64: 0} for dll_key in api_db["dlls"]: dll = api_db["dlls"][dll_key] if dll["bitness"] in [32, 64]: offset = 
self.getAslrOffsetForDll(dll) dll["aslr_offset"] = offset if offset: num_offsets[dll["bitness"]] += 1 LOG.info("Found %d 32bit and %d 64bit ASLR offsets.", num_offsets[32], num_offsets[64]) api_db["aslr_offsets"] = True return api_db def getAslrOffsetForDll(self, dll_entry): this_file = str(os.path.abspath(__file__)) basechecker = "DllBaseChecker{}.exe".format(dll_entry["bitness"]) basechecker_path = os.path.abspath(os.sep.join([this_file, "..", "DllBaseChecker", basecheck
import unittest import transaction from pyramid import testing from .models import DBSession class TestMyViewSuccessCondition(unittest.TestCase): def setUp(self): self.config = testing.setUp() from sqlalchemy import create_engine engine = create_engine('sqlite://') from .models import ( Base, MyModel, ) DBSession.configure(bind=engine) Base.metadata.create_all(engine) with transaction.manager: model = MyModel(name='one', value=55) DBSession.add(model) def tearDown(self): DBSession.remove() testing.tearDown() def test_passing_view(self): from .views import my_view request = testing.DummyRequest() info = my_view(request) self.assertEqual(info['one'].name, 'one') self.assertEqual(info['project'], 'nvb-client') class TestMyViewFailureCondition(unittest.TestCase): def setUp(self): self.config = testing.setUp() from sqlalchemy import create_engine
engine = create_engine('sqlite://') from .models import ( Base, MyModel, ) DBSession.configure(bind=engine) def tearDown(self): DBSession.remove() testing.tearDown() def test_failing_view(self): from .views import my_view request = testing.DummyRe
quest() info = my_view(request) self.assertEqual(info.status_int, 500)
import pytest import re from django.contrib.auth import get_user_model from django.contrib.auth.models import User from django.core.urlresolvers import reverse from shuup.core.models import SavedAddress from shuup.core.models import get_person_contact from shuup.core.models._contacts import get_company_contact from shuup.testing.factories import get_default_shop, get_address from shuup_tests.utils import SmartClient from shuup_tests.utils.fixtures import regular_user, REGULAR_USER_PASSWORD, REGULAR_USER_USERNAME regular_user = regular_user # noqa def default_address_data(): return { "saved_address-title": "Fakerr", "saved_address-role": "1", "saved_address-status": "1", "address-name": "Derpy Test", "address-street": "Derp-street", "address-city": "Los Angeles", "address-region_code": "CA", "address-postal_code": "90000", "address-country": "US", } def initialize_test(regular_user, person=True): client = SmartClient() get_default_shop() if person: contact = get_person_contact(regular_user) else: contact = get_company_contact(regular_user) client.login(username=REGULAR_USER_USERNAME, password=REGULAR_USER_PASSWORD) return client, contact @pytest.mark.django_db def test_addressbook_no_address(regular_user): client, contact = initialize_test(regular_user) addressbook_url = reverse("shuup:address_book") response, soup = client.response_and_soup(addressbook_url) assert not len(soup(text="Name:")) @pytest.mark.django_db def test_addressbook_has_addresses(regular_user): client, contact = initialize_test(regular_user) address = get_address() address.save() billing_name = address.name contact.default_billing_address = address contact.save() addressbook_url = reverse("shuup:address_book") soup = client.soup(addressbook_url) assert len(soup(text="Name:")) == 1 elems = [p for p in soup.find_all("p") if p.text == "Name: %s" % billing_name] assert len(elems) == 1 address = get_address(**{"name": "Kek Bur"}) address.save() shipping_name = address.name contact.default_shipping_address = address contact.save() soup = client.soup(addressbook_url) elems = [p for p in soup.find_all("p") if p.text == "Name: %s" % billing_name] assert len(elems) == 1 assert len(soup(text="Name:")) == 2 elems = [p for p in soup.find_all("p") if p.text == "Name: %s" % shipping_name] assert len(elems) == 1 @pytest.mark.django_db def test_addressbook_has_saved_addresses(regular_user): client, contact = initialize_test(regular_user) address = get_address() address.save() address_title = "TestAddress" sa = SavedAddress.objects.create(owner=contact, address=address, title=address_title) addressbook_url = reverse("shuup:address_book") soup = client.soup(addressbook_url) elems = [h for h in soup.find_all("h2") if h.text.strip() == address_title] assert len(elems) == 1 assert len(soup(text="Name:")) == 1 second_address_title = "TestAddress2" sa = SavedAddress.objects.create(owner=contact, address=address, title=second_address_title) soup = client.soup(addressbook_url) elems = [h for h in soup.find_all("h2") if h.text.strip() == second_address_title] assert len(elems) == 1 assert len(soup(text="Name:")) == 2 @pytest.mark.django_db def test_addressbook_addresses_create_and_edit(regular_user): client, contact = initialize_test(regular_user) new_address_url = reverse("shuup:address_book_new") soup = client.soup(new_address_url) data = default_address_data() response, soup = client.response_and_soup(new_address_url, data, "post") assert response.status_code == 302 assert SavedAddress.objects.count() == 1 assert 
SavedAddress.objects.first().owner == contact addressbook_url = reverse("shuup:address_book") soup = client.soup(addressbook_url) elems = [h for h in soup.find_all("h2") if h.text.strip() == data.get("saved_address-title")] assert len(elems) == 1 assert len(soup(text="Name:")) == 1 new_title = "Test Title" soup = client.soup(new_address_url) data.update({"saved_address-title": new_title}) response, soup = client.response_and_soup(new_address_url, data, "post") assert response.status_code == 302 assert SavedAddress.objects.count() == 2 sa = SavedAddress.objects.last() assert sa.owner == contact assert sa.title == new_title soup = client.soup(addressbook_url) elems = [h for h in soup.find_all("h2") if h.text.strip() == new_title] assert len(elems) == 1 assert len(soup(text="Name:")) == 2 # edit old updated_title = "Updated Title" edit_url = reverse("shuup:address_book_edit", kwargs={"pk": sa.pk}) soup = client.soup(edit_url) data.update({"saved_address-title": updated_title}) response, soup = client.response_and_soup(edit_url, data, "post") assert response.status_code == 302 assert SavedAddress.objects.count() == 2 sa = SavedAddress.objects.last() assert sa.owner == contact assert sa.title == updated_title soup = client.soup(addressbook_url) elems = [h for h in soup.find_all("h2") if h.text.strip() == updated_title] assert len(elems) == 1 assert len(soup(text="Name:")) == 2 @pytest.mark.django_db def delete_address(regular_user): client, contact = initialize_test(regular_user) address = get_address() address.save() sa = SavedAddress.objects.create(owner=contact, address=address) delete_url = reverse("shuup:address_book_delete", kwargs={"pk":sa.pk}) response, soup = client.response_and_soup(delete_url) assert respon
se.status_code == 302 assert "
Cannot remove address" not in soup user = User.objects.create_user('john', 'doe@example.com', 'doepassword') contact2 = get_person_contact(user) address2 = get_address() address2.save() sa2 = SavedAddress.objects.create(owner=contact2, address=address2) response, soup = client.response_and_soup(delete_url) assert response.status_code == 302 assert "Cannot remove address" in soup
format_testing_audio = "audio_pipeline\\test\\test_files\\audio\\tag_test_files" write_testing_audio = "audio_pipeline\\test\\test_files\\audio\\write_test_files" release_mbids = "audio_pipeline/test/test_files/test_mbids/release_mbids.json" artist_mbids = "audio_pipeline/test/test_files/test_mbids/artist_mbids.json" mb_dir = "audio_pipeline/test/test_files/mb_lookups" t1_tags = {'tracktotal': 12, 'album': 'Who Killed...... The Zutons?', 'encoder settings': '-compression-level-5', 'encoder': '(FLAC 1.2.1)', 'albumartist': 'The Zutons', 'label': 'Deltasonic', 'date': '2004-04-19', 'source': 'CD (Lossless)', 'discnumber': 1, 'accurateripdiscid': '012-0011f4ba-00a8233b-8809700c-4', 'batchid': '50024', 'encoded by': 'dBpoweramp Release 14.4', 'title': 'Confusion', 'accurateripresult': 'AccurateRip: Accu
rate (confidence 62) [37DEB629]', 'artist': 'The Zutons', 'tracknumber': 4, 'disctotal': 1, 'genre': 'Rock', 'mbid': '5560ffa9-3824-44f4-b2bf-a96ae4864187', 'length': '0:07', 'item_code': '8b3b7f33-4e8c-4146-90b7-96611863d133', 'obscenity': 'RED DOT', 'send to enco': 'yes', 'rotation status': 'heavy', 'style': 'Bluegrass'} picard_tags = {'tracknumber': 6, 'tot
altracks': 13, 'encoded by': 'dBpoweramp Release 14.4', 'media': 'CD', 'source': 'CD (Lossless)', 'releasestatus': 'official', 'script': 'Latn', 'accurateripresult': 'AccurateRip: Not in database 7CF59426', 'musicbrainz_trackid': '89715e73-cfa8-487f-8aa1-18c3b7d965b9', 'releasecountry': 'GB', 'mbid': '232775fc-277d-46e5-af86-5e01764abe5a', 'musicbrainz_releasetrackid': 'fe85af54-9982-34cc-9e0a-8d4d13a12350', 'disctotal': 1, 'artist': 'Rudi Zygadlo', 'discnumber': 1, 'artists': 'Rudi Zygadlo', 'albumartistsort': 'Zygadlo, Rudi', 'musicbrainz_albumartistid': '48f12b43-153e-42c3-b67c-212372cbfe2b', 'releasetype': 'album', 'batchid': '50024', 'accurateripdiscid': '013-0014462a-00cb7579-bf0a3e0d-6', 'tracktotal': 13, 'catalognumber': 'ZIQ320CD', 'artistsort': 'Zygadlo, Rudi', 'encoder': '(FLAC 1.2.1)', 'musicbrainz_releasegroupid': '06d97cd5-75a4-4ec8-afe3-1127b688c6ee', 'musicbrainz_artistid': '48f12b43-153e-42c3-b67c-212372cbfe2b', 'totaldiscs': 1, 'album': 'Tragicomedies', 'originaldate': '2012-09-17', 'label': 'Planet Mu', 'date': '2012-09-17', 'title': 'The Domino Quivers', 'albumartist': 'Rudi Zygadlo', 'encoder settings': '-compression-level-5', 'originalyear': '2012', 'length': '0:07', 'item_code': '89715e73-cfa8-487f-8aa1-18c3b7d965b9', 'obscenity': 'RED DOT'} unknown_tags = {'accurateripresult': 'AccurateRip: Not in database 7A470C62', 'source': 'CD (Lossless) >> Perfect (Lossless) m4a', 'artist': 'Unknown Artist', 'disctotal': 1, 'tracktotal': 12, 'accurateripdiscid': '012-0010ae26-009c5221-8e08ec0c-4', 'encoded by': 'dBpoweramp Release 14.4', 'encoder': '(FLAC 1.2.1)', 'title': 'Track04', 'tracknumber': 4, 'discnumber': 1, 'length': '0:07'} empty_tags = {}
import os

import numpy as np
import pandas as pd

from ElectionsTools.Seats_assignation import DHondt_assignation
from previous_elections_spain_parser import *

pathfiles = '../data/spain_previous_elections_results/provincia/'
pathfiles = '/'.join(os.path.realpath(__file__).split('/')[:-1] + [pathfiles])
fles = [pathfiles+'PROV_02_197706_1.xlsx', pathfiles+'PROV_02_197903_1.xlsx',
        pathfiles+'PROV_02_198210_1.xlsx', pathfiles+'PROV_02_198606_1.xlsx',
        pathfiles+'PROV_02_198910_1.xlsx', pathfiles+'PROV_02_199306_1.xlsx',
        pathfiles+'PROV_02_199603_1.xlsx', pathfiles+'PROV_02_200003_1.xlsx',
        pathfiles+'PROV_02_200403_1.xlsx', pathfiles+'PROV_02_200803_1.xlsx',
        pathfiles+'PROV_02_201111_1.xlsx']
years = [1977, 1979, 1982, 1986, 1989, 1993, 1996, 2000, 2004, 2008, 2011]


def compute_diputes_DHont(filename):
    ## 1. Parse
    circ, parties, votes, diputes = parse_data_elecciones_esp(filename)
    circ_com, votes_com, dips_com = collapse_by_col(circ, votes, diputes, 0)
    circ_sp, votes_sp, dips_sp = collapse_by_col(circ, votes, diputes, None)
    votes_sp = votes_sp.reshape(1, len(parties))
    ## 2. Assignation objects (provincia, comunidad and whole-country level)
    assign = DHondt_assignation(diputes.sum(1))
    assign1 = DHondt_assignation(dips_com.sum(1))
    assign2 = DHondt_assignation(np.array([dips_sp.sum(0)]))
    ## 3. Compute assignations
    d, price = assign.assignation(pd.DataFrame(votes, columns=parties))
    d1, price1 = assign1.assignation(pd.DataFrame(votes_com, columns=parties))
    d2, price2 = assign2.assignation(pd.DataFrame(votes_sp, columns=parties))
    return d, d1, d2, parties


def prepare2export(d, d1, d2, parties):
    # keep only parties that won at least one seat at some level
    logi = np.logical_or(np.logical_or(d.sum(0) > 0, d1.sum(0) > 0), d2.sum(0) > 0)
    parties = [parties[i] for i in np.where(logi)[0]]
    d, d1, d2 = d[:, logi].sum(0), d1[:, logi].sum(0), d2[:, logi].sum(0)
    return d, d1, d2, parties


def compute_all_year(year):
    filename = fles[years.index(year)]
    d, d1, d2, parties = compute_diputes_DHont(filename)
    exp_d, exp_d1, exp_d2, exp_parties = prepare2export(d, d1, d2, parties)
    return exp_d, exp_d1, exp_d2, exp_parties


def compute_table_all_years(year):
    d1, d2, d3, cols = compute_all_year(year)
    d1, d2, d3 = pd.DataFrame(d1), pd.DataFrame(d2), pd.DataFrame(d3)
    # compute_all_year returns (provincia, comunidad, estado) level results,
    # so the row labels must follow the same order
    ind = ['Dhont_provincia', 'Dhont_comunidad', 'Dhont_estado']
    exp = pd.concat([d1.T, d2.T, d3.T], axis=0)
    exp.columns = cols
    exp.index = ind
    return exp
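
# Minimal usage sketch; assumes the .xlsx files listed in `fles` exist and
# previous_elections_spain_parser is importable. Builds the three-level seat
# table for every election year and stacks them into a single frame.
if __name__ == '__main__':
    tables = {year: compute_table_all_years(year) for year in years}
    print(pd.concat(tables, axis=0))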
#!/usr/bin/env python3
# this copies over all files in admin0:~/stripe/ to the ~/stripe folder in the statistics project
import sys
import os

sys.path.insert(0, os.path.expanduser("~/bin/"))
os.chdir(os.path.join(os.environ['SMC_ROOT'], "smc-build/smc-ansible"))

# host of statistics project
from smc_rethinkdb import project_host
host = project_host("7561f68d-3d97-4530-b97e-68af2fb4ed13")

src = os.path.expanduser("~/stripe/")

# push to the project via ansible and set the permissions
os.system('ansible %s -m copy -a "src=%s dest=/projects/7561f68d-3d97-4530-b97e-68af2fb4ed13/stripe/ owner=1078872008 group=1078872008 mode=u=rw,go=" --become' % (host, src))
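
# Shell-free equivalent of the os.system call above, defined for reference
# only (not invoked, so the copy does not run twice); the helper name is
# ours, not part of the original script:
def push_via_subprocess():
    import subprocess
    subprocess.check_call([
        "ansible", host, "-m", "copy", "--become",
        "-a", "src=%s dest=/projects/7561f68d-3d97-4530-b97e-68af2fb4ed13/stripe/ "
              "owner=1078872008 group=1078872008 mode=u=rw,go=" % src,
    ])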
# Functions and Variables

def cheese_and_crackers(cheese_count, boxes_of_crackers):
    print("You have %d cheeses!" % cheese_count)
    print("You have %d boxes of crackers!" % boxes_of_crackers)
    print("Man that's enough for a party!")
    print("Get a blanket.\n")


print("We can just give the function numbers directly:")
cheese_and_crackers(20, 30)

print("OR, we can use variables from our script:")
amount_of_cheese = 10
amount_of_crackers = 50

cheese_and_crackers(amount_of_cheese, amount_of_crackers)

print("We can even do math inside too:")
cheese_and_crackers(10 + 20, 5 + 6)

print("And we can combine the two, variables and math:")
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
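
# One more call style, beyond the ones in the exercise above: the same
# function invoked with explicit keyword arguments.
print("And we can name the arguments explicitly:")
cheese_and_crackers(cheese_count=amount_of_cheese, boxes_of_crackers=amount_of_crackers)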
import unittest from katas.beta.number_to_bytes import to_bytes class NumberToBytesTestCase(unittest.TestCase): def test_equals(self): self.assertEqual(to_bytes(0), ['00000000']) def test_equals_2(self): self.assertEqual(to_bytes(1), ['00000001']) def test_equals_3(self):
self.assertEqual(to_bytes(257), ['00000001', '00000001']) def test_equals_4(self): self.assertEqual(to_bytes(0x101), ['00000001', '00000001']) def test_equals_
5(self): self.assertEqual(to_bytes(0x000000000101), ['00000001', '00000001']) def test_equals_6(self): self.assertEqual(to_bytes(0xffff), ['11111111', '11111111']) def test_equals_7(self): self.assertEqual(to_bytes(0x1020304), ['00000001', '00000010', '00000011', '00000100'])
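
# The implementation under test lives in katas/beta/number_to_bytes.py, which
# is not shown in this file. A minimal version consistent with the assertions
# above could look like the sketch below; it is kept under a different name so
# it does not shadow the imported to_bytes.
def _reference_to_bytes(n):
    # big-endian 8-bit groups, leading zero bytes dropped (0 keeps one byte)
    n_bytes = max(1, (n.bit_length() + 7) // 8)
    return [format((n >> (8 * i)) & 0xFF, '08b') for i in range(n_bytes - 1, -1, -1)]


if __name__ == '__main__':
    unittest.main()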
# -*- coding: utf-8 -*- from app import cre
ate_app forms_app = create_app()
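
# Minimal usage sketch; assumes create_app() returns a Flask-style app object
# exposing .run() (the `app` package itself is not shown in this file):
if __name__ == "__main__":
    forms_app.run(debug=True)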