code
stringlengths
1
199k
# Cerebrum test configuration.  Every site-specific value is taken from the
# environment so the same test image can run against different databases.
import os

from Cerebrum.default_config import *  # noqa: F401,F403 -- extend the stock defaults

# Database connection parameters supplied by the test harness environment.
CEREBRUM_DATABASE_NAME = os.getenv('DB_NAME')
CEREBRUM_DATABASE_CONNECT_DATA['user'] = os.getenv('DB_USER')
CEREBRUM_DATABASE_CONNECT_DATA['table_owner'] = os.getenv('DB_USER')
CEREBRUM_DATABASE_CONNECT_DATA['host'] = os.getenv('DB_HOST')

# Fixed in-container locations for the SQL DDL files and DB credentials.
CEREBRUM_DDL_DIR = '/src/design'
DB_AUTH_DIR = '/db-auth'

# Logging setup shipped with the test configuration directory.
# NOTE(review): os.getenv('TEST_CONFIG_DIR') returns None when the variable is
# unset, which would make os.path.join raise TypeError -- assumed to always be
# exported by the harness; confirm against the container entrypoint.
LOGGING_CONFIGFILE = os.path.join(os.getenv('TEST_CONFIG_DIR'), 'logging.ini')
""" Mirror a remote ftp subtree into a local directory tree. usage: ftpmirror [-v] [-q] [-i] [-m] [-n] [-r] [-s pat] [-l username [-p passwd [-a account]]] hostname[:port] [remotedir [localdir]] -v: verbose -q: quiet -i: interactive mode -m: macintosh server (NCSA telnet 2.4) (implies -n -s '*.o') -n: don't log in -r: remove local files/directories no longer pertinent -l username [-p passwd [-a account]]: login info (default .netrc or anonymous) -s pat: skip files matching pattern hostname: remote host w/ optional port separated by ':' remotedir: remote directory (default initial) localdir: local directory (default current) """ import os import sys import time import getopt import ftplib import netrc from fnmatch import fnmatch def usage(*args): sys.stdout = sys.stderr for msg in args: print(msg) print(__doc__) sys.exit(2) verbose = 1 # 0 for -q, 2 for -v interactive = 0 mac = 0 rmok = 0 nologin = 0 skippats = ['.', '..', '.mirrorinfo'] def main(): global verbose, interactive, mac, rmok, nologin try: opts, args = getopt.getopt(sys.argv[1:], 'a:bil:mnp:qrs:v') except getopt.error as msg: usage(msg) login = '' passwd = '' account = '' if not args: usage('hostname missing') host = args[0] port = 0 if ':' in host: host, port = host.split(':', 1) port = int(port) try: auth = netrc.netrc().authenticators(host) if auth is not None: login, account, passwd = auth except (netrc.NetrcParseError, IOError): pass for o, a in opts: if o == '-l': login = a if o == '-p': passwd = a if o == '-a': account = a if o == '-v': verbose = verbose + 1 if o == '-q': verbose = 0 if o == '-i': interactive = 1 if o == '-m': mac = 1; nologin = 1; skippats.append('*.o') if o == '-n': nologin = 1 if o == '-r': rmok = 1 if o == '-s': skippats.append(a) remotedir = '' localdir = '' if args[1:]: remotedir = args[1] if args[2:]: localdir = args[2] if args[3:]: usage('too many arguments') # f = ftplib.FTP() if verbose: print("Connecting to '%s%s'..." 
% (host, (port and ":%d"%port or ""))) f.connect(host,port) if not nologin: if verbose: print('Logging in as %r...' % (login or 'anonymous')) f.login(login, passwd, account) if verbose: print('OK.') pwd = f.pwd() if verbose > 1: print('PWD =', repr(pwd)) if remotedir: if verbose > 1: print('cwd(%s)' % repr(remotedir)) f.cwd(remotedir) if verbose > 1: print('OK.') pwd = f.pwd() if verbose > 1: print('PWD =', repr(pwd)) # mirrorsubdir(f, localdir) def mirrorsubdir(f, localdir): pwd = f.pwd() if localdir and not os.path.isdir(localdir): if verbose: print('Creating local directory', repr(localdir)) try: makedir(localdir) except os.error as msg: print("Failed to establish local directory", repr(localdir)) return infofilename = os.path.join(localdir, '.mirrorinfo') try: text = open(infofilename, 'r').read() except IOError as msg: text = '{}' try: info = eval(text) except (SyntaxError, NameError): print('Bad mirror info in', repr(infofilename)) info = {} subdirs = [] listing = [] if verbose: print('Listing remote directory %r...' % (pwd,)) f.retrlines('LIST', listing.append) filesfound = [] for line in listing: if verbose > 1: print('-->', repr(line)) if mac: # Mac listing has just filenames; # trailing / means subdirectory filename = line.strip() mode = '-' if filename[-1:] == '/': filename = filename[:-1] mode = 'd' infostuff = '' else: # Parse, assuming a UNIX listing words = line.split(None, 8) if len(words) < 6: if verbose > 1: print('Skipping short line') continue filename = words[-1].lstrip() i = filename.find(" -> ") if i >= 0: # words[0] had better start with 'l'... 
if verbose > 1: print('Found symbolic link %r' % (filename,)) linkto = filename[i+4:] filename = filename[:i] infostuff = words[-5:-1] mode = words[0] skip = 0 for pat in skippats: if fnmatch(filename, pat): if verbose > 1: print('Skip pattern', repr(pat), end=' ') print('matches', repr(filename)) skip = 1 break if skip: continue if mode[0] == 'd': if verbose > 1: print('Remembering subdirectory', repr(filename)) subdirs.append(filename) continue filesfound.append(filename) if filename in info and info[filename] == infostuff: if verbose > 1: print('Already have this version of',repr(filename)) continue fullname = os.path.join(localdir, filename) tempname = os.path.join(localdir, '@'+filename) if interactive: doit = askabout('file', filename, pwd) if not doit: if filename not in info: info[filename] = 'Not retrieved' continue try: os.unlink(tempname) except os.error: pass if mode[0] == 'l': if verbose: print("Creating symlink %r -> %r" % (filename, linkto)) try: os.symlink(linkto, tempname) except IOError as msg: print("Can't create %r: %s" % (tempname, msg)) continue else: try: fp = open(tempname, 'wb') except IOError as msg: print("Can't create %r: %s" % (tempname, msg)) continue if verbose: print('Retrieving %r from %r as %r...' 
% (filename, pwd, fullname)) if verbose: fp1 = LoggingFile(fp, 1024, sys.stdout) else: fp1 = fp t0 = time.time() try: f.retrbinary('RETR ' + filename, fp1.write, 8*1024) except ftplib.error_perm as msg: print(msg) t1 = time.time() bytes = fp.tell() fp.close() if fp1 != fp: fp1.close() try: os.unlink(fullname) except os.error: pass # Ignore the error try: os.rename(tempname, fullname) except os.error as msg: print("Can't rename %r to %r: %s" % (tempname, fullname, msg)) continue info[filename] = infostuff writedict(info, infofilename) if verbose and mode[0] != 'l': dt = t1 - t0 kbytes = bytes / 1024.0 print(int(round(kbytes)), end=' ') print('Kbytes in', end=' ') print(int(round(dt)), end=' ') print('seconds', end=' ') if t1 > t0: print('(~%d Kbytes/sec)' % \ int(round(kbytes/dt),)) print() # # Remove files from info that are no longer remote deletions = 0 for filename in list(info.keys()): if filename not in filesfound: if verbose: print("Removing obsolete info entry for", end=' ') print(repr(filename), "in", repr(localdir or ".")) del info[filename] deletions = deletions + 1 if deletions: writedict(info, infofilename) # # Remove local files that are no longer in the remote directory try: if not localdir: names = os.listdir(os.curdir) else: names = os.listdir(localdir) except os.error: names = [] for name in names: if name[0] == '.' 
or name in info or name in subdirs: continue skip = 0 for pat in skippats: if fnmatch(name, pat): if verbose > 1: print('Skip pattern', repr(pat), end=' ') print('matches', repr(name)) skip = 1 break if skip: continue fullname = os.path.join(localdir, name) if not rmok: if verbose: print('Local file', repr(fullname), end=' ') print('is no longer pertinent') continue if verbose: print('Removing local file/dir', repr(fullname)) remove(fullname) # # Recursively mirror subdirectories for subdir in subdirs: if interactive: doit = askabout('subdirectory', subdir, pwd) if not doit: continue if verbose: print('Processing subdirectory', repr(subdir)) localsubdir = os.path.join(localdir, subdir) pwd = f.pwd() if verbose > 1: print('Remote directory now:', repr(pwd)) print('Remote cwd', repr(subdir)) try: f.cwd(subdir) except ftplib.error_perm as msg: print("Can't chdir to", repr(subdir), ":", repr(msg)) else: if verbose: print('Mirroring as', repr(localsubdir)) mirrorsubdir(f, localsubdir) if verbose > 1: print('Remote cwd ..') f.cwd('..') newpwd = f.pwd() if newpwd != pwd: print('Ended up in wrong directory after cd + cd ..') print('Giving up now.') break else: if verbose > 1: print('OK.') def remove(fullname): if os.path.isdir(fullname) and not os.path.islink(fullname): try: names = os.listdir(fullname) except os.error: names = [] ok = 1 for name in names: if not remove(os.path.join(fullname, name)): ok = 0 if not ok: return 0 try: os.rmdir(fullname) except os.error as msg: print("Can't remove local directory %r: %s" % (fullname, msg)) return 0 else: try: os.unlink(fullname) except os.error as msg: print("Can't remove local file %r: %s" % (fullname, msg)) return 0 return 1 class LoggingFile: def __init__(self, fp, blocksize, outfp): self.fp = fp self.bytes = 0 self.hashes = 0 self.blocksize = blocksize self.outfp = outfp def write(self, data): self.bytes = self.bytes + len(data) hashes = int(self.bytes) / self.blocksize while hashes > self.hashes: self.outfp.write('#') 
self.outfp.flush() self.hashes = self.hashes + 1 self.fp.write(data) def close(self): self.outfp.write('\n') def askabout(filetype, filename, pwd): prompt = 'Retrieve %s %s from %s ? [ny] ' % (filetype, filename, pwd) while 1: reply = input(prompt).strip().lower() if reply in ['y', 'ye', 'yes']: return 1 if reply in ['', 'n', 'no', 'nop', 'nope']: return 0 print('Please answer yes or no.') def makedir(pathname): if os.path.isdir(pathname): return dirname = os.path.dirname(pathname) if dirname: makedir(dirname) os.mkdir(pathname, 0o777) def writedict(dict, filename): dir, fname = os.path.split(filename) tempname = os.path.join(dir, '@' + fname) backup = os.path.join(dir, fname + '~') try: os.unlink(backup) except os.error: pass fp = open(tempname, 'w') fp.write('{\n') for key, value in list(dict.items()): fp.write('%r: %r,\n' % (key, value)) fp.write('}\n') fp.close() try: os.rename(filename, backup) except os.error: pass os.rename(tempname, filename) if __name__ == '__main__': main()
""" *************************************************************************** ogr2ogrtabletopostgislist.py --------------------- Date : November 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'November 2012' __copyright__ = '(C) 2012, Victor Olaya' __revision__ = '$Format:%H$' from qgis.PyQt.QtCore import QSettings from processing.core.parameters import ParameterString from processing.core.parameters import ParameterTable from processing.core.parameters import ParameterSelection from processing.core.parameters import ParameterBoolean from processing.core.parameters import ParameterTableField from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm from processing.algs.gdal.GdalUtils import GdalUtils from processing.tools.system import isWindows from processing.tools.vector import ogrConnectionString, ogrLayerName class Ogr2OgrTableToPostGisList(GdalAlgorithm): DATABASE = 'DATABASE' INPUT_LAYER = 'INPUT_LAYER' HOST = 'HOST' PORT = 'PORT' USER = 'USER' DBNAME = 'DBNAME' PASSWORD = 'PASSWORD' SCHEMA = 'SCHEMA' TABLE = 'TABLE' PK = 'PK' PRIMARY_KEY = 'PRIMARY_KEY' WHERE = 'WHERE' GT = 'GT' OVERWRITE = 'OVERWRITE' APPEND = 'APPEND' ADDFIELDS = 'ADDFIELDS' LAUNDER = 'LAUNDER' SKIPFAILURES = 'SKIPFAILURES' PRECISION = 'PRECISION' OPTIONS = 'OPTIONS' def dbConnectionNames(self): settings = QSettings() settings.beginGroup('/PostgreSQL/connections/') return settings.childGroups() def defineCharacteristics(self): self.name, self.i18n_name = self.trAlgorithm('Import layer/table as geometryless 
table into PostgreSQL database') self.group, self.i18n_group = self.trAlgorithm('[OGR] Miscellaneous') self.DB_CONNECTIONS = self.dbConnectionNames() self.addParameter(ParameterSelection(self.DATABASE, self.tr('Database (connection name)'), self.DB_CONNECTIONS)) self.addParameter(ParameterTable(self.INPUT_LAYER, self.tr('Input layer'))) self.addParameter(ParameterString(self.SCHEMA, self.tr('Schema name'), 'public', optional=True)) self.addParameter(ParameterString(self.TABLE, self.tr('Table name, leave blank to use input name'), '', optional=True)) self.addParameter(ParameterString(self.PK, self.tr('Primary key'), 'id', optional=True)) self.addParameter(ParameterTableField(self.PRIMARY_KEY, self.tr('Primary key (existing field, used if the above option is left empty)'), self.INPUT_LAYER, optional=True)) self.addParameter(ParameterString(self.WHERE, self.tr('Select features using a SQL "WHERE" statement (Ex: column=\'value\')'), '', optional=True)) self.addParameter(ParameterString(self.GT, self.tr('Group N features per transaction (Default: 20000)'), '', optional=True)) self.addParameter(ParameterBoolean(self.OVERWRITE, self.tr('Overwrite existing table'), True)) self.addParameter(ParameterBoolean(self.APPEND, self.tr('Append to existing table'), False)) self.addParameter(ParameterBoolean(self.ADDFIELDS, self.tr('Append and add new fields to existing table'), False)) self.addParameter(ParameterBoolean(self.LAUNDER, self.tr('Do not launder columns/table names'), False)) self.addParameter(ParameterBoolean(self.SKIPFAILURES, self.tr('Continue after a failure, skipping the failed record'), False)) self.addParameter(ParameterBoolean(self.PRECISION, self.tr('Keep width and precision of input attributes'), True)) self.addParameter(ParameterString(self.OPTIONS, self.tr('Additional creation options'), '', optional=True)) def getConsoleCommands(self): connection = self.DB_CONNECTIONS[self.getParameterValue(self.DATABASE)] settings = QSettings() mySettings = 
'/PostgreSQL/connections/' + connection dbname = settings.value(mySettings + '/database') user = settings.value(mySettings + '/username') host = settings.value(mySettings + '/host') port = settings.value(mySettings + '/port') password = settings.value(mySettings + '/password') inLayer = self.getParameterValue(self.INPUT_LAYER) ogrLayer = ogrConnectionString(inLayer)[1:-1] schema = unicode(self.getParameterValue(self.SCHEMA)) table = unicode(self.getParameterValue(self.TABLE)) pk = unicode(self.getParameterValue(self.PK)) pkstring = "-lco FID=" + pk primary_key = self.getParameterValue(self.PRIMARY_KEY) where = unicode(self.getParameterValue(self.WHERE)) wherestring = '-where "' + where + '"' gt = unicode(self.getParameterValue(self.GT)) overwrite = self.getParameterValue(self.OVERWRITE) append = self.getParameterValue(self.APPEND) addfields = self.getParameterValue(self.ADDFIELDS) launder = self.getParameterValue(self.LAUNDER) launderstring = "-lco LAUNDER=NO" skipfailures = self.getParameterValue(self.SKIPFAILURES) precision = self.getParameterValue(self.PRECISION) options = unicode(self.getParameterValue(self.OPTIONS)) arguments = [] arguments.append('-progress') arguments.append('--config PG_USE_COPY YES') arguments.append('-f') arguments.append('PostgreSQL') arguments.append('PG:"host=') arguments.append(host) arguments.append('port=') arguments.append(port) if len(dbname) > 0: arguments.append('dbname=' + dbname) if len(password) > 0: arguments.append('password=' + password) if len(schema) > 0: arguments.append('active_schema=' + schema) else: arguments.append('active_schema=public') arguments.append('user=' + user + '"') arguments.append(ogrLayer) arguments.append('-nlt NONE') arguments.append(ogrLayerName(inLayer)) if launder: arguments.append(launderstring) if append: arguments.append('-append') if addfields: arguments.append('-addfields') if overwrite: arguments.append('-overwrite') if len(pk) > 0: arguments.append(pkstring) elif primary_key is not None: 
arguments.append("-lco FID=" + primary_key) if len(table) == 0: table = ogrLayerName(inLayer).lower() if schema: table = '{}.{}'.format(schema, table) arguments.append('-nln') arguments.append(table) if skipfailures: arguments.append('-skipfailures') if where: arguments.append(wherestring) if len(gt) > 0: arguments.append('-gt') arguments.append(gt) if not precision: arguments.append('-lco PRECISION=NO') if len(options) > 0: arguments.append(options) commands = [] if isWindows(): commands = ['cmd.exe', '/C ', 'ogr2ogr.exe', GdalUtils.escapeAndJoin(arguments)] else: commands = ['ogr2ogr', GdalUtils.escapeAndJoin(arguments)] return commands def commandName(self): return "ogr2ogr"
import socket


def server_test():
    """Toy TCP server for manual testing.

    Binds to this host on port 12345, greets each client, and shuts down
    when a client sends b"GoodBye".

    Fixes over the original Python-2 version: `print` statements and the
    removed builtin `cmp` don't run under Python 3; sockets carry bytes,
    not str; and the client socket was leaked on the `break` path.
    """
    s = socket.socket()
    host = socket.gethostname()
    port = 12345
    s.bind((host, port))
    s.listen(5)
    while True:
        c, addr = s.accept()
        print(c)
        print('connect addr: ', addr)
        c.send(b'Welcome to CaiNiao!')  # bytes payload required on Python 3
        # `cmp(a, b) == 0` is simply equality.
        if c.recv(1024) == b'GoodBye':
            c.close()  # was leaked when breaking out in the original
            break
        c.close()
    s.close()
from common import Constant
from common import utils
from main.logger_helper import L

__author__ = 'Dan Cristian <dan.cristian@gmail.com>'


def save_to_history_cloud(obj):
    """Prepare a historical record for cloud (plotly) upload.

    `obj` is a published record dict; it is expected to carry the graph
    metadata keys from `Constant` (JSON_PUBLISH_GRAPH_X/_ID/_LEGEND/_SHAPE/_Y
    and JSON_PUBLISH_FIELDS_CHANGED).  Only graphable fields whose values
    actually changed are considered.  The actual upload is currently
    commented out; this function only assembles `field_pairs` and logs.
    Never raises: all errors are logged via the top-level except.
    """
    try:
        L.l.debug('Trying to save historical record to cloud {}'.format(obj))
        if Constant.JSON_PUBLISH_GRAPH_X in obj:
            # name of x field
            axis_x_field = obj[Constant.JSON_PUBLISH_GRAPH_X]
            graph_id_field = obj[Constant.JSON_PUBLISH_GRAPH_ID]
            graph_legend_field = obj[Constant.JSON_PUBLISH_GRAPH_LEGEND]
            graph_shape_fields = obj[Constant.JSON_PUBLISH_GRAPH_SHAPE]
            graph_y_fields = obj[Constant.JSON_PUBLISH_GRAPH_Y]
            # names of fields that have value changed to record smallest amount of data
            changed_fields = obj[Constant.JSON_PUBLISH_FIELDS_CHANGED]
            # intersect lists and get only graphable fields that had values changed
            list_axis_y = list(set(graph_y_fields) & set(changed_fields))
            if len(list_axis_y) == 0:
                L.l.info('Ignoring record save graph={} changed fields={} obj={}'.format(
                    graph_y_fields, changed_fields, obj))
            else:
                L.l.debug('Trying to save y axis {}'.format(list_axis_y))
                if axis_x_field in obj and graph_id_field in obj:
                    table = obj[Constant.JSON_PUBLISH_TABLE]
                    trace_unique_id = obj[graph_id_field]  # unique record/trace identifier
                    x_val = obj[axis_x_field]
                    graph_legend_item_name = obj[graph_legend_field]  # unique key for legend
                    # x axis values are dates; normalise via utils helper.
                    x_val = utils.parse_to_date(x_val)
                    x = x_val
                    index = 0
                    field_pairs = [[axis_x_field, x], [graph_legend_field, graph_legend_item_name],
                                   [Constant.JSON_PUBLISH_RECORD_UUID, obj[Constant.JSON_PUBLISH_RECORD_UUID]],
                                   [Constant.JSON_PUBLISH_SOURCE_HOST, obj[Constant.JSON_PUBLISH_SOURCE_HOST]]]
                    for axis_y in list_axis_y:
                        if axis_y in obj:
                            trace_list = []
                            y = obj[axis_y]
                            # add multiple y values for later save in db as a single record
                            field_pairs.append([axis_y, y])
                            # upload to cloud if plotly is initialised
                            #from cloud import graph_plotly
                            #if graph_plotly.initialised:
                            # from cloud.graph_plotly import graph_plotly_run
                            # Log.logger.info('Uploading to cloud field {}'.format(graph_legend_field))
                            # shape visual type for this trace
                            # shape = graph_shape_fields[index]
                            # unique name used for grid on upload
                            # grid_base_name = str(table)
                            # graph_plotly_run.add_grid_data(grid_unique_name=grid_base_name, x=x, y=y,
                            #                                axis_x_name=axis_x_field, axis_y_name=axis_y,
                            #                                record_unique_id_name=graph_legend_field,
                            #                                record_unique_id_value=graph_legend_item_name)
                            #Log.logger.debug('Skip upload to cloud, plotly not init')
                            index += 1
                else:
                    L.l.critical('Missing history axis_x [{}], graph_id [{}], in obj {}'.format(
                        axis_x_field, graph_id_field, obj))
        else:
            L.l.critical('Missing history axis X field {}'.format(Constant.JSON_PUBLISH_GRAPH_X))
    except Exception as ex:
        L.l.exception('General error saving historical cloud record, err {} obj={}'.format(ex, obj))


def save_to_history_db(obj):
    """Persist a published record into the local history database.

    The destination model class is resolved by name: source table name +
    'History' looked up on `storage.sqalc.models`.  Every key of `obj` that
    matches a model attribute (except 'id') is copied onto a new record.
    Never raises: failures are logged at critical level.
    """
    try:
        table = obj[Constant.JSON_PUBLISH_TABLE]
        # L.l.debug('Trying to save historical record to db={}'.format(table))
        # save to local history DB, append history to source table name
        dest_table = str(table) + 'History'
        # L.l.debug('Saving to local db table {} obj={}'.format(dest_table, obj))
        from storage.sqalc import models
        # http://stackoverflow.com/questions/4030982/initialise-class-object-by-name
        try:
            class_table = getattr(models, dest_table)
            new_record = class_table()
            for field in obj:
                if hasattr(new_record, field) and field != "id":
                    setattr(new_record, field, obj[field])
            if new_record.add_commit_record_to_db():
                # L.l.debug('Saved OK to local db table {} obj={}'.format(dest_table, new_record))
                pass
            else:
                L.l.critical("Cannot save history db record={}".format(obj))
        except Exception as ex:
            L.l.critical("Cannot save history db err={} record={}".format(ex, obj))
    except Exception as ex:
        L.l.exception('General error saving historical db record, err {} obj={}'.format(ex, obj))
import unittest

import test_specify


def main():
    """Assemble the test_specify suite and run it with a verbose text runner."""
    all_tests = unittest.TestSuite((test_specify.suite(),))
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(all_tests)


if __name__ == '__main__':
    main()
import networkx as nx


class BaseTestAttributeMixing(object):
    """Fixture mixin: small graphs whose nodes carry a ``fish`` attribute.

    ``setUp`` builds an undirected graph, a digraph, a multigraph and a
    self-loop graph sharing the same node-attribute layout.
    """

    def _add_fish_nodes(self, graph):
        # Identical attribute layout for every graph flavour.
        graph.add_nodes_from([0, 1], fish='one')
        graph.add_nodes_from([2, 3], fish='two')
        graph.add_nodes_from([4], fish='red')
        graph.add_nodes_from([5], fish='blue')

    def setUp(self):
        self.G = nx.Graph()
        self._add_fish_nodes(self.G)
        self.G.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)])

        self.D = nx.DiGraph()
        self._add_fish_nodes(self.D)
        self.D.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)])

        self.M = nx.MultiGraph()
        self._add_fish_nodes(self.M)
        self.M.add_edges_from([(0, 1), (0, 1), (2, 3)])

        self.S = nx.Graph()
        self._add_fish_nodes(self.S)
        self.S.add_edge(0, 0)
        self.S.add_edge(2, 2)


class BaseTestDegreeMixing(object):
    """Fixture mixin: small graphs for degree-mixing tests."""

    def setUp(self):
        self.P4 = nx.path_graph(4)

        self.D = nx.DiGraph()
        self.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)])

        self.M = nx.MultiGraph()
        self.M.add_path(list(range(4)))
        self.M.add_edge(0, 1)

        self.S = nx.Graph()
        self.S.add_edges_from([(0, 0), (1, 1)])
""" SQLAlchemy support. """ from __future__ import absolute_import import datetime from types import GeneratorType import decimal from sqlalchemy import func from sqlalchemy.orm.collections import InstrumentedList from sqlalchemy.sql.type_api import TypeDecorator try: from sqlalchemy.orm.relationships import RelationshipProperty except ImportError: from sqlalchemy.orm.properties import RelationshipProperty from sqlalchemy.types import ( BIGINT, BOOLEAN, BigInteger, Boolean, CHAR, DATE, DATETIME, DECIMAL, Date, DateTime, FLOAT, Float, INT, INTEGER, Integer, NCHAR, NVARCHAR, NUMERIC, Numeric, SMALLINT, SmallInteger, String, TEXT, TIME, Text, Time, Unicode, UnicodeText, VARCHAR, Enum) from .. import mix_types as t from ..main import ( SKIP_VALUE, LOGGER, TypeMixer as BaseTypeMixer, GenFactory as BaseFactory, Mixer as BaseMixer, partial, faker) class GenFactory(BaseFactory): """ Map a sqlalchemy classes to simple types. """ types = { (String, VARCHAR, Unicode, NVARCHAR, NCHAR, CHAR): str, (Text, UnicodeText, TEXT): t.Text, (Boolean, BOOLEAN): bool, (Date, DATE): datetime.date, (DateTime, DATETIME): datetime.datetime, (Time, TIME): datetime.time, (DECIMAL, Numeric, NUMERIC): decimal.Decimal, (Float, FLOAT): float, (Integer, INTEGER, INT): int, (BigInteger, BIGINT): t.BigInteger, (SmallInteger, SMALLINT): t.SmallInteger, } class TypeMixer(BaseTypeMixer): """ TypeMixer for SQLAlchemy. """ factory = GenFactory def __init__(self, cls, **params): """ Init TypeMixer and save the mapper. """ super(TypeMixer, self).__init__(cls, **params) self.mapper = self.__scheme._sa_class_manager.mapper def postprocess(self, target, postprocess_values): """ Fill postprocess values. 
""" mixed = [] for name, deffered in postprocess_values: value = deffered.value if isinstance(value, GeneratorType): value = next(value) if isinstance(value, t.Mix): mixed.append((name, value)) continue if isinstance(getattr(target, name), InstrumentedList) and not isinstance(value, list): value = [value] setattr(target, name, value) for name, mix in mixed: setattr(target, name, mix & target) if self.__mixer: target = self.__mixer.postprocess(target) return target @staticmethod def get_default(field): """ Get default value from field. :return value: A default value or NO_VALUE """ column = field.scheme if isinstance(column, RelationshipProperty): column = column.local_remote_pairs[0][0] if not column.default: return SKIP_VALUE if column.default.is_callable: return column.default.arg(None) return getattr(column.default, 'arg', SKIP_VALUE) def gen_select(self, field_name, select): """ Select exists value from database. :param field_name: Name of field for generation. :return : None or (name, value) for later use """ if not self.__mixer or not self.__mixer.params.get('session'): return field_name, SKIP_VALUE relation = self.mapper.get_property(field_name) session = self.__mixer.params.get('session') value = session.query( relation.mapper.class_ ).filter(*select.choices).order_by(func.random()).first() return self.get_value(field_name, value) @staticmethod def is_unique(field): """ Return True is field's value should be a unique. :return bool: """ scheme = field.scheme if isinstance(scheme, RelationshipProperty): scheme = scheme.local_remote_pairs[0][0] return scheme.unique @staticmethod def is_required(field): """ Return True is field's value should be defined. :return bool: """ column = field.scheme if isinstance(column, RelationshipProperty): column = column.local_remote_pairs[0][0] if field.params: return True # According to the SQLAlchemy docs, autoincrement "only has an effect for columns which are # Integer derived (i.e. 
INT, SMALLINT, BIGINT) [and] Part of the primary key [...]". return not column.nullable and not (column.autoincrement and column.primary_key and isinstance(column.type, Integer)) def get_value(self, field_name, field_value): """ Get `value` as `field_name`. :return : None or (name, value) for later use """ field = self.__fields.get(field_name) if field and isinstance(field.scheme, RelationshipProperty): return field_name, t._Deffered(field_value, field.scheme) return super(TypeMixer, self).get_value(field_name, field_value) def make_fabric(self, column, field_name=None, fake=False, kwargs=None): # noqa """ Make values fabric for column. :param column: SqlAlchemy column :param field_name: Field name :param fake: Force fake data :return function: """ kwargs = {} if kwargs is None else kwargs if isinstance(column, RelationshipProperty): return partial(type(self)( column.mapper.class_, mixer=self.__mixer, fake=self.__fake, factory=self.__factory ).blend, **kwargs) ftype = type(column.type) # augmented types created with TypeDecorator # don't directly inherit from the base types if TypeDecorator in ftype.__bases__: ftype = ftype.impl stype = self.__factory.cls_to_simple(ftype) if stype is str: fab = super(TypeMixer, self).make_fabric( stype, field_name=field_name, fake=fake, kwargs=kwargs) return lambda: fab()[:column.type.length] if ftype is Enum: return partial(faker.random_element, column.type.enums) return super(TypeMixer, self).make_fabric( stype, field_name=field_name, fake=fake, kwargs=kwargs) def guard(self, *args, **kwargs): """ Look objects in database. :returns: A finded object or False """ try: session = self.__mixer.params.get('session') assert session except (AttributeError, AssertionError): raise ValueError('Cannot make request to DB.') qs = session.query(self.mapper).filter(*args, **kwargs) count = qs.count() if count == 1: return qs.first() if count: return qs.all() return False def reload(self, obj): """ Reload object from database. 
""" try: session = self.__mixer.params.get('session') session.expire(obj) session.refresh(obj) return obj except (AttributeError, AssertionError): raise ValueError('Cannot make request to DB.') def __load_fields(self): """ Prepare SQLALchemyTypeMixer. Select columns and relations for data generation. """ mapper = self.__scheme._sa_class_manager.mapper relations = set() if hasattr(mapper, 'relationships'): for rel in mapper.relationships: relations |= rel.local_columns yield rel.key, t.Field(rel, rel.key) for key, column in mapper.columns.items(): if column not in relations: yield key, t.Field(column, key) class Mixer(BaseMixer): """ Integration with SQLAlchemy. """ type_mixer_cls = TypeMixer def __init__(self, session=None, commit=True, **params): """Initialize the SQLAlchemy Mixer. :param fake: (True) Generate fake data instead of random data. :param session: SQLAlchemy session. Using for commits. :param commit: (True) Commit instance to session after creation. """ super(Mixer, self).__init__(**params) self.params['session'] = session self.params['commit'] = bool(session) and commit def postprocess(self, target): """ Save objects in db. :return value: A generated value """ if self.params.get('commit'): session = self.params.get('session') if not session: LOGGER.warn("'commit' set true but session not initialized.") else: session.add(target) session.commit() return target mixer = Mixer()
import csv import codecs import re import argparse import os from prettytable import PrettyTable report08_schools = {} report08_employees = {} report08_school_employees = {} report16_employee = None report16_absents = {} employee_school_exclusions = {} excluced_schools = list() excluced_employees = dict() def filterAFM(rawAFM): return re.search('=\"(\d*)\"', rawAFM).group(1) def csv_unireader(f, encoding="utf-8"): for row in csv.reader(codecs.iterencode(codecs.iterdecode(f, encoding), "utf-8"), delimiter=';', quotechar='"'): yield [e.decode("utf-8") for e in row] def parseEmployeeExclusionList(reportPath): """ Parses a CSV which in the first column contains the IDs of all employees that need to be excluded from processing :param reportPath: :return: a list of schools ids to exclude """ result = dict() with open(reportPath, 'rb') as report_csvfile: reader = csv_unireader(report_csvfile, encoding='iso8859-7') for row in reader: afm = str(row[0]) afm = afm if len(afm)==9 else '0'+afm result[afm]=(row[1] if len(row)>1 and row[1] != u'' else u'Άγνωστος λόγος εξαίρεσεις') return result def parseSchoolExclusionList(reportPath): """ Parses a CSV which in the first column contains the IDs of all schools that need to be excluded from processing :param reportPath: :return: a list of schools ids to exclude """ result = list() with open(reportPath, 'rb') as report_csvfile: reader = csv_unireader(report_csvfile, encoding='iso8859-7') for row in reader: result.append(row[0]) return result def parseReport16(reportPath='/Users/slavikos/Downloads/CSV_2015-06-03-100905.csv'): """ Parse report 16 (Κατάλογος Εκπαιδευτικών που Απουσιάζουν από Σχολικές Μονάδες) :param reportPath: :return: """ report16_absence_reasons = [u'ΜΑΚΡΟΧΡΟΝΙΑ ΑΔΕΙΑ (>10 ημέρες)',u'ΑΠΟΣΠΑΣΗ ΣΤΟ ΕΞΩΤΕΡΙΚΟ',u'ΑΠΟΣΠΑΣΗ ΣΕ ΦΟΡΕΑ ΥΠ. ΠΑΙΔΕΙΑΣ',u'ΑΠΟΣΠΑΣΗ ΣΕ ΑΛΛΟ ΠΥΣΠΕ / ΠΥΣΔΕ',u'ΑΠΟΣΠΑΣΗ ΣΕ ΦΟΡΕΑ ΕΚΤΟΣ ΥΠ. ΠΑΙΔΕΙΑΣ',u'ΟΛΙΚΗ ΔΙΑΘΕΣΗ ΣΕ ΑΠΟΚΕΝΤΡΩΜΕΝΕΣ ΥΠΗΡΕΣΙΕΣ ΥΠ. 
ΠΑΙΔΕΙΑΣ'] result = {} with open(reportPath, 'rb') as report_csvfile: reader = csv_unireader(report_csvfile, encoding='iso8859-7') firstRow = True for row in reader: if firstRow: # first row contains firstRow = False continue # note that employee with employeeAfm is missing from school schoolId result[filterAFM(row[12])] = { "schoolId": row[6], "reason": "%s (%s)" % (row[22], row[23]) } # check if generally absent (in case of multiple assignments) and insert in report16_absents if row[24] in report16_absence_reasons or unicode(row[24]).startswith(u'ΜΑΚΡΟΧΡΟΝΙΑ ΑΔΕΙΑ (>10 ημέρες)'): report16_absents[filterAFM(row[12])] = row[24] return result def parseReport08(reportPath='/Users/slavikos/Downloads/CSV_2015-06-02-130003.csv'): excluded_school_types = [u'Νηπιαγωγεία'] with open(reportPath, 'rb') as report08_csvfile: spamreader = csv_unireader(report08_csvfile, encoding='iso8859-7') firstRow = True for row in spamreader: if firstRow: firstRow = False continue #exclude some school types if row[4] in excluded_school_types: continue # check if the school id is excluded if row[6] in excluced_schools: continue # get school object schoolObj = report08_schools.get(row[6], None) if not schoolObj: # first time we see that school schoolObj = { 'id': row[6], 'title': row[7], 'email': row[10], 'employees': list() } # add school to dict report08_schools[row[6]] = schoolObj # fetch employee from cache employeeAfm = filterAFM(row[16]) employeeObj = report08_employees.get(employeeAfm, None) if not employeeObj: # first time we see that employee employeeObj = { 'id': row[15] if row[15] else '', 'afm': employeeAfm, 'name': row[19], 'surname': row[18], 'fatherName': row[20], 'specialization': row[28], 'assigments': list() } # add the employee in the dict report08_employees[employeeObj.get('afm')] = employeeObj # add to the school as dict as well schoolObj['employees'].append(employeeObj) else: # employee exists in the report08_employee dict, so add it # (if he does not exist) in the 
schools dict as well if employeeObj not in schoolObj['employees']: schoolObj['employees'].append(employeeObj) assigmentObj = { 'schoolId': schoolObj['id'], 'type': row[33], 'assigment': row[34], 'isMaster': True if row[35] == u'Ναι' else False, 'hours': int(row[44]) if row[44] else 0, # Ώρες Υποχ. Διδακτικού Ωραρίου Υπηρέτησης στο Φορέα 'teachingHours': (int(row[46]) if row[46] else 0) + (int(row[47]) if row[47] else 0), } employeeObj['assigments'].append(assigmentObj) # report08_school_employees[schoolObj['id']].append(assigmentObj) def isExcluded(employeeAfm, schoolId): """ Determines if an employee is excluded from school unit id. If the schoolId is None, then the operation will check the general exclusion list. The operation will return None if the employee is not excluded or a description if the employee should be excluded :param employeeAfm: The employee's AFM :type employeeAfm: str :param schoolId: The school ID to check for exclusion :type schoolId: str :return: None if the employee is not excluded or a description if the employee should be excluded """ if schoolId is None: return excluced_employees.get(employeeAfm, None) if len(employee_school_exclusions) > 0: exclusion = employee_school_exclusions.get(employeeAfm, None) if exclusion: # employee is probably excluded if exclusion.get('schoolId', '') == schoolId: return exclusion.get('reason', u"Άγνωστος λόγος εξαίρεσεις") else: return None else: return None else: return None def processSchool(id, filter0=False): schoolObj = report08_schools.get(id, None) acceptedList = list() rejectedList = list() # fetch school employees, if school is not excluded schoolEmployees = schoolObj.get('employees', list()) if id not in excluced_schools else list() for employee in schoolEmployees: # check if the employee is in the general exclusion list excludedReason = isExcluded(employeeAfm=employee['afm'], schoolId=None) # check if the employee is in the exclusion list (for the given school) if excludedReason is None: 
excludedReason = isExcluded(employeeAfm=employee['afm'], schoolId=schoolObj['id']) if excludedReason: # employee has been excluded rejectedList.append( { 'employee': employee, 'excludedReason': excludedReason, } ) continue if report16_absents and employee['afm'] in report16_absents: # exclude report16_absents from all schools (if they have more than one assignments) continue # some (in our case pe05, pe07) employees may have multiple secondary assignments with equal, more than the main, hours # if this happens, select and enroll them in their main assignment school (as instructed by the ministry of education) foundAssigment = None mainAssigment = None mainAssigmentHours = None assigmentHours = list() if len(employee['assigments']) > 2: for assigment in employee['assigments']: if assigment['assigment'] == u'Από Διάθεση ΠΥΣΠΕ/ΠΥΣΔΕ': mainAssigment = assigment mainAssigmentHours = assigment['hours'] continue else: assigmentHours.append (assigment['hours']) continue maxHours = max(assigmentHours) if assigmentHours.count(maxHours)>1: foundAssigment = mainAssigment # end of multi max assignments primaryAssignemtns = [ u'Από Διάθεση ΠΥΣΠΕ/ΠΥΣΔΕ', u'Απόσπαση (με αίτηση - κύριος φορέας)', u'Οργανικά', u'Οργανικά από Άρση Υπεραριθμίας' ] selectedAssigment = None for assigment in employee['assigments']: if foundAssigment: selectedAssigment = foundAssigment break if not selectedAssigment: selectedAssigment = employee['assigments'][0] continue if assigment['hours'] > selectedAssigment['hours']: # found an assigment with more hours, check the # new assigment selectedAssigment = assigment elif assigment['hours'] == selectedAssigment['hours']: # deal with same hour assignments # selected assigment will be accepted if the type is a primary assignment if assigment['assigment'] in primaryAssignemtns: selectedAssigment = assigment else: pass # we've checked all assignments and we have the selected assignment # in the selectedAssigment variable. 
Check if the assignment references # the current school and the hours attribute is > 0 if selectedAssigment['schoolId'] == id and selectedAssigment['hours'] > 0: if filter0 and selectedAssigment['teachingHours'] == 0: # we've been asked to filter out employees with assignments # in the current school but without teaching hours rejectedList.append({ 'employee': employee, 'excludedReason': u"Αποκλεισμός λόγο μη ανάθεσης διδακτικού έργου στην μονάδα", }) continue # woooo! we have a winner ! acceptedList.append( { 'employee': employee, 'assigment': selectedAssigment, } ) else: # ok, employee is rejected schName = report08_schools.get(selectedAssigment['schoolId'], None)['title'] rejectedList.append( { 'employee': employee, 'excludedReason': u"Τοποθετημένος για '%s' ώρες στην μονάδα '%s' (%s)\n με σχέση '%s'(Σχ.Έργ.: '%s')" % (selectedAssigment['hours'], selectedAssigment['schoolId'], schName, selectedAssigment['assigment'], selectedAssigment['type']), } ) return { 'school' : schoolObj, 'accepted': sorted(acceptedList, key=lambda employee: employee['employee']['surname']), 'rejected': sorted(rejectedList, key=lambda employee: employee['employee']['surname']), } def writeReportToFile(reportName, resultStr, basePath='/tmp', encoding="utf-8"): filePath = os.path.join(basePath, reportName) with codecs.open(filePath, mode="w", encoding=encoding) as textFile: textFile.write(resultStr) return filePath def replace_all(text, dic): for i, j in dic.iteritems(): text = text.replace(i, j) return text def shortenTitle(schName): shortenDic = {u'ΟΛΟΗΜΕΡΟ' : u'ΟΛ', u'ΔΗΜΟΤΙΚΟ' : u'Δ.', u'ΣΧΟΛΕΙΟ' : u'Σ.', u'/' : ''} return replace_all(schName, shortenDic) def printTabularResults(result, includeRejected=False): schoolObj = result.get('school', dict()) resultString = "\n" resultString = resultString + "::::::::::::::::::::::::::::::::::::::::::::::::\n" resultString = resultString + ":: %s - (%s) ::\n" % (schoolObj['title'], schoolObj['id']) resultString = resultString + 
"::::::::::::::::::::::::::::::::::::::::::::::::\n" resultString = resultString + "\n\n" x = PrettyTable(["#","ΑΜ", "ΑΦΜ", u"ΕΠΩΝΥΜΟ", u"ΟΝΟΜΑ", u"ΠΑΤΡΩΝΥΜΟ", u"ΕΙΔΙΚΟΤΗΤΑ", u"ΣΧΕΣΗ ΕΡΓΑΣΙΑΣ", u"ΤΟΠΟΘΕΤΗΣΗ ΣΤΗΝ ΜΟΝΑΔΑ", u"ΩΡΑΡΙΟ", u"ΑΝΑΘΕΣΕΙΣ"]) x.align[u"#"] = "l" x.align[u"ΕΠΩΝΥΜΟ"] = "r" x.align[u"ΟΝΟΜΑ"] = "r" x.align[u"ΠΑΤΡΩΝΥΜΟ"] = "r" x.align[u"ΕΙΔΙΚΟΤΗΤΑ"] = "r" x.align[u"ΣΧΕΣΗ ΕΡΓΑΣΙΑΣ"] = "r" x.align[u"ΤΟΠΟΘΕΤΗΣΗ ΣΤΗΝ ΜΟΝΑΔΑ"] = "r" x.align[u"ΩΡΑΡΙΟ"] = "r" x.align[u"ΑΝΑΘΕΣΕΙΣ"] = "r" counter = 1 for r in result.get('accepted', list()): e = r['employee'] a = r['assigment'] x.add_row([counter, e['id'], e['afm'], e['surname'], e['name'], e['fatherName'], e['specialization'], a['type'], a['assigment'], a['hours'], a['teachingHours']]) counter = counter + 1 resultString = resultString + x.get_string() if includeRejected: x = PrettyTable(["#","ΑΜ", "ΑΦΜ", u"ΕΠΩΝΥΜΟ", u"ΟΝΟΜΑ", u"ΠΑΤΡΩΝΥΜΟ", u"ΕΙΔΙΚΟΤΗΤΑ", u"ΑΠΟΚΛΕΙΣΜΟΣ ΑΠΟ ΨΗΦΟΦΟΡΙΑ"]) x.align[u"#"] = "l" x.align[u"ΕΠΩΝΥΜΟ"] = "r" x.align[u"ΟΝΟΜΑ"] = "r" x.align[u"ΠΑΤΡΩΝΥΜΟ"] = "r" x.align[u"ΕΙΔΙΚΟΤΗΤΑ"] = "r" x.align[u"ΑΠΟΚΛΕΙΣΜΟΣ ΑΠΟ ΨΗΦΟΦΟΡΙΑ"] = "l" counter = 1 for r in result.get('rejected', list()): e = r['employee'] x.add_row([counter, e['id'], e['afm'], e['surname'], e['name'], e['fatherName'], e['specialization'], r['excludedReason'] ]) counter = counter + 1 resultString = resultString + "\n\n" resultString = resultString + u"###############################\n" resultString = resultString + u"##### Λίστα Αποκλεισμένων #####\n" resultString = resultString + u"###############################\n" resultString = resultString + "\n\n" resultString = resultString + x.get_string() return resultString if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-r8', "--report8", help="path to myschool report 8", required=True, type=str) parser.add_argument('-r16', "--report16", help="path to myschool report 16", type=str) parser.add_argument('-se', "--schoolExclusion", help="path to 
school exclusion list", type=str) parser.add_argument('-ee', "--employeeExclusion", help="path to school exclusion list", type=str) parser.add_argument('--schoolId', type=str, help='generate report for the given school id') parser.add_argument('--filter0', action='store_true', default=False, help='filter employees without teaching hour(s)') parser.add_argument('--rejected', action='store_true', default=False, help='print rejected employees in results') parser.add_argument('--outputDir', type=str, help='the base path where output files should be placed') parser.add_argument('--titleFiles', action='store_true', default=False, help='output school titles as filenames') parser.add_argument('--outputEncoding', default='utf-8', help='set output encdoding') args = parser.parse_args() if args.schoolExclusion: # path to school exclusion has been specified, so go and parse excluced_schools = parseSchoolExclusionList(reportPath=args.schoolExclusion) if args.employeeExclusion: excluced_employees = parseEmployeeExclusionList(reportPath=args.employeeExclusion) # parse report 08 as it is mandatory ! parseReport08(reportPath=args.report8) if args.report16: # path to report 16 has been specified, so parse! 
employee_school_exclusions.update(parseReport16(reportPath=args.report16)) if args.schoolId: schoolObj = report08_schools[args.schoolId] result = processSchool(id=args.schoolId, filter0=args.filter0) r = printTabularResults(result, includeRejected=args.rejected) if args.outputDir: outputFileName = shortenTitle(schoolObj['title']) if args.titleFiles else args.schoolId path = writeReportToFile(reportName=("%s.txt" % outputFileName), resultStr=r, basePath=args.outputDir, encoding=args.outputEncoding) print "[*] School '%s' (%s) report has been written to file '%s'" % (args.schoolId,schoolObj['title'], path) else: print r exit() for school in report08_schools: schoolObj = report08_schools[school] result = processSchool(id=school, filter0=args.filter0) r = printTabularResults(result, includeRejected=args.rejected) if args.outputDir: outputFileName = shortenTitle(schoolObj['title']) if args.titleFiles else school path = writeReportToFile(reportName=("%s.txt" % outputFileName), resultStr=r, basePath=args.outputDir, encoding=args.outputEncoding) print "[*] School '%s' (%s) report has been written to file '%s'" % (school,schoolObj['title'], path) else: print r
import bpy
from mathutils import Matrix


def DefQuickParent(inf, out):
    """Bake a temporary parent relation onto the active object/pose bone.

    For every frame in [inf, out) the active object (or active pose bone,
    when the active object is an armature) is keyframed so that it follows
    the other selected object.

    :param inf: first frame (inclusive)
    :param out: last frame (exclusive)
    """
    context = bpy.context
    active = context.object
    # pose bones carry their transform in `matrix`; plain objects in
    # `matrix_world` — this was the only difference between the two
    # duplicated branches of the original implementation
    is_armature = active.type == "ARMATURE"
    matrix_attr = "matrix" if is_armature else "matrix_world"

    # the target is the first selected object that is not the active one
    target = [obj for obj in context.selected_objects if obj != active][0]
    ob = context.active_pose_bone if is_armature else context.object
    target.select = False

    # capture the relative transform at the start frame
    context.scene.frame_set(frame=context.scene.quick_animation_in)
    a = Matrix(target.matrix_world)
    a.invert()
    i = Matrix(getattr(ob, matrix_attr))

    for frame in range(inf, out):
        context.scene.frame_set(frame=frame)
        setattr(ob, matrix_attr, target.matrix_world * a * i)
        bpy.ops.anim.keyframe_insert(type="LocRotScale")


class QuickParent(bpy.types.Operator):
    """Creates a parent from one object to other in a selected frame range"""

    bl_idname = "anim.quick_parent_osc"
    bl_label = "Quick Parent"
    bl_options = {"REGISTER", "UNDO"}

    def execute(self, context):
        DefQuickParent(
            bpy.context.scene.quick_animation_in,
            bpy.context.scene.quick_animation_out,
        )
        return {'FINISHED'}
from __future__ import division, print_function import unittest import inspect import sympy from sympy import symbols import numpy as np from symfit.api import Variable, Parameter, Fit, FitResults, Maximize, Minimize, exp, Likelihood, ln, log, variables, parameters from symfit.functions import Gaussian, Exp import scipy.stats from scipy.optimize import curve_fit from symfit.core.support import sympy_to_scipy, sympy_to_py import matplotlib.pyplot as plt import seaborn class TddInPythonExample(unittest.TestCase): def test_gaussian(self): x0, sig = parameters('x0, sig') x = Variable() new = sympy.exp(-(x - x0)**2/(2*sig**2)) self.assertIsInstance(new, sympy.exp) g = Gaussian(x, x0, sig) self.assertTrue(issubclass(g.__class__, sympy.exp)) def test_callable(self): a, b = parameters('a, b') x, y = variables('x, y') func = a*x**2 + b*y**2 result = func(x=2, y=3, a=3, b=9) self.assertEqual(result, 3*2**2 + 9*3**2) xdata = np.arange(1,10) ydata = np.arange(1,10) result = func(x=ydata, y=ydata, a=3, b=9) self.assertTrue(np.array_equal(result, 3*xdata**2 + 9*ydata**2)) def test_read_only_results(self): """ Fit results should be read-only. Let's try to break this! """ xdata = np.linspace(1,10,10) ydata = 3*xdata**2 a = Parameter(3.0, min=2.75) b = Parameter(2.0, max=2.75) x = Variable('x') new = a*x**b fit = Fit(new, xdata, ydata) fit_result = fit.execute() # Break it! try: fit_result.params = 'hello' except AttributeError: self.assertTrue(True) # desired result else: self.assertNotEqual(fit_result.params, 'hello') try: # Bypass the property getter. This will work, as it set's the instance value of __params. fit_result.__params = 'hello' except AttributeError as foo: self.assertTrue(False) # undesired result else: self.assertNotEqual(fit_result.params, 'hello') # The assginment will have succeeded on the instance because we set it from the outside. # I must admit I don't fully understand why this is allowed and I don't like it. 
# However, the tests below show that it did not influence the class method itself so # fitting still works fine. self.assertEqual(fit_result.__params, 'hello') # Do a second fit and dubble check that we do not overwrtie something crusial. xdata = np.arange(-5, 5, 1) ydata = np.arange(-5, 5, 1) xx, yy = np.meshgrid(xdata, ydata, sparse=False) xdata_coor = np.dstack((xx, yy)) zdata = (2.5*xx**2 + 3.0*yy**2) a = Parameter(2.5, max=2.75) b = Parameter(3.0, min=2.75) x = Variable() y = Variable() new = (a*x**2 + b*y**2) fit_2 = Fit(new, xdata_coor, zdata) fit_result_2 = fit_2.execute() self.assertNotAlmostEqual(fit_result.params.a, fit_result_2.params.a) self.assertAlmostEqual(fit_result.params.a, 3.0) self.assertAlmostEqual(fit_result_2.params.a, 2.5) self.assertNotAlmostEqual(fit_result.params.b, fit_result_2.params.b) self.assertAlmostEqual(fit_result.params.b, 2.0) self.assertAlmostEqual(fit_result_2.params.b, 3.0) def test_fitting(self): xdata = np.linspace(1,10,10) ydata = 3*xdata**2 a = Parameter(3.0) b = Parameter(2.0) x = Variable('x') new = a*x**b fit = Fit(new, xdata, ydata) func = sympy_to_py(new, [x], [a, b]) result = func(xdata, 3, 2) self.assertTrue(np.array_equal(result, ydata)) result = fit.scipy_func(fit.xdata, [3, 2]) self.assertTrue(np.array_equal(result, ydata)) args, varargs, keywords, defaults = inspect.getargspec(func) # self.assertEqual(args, ['x', 'a', 'b']) fit_result = fit.execute() self.assertIsInstance(fit_result, FitResults) self.assertAlmostEqual(fit_result.params.a, 3.0) self.assertAlmostEqual(fit_result.params.b, 2.0) self.assertIsInstance(fit_result.params.a_stdev, float) self.assertIsInstance(fit_result.params.b_stdev, float) self.assertIsInstance(fit_result.r_squared, float) # Test several false ways to access the data. 
self.assertRaises(AttributeError, getattr, *[fit_result.params, 'a_fdska']) self.assertRaises(AttributeError, getattr, *[fit_result.params, 'c']) self.assertRaises(AttributeError, getattr, *[fit_result.params, 'a_stdev_stdev']) self.assertRaises(AttributeError, getattr, *[fit_result.params, 'a_stdev_']) self.assertRaises(AttributeError, getattr, *[fit_result.params, 'a__stdev']) def test_numpy_functions(self): xdata = np.linspace(1,10,10) ydata = 45*np.log(xdata*2) a = Parameter() b = Parameter(value=2.1, fixed=True) x = Variable() new = a*sympy.log(x*b) def test_grid_fitting(self): xdata = np.arange(-5, 5, 1) ydata = np.arange(-5, 5, 1) xx, yy = np.meshgrid(xdata, ydata, sparse=False) xdata_coor = np.dstack((xx, yy)) zdata = (2.5*xx**2 + 3.0*yy**2) a = Parameter(2.5, max=2.75) b = Parameter(3.0, min=2.75) x = Variable() y = Variable() new = (a*x**2 + b*y**2) fit = Fit(new, xdata_coor, zdata) # Test the flatten function for consistency. xdata_coor_flat, zdata_flat = fit._flatten(xdata_coor, zdata) # _flatten transposes such arrays because the variables are in the deepest dimension instead of the first. # This is normally not a problem because all we want from the fit is the correct parameters. self.assertFalse(np.array_equal(zdata, zdata_flat.reshape((10,10)))) self.assertTrue(np.array_equal(zdata, zdata_flat.reshape((10,10)).T)) self.assertFalse(np.array_equal(xdata_coor, xdata_coor_flat.reshape((10,10,2)))) new_xdata = xdata_coor_flat.reshape((2,10,10)).T self.assertTrue(np.array_equal(xdata_coor, new_xdata)) results = fit.execute() self.assertAlmostEqual(results.params.a, 2.5) self.assertAlmostEqual(results.params.b, 3.) 
def test_2D_fitting(self): xdata = np.random.randint(-10, 11, size=(2, 400)) zdata = 2.5*xdata[0]**2 + 7.0*xdata[1]**2 a = Parameter() b = Parameter() x = Variable() y = Variable() new = a*x**2 + b*y**2 fit = Fit(new, xdata, zdata) result = fit.scipy_func(fit.xdata, [2, 3]) import inspect args, varargs, keywords, defaults = inspect.getargspec(fit.scipy_func) self.assertEqual(args, ['x', 'p']) fit_result = fit.execute() self.assertIsInstance(fit_result, FitResults) def test_gaussian_fitting(self): xdata = 2*np.random.rand(10000) - 1 # random betwen [-1, 1] ydata = scipy.stats.norm.pdf(xdata, loc=0.0, scale=1.0) x0 = Parameter() sig = Parameter() A = Parameter() x = Variable() g = A * Gaussian(x, x0, sig) fit = Fit(g, xdata, ydata) fit_result = fit.execute() self.assertAlmostEqual(fit_result.params.A, 0.3989423) self.assertAlmostEqual(np.abs(fit_result.params.sig), 1.0) self.assertAlmostEqual(fit_result.params.x0, 0.0) # raise Exception([i for i in fit_result.params]) sexy = g(x=2.0, **fit_result.params) ugly = g( x=2.0, x0=fit_result.params.x0, A=fit_result.params.A, sig=fit_result.params.sig, ) self.assertEqual(sexy, ugly) def test_2_gaussian_2d_fitting(self): np.random.seed(4242) mean = (0.3, 0.3) # x, y mean 0.6, 0.4 cov = [[0.01**2,0],[0,0.01**2]] data = np.random.multivariate_normal(mean, cov, 1000000) mean = (0.7,0.7) # x, y mean 0.6, 0.4 cov = [[0.01**2,0],[0,0.01**2]] data_2 = np.random.multivariate_normal(mean, cov, 1000000) data = np.vstack((data, data_2)) # Insert them as y,x here as np fucks up cartesian conventions. 
ydata, xedges, yedges = np.histogram2d(data[:,1], data[:,0], bins=100, range=[[0.0, 1.0], [0.0, 1.0]]) xcentres = (xedges[:-1] + xedges[1:]) / 2 ycentres = (yedges[:-1] + yedges[1:]) / 2 # Make a valid grid to match ydata xx, yy = np.meshgrid(xcentres, ycentres, sparse=False) xdata = np.dstack((xx, yy)).T x = Variable() y = Variable() x0_1 = Parameter(0.7, min=0.6, max=0.8) sig_x_1 = Parameter(0.1, min=0.0, max=0.2) y0_1 = Parameter(0.7, min=0.6, max=0.8) sig_y_1 = Parameter(0.1, min=0.0, max=0.2) A_1 = Parameter() g_1 = A_1 * Gaussian(x, x0_1, sig_x_1) * Gaussian(y, y0_1, sig_y_1) x0_2 = Parameter(0.3, min=0.2, max=0.4) sig_x_2 = Parameter(0.1, min=0.0, max=0.2) y0_2 = Parameter(0.3, min=0.2, max=0.4) sig_y_2 = Parameter(0.1, min=0.0, max=0.2) A_2 = Parameter() g_2 = A_2 * Gaussian(x, x0_2, sig_x_2) * Gaussian(y, y0_2, sig_y_2) model = g_1 + g_2 fit = Fit(model, xdata, ydata) fit_result = fit.execute() img = model(x=xx, y=yy, **fit_result.params) img_g_1 = g_1(x=xx, y=yy, **fit_result.params) # Equal up to some precision. Not much obviously. self.assertAlmostEqual(fit_result.params.x0_1, 0.7, 2) self.assertAlmostEqual(fit_result.params.y0_1, 0.7, 2) self.assertAlmostEqual(fit_result.params.x0_2, 0.3, 2) self.assertAlmostEqual(fit_result.params.y0_2, 0.3, 2) def test_gaussian_2d_fitting(self): mean = (0.6,0.4) # x, y mean 0.6, 0.4 cov = [[0.2**2,0],[0,0.1**2]] data = np.random.multivariate_normal(mean, cov, 1000000) # Insert them as y,x here as np fucks up cartesian conventions. ydata, xedges, yedges = np.histogram2d(data[:,0], data[:,1], bins=100, range=[[0.0, 1.0], [0.0, 1.0]]) xcentres = (xedges[:-1] + xedges[1:]) / 2 ycentres = (yedges[:-1] + yedges[1:]) / 2 # Make a valid grid to match ydata xx, yy = np.meshgrid(xcentres, ycentres, sparse=False) xdata = np.dstack((xx, yy)).T # T because np fucks up conventions. 
x0 = Parameter(0.6) sig_x = Parameter(0.2, min=0.0) x = Variable() y0 = Parameter(0.4) sig_y = Parameter(0.1, min=0.0) A = Parameter() y = Variable() g = A * Gaussian(x, x0, sig_x) * Gaussian(y, y0, sig_y) fit = Fit(g, xdata, ydata) fit_result = fit.execute() # Again, the order seems to be swapped for py3k self.assertAlmostEqual(fit_result.params.x0, np.mean(data[:,0]), 3) self.assertAlmostEqual(fit_result.params.y0, np.mean(data[:,1]), 3) self.assertAlmostEqual(np.abs(fit_result.params.sig_x), np.std(data[:,0]), 3) self.assertAlmostEqual(np.abs(fit_result.params.sig_y), np.std(data[:,1]), 3) self.assertGreaterEqual(fit_result.r_squared, 0.99) def test_minimize(self): x = Parameter(-1.) y = Parameter() model = 2*x*y + 2*x - x**2 - 2*y**2 from sympy import Eq, Ge constraints = [ Ge(y - 1, 0), #y - 1 >= 0, Eq(x**3 - y, 0), # x**3 - y == 0, ] # raise Exception(model.atoms(), model.as_ordered_terms()) # self.assertIsInstance(constraints[0], Eq) # Unbounded fit = Maximize(model) fit_result = fit.execute() self.assertAlmostEqual(fit_result.params.y, 1.) self.assertAlmostEqual(fit_result.params.x, 2.) fit = Maximize(model, constraints=constraints) fit_result = fit.execute() self.assertAlmostEqual(fit_result.params.x, 1.00000009) self.assertAlmostEqual(fit_result.params.y, 1.) 
def test_scipy_style(self): def func(x, sign=1.0): """ Objective function """ return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2) def func_deriv(x, sign=1.0): """ Derivative of objective function """ dfdx0 = sign*(-2*x[0] + 2*x[1] + 2) dfdx1 = sign*(2*x[0] - 4*x[1]) return np.array([ dfdx0, dfdx1 ]) cons = ( {'type': 'eq', 'fun' : lambda x: np.array([x[0]**3 - x[1]]), 'jac' : lambda x: np.array([3.0*(x[0]**2.0), -1.0])}, {'type': 'ineq', 'fun' : lambda x: np.array([x[1] - 1]), 'jac' : lambda x: np.array([0.0, 1.0])}) from scipy.optimize import minimize res = minimize(func, [-1.0,1.0], args=(-1.0,), jac=func_deriv, method='SLSQP', options={'disp': True}) res = minimize(func, [-1.0,1.0], args=(-1.0,), jac=func_deriv, constraints=cons, method='SLSQP', options={'disp': True}) def test_likelihood_fitting(self): """ Fit using the likelihood method. """ b = Parameter(4, min=3.0) x = Variable() pdf = (1/b) * exp(- x / b) # Draw 100 points from an exponential distribution. # np.random.seed(100) xdata = np.random.exponential(5, 100000) fit = Likelihood(pdf, xdata) fit_result = fit.execute() self.assertAlmostEqual(fit_result.params.b, 5., 1) def test_parameter_add(self): a = Parameter(value=1.0, min=0.5, max=1.5) b = Parameter(1.0, min=0.0) new = a + b self.assertIsInstance(new, sympy.Add) def test_argument_name(self): a = Parameter() b = Parameter(name='b') c = Parameter(name='d') self.assertEqual(a.name, 'a') self.assertEqual(b.name, 'b') self.assertEqual(c.name, 'd') def test_symbol_add(self): x, y = symbols('x y') new = x + y self.assertIsInstance(new, sympy.Add) def test_evaluate_model(self): A = Parameter() x = Variable() new = A * x ** 2 self.assertEqual(new(x=2, A=2), 8) self.assertNotEqual(new(x=2, A=3), 8) def test_symbol_object_add(self): from sympy.core.symbol import Symbol x = Symbol('x') y = Symbol('y') new = x + y self.assertIsInstance(new, sympy.Add) def test_simple_sigma(self): from symfit.api import Variable, Parameter, Fit t_data = np.array([1.4, 2.1, 
2.6, 3.0, 3.3]) y_data = np.array([10, 20, 30, 40, 50]) sigma = 0.2 n = np.array([5, 3, 8, 15, 30]) sigma_t = sigma / np.sqrt(n) # We now define our model y = Variable() g = Parameter() t_model = (2 * y / g)**0.5 fit = Fit(t_model, y_data, t_data)#, sigma=sigma_t) fit_result = fit.execute() # h_smooth = np.linspace(0,60,100) # t_smooth = t_model(y=h_smooth, **fit_result.params) # Lets with the results from curve_fit, no weights popt_noweights, pcov_noweights = curve_fit(lambda y, p: (2 * y / p)**0.5, y_data, t_data) self.assertAlmostEqual(fit_result.params.g, popt_noweights[0]) self.assertAlmostEqual(fit_result.params.g_stdev, np.sqrt(pcov_noweights[0, 0])) # Same sigma everywere fit = Fit(t_model, y_data, t_data, sigma=0.0031, absolute_sigma=False) fit_result = fit.execute() popt_sameweights, pcov_sameweights = curve_fit(lambda y, p: (2 * y / p)**0.5, y_data, t_data, sigma=0.0031, absolute_sigma=False) self.assertAlmostEqual(fit_result.params.g, popt_sameweights[0], 4) self.assertAlmostEqual(fit_result.params.g_stdev, np.sqrt(pcov_sameweights[0, 0]), 4) # Same weight everywere should be the same as no weight. 
self.assertAlmostEqual(fit_result.params.g, popt_noweights[0], 4) self.assertAlmostEqual(fit_result.params.g_stdev, np.sqrt(pcov_noweights[0, 0]), 4) # Different sigma for every point fit = Fit(t_model, y_data, t_data, sigma=0.1*sigma_t, absolute_sigma=False) fit_result = fit.execute() popt, pcov = curve_fit(lambda y, p: (2 * y / p)**0.5, y_data, t_data, sigma=.1*sigma_t) self.assertAlmostEqual(fit_result.params.g, popt[0]) self.assertAlmostEqual(fit_result.params.g_stdev, np.sqrt(pcov[0, 0])) self.assertAlmostEqual(fit_result.params.g, 9.095, 3) self.assertAlmostEqual(fit_result.params.g_stdev, 0.102, 3) # according to Mathematica def test_error_advanced(self): """ Models an example from the mathematica docs and try's to replicate it: http://reference.wolfram.com/language/howto/FitModelsWithMeasurementErrors.html """ data = [ [0.9, 6.1, 9.5], [3.9, 6., 9.7], [0.3, 2.8, 6.6], [1., 2.2, 5.9], [1.8, 2.4, 7.2], [9., 1.7, 7.], [7.9, 8., 10.4], [4.9, 3.9, 9.], [2.3, 2.6, 7.4], [4.7, 8.4, 10.] ] x, y, z = zip(*data) xy = np.vstack((x, y)) z = np.array(z) errors = np.array([.4, .4, .2, .4, .1, .3, .1, .2, .2, .2]) # raise Exception(xy, z) a = Parameter() b = Parameter(0.9) c = Parameter(5) x = Variable() y = Variable() model = a * log(b * x + c * y) fit = Fit(model, xy, z, absolute_sigma=False) fit_result = fit.execute() print(fit_result) # Same as Mathematica default behavior. self.assertAlmostEqual(fit_result.params.a, 2.9956, 4) self.assertAlmostEqual(fit_result.params.b, 0.563212, 4) self.assertAlmostEqual(fit_result.params.c, 3.59732, 4) self.assertAlmostEqual(fit_result.params.a_stdev, 0.278304, 4) self.assertAlmostEqual(fit_result.params.b_stdev, 0.224107, 4) self.assertAlmostEqual(fit_result.params.c_stdev, 0.980352, 4) fit = Fit(model, xy, z, absolute_sigma=True) fit_result = fit.execute() # Same as Mathematica in Measurement error mode, but without suplying # any errors. 
self.assertAlmostEqual(fit_result.params.a, 2.9956, 4) self.assertAlmostEqual(fit_result.params.b, 0.563212, 4) self.assertAlmostEqual(fit_result.params.c, 3.59732, 4) self.assertAlmostEqual(fit_result.params.a_stdev, 0.643259, 4) self.assertAlmostEqual(fit_result.params.b_stdev, 0.517992, 4) self.assertAlmostEqual(fit_result.params.c_stdev, 2.26594, 4) fit = Fit(model, xy, z, sigma=errors) fit_result = fit.execute() popt, pcov, infodict, errmsg, ier = curve_fit(lambda x_vec, a, b, c: a * np.log(b * x_vec[0] + c * x_vec[1]), xy, z, sigma=errors, absolute_sigma=True, full_output=True) # Same as curve_fit? self.assertAlmostEqual(fit_result.params.a, popt[0], 4) self.assertAlmostEqual(fit_result.params.b, popt[1], 4) self.assertAlmostEqual(fit_result.params.c, popt[2], 4) self.assertAlmostEqual(fit_result.params.a_stdev, np.sqrt(pcov[0,0]), 4) self.assertAlmostEqual(fit_result.params.b_stdev, np.sqrt(pcov[1,1]), 4) self.assertAlmostEqual(fit_result.params.c_stdev, np.sqrt(pcov[2,2]), 4) # Same as Mathematica with MEASUREMENT ERROR self.assertAlmostEqual(fit_result.params.a, 2.68807, 4) self.assertAlmostEqual(fit_result.params.b, 0.941344, 4) self.assertAlmostEqual(fit_result.params.c, 5.01541, 4) self.assertAlmostEqual(fit_result.params.a_stdev, 0.0974628, 4) self.assertAlmostEqual(fit_result.params.b_stdev, 0.247018, 4) self.assertAlmostEqual(fit_result.params.c_stdev, 0.597661, 4) def test_error_analytical(self): """ Test using a case where the analytical answer is known. 
Modeled after: http://nbviewer.ipython.org/urls/gist.github.com/taldcroft/5014170/raw/31e29e235407e4913dc0ec403af7ed524372b612/curve_fit.ipynb """ N = 10000 sigma = 10 xn = np.arange(N, dtype=np.float) yn = np.zeros_like(xn) yn = yn + np.random.normal(size=len(yn), scale=sigma) a = Parameter() model = a fit = Fit(model, xn, yn, sigma=sigma) fit_result = fit.execute() popt, pcov = curve_fit(lambda x, a: a * np.ones_like(x), xn, yn, sigma=sigma, absolute_sigma=True) self.assertAlmostEqual(fit_result.params.a, popt[0], 5) self.assertAlmostEqual(fit_result.params.a_stdev, np.sqrt(np.diag(pcov))[0], 2) fit_no_sigma = Fit(model, xn, yn) fit_result_no_sigma = fit_no_sigma.execute() popt, pcov = curve_fit(lambda x, a: a * np.ones_like(x), xn, yn,) # With or without sigma, the bestfit params should be in agreement in case of equal weights self.assertAlmostEqual(fit_result.params.a, fit_result_no_sigma.params.a, 5) # Since symfit is all about absolute errors, the sigma will not be in agreement self.assertNotEqual(fit_result.params.a_stdev, fit_result_no_sigma.params.a_stdev, 5) self.assertAlmostEqual(fit_result_no_sigma.params.a, popt[0], 5) self.assertAlmostEqual(fit_result_no_sigma.params.a_stdev, pcov[0][0]**0.5, 5) # Analytical answer for mean of N(0,1): mu = 0.0 sigma_mu = sigma/N**0.5 # self.assertAlmostEqual(fit_result.params.a, mu, 5) self.assertAlmostEqual(fit_result.params.a_stdev, sigma_mu, 5) def test_straight_line_analytical(self): """ Test symfit against a straight line, for which the parameters and their uncertainties are known analytically. Assuming equal weights. 
:return: """ data = [[0, 1], [1, 0], [3, 2], [5, 4]] x, y = (np.array(i, dtype='float64') for i in zip(*data)) # x = np.arange(0, 100, 0.1) # np.random.seed(10) # y = 3.0*x + 105.0 + np.random.normal(size=x.shape) dx = x - x.mean() dy = y - y.mean() mean_squared_x = np.mean(x**2) - np.mean(x)**2 mean_xy = np.mean(x * y) - np.mean(x)*np.mean(y) a = mean_xy/mean_squared_x b = y.mean() - a * x.mean() self.assertAlmostEqual(a, 0.694915, 6) # values from Mathematica self.assertAlmostEqual(b, 0.186441, 6) print(a, b) S = np.sum((y - (a*x + b))**2) var_a_exact = S/(len(x) * (len(x) - 2) * mean_squared_x) var_b_exact = var_a_exact*np.mean(x ** 2) a_exact = a b_exact = b # We will now compare these exact results with values from symfit a, b, x_var = Parameter(name='a', value=3.0), Parameter(name='b'), Variable(name='x') model = a*x_var + b fit = Fit(model, x, y, absolute_sigma=False) fit_result = fit.execute() popt, pcov = curve_fit(lambda z, c, d: c * z + d, x, y, Dfun=lambda p, x, y, func: np.transpose([x, np.ones_like(x)])) # Dfun=lambda p, x, y, func: print(p, func, x, y)) # curve_fit self.assertAlmostEqual(a_exact, popt[0], 4) self.assertAlmostEqual(b_exact, popt[1], 4) self.assertAlmostEqual(var_a_exact, pcov[0][0], 6) self.assertAlmostEqual(var_b_exact, pcov[1][1], 6) self.assertAlmostEqual(a_exact, fit_result.params.a, 4) self.assertAlmostEqual(b_exact, fit_result.params.b, 4) self.assertAlmostEqual(var_a_exact**0.5, fit_result.params.a_stdev, 6) self.assertAlmostEqual(var_b_exact**0.5, fit_result.params.b_stdev, 6) if __name__ == '__main__': unittest.main()
from gi.repository import GLib
from gi.repository import Gtk

import xl.unicode
from xl import event, main, plugins, xdg
from xlgui.widgets import common, dialogs
from xl.nls import gettext as _, ngettext

import logging

logger = logging.getLogger(__name__)

name = _('Plugins')
ui = xdg.get_data_path('ui', 'preferences', 'plugin.ui')


class PluginManager(object):
    """
    Gui to manage plugins
    """

    def __init__(self, preferences, builder):
        """
        Initializes the manager
        """
        self.preferences = preferences
        builder.connect_signals(self)
        self.plugins = main.exaile().plugins
        self.message = dialogs.MessageBar(
            parent=builder.get_object('preferences_pane'),
            buttons=Gtk.ButtonsType.CLOSE
        )
        self.message.connect('response', self.on_messagebar_response)
        self.list = builder.get_object('plugin_tree')
        self.enabled_cellrenderer = builder.get_object('enabled_cellrenderer')

        # The reload button is a developer aid; only show it in debug mode.
        if main.exaile().options.Debug:
            reload_cellrenderer = common.ClickableCellRendererPixbuf()
            reload_cellrenderer.props.icon_name = 'view-refresh'
            reload_cellrenderer.props.xalign = 1
            reload_cellrenderer.connect(
                'clicked', self.on_reload_cellrenderer_clicked
            )

            name_column = builder.get_object('name_column')
            name_column.pack_start(reload_cellrenderer, True)
            name_column.add_attribute(reload_cellrenderer, 'visible', 3)

        self.version_label = builder.get_object('version_label')
        self.author_label = builder.get_object('author_label')
        self.name_label = builder.get_object('name_label')
        self.description = builder.get_object('description_view')
        self.model = builder.get_object('model')
        self.filter_model = self.model.filter_new()
        self.show_incompatible_cb = builder.get_object('show_incompatible_cb')
        self.filter_model.set_visible_func(self._model_visible_func)

        selection = self.list.get_selection()
        selection.connect('changed', self.on_selection_changed)
        self._load_plugin_list()

        # Keep the tree's enabled column in sync with plugin state changes
        # triggered elsewhere; the callbacks are removed in on_destroy.
        self._evt_rm1 = event.add_ui_callback(
            self.on_plugin_event, 'plugin_enabled', None, True
        )
        self._evt_rm2 = event.add_ui_callback(
            self.on_plugin_event, 'plugin_disabled', None, False
        )
        self.list.connect('destroy', self.on_destroy)

        GLib.idle_add(selection.select_path, (0,))
        GLib.idle_add(self.list.grab_focus)

    def _load_plugin_list(self):
        """
        Loads the plugin list
        """
        plugins = self.plugins.list_installed_plugins()
        uncategorized = _('Uncategorized')
        plugins_dict = {uncategorized: []}
        failed_list = []

        self.plugin_to_path = {}

        for plugin_name in plugins:
            try:
                info = self.plugins.get_plugin_info(plugin_name)

                compatible = self.plugins.is_compatible(info)
                broken = self.plugins.is_potentially_broken(info)
            except Exception:
                failed_list += [plugin_name]
                continue

            # determine icon to show
            if not compatible:
                icon = 'dialog-error'
            elif broken:
                icon = 'dialog-warning'
            else:
                icon = None

            enabled = plugin_name in self.plugins.enabled_plugins
            plugin_data = (
                plugin_name,
                info['Name'],
                str(info['Version']),
                enabled,
                icon,
                broken,
                compatible,
                True,
            )

            if 'Category' in info:
                cat = plugins_dict.setdefault(info['Category'], [])
                cat.append(plugin_data)
            else:
                plugins_dict[uncategorized].append(plugin_data)

        self.list.set_model(None)
        self.model.clear()

        def categorykey(item):
            if item[0] == uncategorized:
                # Sort the catch-all category last.
                return '\xff' * 10
            return xl.unicode.strxfrm(item[0])

        # FIX: use items() instead of the Python-2-only iteritems(); the
        # result of sorted() is identical and this also works on Python 3.
        plugins_dict = sorted(plugins_dict.items(), key=categorykey)

        for category, plugins_list in plugins_dict:
            plugins_list.sort(key=lambda x: xl.unicode.strxfrm(x[1]))

            it = self.model.append(
                None, (None, category, '', False, '', False, True, False)
            )

            for plugin_data in plugins_list:
                pit = self.model.append(it, plugin_data)
                path = self.model.get_string_from_iter(pit)
                self.plugin_to_path[plugin_data[0]] = path

        self.list.set_model(self.filter_model)

        # TODO: Keep track of which categories are already expanded, and only expand those
        self.list.expand_all()

        if failed_list:
            self.message.show_error(
                _('Could not load plugin info!'),
                ngettext('Failed plugin: %s', 'Failed plugins: %s',
                         len(failed_list)) % ', '.join(failed_list),
            )

    def on_destroy(self, widget):
        # Remove the plugin_enabled/plugin_disabled callbacks registered in
        # __init__ so this object can be garbage collected.
        self._evt_rm1()
        self._evt_rm2()

    def on_messagebar_response(self, widget, response):
        """
        Hides the messagebar if requested
        """
        if response == Gtk.ResponseType.CLOSE:
            widget.hide()

    def on_plugin_tree_row_activated(self, tree, path, column):
        """
        Enables or disables the selected plugin
        """
        self.enabled_cellrenderer.emit('toggled', path[0])

    def on_reload_cellrenderer_clicked(self, cellrenderer, path):
        """
        Reloads a plugin from scratch
        """
        plugin_name = self.filter_model[path][0]
        enabled = self.filter_model[path][3]

        if enabled:
            try:
                self.plugins.disable_plugin(plugin_name)
            except Exception as e:
                self.message.show_error(_('Could not disable plugin!'), str(e))
                return

        logger.info('Reloading plugin %s...', plugin_name)
        self.plugins.load_plugin(plugin_name, reload_plugin=True)

        if enabled:
            try:
                self.plugins.enable_plugin(plugin_name)
            except Exception as e:
                self.message.show_error(_('Could not enable plugin!'), str(e))
                return

    def on_install_plugin_button_clicked(self, button):
        """
        Shows a dialog allowing the user to choose a plugin to install
        from the filesystem
        """
        dialog = Gtk.FileChooserDialog(
            _('Choose a Plugin'),
            self.preferences.parent,
            buttons=(
                Gtk.STOCK_CANCEL,
                Gtk.ResponseType.CANCEL,
                Gtk.STOCK_ADD,
                Gtk.ResponseType.OK,
            ),
        )
        # FIX: renamed locals so the builtin filter() is not shadowed.
        archive_filter = Gtk.FileFilter()
        archive_filter.set_name(_('Plugin Archives'))
        archive_filter.add_pattern("*.exz")
        archive_filter.add_pattern("*.tar.gz")
        archive_filter.add_pattern("*.tar.bz2")
        dialog.add_filter(archive_filter)

        all_filter = Gtk.FileFilter()
        all_filter.set_name(_('All Files'))
        all_filter.add_pattern('*')
        dialog.add_filter(all_filter)

        result = dialog.run()
        dialog.hide()

        if result == Gtk.ResponseType.OK:
            try:
                self.plugins.install_plugin(dialog.get_filename())
            except plugins.InvalidPluginError as e:
                self.message.show_error(
                    _('Plugin file installation failed!'), str(e)
                )
                return

            self._load_plugin_list()

    def on_selection_changed(self, selection, user_data=None):
        """
        Called when a row is selected
        """
        model, paths = selection.get_selected_rows()
        if not paths:
            return

        row = model[paths[0]]
        # Column 7 is False for category header rows: clear the info pane.
        if not row[7]:
            self.author_label.set_label('')
            self.description.get_buffer().set_text('')
            self.name_label.set_label('')
            return

        info = self.plugins.get_plugin_info(row[0])

        self.author_label.set_label(",\n".join(info['Authors']))
        self.description.get_buffer().set_text(
            info['Description'].replace(r'\n', "\n")
        )
        self.name_label.set_markup(
            "<b>%s</b> <small>%s</small>" % (info['Name'], info['Version'])
        )

    def on_enabled_cellrenderer_toggled(self, cellrenderer, path):
        """
        Called when the checkbox is toggled
        """
        path = Gtk.TreePath.new_from_string(path)
        plugin_name = self.filter_model[path][0]
        if plugin_name is None:
            # Category header row: nothing to toggle.
            return

        enable = not self.filter_model[path][3]

        if enable:
            try:
                self.plugins.enable_plugin(plugin_name)
            except Exception as e:
                self.message.show_error(_('Could not enable plugin!'), str(e))
                return
        else:
            try:
                self.plugins.disable_plugin(plugin_name)
            except Exception as e:
                self.message.show_error(_('Could not disable plugin!'), str(e))
                return

        self.on_selection_changed(self.list.get_selection())

    def on_plugin_event(self, evtname, obj, plugin_name, enabled):
        # Reload preference pages if the plugin provides one, then mirror the
        # new enabled state into the tree model.
        if hasattr(self.plugins.loaded_plugins[plugin_name],
                   'get_preferences_pane'):
            self.preferences._load_plugin_pages()

        path = self.plugin_to_path[plugin_name]
        self.model[path][3] = enabled

    def on_show_incompatible_cb_toggled(self, widget):
        self.filter_model.refilter()

    def _model_visible_func(self, model, iter, data):
        # Hide incompatible plugins unless the user asked to see them.
        row = model[iter]
        compatible = row[6]

        return compatible or self.show_incompatible_cb.get_active()


def init(preferences, xml):
    PluginManager(preferences, xml)
__doc__="""FSP3000R7NCU FSP3000R7NCU is a component of a FSP3000R7Device Device """ from ZenPacks.Merit.AdvaFSP3000R7.lib.FSP3000R7Component import * import logging log = logging.getLogger('FSP3000R7NCU') class FSP3000R7NCU(FSP3000R7Component): """FSP3000R7NCU object""" portal_type = meta_type = 'FSP3000R7NCU' _relations = (("FSP3000R7Dev", ToOne(ToManyCont, "ZenPacks.Merit.AdvaFSP3000R7.FSP3000R7Device", "FSP3000R7Ncu")), ) InitializeClass(FSP3000R7NCU)
import re from random import randint from helpers.orm import Scores from helpers.command import Command def pluralize(s, n): if n == 1: return s else: return s + 's' @Command('score', ['config', 'db', 'botnick']) def cmd(send, msg, args): """Gets scores. Syntax: {command} <--high|--low|nick> """ if not args['config']['feature'].getboolean('hooks'): send("Hooks are disabled, and this command depends on hooks. Please contact the bot admin(s).") return session = args['db'] match = re.match('--(.+)', msg) if match: if match.group(1) == 'high': data = session.query(Scores).order_by(Scores.score.desc()).limit(3).all() send('High Scores:') for x in data: send("%s: %s" % (x.nick, x.score)) elif match.group(1) == 'low': data = session.query(Scores).order_by(Scores.score).limit(3).all() send('Low Scores:') for x in data: send("%s: %s" % (x.nick, x.score)) else: send("%s is not a valid flag" % match.group(1)) return matches = re.findall('(%s+)' % args['config']['core']['nickregex'], msg) if matches: for match in matches: name = match.lower() if name == 'c': send("We all know you love C better than anything else, so why rub it in?") return score = session.query(Scores).filter(Scores.nick == name).scalar() if score is not None: if name == args['botnick'].lower(): output = 'has %s %s! :)' % (score.score, pluralize('point', score.score)) send(output, 'action') else: send("%s has %i %s!" % (name, score.score, pluralize('point', score.score))) else: send("Nobody cares about %s" % name) elif msg: send("Invalid nick") else: count = session.query(Scores).count() if count == 0: send("Nobody cares about anything =(") else: randid = randint(1, count) query = session.query(Scores).get(randid) send("%s has %i %s!" % (query.nick, query.score, pluralize('point', query.score)))
import logging
import time
import types

from autotest.client.shared import error
from virttest import utils_misc, utils_test, aexpect


def run(test, params, env):
    """
    KVM migration test:
    1) Get a live VM and clone it.
    2) Verify that the source VM supports migration.  If it does, proceed with
            the test.
    3) Send a migration command to the source VM and wait until it's finished.
    4) Kill off the source VM.
    3) Log into the destination VM after the migration is finished.
    4) Compare the output of a reference command executed on the source with
            the output of the same command on the destination machine.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def guest_stress_start(guest_stress_test):
        """
        Start a stress test in guest, Could be 'iozone', 'dd', 'stress'

        :param type: type of stress test.
        """
        from tests import autotest_control
        timeout = 0

        if guest_stress_test == "autotest":
            test_type = params.get("test_type")
            func = autotest_control.run_autotest_control
            new_params = params.copy()
            new_params["test_control_file"] = "%s.control" % test_type
            args = (test, new_params, env)
            timeout = 60
        elif guest_stress_test == "dd":
            # FIX: env.get_vm takes only the VM name; the original passed env
            # as an extra positional argument (env.get_vm(env, name)), which
            # is a TypeError when this branch runs.
            vm = env.get_vm(params.get("main_vm"))
            vm.verify_alive()
            session = vm.wait_for_login(timeout=login_timeout)
            func = session.cmd_output
            args = ("for((;;)) do dd if=/dev/zero of=/tmp/test bs=5M "
                    "count=100; rm -f /tmp/test; done",
                    login_timeout, logging.info)

        logging.info("Start %s test in guest", guest_stress_test)
        bg = utils_test.BackgroundTest(func, args)
        params["guest_stress_test_pid"] = bg
        bg.start()
        if timeout:
            logging.info("sleep %ds waiting guest test start.", timeout)
            time.sleep(timeout)
        if not bg.is_alive():
            raise error.TestFail("Failed to start guest test!")

    def guest_stress_deamon():
        """
        This deamon will keep watch the status of stress in guest.
        If the stress program is finished before migration this will
        restart it.
        """
        while True:
            bg = params.get("guest_stress_test_pid")
            action = params.get("action")
            if action == "run":
                logging.debug("Check if guest stress is still running")
                guest_stress_test = params.get("guest_stress_test")
                if bg and not bg.is_alive():
                    logging.debug("Stress process finished, restart it")
                    guest_stress_start(guest_stress_test)
                    time.sleep(30)
                else:
                    logging.debug("Stress still on")
            else:
                if bg and bg.is_alive():
                    try:
                        stress_stop_cmd = params.get("stress_stop_cmd")
                        # FIX: same env.get_vm signature bug as above.
                        vm = env.get_vm(params.get("main_vm"))
                        vm.verify_alive()
                        session = vm.wait_for_login()
                        if stress_stop_cmd:
                            logging.warn("Killing background stress process "
                                         "with cmd '%s', you would see some "
                                         "error message in client test result,"
                                         "it's harmless.", stress_stop_cmd)
                            session.cmd(stress_stop_cmd)
                        bg.join(10)
                    except Exception:
                        # Best effort: a failure to stop the stress process
                        # must not fail the migration test itself.
                        pass
                break
            time.sleep(10)

    def get_functions(func_names, locals_dict):
        """
        Find sub function(s) in this function with the given name(s).
        """
        if not func_names:
            return []
        funcs = []
        for f in func_names.split():
            f = locals_dict.get(f)
            if isinstance(f, types.FunctionType):
                funcs.append(f)
        return funcs

    def mig_set_speed():
        # Invoked dynamically via get_functions() when listed in the
        # pre_migrate/post_migrate params.
        mig_speed = params.get("mig_speed", "1G")
        return vm.monitor.migrate_set_speed(mig_speed)

    login_timeout = int(params.get("login_timeout", 360))
    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")
    mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2
    mig_exec_cmd_src = params.get("migration_exec_cmd_src")
    mig_exec_cmd_dst = params.get("migration_exec_cmd_dst")
    if mig_exec_cmd_src and "gzip" in mig_exec_cmd_src:
        mig_exec_file = params.get("migration_exec_file", "/var/tmp/exec")
        mig_exec_file += "-%s" % utils_misc.generate_random_string(8)
        mig_exec_cmd_src = mig_exec_cmd_src % mig_exec_file
        mig_exec_cmd_dst = mig_exec_cmd_dst % mig_exec_file
    offline = params.get("offline", "no") == "yes"
    check = params.get("vmstate_check", "no") == "yes"
    living_guest_os = params.get("migration_living_guest", "yes") == "yes"
    deamon_thread = None

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    if living_guest_os:
        session = vm.wait_for_login(timeout=login_timeout)

        # Get the output of migration_test_command
        test_command = params.get("migration_test_command")
        reference_output = session.cmd_output(test_command)

        # Start some process in the background (and leave the session open)
        background_command = params.get("migration_bg_command", "")
        session.sendline(background_command)
        time.sleep(5)

        # Start another session with the guest and make sure the background
        # process is running
        session2 = vm.wait_for_login(timeout=login_timeout)

        try:
            check_command = params.get("migration_bg_check_command", "")
            session2.cmd(check_command, timeout=30)
            session2.close()

            # run some functions before migrate start.
            pre_migrate = get_functions(params.get("pre_migrate"), locals())
            for func in pre_migrate:
                func()

            # Start stress test in guest.
            guest_stress_test = params.get("guest_stress_test")
            if guest_stress_test:
                guest_stress_start(guest_stress_test)
                params["action"] = "run"
                deamon_thread = utils_test.BackgroundTest(
                    guest_stress_deamon, ())
                deamon_thread.start()

            # Migrate the VM
            ping_pong = params.get("ping_pong", 1)
            for i in xrange(int(ping_pong)):
                if i % 2 == 0:
                    logging.info("Round %s ping..." % str(i / 2))
                else:
                    logging.info("Round %s pong..." % str(i / 2))
                vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay,
                           offline, check,
                           migration_exec_cmd_src=mig_exec_cmd_src,
                           migration_exec_cmd_dst=mig_exec_cmd_dst)

            # Set deamon thread action to stop after migrate
            params["action"] = "stop"

            # run some functions after migrate finish.
            post_migrate = get_functions(params.get("post_migrate"), locals())
            for func in post_migrate:
                func()

            # Log into the guest again
            logging.info("Logging into guest after migration...")
            session2 = vm.wait_for_login(timeout=30)
            logging.info("Logged in after migration")

            # Make sure the background process is still running
            session2.cmd(check_command, timeout=30)

            # Get the output of migration_test_command
            output = session2.cmd_output(test_command)

            # Compare output to reference output
            if output != reference_output:
                logging.info("Command output before migration differs from "
                             "command output after migration")
                logging.info("Command: %s", test_command)
                logging.info("Output before:" +
                             utils_misc.format_str_for_message(reference_output))
                logging.info("Output after:" +
                             utils_misc.format_str_for_message(output))
                raise error.TestFail("Command '%s' produced different output "
                                     "before and after migration" % test_command)

        finally:
            # Kill the background process
            if session2 and session2.is_alive():
                bg_kill_cmd = params.get("migration_bg_kill_command", None)
                if bg_kill_cmd is not None:
                    try:
                        session2.cmd(bg_kill_cmd)
                    except aexpect.ShellTimeoutError:
                        logging.debug("Remote session not responsive, "
                                      "shutting down VM %s", vm.name)
                        vm.destroy(gracefully=True)
            if deamon_thread is not None:
                # Set deamon thread action to stop after migrate
                params["action"] = "stop"
                deamon_thread.join()

    else:
        # Just migrate without depending on a living guest OS
        vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, offline, check,
                   migration_exec_cmd_src=mig_exec_cmd_src,
                   migration_exec_cmd_dst=mig_exec_cmd_dst)
import os from setuptools import setup def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() setup( name="blake-archive", version="0.1", description="Blake archive web app", license="Closed source", packages=['blake', 'test'], long_description=read('README'), classifiers=["Development Status :: 3 - Alpha"], install_requires=["flask", "sqlalchemy", "flask-sqlalchemy", 'lxml', 'xmltodict', "nose", 'tablib'] )
from openerp.osv import osv, fields class res_partner(osv.Model): _inherit = 'res.partner' _order = "parent_left" _parent_order = "ref" _parent_store = True _columns = { 'parent_right': fields.integer('Parent Right', select=1), 'parent_left': fields.integer('Parent Left', select=1), }
from py.test import mark from translate.filters import checks from translate.lang import data from translate.storage import po, xliff def strprep(str1, str2, message=None): return data.normalized_unicode(str1), data.normalized_unicode(str2), data.normalized_unicode(message) def passes(filterfunction, str1, str2): """returns whether the given strings pass on the given test, handling FilterFailures""" str1, str2, no_message = strprep(str1, str2) try: filterresult = filterfunction(str1, str2) except checks.FilterFailure, e: filterresult = False return filterresult def fails(filterfunction, str1, str2, message=None): """returns whether the given strings fail on the given test, handling only FilterFailures""" str1, str2, message = strprep(str1, str2, message) try: filterresult = filterfunction(str1, str2) except checks.SeriousFilterFailure, e: filterresult = True except checks.FilterFailure, e: if message: exc_message = e.messages[0] filterresult = exc_message != message print exc_message.encode('utf-8') else: filterresult = False return not filterresult def fails_serious(filterfunction, str1, str2, message=None): """returns whether the given strings fail on the given test, handling only SeriousFilterFailures""" str1, str2, message = strprep(str1, str2, message) try: filterresult = filterfunction(str1, str2) except checks.SeriousFilterFailure, e: if message: exc_message = e.messages[0] filterresult = exc_message != message print exc_message.encode('utf-8') else: filterresult = False return not filterresult def test_defaults(): """tests default setup and that checks aren't altered by other constructions""" stdchecker = checks.StandardChecker() assert stdchecker.config.varmatches == [] mozillachecker = checks.MozillaChecker() stdchecker = checks.StandardChecker() assert stdchecker.config.varmatches == [] def test_construct(): """tests that the checkers can be constructed""" stdchecker = checks.StandardChecker() mozillachecker = checks.MozillaChecker() ooochecker = 
checks.OpenOfficeChecker() gnomechecker = checks.GnomeChecker() kdechecker = checks.KdeChecker() def test_accelerator_markers(): """test that we have the correct accelerator marker for the various default configs""" stdchecker = checks.StandardChecker() assert stdchecker.config.accelmarkers == [] mozillachecker = checks.MozillaChecker() assert mozillachecker.config.accelmarkers == ["&"] ooochecker = checks.OpenOfficeChecker() assert ooochecker.config.accelmarkers == ["~"] gnomechecker = checks.GnomeChecker() assert gnomechecker.config.accelmarkers == ["_"] kdechecker = checks.KdeChecker() assert kdechecker.config.accelmarkers == ["&"] def test_messages(): """test that our helpers can check for messages and that these error messages can contain Unicode""" stdchecker = checks.StandardChecker(checks.CheckerConfig(validchars='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz')) assert fails(stdchecker.validchars, "Some unexpected characters", "©", "Invalid characters: '©' (\\u00a9)") stdchecker = checks.StandardChecker() assert fails_serious(stdchecker.escapes, r"A tab", r"'n Ṱab\t", r"""Escapes in original () don't match escapes in translation ('Ṱab\t')""") def test_accelerators(): """tests accelerators""" stdchecker = checks.StandardChecker(checks.CheckerConfig(accelmarkers="&")) assert passes(stdchecker.accelerators, "&File", "&Fayile") assert fails(stdchecker.accelerators, "&File", "Fayile") assert fails(stdchecker.accelerators, "File", "&Fayile") assert passes(stdchecker.accelerators, "Mail && News", "Pos en Nuus") assert fails(stdchecker.accelerators, "Mail &amp; News", "Pos en Nuus") assert passes(stdchecker.accelerators, "&Allow", u'&\ufeb2\ufee3\ufe8e\ufea3') assert fails(stdchecker.accelerators, "Open &File", "Vula& Ifayile") kdechecker = checks.KdeChecker() assert passes(kdechecker.accelerators, "&File", "&Fayile") assert fails(kdechecker.accelerators, "&File", "Fayile") assert fails(kdechecker.accelerators, "File", "&Fayile") gnomechecker = 
checks.GnomeChecker() assert passes(gnomechecker.accelerators, "_File", "_Fayile") assert fails(gnomechecker.accelerators, "_File", "Fayile") assert fails(gnomechecker.accelerators, "File", "_Fayile") assert fails(gnomechecker.accelerators, "_File", "_Fayil_e") mozillachecker = checks.MozillaChecker() assert passes(mozillachecker.accelerators, "&File", "&Fayile") assert passes(mozillachecker.accelerators, "Warn me if this will disable any of my add&-ons", "&Waarsku my as dit enige van my byvoegings sal deaktiveer") assert fails_serious(mozillachecker.accelerators, "&File", "Fayile") assert fails_serious(mozillachecker.accelerators, "File", "&Fayile") assert passes(mozillachecker.accelerators, "Mail &amp; News", "Pos en Nuus") assert fails_serious(mozillachecker.accelerators, "Mail &amp; News", "Pos en &Nuus") assert fails_serious(mozillachecker.accelerators, "&File", "Fayile") ooochecker = checks.OpenOfficeChecker() assert passes(ooochecker.accelerators, "~File", "~Fayile") assert fails(ooochecker.accelerators, "~File", "Fayile") assert fails(ooochecker.accelerators, "File", "~Fayile") # We don't want an accelerator for letters with a diacritic assert fails(ooochecker.accelerators, "F~ile", "L~êer") # Bug 289: accept accented accelerator characters afchecker = checks.StandardChecker(checks.CheckerConfig(accelmarkers="&", targetlanguage="fi")) assert passes(afchecker.accelerators, "&Reload Frame", "P&äivitä kehys") # Problems: # Accelerator before variable - see test_acceleratedvariables @mark.xfail(reason="Accelerated variables needs a better implementation") def test_acceleratedvariables(): """test for accelerated variables""" # FIXME: disabled since acceleratedvariables has been removed, but these checks are still needed mozillachecker = checks.MozillaChecker() assert fails(mozillachecker.acceleratedvariables, "%S &Options", "&%S Ikhetho") assert passes(mozillachecker.acceleratedvariables, "%S &Options", "%S &Ikhetho") ooochecker = checks.OpenOfficeChecker() 
assert fails(ooochecker.acceleratedvariables, "%PRODUCTNAME% ~Options", "~%PRODUCTNAME% Ikhetho") assert passes(ooochecker.acceleratedvariables, "%PRODUCTNAME% ~Options", "%PRODUCTNAME% ~Ikhetho") def test_acronyms(): """tests acronyms""" stdchecker = checks.StandardChecker() assert passes(stdchecker.acronyms, "An HTML file", "'n HTML leer") assert fails(stdchecker.acronyms, "An HTML file", "'n LMTH leer") assert passes(stdchecker.acronyms, "It is HTML.", "Dit is HTML.") # We don't mind if you add an acronym to correct bad capitalisation in the original assert passes(stdchecker.acronyms, "An html file", "'n HTML leer") # We shouldn't worry about acronyms that appear in a musttranslate file stdchecker = checks.StandardChecker(checks.CheckerConfig(musttranslatewords=["OK"])) assert passes(stdchecker.acronyms, "OK", "Kulungile") # Assert punctuation should not hide accronyms assert fails(stdchecker.acronyms, "Location (URL) not found", "Blah blah blah") # Test '-W' (bug 283) assert passes(stdchecker.acronyms, "%s: option `-W %s' is ambiguous", "%s: opsie '-W %s' is dubbelsinnig") def test_blank(): """tests blank""" stdchecker = checks.StandardChecker() assert fails(stdchecker.blank, "Save as", " ") assert fails(stdchecker.blank, "_: KDE comment\\n\nSimple string", " ") def test_brackets(): """tests brackets""" stdchecker = checks.StandardChecker() assert passes(stdchecker.brackets, "N number(s)", "N getal(le)") assert fails(stdchecker.brackets, "For {sic} numbers", "Vier getalle") assert fails(stdchecker.brackets, "For }sic{ numbers", "Vier getalle") assert fails(stdchecker.brackets, "For [sic] numbers", "Vier getalle") assert fails(stdchecker.brackets, "For ]sic[ numbers", "Vier getalle") assert passes(stdchecker.brackets, "{[(", "[({") def test_compendiumconflicts(): """tests compendiumconflicts""" stdchecker = checks.StandardChecker() assert fails(stdchecker.compendiumconflicts, "File not saved", r"""#-#-#-#-# file1.po #-#-#-#-#\n Leer nie gestoor gestoor nie\n 
Leer nie gestoor""") def test_doublequoting(): """tests double quotes""" stdchecker = checks.StandardChecker() assert fails(stdchecker.doublequoting, "Hot plate", "\"Ipuleti\" elishisa") assert passes(stdchecker.doublequoting, "\"Hot\" plate", "\"Ipuleti\" elishisa") assert fails(stdchecker.doublequoting, "'Hot' plate", "\"Ipuleti\" elishisa") assert passes(stdchecker.doublequoting, "\\\"Hot\\\" plate", "\\\"Ipuleti\\\" elishisa") # We don't want the filter to complain about "untranslated" quotes in xml attributes frchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage="fr")) assert passes(frchecker.doublequoting, "Click <a href=\"page.html\">", "Clique <a href=\"page.html\">") assert fails(frchecker.doublequoting, "Do \"this\"", "Do \"this\"") assert passes(frchecker.doublequoting, "Do \"this\"", "Do « this »") assert fails(frchecker.doublequoting, "Do \"this\"", "Do « this » « this »") # This used to fail because we strip variables, and was left with an empty quotation that was not converted assert passes(frchecker.doublequoting, u"Copying `%s' to `%s'", u"Copie de « %s » vers « %s »") vichecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage="vi")) assert passes(vichecker.doublequoting, 'Save "File"', u"Lưu « Tập tin »") # Had a small exception with such a case: eschecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage="es")) assert passes(eschecker.doublequoting, "<![CDATA[ Enter the name of the Windows workgroup that this server should appear in. ]]>", "<![CDATA[ Ingrese el nombre del grupo de trabajo de Windows en el que debe aparecer este servidor. ]]>") def test_doublespacing(): """tests double spacing""" stdchecker = checks.StandardChecker() assert passes(stdchecker.doublespacing, "Sentence. Another sentence.", "Sin. 'n Ander sin.") assert passes(stdchecker.doublespacing, "Sentence. Another sentence.", "Sin. No double spacing.") assert fails(stdchecker.doublespacing, "Sentence. Another sentence.", "Sin. 
Missing the double space.") assert fails(stdchecker.doublespacing, "Sentence. Another sentence.", "Sin. Uneeded double space in translation.") ooochecker = checks.OpenOfficeChecker() assert passes(ooochecker.doublespacing, "Execute %PROGRAMNAME Calc", "Blah %PROGRAMNAME Calc") assert passes(ooochecker.doublespacing, "Execute %PROGRAMNAME Calc", "Blah % PROGRAMNAME Calc") def test_doublewords(): """tests doublewords""" stdchecker = checks.StandardChecker() assert passes(stdchecker.doublewords, "Save the rhino", "Save the rhino") assert fails(stdchecker.doublewords, "Save the rhino", "Save the the rhino") # Double variables are not an error stdchecker = checks.StandardChecker(checks.CheckerConfig(varmatches=[("%", 1)])) assert passes(stdchecker.doublewords, "%s %s installation", "tsenyo ya %s %s") # Double XML tags are not an error stdchecker = checks.StandardChecker() assert passes(stdchecker.doublewords, "Line one <br> <br> line two", "Lyn een <br> <br> lyn twee") # In some language certain double words are not errors st_checker = checks.StandardChecker(checks.CheckerConfig(targetlanguage="st")) assert passes(st_checker.doublewords, "Color to draw the name of a message you sent.", "Mmala wa ho taka bitso la molaetsa oo o o rometseng.") assert passes(st_checker.doublewords, "Ten men", "Banna ba ba leshome") assert passes(st_checker.doublewords, "Give SARS the tax", "Lekgetho le le fe SARS") def test_endpunc(): """tests endpunc""" stdchecker = checks.StandardChecker() assert passes(stdchecker.endpunc, "Question?", "Correct?") assert fails(stdchecker.endpunc, " Question?", "Wrong ?") # Newlines must not mask end punctuation assert fails(stdchecker.endpunc, "Exit change recording mode?\n\n", "Phuma esimeni sekugucula kubhalisa.\n\n") mozillachecker = checks.MozillaChecker() assert passes(mozillachecker.endpunc, "Upgrades an existing $ProductShortName$ installation.", "Ku antswisiwa ka ku nghenisiwa ka $ProductShortName$.") # Real examples assert 
passes(stdchecker.endpunc, "A nickname that identifies this publishing site (e.g.: 'MySite')", "Vito ro duvulela leri tirhisiwaka ku kuma sayiti leri ro kandziyisa (xik.: 'Sayiti ra Mina')") assert fails(stdchecker.endpunc, "Question", u"Wrong\u2026") # Making sure singlequotes don't confuse things assert passes(stdchecker.endpunc, "Pseudo-elements can't be negated '%1$S'.", "Pseudo-elemente kan nie '%1$S' ontken word nie.") stdchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage='km')) assert passes(stdchecker.endpunc, "In this new version, there are some minor conversion improvements on complex style in Openoffice.org Writer.", u"នៅ​ក្នុង​កំណែ​ថ្មីនេះ មាន​ការ​កែសម្រួល​មួយ​ចំនួន​តូច​ទាក់​ទង​នឹង​ការ​បំលែង​ពុម្ពអក្សរ​ខ្មែរ​ ក្នុង​កម្មវិធី​ការិយាល័យ​ ស្លឹករឹត ដែល​មាន​ប្រើ​ប្រាស់​រចនាប័ទ្មស្មុគស្មាញច្រើន\u00a0។") stdchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage='zh')) assert passes(stdchecker.endpunc, "To activate your account, follow this link:\n", u"要啟用戶口,請瀏覽這個鏈結:\n") stdchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage='vi')) assert passes(stdchecker.endpunc, "Do you want to delete the XX dialog?", u"Bạn có muốn xoá hộp thoại XX không?") stdchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage='fr')) assert passes(stdchecker.endpunc, "Header:", u"En-tête :") assert passes(stdchecker.endpunc, "Header:", u"En-tête\u00a0:") def test_endwhitespace(): """tests endwhitespace""" stdchecker = checks.StandardChecker() assert passes(stdchecker.endwhitespace, "A setence.", "I'm correct.") assert passes(stdchecker.endwhitespace, "A setence. ", "I'm correct. ") assert fails(stdchecker.endwhitespace, "A setence. 
", "'I'm incorrect.") assert passes(stdchecker.endwhitespace, "Problem with something: %s\n", "Probleem met iets: %s\n") zh_checker = checks.StandardChecker(checks.CheckerConfig(targetlanguage='zh')) # This should pass since the space is not needed in Chinese assert passes(zh_checker.endwhitespace, "Init. Limit: ", "起始时间限制:") def test_escapes(): """tests escapes""" stdchecker = checks.StandardChecker() assert passes(stdchecker.escapes, r"""A sentence""", "I'm correct.") assert passes(stdchecker.escapes, "A file\n", "'n Leer\n") assert fails_serious(stdchecker.escapes, r"blah. A file", r"bleah.\n'n leer") assert passes(stdchecker.escapes, r"A tab\t", r"'n Tab\t") assert fails_serious(stdchecker.escapes, r"A tab\t", r"'n Tab") assert passes(stdchecker.escapes, r"An escape escape \\", r"Escape escape \\") assert fails_serious(stdchecker.escapes, r"An escape escape \\", "Escape escape") assert passes(stdchecker.escapes, r"A double quote \"", r"Double quote \"") assert fails_serious(stdchecker.escapes, r"A double quote \"", "Double quote") # Escaped escapes assert passes(stdchecker.escapes, "An escaped newline \\n", "Escaped newline \\n") assert fails_serious(stdchecker.escapes, "An escaped newline \\n", "Escaped newline \n") # Real example ooochecker = checks.OpenOfficeChecker() assert passes(ooochecker.escapes, ",\t44\t;\t59\t:\t58\t{Tab}\t9\t{space}\t32", ",\t44\t;\t59\t:\t58\t{Tab}\t9\t{space}\t32") def test_newlines(): """tests newlines""" stdchecker = checks.StandardChecker() assert passes(stdchecker.newlines, "Nothing to see", "Niks te sien") assert passes(stdchecker.newlines, "Correct\n", "Korrek\n") assert passes(stdchecker.newlines, "Correct\r", "Korrek\r") assert passes(stdchecker.newlines, "Correct\r\n", "Korrek\r\n") assert fails(stdchecker.newlines, "A file\n", "'n Leer") assert fails(stdchecker.newlines, "A file", "'n Leer\n") assert fails(stdchecker.newlines, "A file\r", "'n Leer") assert fails(stdchecker.newlines, "A file", "'n Leer\r") assert 
fails(stdchecker.newlines, "A file\n", "'n Leer\r\n") assert fails(stdchecker.newlines, "A file\r\n", "'n Leer\n") assert fails(stdchecker.newlines, "blah.\nA file", "bleah. 'n leer") # msgfmt errors assert fails(stdchecker.newlines, "One two\n", "Een\ntwee") assert fails(stdchecker.newlines, "\nOne two", "Een\ntwee") # Real example ooochecker = checks.OpenOfficeChecker() assert fails(ooochecker.newlines, "The arrowhead was modified without saving.\nWould you like to save the arrowhead now?", "Ṱhoho ya musevhe yo khwinifhadzwa hu si na u seiva.Ni khou ṱoda u seiva thoho ya musevhe zwino?") def test_tabs(): """tests tabs""" stdchecker = checks.StandardChecker() assert passes(stdchecker.tabs, "Nothing to see", "Niks te sien") assert passes(stdchecker.tabs, "Correct\t", "Korrek\t") assert passes(stdchecker.tabs, "Correct\tAA", "Korrek\tAA") assert fails_serious(stdchecker.tabs, "A file\t", "'n Leer") assert fails_serious(stdchecker.tabs, "A file", "'n Leer\t") ooochecker = checks.OpenOfficeChecker() assert passes(ooochecker.tabs, ",\t44\t;\t59\t:\t58\t{Tab}\t9\t{space}\t32", ",\t44\t;\t59\t:\t58\t{Tab}\t9\t{space}\t32") def test_filepaths(): """tests filepaths""" stdchecker = checks.StandardChecker() assert passes(stdchecker.filepaths, "%s to the file /etc/hosts on your system.", "%s na die leer /etc/hosts op jou systeem.") assert fails(stdchecker.filepaths, "%s to the file /etc/hosts on your system.", "%s na die leer /etc/gasheer op jou systeem.") def test_kdecomments(): """tests kdecomments""" stdchecker = checks.StandardChecker() assert passes(stdchecker.kdecomments, r"""_: I am a comment\n A string to translate""", "'n String om te vertaal") assert fails(stdchecker.kdecomments, r"""_: I am a comment\n A string to translate""", r"""_: Ek is 'n commment\n 'n String om te vertaal""") assert fails(stdchecker.kdecomments, """_: I am a comment\\n\n""", """_: I am a comment\\n\n""") def test_long(): """tests long messages""" stdchecker = checks.StandardChecker() assert 
passes(stdchecker.long, "I am normal", "Ek is ook normaal") assert fails(stdchecker.long, "Short.", "Kort.......................................................................................") assert fails(stdchecker.long, "a", "bc") def test_musttranslatewords(): """tests stopwords""" stdchecker = checks.StandardChecker(checks.CheckerConfig(musttranslatewords=[])) assert passes(stdchecker.musttranslatewords, "This uses Mozilla of course", "hierdie gebruik le mozille natuurlik") stdchecker = checks.StandardChecker(checks.CheckerConfig(musttranslatewords=["Mozilla"])) assert passes(stdchecker.musttranslatewords, "This uses Mozilla of course", "hierdie gebruik le mozille natuurlik") assert fails(stdchecker.musttranslatewords, "This uses Mozilla of course", "hierdie gebruik Mozilla natuurlik") assert passes(stdchecker.musttranslatewords, "This uses Mozilla. Don't you?", "hierdie gebruik le mozille soos jy") assert fails(stdchecker.musttranslatewords, "This uses Mozilla. Don't you?", "hierdie gebruik Mozilla soos jy") # should always pass if there are no stopwords in the original assert passes(stdchecker.musttranslatewords, "This uses something else. 
Don't you?", "hierdie gebruik Mozilla soos jy") # check that we can find words surrounded by punctuation assert passes(stdchecker.musttranslatewords, "Click 'Mozilla' button", "Kliek 'Motzille' knoppie") assert fails(stdchecker.musttranslatewords, "Click 'Mozilla' button", "Kliek 'Mozilla' knoppie") assert passes(stdchecker.musttranslatewords, 'Click "Mozilla" button', 'Kliek "Motzille" knoppie') assert fails(stdchecker.musttranslatewords, 'Click "Mozilla" button', 'Kliek "Mozilla" knoppie') assert fails(stdchecker.musttranslatewords, 'Click "Mozilla" button', u'Kliek «Mozilla» knoppie') assert passes(stdchecker.musttranslatewords, "Click (Mozilla) button", "Kliek (Motzille) knoppie") assert fails(stdchecker.musttranslatewords, "Click (Mozilla) button", "Kliek (Mozilla) knoppie") assert passes(stdchecker.musttranslatewords, "Click Mozilla!", "Kliek Motzille!") assert fails(stdchecker.musttranslatewords, "Click Mozilla!", "Kliek Mozilla!") ## We need to define more word separators to allow us to find those hidden untranslated items #assert fails(stdchecker.musttranslatewords, "Click OK", "Blah we-OK") # Don't get confused when variables are the same as a musttranslate word stdchecker = checks.StandardChecker(checks.CheckerConfig(varmatches=[("%", None), ], musttranslatewords=["OK"])) assert passes(stdchecker.musttranslatewords, "Click %OK to start", "Kliek %OK om te begin") # Unicode assert fails(stdchecker.musttranslatewords, "Click OK", u"Kiḽikani OK") def test_notranslatewords(): """tests stopwords""" stdchecker = checks.StandardChecker(checks.CheckerConfig(notranslatewords=[])) assert passes(stdchecker.notranslatewords, "This uses Mozilla of course", "hierdie gebruik le mozille natuurlik") stdchecker = checks.StandardChecker(checks.CheckerConfig(notranslatewords=["Mozilla", "Opera"])) assert fails(stdchecker.notranslatewords, "This uses Mozilla of course", "hierdie gebruik le mozille natuurlik") assert passes(stdchecker.notranslatewords, "This uses Mozilla of 
course", "hierdie gebruik Mozilla natuurlik") assert fails(stdchecker.notranslatewords, "This uses Mozilla. Don't you?", "hierdie gebruik le mozille soos jy") assert passes(stdchecker.notranslatewords, "This uses Mozilla. Don't you?", "hierdie gebruik Mozilla soos jy") # should always pass if there are no stopwords in the original assert passes(stdchecker.notranslatewords, "This uses something else. Don't you?", "hierdie gebruik Mozilla soos jy") # Cope with commas assert passes(stdchecker.notranslatewords, "using Mozilla Task Manager", u"šomiša Selaola Mošomo sa Mozilla, gomme") # Find words even if they are embedded in punctuation assert fails(stdchecker.notranslatewords, "Click 'Mozilla' button", "Kliek 'Motzille' knoppie") assert passes(stdchecker.notranslatewords, "Click 'Mozilla' button", "Kliek 'Mozilla' knoppie") assert fails(stdchecker.notranslatewords, "Click Mozilla!", "Kliek Motzille!") assert passes(stdchecker.notranslatewords, "Click Mozilla!", "Kliek Mozilla!") assert fails(stdchecker.notranslatewords, "Searches (From Opera)", "adosako (kusukela ku- Ophera)") stdchecker = checks.StandardChecker(checks.CheckerConfig(notranslatewords=["Sun", "NeXT"])) assert fails(stdchecker.notranslatewords, "Sun/NeXT Audio", "Odio dza Ḓuvha/TeVHELAHO") assert passes(stdchecker.notranslatewords, "Sun/NeXT Audio", "Odio dza Sun/NeXT") stdchecker = checks.StandardChecker(checks.CheckerConfig(notranslatewords=["sendmail"])) assert fails(stdchecker.notranslatewords, "because 'sendmail' could", "ngauri 'rumelameiḽi' a yo") assert passes(stdchecker.notranslatewords, "because 'sendmail' could", "ngauri 'sendmail' a yo") stdchecker = checks.StandardChecker(checks.CheckerConfig(notranslatewords=["Base"])) assert fails(stdchecker.notranslatewords, " - %PRODUCTNAME Base: Relation design", " - %PRODUCTNAME Sisekelo: Umsiko wekuhlobana") stdchecker = checks.StandardChecker(checks.CheckerConfig(notranslatewords=["Writer"])) assert fails(stdchecker.notranslatewords, "&[ProductName] 
Writer/Web", "&[ProductName] Umbhali/iWebhu") # Unicode - different decompositions stdchecker = checks.StandardChecker(checks.CheckerConfig(notranslatewords=[u"\u1e3cike"])) assert passes(stdchecker.notranslatewords, u"You \u1e3cike me", u"Ek \u004c\u032dike jou") def test_numbers(): """test numbers""" stdchecker = checks.StandardChecker() assert passes(stdchecker.numbers, "Netscape 4 was not as good as Netscape 7.", "Netscape 4 was nie so goed soos Netscape 7 nie.") # Check for correct detection of degree. Also check that we aren't getting confused with 1 and 2 byte UTF-8 characters assert fails(stdchecker.numbers, "180° turn", "180 turn") assert passes(stdchecker.numbers, "180° turn", "180° turn") assert fails(stdchecker.numbers, "180° turn", "360 turn") assert fails(stdchecker.numbers, "180° turn", "360° turn") assert passes(stdchecker.numbers, "180~ turn", "180 turn") assert passes(stdchecker.numbers, "180¶ turn", "180 turn") # Numbers with multiple decimal points assert passes(stdchecker.numbers, "12.34.56", "12.34.56") assert fails(stdchecker.numbers, "12.34.56", "98.76.54") # Currency # FIXME we should probably be able to handle currency checking with locale inteligence assert passes(stdchecker.numbers, "R57.60", "R57.60") # FIXME - again locale intelligence should allow us to use other decimal seperators assert fails(stdchecker.numbers, "R57.60", "R57,60") assert fails(stdchecker.numbers, "1,000.00", "1 000,00") # You should be able to reorder numbers assert passes(stdchecker.numbers, "40-bit RC2 encryption with RSA and an MD5", "Umbhalo ocashile i-RC2 onamabhithi angu-40 one-RSA ne-MD5") # Don't fail the numbers check if the entry is a dialogsize entry mozillachecker = checks.MozillaChecker() assert passes(mozillachecker.numbers, 'width: 12em;', 'width: 20em;') def test_options(): """tests command line options e.g. 
--option""" stdchecker = checks.StandardChecker() assert passes(stdchecker.options, "--help", "--help") assert fails(stdchecker.options, "--help", "--hulp") assert fails(stdchecker.options, "--input=FILE", "--input=FILE") assert passes(stdchecker.options, "--input=FILE", "--input=LÊER") assert fails(stdchecker.options, "--input=FILE", "--tovoer=LÊER") # We don't want just any '--' to trigger this test - the error will be confusing assert passes(stdchecker.options, "Hello! -- Hi", "Hallo! &mdash; Haai") assert passes(stdchecker.options, "--blank--", "--vide--") def test_printf(): """tests printf style variables""" # This should really be a subset of the variable checks # Ideally we should be able to adapt based on #, directives also stdchecker = checks.StandardChecker() assert passes(stdchecker.printf, "I am %s", "Ek is %s") assert fails(stdchecker.printf, "I am %s", "Ek is %d") assert passes(stdchecker.printf, "I am %#100.50hhf", "Ek is %#100.50hhf") assert fails(stdchecker.printf, "I am %#100s", "Ek is %10s") assert fails(stdchecker.printf, "... for user %.100s on %.100s:", "... lomuntu osebenzisa i-%. 
I-100s e-100s:") assert passes(stdchecker.printf, "%dMB", "%d MG") # Reordering assert passes(stdchecker.printf, "String %s and number %d", "String %1$s en nommer %2$d") assert passes(stdchecker.printf, "String %1$s and number %2$d", "String %1$s en nommer %2$d") assert passes(stdchecker.printf, "String %s and number %d", "Nommer %2$d and string %1$s") assert passes(stdchecker.printf, "String %s and real number %f and number %d", "String %1$s en nommer %3$d en reële getal %2$f") assert passes(stdchecker.printf, "String %1$s and real number %2$f and number %3$d", "String %1$s en nommer %3$d en reële getal %2$f") assert passes(stdchecker.printf, "Real number %2$f and string %1$s and number %3$d", "String %1$s en nommer %3$d en reële getal %2$f") assert fails(stdchecker.printf, "String %s and number %d", "Nommer %1$d and string %2$s") assert fails(stdchecker.printf, "String %s and real number %f and number %d", "String %1$s en nommer %3$d en reële getal %2$d") assert fails(stdchecker.printf, "String %s and real number %f and number %d", "String %1$s en nommer %3$d en reële getal %4$f") assert fails(stdchecker.printf, "String %s and real number %f and number %d", "String %2$s en nommer %3$d en reële getal %2$f") assert fails(stdchecker.printf, "Real number %2$f and string %1$s and number %3$d", "String %1$f en nommer %3$d en reële getal %2$f") # checking python format strings assert passes(stdchecker.printf, "String %(1)s and number %(2)d", "Nommer %(2)d en string %(1)s") assert passes(stdchecker.printf, "String %(str)s and number %(num)d", "Nommer %(num)d en string %(str)s") assert fails(stdchecker.printf, "String %(str)s and number %(num)d", "Nommer %(nommer)d en string %(str)s") assert fails(stdchecker.printf, "String %(str)s and number %(num)d", "Nommer %(num)d en string %s") # checking omitted plural format string placeholder %.0s stdchecker.hasplural = 1 assert passes(stdchecker.printf, "%d plurals", "%.0s plural") def test_puncspacing(): """tests spacing after 
punctuation""" stdchecker = checks.StandardChecker() assert passes(stdchecker.puncspacing, "One, two, three.", "Kunye, kubili, kuthathu.") assert passes(stdchecker.puncspacing, "One, two, three. ", "Kunye, kubili, kuthathu.") assert fails(stdchecker.puncspacing, "One, two, three. ", "Kunye, kubili,kuthathu.") assert passes(stdchecker.puncspacing, "One, two, three!?", "Kunye, kubili, kuthathu?") # Some languages have padded puntuation marks frchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage="fr")) assert passes(frchecker.puncspacing, "Do \"this\"", "Do « this »") assert passes(frchecker.puncspacing, u"Do \"this\"", u"Do «\u00a0this\u00a0»") assert fails(frchecker.puncspacing, "Do \"this\"", "Do «this»") def test_purepunc(): """tests messages containing only punctuation""" stdchecker = checks.StandardChecker() assert passes(stdchecker.purepunc, ".", ".") assert passes(stdchecker.purepunc, "", "") assert fails(stdchecker.purepunc, ".", " ") assert fails(stdchecker.purepunc, "Find", "'") assert fails(stdchecker.purepunc, "'", "Find") assert passes(stdchecker.purepunc, "year measurement template|2000", "2000") def test_sentencecount(): """tests sentencecount messages""" stdchecker = checks.StandardChecker() assert passes(stdchecker.sentencecount, "One. Two. Three.", "Een. Twee. Drie.") assert passes(stdchecker.sentencecount, "One two three", "Een twee drie.") assert fails(stdchecker.sentencecount, "One. Two. Three.", "Een Twee. Drie.") assert passes(stdchecker.sentencecount, "Sentence with i.e. in it.", "Sin met d.w.s. in dit.") # bug 178, description item 8 el_checker = checks.StandardChecker(checks.CheckerConfig(targetlanguage='el')) assert fails(el_checker.sentencecount, "First sentence. Second sentence.", "Πρώτη πρόταση. 
δεύτερη πρόταση.") def test_short(): """tests short messages""" stdchecker = checks.StandardChecker() assert passes(stdchecker.short, "I am normal", "Ek is ook normaal") assert fails(stdchecker.short, "I am a very long sentence", "Ek") assert fails(stdchecker.short, "abcde", "c") def test_singlequoting(): """tests single quotes""" stdchecker = checks.StandardChecker() assert passes(stdchecker.singlequoting, "A 'Hot' plate", "Ipuleti 'elishisa' kunye") # FIXME this should pass but doesn't probably to do with our logic that got confused at the end of lines assert passes(stdchecker.singlequoting, "'Hot' plate", "Ipuleti 'elishisa'") # FIXME newlines also confuse our algorithm for single quotes assert passes(stdchecker.singlequoting, "File '%s'\n", "'%s' Faele\n") assert fails(stdchecker.singlequoting, "'Hot' plate", "Ipuleti \"elishisa\"") assert passes(stdchecker.singlequoting, "It's here.", "Dit is hier.") # Don't get confused by punctuation that touches a single quote assert passes(stdchecker.singlequoting, "File '%s'.", "'%s' Faele.") assert passes(stdchecker.singlequoting, "Blah 'format' blah.", "Blah blah 'sebopego'.") assert passes(stdchecker.singlequoting, "Blah 'format' blah!", "Blah blah 'sebopego'!") assert passes(stdchecker.singlequoting, "Blah 'format' blah?", "Blah blah 'sebopego'?") # Real examples assert passes(stdchecker.singlequoting, "A nickname that identifies this publishing site (e.g.: 'MySite')", "Vito ro duvulela leri tirhisiwaka ku kuma sayiti leri ro kandziyisa (xik.: 'Sayiti ra Mina')") assert passes(stdchecker.singlequoting, "isn't", "ayikho") assert passes(stdchecker.singlequoting, "Required (can't send message unless all recipients have certificates)", "Verlang (kan nie boodskappe versend tensy al die ontvangers sertifikate het nie)") # Afrikaans 'n assert passes(stdchecker.singlequoting, "Please enter a different site name.", "Tik 'n ander werfnaam in.") assert passes(stdchecker.singlequoting, "\"%name%\" already exists. 
Please enter a different site name.", "\"%name%\" bestaan reeds. Tik 'n ander werfnaam in.") # Check that accelerators don't mess with removing singlequotes mozillachecker = checks.MozillaChecker() assert passes(mozillachecker.singlequoting, "&Don't import anything", "&Moenie enigiets invoer nie") ooochecker = checks.OpenOfficeChecker() assert passes(ooochecker.singlequoting, "~Don't import anything", "~Moenie enigiets invoer nie") vichecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage="vi")) assert passes(vichecker.singlequoting, "Save 'File'", u"Lưu « Tập tin »") assert passes(vichecker.singlequoting, "Save `File'", u"Lưu « Tập tin »") def test_simplecaps(): """tests simple caps""" # Simple caps is a very vauge test so the checks here are mostly for obviously fixable problem # or for checking obviously correct situations that are triggering a failure. stdchecker = checks.StandardChecker() assert passes(stdchecker.simplecaps, "MB of disk space for the cache.", "MB yendzawo yediski etsala.") # We should squash 'I' in the source text as it messes with capital detection assert passes(stdchecker.simplecaps, "if you say I want", "as jy se ek wil") assert passes(stdchecker.simplecaps, "sentence. I want more.", "sin. Ek wil meer he.") assert passes(stdchecker.simplecaps, "Where are we? I can't see where we are going.", "Waar is ons? 
Ek kan nie sien waar ons gaan nie.") ## We should remove variables before checking stdchecker = checks.StandardChecker(checks.CheckerConfig(varmatches=[("%", 1)])) assert passes(stdchecker.simplecaps, "Could not load %s", "A swi koteki ku panga %S") assert passes(stdchecker.simplecaps, "The element \"%S\" is not recognized.", "Elemente \"%S\" a yi tiveki.") stdchecker = checks.StandardChecker(checks.CheckerConfig(varmatches=[("&", ";")])) assert passes(stdchecker.simplecaps, "Determine how &brandShortName; connects to the Internet.", "Kuma &brandShortName; hlanganisa eka Internete.") ## If source is ALL CAPS then we should just check that target is also ALL CAPS assert passes(stdchecker.simplecaps, "COUPDAYS", "COUPMALANGA") # Just some that at times have failed but should always pass assert passes(stdchecker.simplecaps, "Create a query entering an SQL statement directly.", "Yakha sibuti singena SQL inkhomba yesitatimende.") ooochecker = checks.OpenOfficeChecker() assert passes(ooochecker.simplecaps, "SOLK (%PRODUCTNAME Link)", "SOLK (%PRODUCTNAME Thumanyo)") assert passes(ooochecker.simplecaps, "%STAROFFICE Image", "Tshifanyiso tsha %STAROFFICE") assert passes(stdchecker.simplecaps, "Flies, flies, everywhere! Ack!", u"Vlieë, oral vlieë! 
Jig!") def test_spellcheck(): """tests spell checking""" stdchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage="af")) assert passes(stdchecker.spellcheck, "Great trek", "Groot trek") assert fails(stdchecker.spellcheck, "Final deadline", "End of the road") # Bug 289: filters accelerators before spell checking stdchecker = checks.StandardChecker(checks.CheckerConfig(accelmarkers="&", targetlanguage="fi")) assert passes(stdchecker.spellcheck, "&Reload Frame", "P&äivitä kehys") # Ensure we don't check notranslatewords stdchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage="af")) assert fails(stdchecker.spellcheck, "Mozilla is wonderful", "Mozillaaa is wonderlik") # We should pass the test if the "error" occurs in the English assert passes(stdchecker.spellcheck, "Mozilla is wonderful", "Mozilla is wonderlik") stdchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage="af", notranslatewords=["Mozilla"])) assert passes(stdchecker.spellcheck, "Mozilla is wonderful", "Mozilla is wonderlik") def test_startcaps(): """tests starting capitals""" stdchecker = checks.StandardChecker() assert passes(stdchecker.startcaps, "Find", "Vind") assert passes(stdchecker.startcaps, "find", "vind") assert fails(stdchecker.startcaps, "Find", "vind") assert fails(stdchecker.startcaps, "find", "Vind") assert passes(stdchecker.startcaps, "'", "'") assert passes(stdchecker.startcaps, "\\.,/?!`'\"[]{}()@#$%^&*_-;:<>Find", "\\.,/?!`'\"[]{}()@#$%^&*_-;:<>Vind") # With leading whitespace assert passes(stdchecker.startcaps, " Find", " Vind") assert passes(stdchecker.startcaps, " find", " vind") assert fails(stdchecker.startcaps, " Find", " vind") assert fails(stdchecker.startcaps, " find", " Vind") # Leading punctuation assert passes(stdchecker.startcaps, "'Find", "'Vind") assert passes(stdchecker.startcaps, "'find", "'vind") assert fails(stdchecker.startcaps, "'Find", "'vind") assert fails(stdchecker.startcaps, "'find", "'Vind") # Unicode assert 
passes(stdchecker.startcaps, "Find", u"Šind") assert passes(stdchecker.startcaps, "find", u"šind") assert fails(stdchecker.startcaps, "Find", u"šind") assert fails(stdchecker.startcaps, "find", u"Šind") # Unicode further down the Unicode tables assert passes(stdchecker.startcaps, "A text enclosed...", u"Ḽiṅwalwa ḽo katelwaho...") assert fails(stdchecker.startcaps, "A text enclosed...", u"ḽiṅwalwa ḽo katelwaho...") # Accelerators stdchecker = checks.StandardChecker(checks.CheckerConfig(accelmarkers="&")) assert passes(stdchecker.startcaps, "&Find", "Vi&nd") # Language specific stuff stdchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage='af')) assert passes(stdchecker.startcaps, "A cow", "'n Koei") assert passes(stdchecker.startcaps, "A list of ", "'n Lys van ") # should pass: #assert passes(stdchecker.startcaps, "A 1k file", u"'n 1k-lêer") assert passes(stdchecker.startcaps, "'Do it'", "'Doen dit'") assert fails(stdchecker.startcaps, "'Closer than'", "'nader as'") assert passes(stdchecker.startcaps, "List", "Lys") assert passes(stdchecker.startcaps, "a cow", "'n koei") assert fails(stdchecker.startcaps, "a cow", "'n Koei") assert passes(stdchecker.startcaps, "(A cow)", "('n Koei)") assert fails(stdchecker.startcaps, "(a cow)", "('n Koei)") def test_startpunc(): """tests startpunc""" stdchecker = checks.StandardChecker() assert passes(stdchecker.startpunc, "<< Previous", "<< Correct") assert fails(stdchecker.startpunc, " << Previous", "Wrong") assert fails(stdchecker.startpunc, "Question", u"\u2026Wrong") assert passes(stdchecker.startpunc, "<fish>hello</fish> world", "world <fish>hello</fish>") # The inverted Spanish question mark should be accepted stdchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage='es')) assert passes(stdchecker.startpunc, "Do you want to reload the file?", u"¿Quiere recargar el archivo?") # The Afrikaans indefinite article should be accepted stdchecker = 
checks.StandardChecker(checks.CheckerConfig(targetlanguage='af')) assert passes(stdchecker.startpunc, "A human?", u"'n Mens?") def test_startwhitespace(): """tests startwhitespace""" stdchecker = checks.StandardChecker() assert passes(stdchecker.startwhitespace, "A setence.", "I'm correct.") assert fails(stdchecker.startwhitespace, " A setence.", "I'm incorrect.") def test_unchanged(): """tests unchanged entries""" stdchecker = checks.StandardChecker(checks.CheckerConfig(accelmarkers="&")) assert fails(stdchecker.unchanged, "Unchanged", "Unchanged") assert fails(stdchecker.unchanged, "&Unchanged", "Un&changed") assert passes(stdchecker.unchanged, "Unchanged", "Changed") assert passes(stdchecker.unchanged, "1234", "1234") assert passes(stdchecker.unchanged, "2×2", "2×2") # bug 178, description item 14 assert passes(stdchecker.unchanged, "I", "I") assert passes(stdchecker.unchanged, " ", " ") # bug 178, description item 5 assert passes(stdchecker.unchanged, "???", "???") # bug 178, description item 15 assert passes(stdchecker.unchanged, "&ACRONYM", "&ACRONYM") # bug 178, description item 7 assert passes(stdchecker.unchanged, "F1", "F1") # bug 178, description item 20 assert fails(stdchecker.unchanged, "Two words", "Two words") #TODO: this still fails gnomechecker = checks.GnomeChecker() assert fails(gnomechecker.unchanged, "Entity references, such as &amp; and &#169;", "Entity references, such as &amp; and &#169;") # Variable only and variable plus punctuation messages should be ignored mozillachecker = checks.MozillaChecker() assert passes(mozillachecker.unchanged, "$ProgramName$", "$ProgramName$") assert passes(mozillachecker.unchanged, "$file$ : $dir$", "$file$ : $dir$") # bug 178, description item 13 assert fails(mozillachecker.unchanged, "$file$ in $dir$", "$file$ in $dir$") assert passes(mozillachecker.unchanged, "&brandShortName;", "&brandShortName;") # Don't translate words should be ignored stdchecker = 
checks.StandardChecker(checks.CheckerConfig(notranslatewords=["Mozilla"])) assert passes(stdchecker.unchanged, "Mozilla", "Mozilla") # bug 178, description item 10 # Don't fail unchanged if the entry is a dialogsize, quite plausible that you won't change it mozillachecker = checks.MozillaChecker() assert passes(mozillachecker.unchanged, 'width: 12em;', 'width: 12em;') def test_untranslated(): """tests untranslated entries""" stdchecker = checks.StandardChecker() assert fails(stdchecker.untranslated, "I am untranslated", "") assert passes(stdchecker.untranslated, "I am translated", "Ek is vertaal") # KDE comments that make it into translations should not mask untranslated test assert fails(stdchecker.untranslated, "_: KDE comment\\n\nI am untranslated", "_: KDE comment\\n\n") def test_validchars(): """tests valid characters""" stdchecker = checks.StandardChecker(checks.CheckerConfig()) assert passes(stdchecker.validchars, "The check always passes if you don't specify chars", "Die toets sal altyd werk as jy nie karacters specifisier") stdchecker = checks.StandardChecker(checks.CheckerConfig(validchars='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz')) assert passes(stdchecker.validchars, "This sentence contains valid characters", "Hierdie sin bevat ware karakters") assert fails(stdchecker.validchars, "Some unexpected characters", "©®°±÷¼½¾") stdchecker = checks.StandardChecker(checks.CheckerConfig(validchars='⠁⠂⠃⠄⠅⠆⠇⠈⠉⠊⠋⠌⠍⠎⠏⠐⠑⠒⠓⠔⠕⠖⠗⠘⠙⠚⠛⠜⠝⠞⠟⠠⠡⠢⠣⠤⠥⠦⠧⠨⠩⠪⠫⠬⠭⠮⠯⠰')) assert passes(stdchecker.validchars, "Our target language is all non-ascii", "⠁⠂⠃⠄⠆⠇⠈⠉⠜⠝⠞⠟⠠⠡⠢⠣⠤⠥⠦⠧⠨⠩⠪⠫") assert fails(stdchecker.validchars, "Our target language is all non-ascii", "Some ascii⠁⠂⠃⠄⠆⠇⠈⠉⠜⠝⠞⠟⠠⠡⠢⠣⠤⠥⠦⠧⠨⠩⠪⠫") stdchecker = checks.StandardChecker(checks.CheckerConfig(validchars=u'\u004c\u032d')) assert passes(stdchecker.validchars, "This sentence contains valid chars", u"\u004c\u032d") assert passes(stdchecker.validchars, "This sentence contains valid chars", u"\u1e3c") stdchecker = 
checks.StandardChecker(checks.CheckerConfig(validchars=u'\u1e3c')) assert passes(stdchecker.validchars, "This sentence contains valid chars", u"\u1e3c") assert passes(stdchecker.validchars, "This sentence contains valid chars", u"\u004c\u032d") def test_variables_kde(): """tests variables in KDE translations""" # GNOME variables kdechecker = checks.KdeChecker() assert passes(kdechecker.variables, "%d files of type %s saved.", "%d leers van %s tipe gestoor.") assert fails_serious(kdechecker.variables, "%d files of type %s saved.", "%s leers van %s tipe gestoor.") def test_variables_gnome(): """tests variables in GNOME translations""" # GNOME variables gnomechecker = checks.GnomeChecker() assert passes(gnomechecker.variables, "%d files of type %s saved.", "%d leers van %s tipe gestoor.") assert fails_serious(gnomechecker.variables, "%d files of type %s saved.", "%s leers van %s tipe gestoor.") assert passes(gnomechecker.variables, "Save $(file)", "Stoor $(file)") assert fails_serious(gnomechecker.variables, "Save $(file)", "Stoor $(leer)") def test_variables_mozilla(): """tests variables in Mozilla translations""" # Mozilla variables mozillachecker = checks.MozillaChecker() assert passes(mozillachecker.variables, "Use the &brandShortname; instance.", "Gebruik die &brandShortname; weergawe.") assert fails_serious(mozillachecker.variables, "Use the &brandShortname; instance.", "Gebruik die &brandKortnaam; weergawe.") assert passes(mozillachecker.variables, "Save %file%", "Stoor %file%") assert fails_serious(mozillachecker.variables, "Save %file%", "Stoor %leer%") assert passes(mozillachecker.variables, "Save $file$", "Stoor $file$") assert fails_serious(mozillachecker.variables, "Save $file$", "Stoor $leer$") assert passes(mozillachecker.variables, "%d files of type %s saved.", "%d leers van %s tipe gestoor.") assert fails_serious(mozillachecker.variables, "%d files of type %s saved.", "%s leers van %s tipe gestoor.") assert passes(mozillachecker.variables, "Save 
$file", "Stoor $file") assert fails_serious(mozillachecker.variables, "Save $file", "Stoor $leer") assert passes(mozillachecker.variables, "About $ProgramName$", "Oor $ProgramName$") assert fails_serious(mozillachecker.variables, "About $ProgramName$", "Oor $NaamVanProgam$") assert passes(mozillachecker.variables, "About $_CLICK", "Oor $_CLICK") assert fails_serious(mozillachecker.variables, "About $_CLICK", "Oor $_KLIK") assert passes(mozillachecker.variables, "About $_CLICK and more", "Oor $_CLICK en meer") assert fails_serious(mozillachecker.variables, "About $_CLICK and more", "Oor $_KLIK en meer") assert passes(mozillachecker.variables, "About $(^NameDA)", "Oor $(^NameDA)") assert fails_serious(mozillachecker.variables, "About $(^NameDA)", "Oor $(^NaamDA)") # Double variable problem assert fails_serious(mozillachecker.variables, "Create In &lt;&lt;", "Etsa ka Ho &lt;lt;") # Variables at the end of a sentence assert fails_serious(mozillachecker.variables, "...time you start &brandShortName;.", "...lekgetlo le latelang ha o qala &LebitsoKgutshwane la kgwebo;.") # Ensure that we can detect two variables of the same name with one faulty assert fails_serious(mozillachecker.variables, "&brandShortName; successfully downloaded and installed updates. You will have to restart &brandShortName; to complete the update.", "&brandShortName; ḽo dzhenisa na u longela khwinifhadzo zwavhuḓi. Ni ḓo tea u thoma hafhu &DzinaḼipfufhi ḽa pfungavhuṇe; u itela u fhedzisa khwinifha dzo.") # We must detect entities in their fullform, ie with fullstop in the middle. assert fails_serious(mozillachecker.variables, "Welcome to the &pluginWizard.title;", "Wamkelekile kwi&Sihloko Soncedo lwe-plugin;") # Variables that are missing in quotes should be detected assert fails_serious(mozillachecker.variables, "\"%S\" is an executable file.... Are you sure you want to launch \"%S\"?", ".... 
Uyaqiniseka ukuthi ufuna ukuqalisa I\"%S\"?") # False positive $ style variables assert passes(mozillachecker.variables, "for reporting $ProductShortName$ crash information", "okokubika ukwaziswa kokumosheka kwe-$ProductShortName$") # We shouldn't mask variables within variables. This should highlight &brandShortName as missing and &amp as extra assert fails_serious(mozillachecker.variables, "&brandShortName;", "&amp;brandShortName;") def test_variables_openoffice(): """tests variables in OpenOffice translations""" # OpenOffice.org variables ooochecker = checks.OpenOfficeChecker() assert passes(ooochecker.variables, "Use the &brandShortname; instance.", "Gebruik die &brandShortname; weergawe.") assert fails_serious(ooochecker.variables, "Use the &brandShortname; instance.", "Gebruik die &brandKortnaam; weergawe.") assert passes(ooochecker.variables, "Save %file%", "Stoor %file%") assert fails_serious(ooochecker.variables, "Save %file%", "Stoor %leer%") assert passes(ooochecker.variables, "Save %file", "Stoor %file") assert fails_serious(ooochecker.variables, "Save %file", "Stoor %leer") assert passes(ooochecker.variables, "Save %1", "Stoor %1") assert fails_serious(ooochecker.variables, "Save %1", "Stoor %2") assert passes(ooochecker.variables, "Save %", "Stoor %") assert fails_serious(ooochecker.variables, "Save %", "Stoor") assert passes(ooochecker.variables, "Save $(file)", "Stoor $(file)") assert fails_serious(ooochecker.variables, "Save $(file)", "Stoor $(leer)") assert passes(ooochecker.variables, "Save $file$", "Stoor $file$") assert fails_serious(ooochecker.variables, "Save $file$", "Stoor $leer$") assert passes(ooochecker.variables, "Save ${file}", "Stoor ${file}") assert fails_serious(ooochecker.variables, "Save ${file}", "Stoor ${leer}") assert passes(ooochecker.variables, "Save #file#", "Stoor #file#") assert fails_serious(ooochecker.variables, "Save #file#", "Stoor #leer#") assert passes(ooochecker.variables, "Save #1", "Stoor #1") assert 
fails_serious(ooochecker.variables, "Save #1", "Stoor #2") assert passes(ooochecker.variables, "Save #", "Stoor #") assert fails_serious(ooochecker.variables, "Save #", "Stoor") assert passes(ooochecker.variables, "Save ($file)", "Stoor ($file)") assert fails_serious(ooochecker.variables, "Save ($file)", "Stoor ($leer)") assert passes(ooochecker.variables, "Save $[file]", "Stoor $[file]") assert fails_serious(ooochecker.variables, "Save $[file]", "Stoor $[leer]") assert passes(ooochecker.variables, "Save [file]", "Stoor [file]") assert fails_serious(ooochecker.variables, "Save [file]", "Stoor [leer]") assert passes(ooochecker.variables, "Save $file", "Stoor $file") assert fails_serious(ooochecker.variables, "Save $file", "Stoor $leer") assert passes(ooochecker.variables, "Use @EXTENSION@", "Gebruik @EXTENSION@") assert fails_serious(ooochecker.variables, "Use @EXTENSUION@", "Gebruik @UITBRUIDING@") # Same variable name twice assert fails_serious(ooochecker.variables, r"""Start %PROGRAMNAME% as %PROGRAMNAME%""", "Begin %PROGRAMNAME%") def test_variables_cclicense(): """Tests variables in Creative Commons translations.""" checker = checks.CCLicenseChecker() assert passes(checker.variables, "CC-GNU @license_code@.", "CC-GNU @license_code@.") assert fails_serious(checker.variables, "CC-GNU @license_code@.", "CC-GNU @lisensie_kode@.") assert passes(checker.variables, "Deed to the @license_name_full@", "Akte vir die @license_name_full@") assert fails_serious(checker.variables, "Deed to the @license_name_full@", "Akte vir die @volle_lisensie@") assert passes(checker.variables, "The @license_name_full@ is", "Die @license_name_full@ is") assert fails_serious(checker.variables, "The @license_name_full@ is", "Die @iiilicense_name_full@ is") assert fails_serious(checker.variables, "A @ccvar@", "'n @ccvertaaldeveranderlike@") def test_xmltags(): """tests xml tags""" stdchecker = checks.StandardChecker() assert fails(stdchecker.xmltags, "Do it <b>now</b>", "Doen dit <v>nou</v>") 
assert passes(stdchecker.xmltags, "Do it <b>now</b>", "Doen dit <b>nou</b>") assert passes(stdchecker.xmltags, "Click <img src=\"img.jpg\">here</img>", "Klik <img src=\"img.jpg\">hier</img>") assert fails(stdchecker.xmltags, "Click <img src=\"image.jpg\">here</img>", "Klik <img src=\"prent.jpg\">hier</img>") assert passes(stdchecker.xmltags, "Click <img src=\"img.jpg\" alt=\"picture\">here</img>", "Klik <img src=\"img.jpg\" alt=\"prentjie\">hier</img>") assert passes(stdchecker.xmltags, "Click <a title=\"tip\">here</a>", "Klik <a title=\"wenk\">hier</a>") assert passes(stdchecker.xmltags, "Click <div title=\"tip\">here</div>", "Klik <div title=\"wenk\">hier</div>") assert passes(stdchecker.xmltags, "Start with the &lt;start&gt; tag", "Begin met die &lt;begin&gt;") assert fails(stdchecker.xmltags, "Click <a href=\"page.html\">", "Klik <a hverw=\"page.html\">") assert passes(stdchecker.xmltags, "Click <a xml-lang=\"en\" href=\"page.html\">", "Klik <a xml-lang=\"af\" href=\"page.html\">") assert passes(stdchecker.xmltags, "Click <div lang=\"en\" dir=\"ltr\">", "Klik <div lang=\"ar\" dir=\"rtl\">") assert fails(stdchecker.xmltags, "Click <a href=\"page.html\" target=\"koei\">", "Klik <a href=\"page.html\">") assert fails(stdchecker.xmltags, "<b>Current Translation</b>", "<b>Traducción Actual:<b>") assert passes(stdchecker.xmltags, "<Error>", "<Fout>") assert fails(stdchecker.xmltags, "%d/%d translated\n(%d blank, %d fuzzy)", "<br>%d/%d μεταφρασμένα\n<br>(%d κενά, %d ασαφή)") assert fails(stdchecker.xmltags, '(and <a href="http://www.schoolforge.net/education-software" class="external">other open source software</a>)', '(en <a href="http://www.schoolforge.net/education-software" class="external">ander Vry Sagteware</a)') assert fails(stdchecker.xmltags, 'Because Tux Paint (and <a href="http://www.schoolforge.net/education-software" class="external">other open source software</a>) is free of cost and not limited in any way, a school can use it <i>today</i>, without 
waiting for procurement or a budget!', 'Omdat Tux Paint (en <a href="http://www.schoolforge.net/education-software" class="external">ander Vry Sagteware</a)gratis is en nie beperk is op enige manier nie, kan \'n skool dit vandag</i> gebruik sonder om te wag vir goedkeuring of \'n begroting!') assert fails(stdchecker.xmltags, "test <br />", "test <br>") assert fails(stdchecker.xmltags, "test <img src='foo.jpg'/ >", "test <img src='foo.jpg' >") frchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage="fr")) assert fails(frchecker.xmltags, "Click <a href=\"page.html\">", "Klik <a href=« page.html »>") def test_ooxmltags(): """Tests the xml tags in OpenOffice.org translations for quality as done in gsicheck""" ooochecker = checks.OpenOfficeChecker() #some attributes can be changed or removed assert fails(ooochecker.xmltags, "<img src=\"a.jpg\" width=\"400\">", "<img src=\"b.jpg\" width=\"500\">") assert passes(ooochecker.xmltags, "<img src=\"a.jpg\" width=\"400\">", "<img src=\"a.jpg\" width=\"500\">") assert passes(ooochecker.xmltags, "<img src=\"a.jpg\" width=\"400\">", "<img src=\"a.jpg\">") assert passes(ooochecker.xmltags, "<img src=\"a.jpg\">", "<img src=\"a.jpg\" width=\"400\">") assert passes(ooochecker.xmltags, "<alt xml-lang=\"ab\">text</alt>", "<alt>teks</alt>") assert passes(ooochecker.xmltags, "<ahelp visibility=\"visible\">bla</ahelp>", "<ahelp>blu</ahelp>") assert fails(ooochecker.xmltags, "<ahelp visibility=\"visible\">bla</ahelp>", "<ahelp visibility=\"invisible\">blu</ahelp>") assert fails(ooochecker.xmltags, "<ahelp visibility=\"invisible\">bla</ahelp>", "<ahelp>blu</ahelp>") #some attributes can be changed, but not removed assert passes(ooochecker.xmltags, "<link name=\"John\">", "<link name=\"Jan\">") assert fails(ooochecker.xmltags, "<link name=\"John\">", "<link naam=\"Jan\">") # Reported OOo error ## Bug 1910 assert fails(ooochecker.xmltags, u"""<variable id="FehlendesElement">In a database file window, click the 
<emph>Queries</emph> icon, then choose <emph>Edit - Edit</emph>. When referenced fields no longer exist, you see this dialog</variable>""", u"""<variable id="FehlendesElement">Dans une fenêtre de fichier de base de données, cliquez sur l'icône <emph>Requêtes</emph>, puis choisissez <emph>Éditer - Éditer</emp>. Lorsque les champs de référence n'existent plus, vous voyez cette boîte de dialogue</variable>""") assert fails(ooochecker.xmltags, "<variable> <emph></emph> <emph></emph> </variable>", "<variable> <emph></emph> <emph></emp> </variable>") def test_functions(): """tests to see that funtions() are not translated""" stdchecker = checks.StandardChecker() assert fails(stdchecker.functions, "blah rgb() blah", "blee brg() blee") assert passes(stdchecker.functions, "blah rgb() blah", "blee rgb() blee") assert fails(stdchecker.functions, "percentage in rgb()", "phesenthe kha brg()") assert passes(stdchecker.functions, "percentage in rgb()", "phesenthe kha rgb()") assert fails(stdchecker.functions, "rgb() in percentage", "brg() kha phesenthe") assert passes(stdchecker.functions, "rgb() in percentage", "rgb() kha phesenthe") assert fails(stdchecker.functions, "blah string.rgb() blah", "blee bleeb.rgb() blee") assert passes(stdchecker.functions, "blah string.rgb() blah", "blee string.rgb() blee") assert passes(stdchecker.functions, "or domain().", "domain() verwag.") assert passes(stdchecker.functions, "Expected url(), url-prefix(), or domain().", "url(), url-prefix() of domain() verwag.") def test_emails(): """tests to see that email addresses are not translated""" stdchecker = checks.StandardChecker() assert fails(stdchecker.emails, "blah bob@example.net blah", "blee kobus@voorbeeld.net blee") assert passes(stdchecker.emails, "blah bob@example.net blah", "blee bob@example.net blee") def test_urls(): """tests to see that URLs are not translated""" stdchecker = checks.StandardChecker() assert fails(stdchecker.urls, "blah http://translate.org.za blah", "blee 
http://vertaal.org.za blee") assert passes(stdchecker.urls, "blah http://translate.org.za blah", "blee http://translate.org.za blee") def test_simpleplurals(): """test that we can find English style plural(s)""" stdchecker = checks.StandardChecker() assert passes(stdchecker.simpleplurals, "computer(s)", "rekenaar(s)") assert fails(stdchecker.simpleplurals, "plural(s)", "meervoud(e)") assert fails(stdchecker.simpleplurals, "Ungroup Metafile(s)...", "Kuvhanganyululani Metafaela(dzi)...") # Test a language that doesn't use plurals stdchecker = checks.StandardChecker(checks.CheckerConfig(targetlanguage='vi')) assert passes(stdchecker.simpleplurals, "computer(s)", u"Máy tính") assert fails(stdchecker.simpleplurals, "computer(s)", u"Máy tính(s)") def test_nplurals(): """Test that we can find the wrong number of plural forms. Note that this test uses a UnitChecker, not a translation checker.""" checker = checks.StandardUnitChecker() unit = po.pounit("") unit.source = ["%d file", "%d files"] unit.target = [u"%d lêer", u"%d lêers"] assert checker.nplurals(unit) checker = checks.StandardUnitChecker(checks.CheckerConfig(targetlanguage='af')) unit.source = "%d files" unit.target = "%d lêer" assert checker.nplurals(unit) unit.source = ["%d file", "%d files"] unit.target = [u"%d lêer", u"%d lêers"] assert checker.nplurals(unit) unit.source = ["%d file", "%d files"] unit.target = [u"%d lêer", u"%d lêers", u"%d lêeeeers"] assert not checker.nplurals(unit) unit.source = ["%d file", "%d files"] unit.target = [u"%d lêer"] assert not checker.nplurals(unit) checker = checks.StandardUnitChecker(checks.CheckerConfig(targetlanguage='km')) unit.source = "%d files" unit.target = "%d ឯកសារ" assert checker.nplurals(unit) unit.source = ["%d file", "%d files"] unit.target = [u"%d ឯកសារ"] assert checker.nplurals(unit) unit.source = ["%d file", "%d files"] unit.target = [u"%d ឯកសារ", u"%d lêers"] assert not checker.nplurals(unit) def test_credits(): """tests credits""" stdchecker = 
checks.StandardChecker() assert passes(stdchecker.credits, "File", "iFayile") assert passes(stdchecker.credits, "&File", "&Fayile") assert passes(stdchecker.credits, "translator-credits", "Ekke, ekke!") assert passes(stdchecker.credits, "Your names", "Ekke, ekke!") assert passes(stdchecker.credits, "ROLES_OF_TRANSLATORS", "Ekke, ekke!") kdechecker = checks.KdeChecker() assert passes(kdechecker.credits, "File", "iFayile") assert passes(kdechecker.credits, "&File", "&Fayile") assert passes(kdechecker.credits, "translator-credits", "Ekke, ekke!") assert fails(kdechecker.credits, "Your names", "Ekke, ekke!") assert fails(kdechecker.credits, "ROLES_OF_TRANSLATORS", "Ekke, ekke!") gnomechecker = checks.GnomeChecker() assert passes(gnomechecker.credits, "File", "iFayile") assert passes(gnomechecker.credits, "&File", "&Fayile") assert fails(gnomechecker.credits, "translator-credits", "Ekke, ekke!") assert passes(gnomechecker.credits, "Your names", "Ekke, ekke!") assert passes(gnomechecker.credits, "ROLES_OF_TRANSLATORS", "Ekke, ekke!") def test_gconf(): """test GNOME gconf errors""" gnomechecker = checks.GnomeChecker() # Let's cheat a bit and prepare the checker as the run_filters() method # would do by adding locations needed by the gconf test gnomechecker.locations = [] assert passes(gnomechecker.gconf, 'Blah "gconf_setting"', 'Bleh "gconf_setting"') assert passes(gnomechecker.gconf, 'Blah "gconf_setting"', 'Bleh "gconf_steling"') gnomechecker.locations = ['file.schemas.in.h:24'] assert passes(gnomechecker.gconf, 'Blah "gconf_setting"', 'Bleh "gconf_setting"') assert fails(gnomechecker.gconf, 'Blah "gconf_setting"', 'Bleh "gconf_steling"') # redo the same, but with the new location comment: gnomechecker.locations = ['file.gschema.xml.in.in.h:24'] assert passes(gnomechecker.gconf, 'Blah "gconf_setting"', 'Bleh "gconf_setting"') assert fails(gnomechecker.gconf, 'Blah "gconf_setting"', 'Bleh "gconf_steling"') def test_hassuggestion(): """test that hassuggestion() works""" 
checker = checks.StandardUnitChecker() po_store = po.pofile() po_store.addsourceunit("koeie") assert checker.hassuggestion(po_store.units[-1]) xliff_store = xliff.xlifffile.parsestring(''' <xliff version='1.2' xmlns='urn:oasis:names:tc:xliff:document:1.2'> <file original='hello.txt' source-language='en' target-language='fr' datatype='plaintext'> <body> <trans-unit id='hi'> <source>Hello world</source> <target>Bonjour le monde</target> <alt-trans> <target xml:lang='es'>Hola mundo</target> </alt-trans> </trans-unit> </body> </file> </xliff> ''') assert not checker.hassuggestion(xliff_store.units[0]) def test_dialogsizes(): """test Mozilla dialog sizes""" mozillachecker = checks.MozillaChecker() assert passes(mozillachecker.dialogsizes, 'width: 12em;', 'width: 12em;') assert passes(mozillachecker.dialogsizes, 'width: 12em; height: 36em', 'width: 12em; height: 36em') assert fails(mozillachecker.dialogsizes, 'height: 12em;', 'hoogde: 12em;') assert passes(mozillachecker.dialogsizes, 'height: 12em;', 'height: 24px;') assert fails(mozillachecker.dialogsizes, 'height: 12em;', 'height: 24xx;') assert fails(mozillachecker.dialogsizes, 'height: 12.5em;', 'height: 12,5em;')
""" This page is in the table of contents. Skeinlayer is an analyze viewer to display each layer of a gcode file. The skeinlayer manual page is at: http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Skeinlayer Skeinlayer is derived from Nophead's preview script. The extruded lines are in the resistor colors red, orange, yellow, green, blue, purple & brown. When the extruder is off, the travel line is grey. Skeinlayer is useful for a detailed view of the extrusion, skeiniso is better to see the orientation of the shape. To get an initial overview of the skein, when the skeinlayer display window appears, click the Soar button (double right arrow button beside the layer field). ==Operation== The default 'Activate Skeinlayer' checkbox is on. When it is on, the functions described below will work when called from the skeinforge toolchain, when it is off, the functions will not be called from the toolchain. The functions will still be called, whether or not the 'Activate Skeinlayer' checkbox is on, when skeinlayer is run directly. Skeinlayer has trouble separating the layers when it reads gcode without comments. ==Settings== ===Animation=== ====Animation Line Quickening==== Default is one. The quickness of the tool animation over the quickness of the actual tool. ====Animation Slide Show Rate==== Default is two layers per second. The rate, in layers per second, at which the layer changes when the soar or dive button is pressed.. ===Draw Arrows=== Default is on. When selected, arrows will be drawn at the end of each line segment. ===Export Menu=== When the submenu in the export menu item in the file menu is clicked, an export canvas dialog will be displayed, which can export the canvas to a file. ===Go Around Extruder Off Travel=== Default is off. When selected, the display will include the travel when the extruder is off, which means it will include the nozzle wipe path if any. ===Layers=== ====Layer==== Default is zero. 
On the display window, the Up button increases the 'Layer' by one, and the Down button decreases the layer by one. When the layer displayed in the layer spin box is changed then <Return> is hit, the layer shown will be set to the spin box, to a minimum of zero and to a maximum of the highest index layer. The Soar button increases the layer at the 'Animation Slide Show Rate', and the Dive (double left arrow button beside the layer field) button decreases the layer at the slide show rate. ====Layer Extra Span==== Default is zero. The viewer will draw the layers in the range including the 'Layer' index and the 'Layer' index plus the 'Layer Extra Span'. If the 'Layer Extra Span' is negative, the layers viewed will start at the 'Layer' index, plus the 'Layer Extra Span', and go up to and include the 'Layer' index. If the 'Layer Extra Span' is zero, only the 'Layer' index layer will be displayed. If the 'Layer Extra Span' is positive, the layers viewed will start at the 'Layer' index, and go up to and include the 'Layer' index plus the 'Layer Extra Span'. ===Line=== Default is zero. The index of the selected line on the layer that is highlighted when the 'Display Line' mouse tool is chosen. The line spin box up button increases the 'Line' by one. If the line index of the layer goes over the index of the last line, the layer index will be increased by one and the new line index will be zero. The down button decreases the line index by one. If the line index goes below the index of the first line, the layer index will be decreased by one and the new line index will be at the last line. When the line displayed in the line field is changed then <Return> is hit, the line shown will be set to the line field, to a minimum of zero and to a maximum of the highest index line.
The Soar button increases the line at the speed at which the extruder would move, times the 'Animation Line Quickening' ratio, and the Dive (double left arrow button beside the line field) button decreases the line at the animation line quickening ratio. ===Mouse Mode=== Default is 'Display Line'. The mouse tool can be changed from the 'Mouse Mode' menu button or picture button. The mouse tools listen to the arrow keys when the canvas has the focus. Clicking in the canvas gives the canvas the focus, and when the canvas has the focus a thick black border is drawn around the canvas. ====Display Line==== The 'Display Line' tool will display the highlight the selected line, and display the file line count, counting from one, and the gcode line itself. When the 'Display Line' tool is active, clicking the canvas will select the closest line to the mouse click. ====Viewpoint Move==== The 'Viewpoint Move' tool will move the viewpoint in the xy plane when the mouse is clicked and dragged on the canvas. ===Numeric Pointer=== Default is on. When selected, the distance along the ruler of the arrow pointers will be drawn next to the pointers. ===Scale=== Default is ten. The scale setting is the scale of the image in pixels per millimeter, the higher the number, the greater the size of the display. The zoom in mouse tool will zoom in the display at the point where the mouse was clicked, increasing the scale by a factor of two. The zoom out tool will zoom out the display at the point where the mouse was clicked, decreasing the scale by a factor of two. ===Screen Inset=== ====Screen Horizontal Inset==== Default is one hundred. The "Screen Horizontal Inset" determines how much the canvas will be inset in the horizontal direction from the edge of screen, the higher the number the more it will be inset and the smaller it will be. ====Screen Vertical Inset==== Default is two hundred and twenty. 
The "Screen Vertical Inset" determines how much the canvas will be inset in the vertical direction from the edge of screen, the higher the number the more it will be inset and the smaller it will be. ===Width=== The width of each type of thread and of each axis can be changed. If the width is set to zero, the thread will not be visible. ====Width of Extrusion Thread==== Default is three. The "Width of Extrusion Thread" sets the width of the extrusion threads. ====Width of Selection Thread==== Default is six. The "Width of Selection Thread" sets the width of the selected line. ====Width of Travel Thread==== Default is one. The "Width of Travel Thread" sets the width of the grey extruder off travel threads. ==Icons== The dive, soar and zoom icons are from Mark James' soarSilk icon set 1.3 at: http://www.famfamfam.com/lab/icons/silk/ ==Gcodes== An explanation of the gcodes is at: http://reprap.org/bin/view/Main/Arduino_GCode_Interpreter and at: http://reprap.org/bin/view/Main/MCodeReference A gode example is at: http://forums.reprap.org/file.php?12,file=565 ==Examples== Below are examples of skeinlayer being used. These examples are run in a terminal in the folder which contains Screw Holder_penultimate.gcode and skeinlayer.py. > python skeinlayer.py This brings up the skeinlayer dialog. > python skeinlayer.py Screw Holder_penultimate.gcode This brings up the skeinlayer viewer to view each layer of a gcode file. 
""" from __future__ import absolute_import import __init__ from fabmetheus_utilities.vector3 import Vector3 from fabmetheus_utilities import archive from fabmetheus_utilities import euclidean from fabmetheus_utilities import gcodec from fabmetheus_utilities import settings from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities import display_line from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities import tableau from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities import view_move from skeinforge_application.skeinforge_utilities import skeinforge_polyfile from skeinforge_application.skeinforge_utilities import skeinforge_profile import os import sys __author__ = 'Enrique Perez (perez_enrique@yahoo.com)' __date__ = '$Date: 2008/21/04 $' __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' def getNewRepository(): 'Get new repository.' return SkeinlayerRepository() def getRankIndex( rulingSeparationWidthMillimeters, screenOrdinate ): "Get rank index." return int( round( screenOrdinate / rulingSeparationWidthMillimeters ) ) def getWindowAnalyzeFile(fileName): "Display a gcode file in a skeinlayer window." gcodeText = archive.getFileText(fileName) return getWindowAnalyzeFileGivenText(fileName, gcodeText) def getWindowAnalyzeFileGivenText( fileName, gcodeText, repository=None): "Display a gcode file in a skeinlayer window given the text." if gcodeText == '': return None if repository == None: repository = settings.getReadRepository( SkeinlayerRepository() ) skeinWindow = getWindowGivenTextRepository( fileName, gcodeText, repository ) skeinWindow.updateDeiconify() return skeinWindow def getWindowGivenTextRepository( fileName, gcodeText, repository ): "Display a gcode file in a skeinlayer window given the text and settings." 
skein = SkeinlayerSkein() skein.parseGcode( fileName, gcodeText, repository ) return SkeinWindow( repository, skein ) def writeOutput(fileName, fileNamePenultimate, fileNameSuffix, filePenultimateWritten, gcodeText=''): "Display a skeinlayered gcode file for a skeinforge gcode file, if 'Activate Skeinlayer' is selected." try: import Tkinter except: try: import tkinter as Tkinter except: print('Warning, skeinlayer will do nothing because Tkinter is not installed.') return repository = settings.getReadRepository( SkeinlayerRepository() ) if repository.activateSkeinlayer.value: gcodeText = archive.getTextIfEmpty( fileNameSuffix, gcodeText ) return getWindowAnalyzeFileGivenText( fileNameSuffix, gcodeText, repository ) class SkeinlayerRepository( tableau.TableauRepository ): "A class to handle the skeinlayer settings." def __init__(self): "Set the default settings, execute title & settings fileName." skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.analyze_plugins.skeinlayer.html', self) self.baseNameSynonym = 'skeinview.csv' self.fileNameInput = settings.FileNameInput().getFromFileName( [ ('Gcode text files', '*.gcode') ], 'Open File for Skeinlayer', self, '') self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Skeinlayer') self.activateSkeinlayer = settings.BooleanSetting().getFromValue('Activate Skeinlayer', self, True ) self.addAnimation() self.drawArrows = settings.BooleanSetting().getFromValue('Draw Arrows', self, True ) self.goAroundExtruderOffTravel = settings.BooleanSetting().getFromValue('Go Around Extruder Off Travel', self, False ) settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- Layers -', self ) self.layer = settings.IntSpinNotOnMenu().getSingleIncrementFromValue( 0, 'Layer (index):', self, 912345678, 0 ) self.layerExtraSpan = settings.IntSpinUpdate().getSingleIncrementFromValue( - 3, 'Layer Extra 
Span (integer):', self, 3, 0 ) settings.LabelSeparator().getFromRepository(self) self.line = settings.IntSpinNotOnMenu().getSingleIncrementFromValue( 0, 'Line (index):', self, 912345678, 0 ) self.mouseMode = settings.MenuButtonDisplay().getFromName('Mouse Mode:', self ) self.displayLine = settings.MenuRadio().getFromMenuButtonDisplay( self.mouseMode, 'Display Line', self, True ) self.viewMove = settings.MenuRadio().getFromMenuButtonDisplay( self.mouseMode, 'View Move', self, False ) self.addScaleScreenSlide() self.showPosition = settings.BooleanSetting().getFromValue('Show Position', self, True ) settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- Width -', self ) self.widthOfExtrusionThread = settings.IntSpinUpdate().getSingleIncrementFromValue( 0, 'Width of Extrusion Thread (pixels):', self, 5, 3 ) self.widthOfSelectionThread = settings.IntSpinUpdate().getSingleIncrementFromValue( 0, 'Width of Selection Thread (pixels):', self, 10, 6 ) self.widthOfTravelThread = settings.IntSpinUpdate().getSingleIncrementFromValue( 0, 'Width of Travel Thread (pixels):', self, 5, 1 ) self.executeTitle = 'Skeinlayer' def execute(self): "Write button has been clicked." fileNames = skeinforge_polyfile.getFileOrGcodeDirectory( self.fileNameInput.value, self.fileNameInput.wasCancelled ) for fileName in fileNames: getWindowAnalyzeFile(fileName) class SkeinlayerSkein: "A class to write a get a scalable vector graphics text for a gcode skein." def __init__(self): 'Initialize.' self.extrusionNumber = 0 self.feedRateMinute = 960.1 self.isThereALayerStartWord = False self.layerCount = settings.LayerCount() self.oldZ = - 999987654321.0 self.skeinPane = None self.skeinPanes = [] def addToPath( self, line, location ): "Add a point to travel and maybe extrusion." 
if self.oldLocation == None: return colorName = 'gray' locationComplex = location.dropAxis() oldLocationComplex = self.oldLocation.dropAxis() begin = self.getScreenCoordinates( oldLocationComplex ) end = self.getScreenCoordinates( locationComplex ) if self.extruderActive: colorName = self.colorNames[ self.extrusionNumber % len( self.colorNames ) ] displayString = '%s %s' % ( self.lineIndex + 1, line ) tagString = 'colored_line_index: %s %s' % ( len( self.skeinPane ), len( self.skeinPanes ) - 1 ) coloredLine = tableau.ColoredLine( begin, colorName, displayString, end, tagString ) coloredLine.isExtrusionThread = self.extruderActive self.skeinPane.append( coloredLine ) def getModelCoordinates( self, screenCoordinates ): "Get the model coordinates." modelCoordinates = ( screenCoordinates + self.marginCornerLow ) / self.scale return complex( modelCoordinates.real, self.cornerImaginaryTotal - modelCoordinates.imag ) def getScreenCoordinates( self, pointComplex ): "Get the screen coordinates." pointComplex = complex( pointComplex.real, self.cornerImaginaryTotal - pointComplex.imag ) return self.scale * pointComplex - self.marginCornerLow def initializeActiveLocation(self): "Set variables to default." self.extruderActive = False self.oldLocation = None def linearCorner( self, splitLine ): "Update the bounding corners." location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine) if self.extruderActive or self.repository.goAroundExtruderOffTravel.value: self.cornerMaximum.maximize(location) self.cornerMinimum.minimize(location) self.oldLocation = location def linearMove( self, line, location ): "Get statistics for a linear move." if self.skeinPane != None: self.addToPath(line, location) def parseCorner(self, line): "Parse a gcode line and use the location to update the bounding corners." 
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) if len(splitLine) < 1: return firstWord = splitLine[0] if firstWord == 'G1': self.linearCorner(splitLine) elif firstWord == 'M101': self.extruderActive = True elif firstWord == 'M103': self.extruderActive = False def parseGcode( self, fileName, gcodeText, repository ): "Parse gcode text and store the vector output." self.fileName = fileName self.gcodeText = gcodeText self.repository = repository self.initializeActiveLocation() self.cornerMaximum = Vector3(-987654321.0, -987654321.0, -987654321.0) self.cornerMinimum = Vector3(987654321.0, 987654321.0, 987654321.0) self.lines = archive.getTextLines(gcodeText) self.isThereALayerStartWord = (gcodec.getFirstWordIndexReverse('(<layer>', self.lines, 1) > -1) self.parseInitialization() for line in self.lines[self.lineIndex :]: self.parseCorner(line) self.cornerMaximumComplex = self.cornerMaximum.dropAxis() self.cornerMinimumComplex = self.cornerMinimum.dropAxis() self.scale = repository.scale.value self.scaleCornerHigh = self.scale * self.cornerMaximumComplex self.scaleCornerLow = self.scale * self.cornerMinimumComplex self.cornerImaginaryTotal = self.cornerMaximum.y + self.cornerMinimum.y self.margin = complex( 10.0, 10.0 ) self.marginCornerHigh = self.scaleCornerHigh + self.margin self.marginCornerLow = self.scaleCornerLow - self.margin self.screenSize = self.marginCornerHigh - self.marginCornerLow self.initializeActiveLocation() self.colorNames = ['brown', 'red', 'orange', 'yellow', 'green', 'blue', 'purple'] for self.lineIndex in xrange(self.lineIndex, len(self.lines)): line = self.lines[self.lineIndex] self.parseLine(line) def parseInitialization(self): 'Parse gcode initialization and store the parameters.' 
# NOTE(review): this chunk starts inside a method whose 'def' line lies above
# this view; the loop below scans the gcode header for initialization settings.
        for self.lineIndex in xrange(len(self.lines)):
            line = self.lines[self.lineIndex]
            splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
            firstWord = gcodec.getFirstWord(splitLine)
            if firstWord == '(</extruderInitialization>)':
                # End of the initialization section: nothing more to read.
                return
            elif firstWord == '(<operatingFeedRatePerSecond>':
                # Feed rate is stored per minute, hence the factor of 60.
                self.feedRateMinute = 60.0 * float(splitLine[1])
        self.lineIndex = 0

    def parseLine(self, line):
        "Parse a gcode line and add it to the vector output."
        splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
        if len(splitLine) < 1:
            return
        firstWord = splitLine[0]
        if tableau.getIsLayerStart(firstWord, self, splitLine):
            # A new layer begins: reset the extrusion counter, open a new pane.
            self.extrusionNumber = 0
            self.layerCount.printProgressIncrement('skeinlayer')
            self.skeinPane = []
            self.skeinPanes.append( self.skeinPane )
        if firstWord == 'G1':
            # Linear move.
            location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
            self.linearMove(line, location)
            self.oldLocation = location
        elif firstWord == 'M101':
            # Extruder on: this starts a new extrusion thread.
            self.extruderActive = True
            self.extrusionNumber += 1
        elif firstWord == 'M103':
            # Extruder off.
            self.extruderActive = False
        if firstWord == 'G2' or firstWord == 'G3':
            # Arc move: the split line carries an offset relative to the old
            # location; z is flattened because panes are per-layer 2D views.
            relativeLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
            relativeLocation.z = 0.0
            location = self.oldLocation + relativeLocation
            self.linearMove(line, location)
            self.oldLocation = location


class SkeinWindow( tableau.TableauWindow ):
    "Tkinter window that displays a skein with horizontal and vertical rulers."

    def __init__(self, repository, skein):
        "Initialize the skein window.setWindowNewMouseTool"
        self.addCanvasMenuRootScrollSkein(repository, skein, '_skeinlayer', 'Skeinlayer')
        # Horizontal ruler along the top of the canvas.
        horizontalRulerBoundingBox = (0, 0, int( skein.screenSize.real ), self.rulingExtent)
        self.horizontalRulerCanvas = settings.Tkinter.Canvas(self.root, width = self.canvasWidth, height = self.rulingExtent, scrollregion=horizontalRulerBoundingBox)
        self.horizontalRulerCanvas.grid(row=1, column=2, columnspan=96, sticky=settings.Tkinter.E+settings.Tkinter.W)
        self.horizontalRulerCanvas['xscrollcommand'] = self.xScrollbar.set
        # Vertical ruler along the left of the canvas.
        verticalRulerBoundingBox = (0, 0, self.rulingExtent, int(skein.screenSize.imag))
        self.verticalRulerCanvas = settings.Tkinter.Canvas(self.root, width=self.rulingExtent, height=self.canvasHeight, scrollregion=verticalRulerBoundingBox)
        self.verticalRulerCanvas.grid(row=2, rowspan=96, column=1, sticky=settings.Tkinter.N+settings.Tkinter.S)
        self.verticalRulerCanvas['yscrollcommand'] = self.yScrollbar.set
        # Labels showing the pointer position in model coordinates.
        self.xStringVar = settings.Tkinter.StringVar(self.root)
        self.xLabel = settings.Tkinter.Label(self.root, textvariable=self.xStringVar)
        self.xLabel.grid(row=0, column=3, sticky=settings.Tkinter.W)
        self.yStringVar = settings.Tkinter.StringVar(self.root)
        self.yLabel = settings.Tkinter.Label(self.root, textvariable=self.yStringVar)
        self.yLabel.grid(row=0, column=4, sticky=settings.Tkinter.W)
        self.setWindowNewMouseTool(display_line.getNewMouseTool, repository.displayLine)
        self.setWindowNewMouseTool(view_move.getNewMouseTool, repository.viewMove)
        # Redraw when display options change.
        repository.showPosition.setUpdateFunction(self.setWindowToDisplaySaveUpdate)
        repository.widthOfExtrusionThread.setUpdateFunction(self.setWindowToDisplaySaveUpdate)
        self.addMouseToolsBind()
        self.createRulers()

    def addHorizontalRulerRuling( self, xMillimeters ):
        "Add a ruling to the horizontal ruler."
        xPixel = self.skein.getScreenCoordinates( complex( xMillimeters, 0.0 ) ).real
        self.createVerticalLine( 0.0, xPixel )
        self.horizontalRulerCanvas.create_text( xPixel + 2, 0, anchor = settings.Tkinter.NW, text = self.getRoundedRulingText( 1, xMillimeters ) )
        cumulativeDistance = xMillimeters
        # Minor tick just after the labelled ruling, then four fifth-spaced
        # sub-rulings, each followed by its own tenth-spaced minor tick.
        self.createVerticalLine( self.rulingExtentTiny, self.skein.getScreenCoordinates( complex( xMillimeters + self.separationWidthMillimetersTenth, 0.0 ) ).real )
        for subRulingIndex in xrange(4):
            cumulativeDistance += self.separationWidthMillimetersFifth
            self.createVerticalLine( self.rulingExtentShort, self.skein.getScreenCoordinates( complex( cumulativeDistance, 0.0 ) ).real )
            self.createVerticalLine( self.rulingExtentTiny, self.skein.getScreenCoordinates( complex( cumulativeDistance + self.separationWidthMillimetersTenth, 0.0 ) ).real )

    def addVerticalRulerRuling( self, yMillimeters ):
        "Add a ruling to the vertical ruler."
        fontHeight = 12
        yPixel = self.skein.getScreenCoordinates( complex( 0.0, yMillimeters ) ).imag
        self.createHorizontalLine( 0.0, yPixel )
        yPixel += 2
        roundedRulingText = self.getRoundedRulingText( 1, yMillimeters )
        effectiveRulingTextLength = len( roundedRulingText )
        # A decimal point is narrow, so it does not count toward the width.
        if roundedRulingText.find('.') != - 1:
            effectiveRulingTextLength -= 1
        cumulativeDistance = yMillimeters
        self.createHorizontalLine( self.rulingExtentTiny, self.skein.getScreenCoordinates( complex( 0.0, yMillimeters + self.separationWidthMillimetersTenth ) ).imag )
        for subRulingIndex in xrange(4):
            cumulativeDistance += self.separationWidthMillimetersFifth
            self.createHorizontalLine( self.rulingExtentShort, self.skein.getScreenCoordinates( complex( 0.0, cumulativeDistance ) ).imag )
            self.createHorizontalLine( self.rulingExtentTiny, self.skein.getScreenCoordinates( complex( 0.0, cumulativeDistance + self.separationWidthMillimetersTenth ) ).imag )
        if effectiveRulingTextLength < 4:
            # Short label fits horizontally beside the ruling.
            self.verticalRulerCanvas.create_text( 0, yPixel, anchor = settings.Tkinter.NW, text = roundedRulingText )
            return
        # Long label: draw one character per row, stacked vertically.
        for character in roundedRulingText:
            if character == '.':
                # Tuck the decimal point closer to the previous digit.
                yPixel -= fontHeight * 2 / 3
            self.verticalRulerCanvas.create_text( 0, yPixel, anchor = settings.Tkinter.NW, text = character )
            yPixel += fontHeight

    def createHorizontalLine( self, begin, yPixel ):
        "Create a horizontal line for the horizontal ruler."
        self.verticalRulerCanvas.create_line( begin, yPixel, self.rulingExtent, yPixel, fill = 'black')

    def createRulers(self):
        "Create the rulers.."
        # Derived tick lengths and pointer geometry.
        self.rulingExtentShort = 0.382 * self.rulingExtent
        self.rulingExtentTiny = 0.764 * self.rulingExtent
        self.rulingExtentPointer = 0.5 * ( self.rulingExtentShort + self.rulingExtentTiny )
        self.rulingPointerRadius = self.rulingExtent - self.rulingExtentPointer
        self.textBoxHeight = int( 0.8 * self.rulingExtent )
        self.textBoxWidth = int( 2.5 * self.rulingExtent )
        self.separationWidthMillimetersFifth = 0.2 * self.rulingSeparationWidthMillimeters
        self.separationWidthMillimetersTenth = 0.1 * self.rulingSeparationWidthMillimeters
        rulingSeparationWidthPixels = self.getRulingSeparationWidthPixels( self.rank )
        marginOverScale = self.skein.margin / self.skein.scale
        cornerMaximumMargin = self.skein.cornerMaximumComplex + marginOverScale
        cornerMinimumMargin = self.skein.cornerMinimumComplex - marginOverScale
        # getRankIndex is a module-level helper defined elsewhere in this file.
        xRankIndexHigh = getRankIndex( self.rulingSeparationWidthMillimeters, cornerMaximumMargin.real )
        xRankIndexLow = getRankIndex( self.rulingSeparationWidthMillimeters, cornerMinimumMargin.real )
        for xRankIndex in xrange( xRankIndexLow - 2, xRankIndexHigh + 2 ): # 1 is enough, 2 is to be on the safe side
            self.addHorizontalRulerRuling( xRankIndex * self.rulingSeparationWidthMillimeters )
        yRankIndexHigh = getRankIndex( self.rulingSeparationWidthMillimeters, cornerMaximumMargin.imag )
        yRankIndexLow = getRankIndex( self.rulingSeparationWidthMillimeters, cornerMinimumMargin.imag )
        for yRankIndex in xrange( yRankIndexLow - 2, yRankIndexHigh + 2 ): # 1 is enough, 2 is to be on the safe side
            self.addVerticalRulerRuling( yRankIndex * self.rulingSeparationWidthMillimeters )

    def createVerticalLine( self, begin, xPixel ):
        "Create a vertical line for the horizontal ruler."
        self.horizontalRulerCanvas.create_line( xPixel, begin, xPixel, self.rulingExtent, fill = 'black')

    def getColoredLines(self):
        "Get the colored lines from the skein pane."
        if len(self.skeinPanes) == 0:
            return []
        return self.skeinPanes[self.repository.layer.value]

    def getCopy(self):
        "Get a copy of this window."
        return SkeinWindow(self.repository, self.skein)

    def getCopyWithNewSkein(self):
        "Get a copy of this window with a new skein."
        return getWindowGivenTextRepository( self.skein.fileName, self.skein.gcodeText, self.repository )

    def getDrawnColoredLine( self, coloredLine, tags, width ):
        "Get the drawn colored line."
        return self.canvas.create_line( coloredLine.begin.real, coloredLine.begin.imag, coloredLine.end.real, coloredLine.end.imag, fill = coloredLine.colorName, arrow = self.arrowType, tags = tags, width = width )

    def getDrawnColoredLineIfThick( self, coloredLine, width ):
        "Get the drawn colored line if it has a positive thickness."
        if width > 0:
            return self.getDrawnColoredLine( coloredLine, coloredLine.tagString, width )

    def getDrawnSelectedColoredLine(self, coloredLine):
        "Get the drawn selected colored line."
        return self.getDrawnColoredLine(coloredLine, 'selection_line', self.repository.widthOfSelectionThread.value)

    def motion(self, event):
        "The mouse moved."
        self.mouseTool.motion(event)
        xString = ''
        yString = ''
        x = self.canvas.canvasx( event.x )
        y = self.canvas.canvasy( event.y )
        # Redraw the triangular position pointers on both rulers.
        self.horizontalRulerCanvas.delete('pointer')
        self.horizontalRulerCanvas.create_polygon( x - self.rulingPointerRadius, self.rulingExtentPointer, x + self.rulingPointerRadius, self.rulingExtentPointer, x, self.rulingExtent, tag = 'pointer')
        self.verticalRulerCanvas.delete('pointer')
        self.verticalRulerCanvas.create_polygon( self.rulingExtentPointer, y - self.rulingPointerRadius, self.rulingExtentPointer, y + self.rulingPointerRadius, self.rulingExtent, y, tag = 'pointer')
        if self.repository.showPosition.value:
            motionCoordinate = complex(x, y)
            modelCoordinates = self.skein.getModelCoordinates( motionCoordinate )
            roundedXText = self.getRoundedRulingText(3, modelCoordinates.real)
            roundedYText = self.getRoundedRulingText(3, modelCoordinates.imag)
            xString = 'X: ' + roundedXText
            yString = 'Y: ' + roundedYText
        self.xStringVar.set(xString)
        self.yStringVar.set(yString)

    # NOTE(review): apparently a disabled variant of motion() kept for
    # reference (the 'qqq' prefix keeps it from overriding motion).
    def qqqmotion(self, event):
        "The mouse moved."
        self.mouseTool.motion(event)
        x = self.canvas.canvasx( event.x )
        y = self.canvas.canvasy( event.y )
        self.horizontalRulerCanvas.delete('pointer')
        self.horizontalRulerCanvas.create_polygon( x - self.rulingPointerRadius, self.rulingExtentPointer, x + self.rulingPointerRadius, self.rulingExtentPointer, x, self.rulingExtent, tag = 'pointer')
        self.verticalRulerCanvas.delete('pointer')
        self.verticalRulerCanvas.create_polygon( self.rulingExtentPointer, y - self.rulingPointerRadius, self.rulingExtentPointer, y + self.rulingPointerRadius, self.rulingExtent, y, tag = 'pointer')
        if not self.repository.numericPointer.value:
            return
        # Draw the coordinate read-outs directly on the canvas.
        motionCoordinate = complex(x, y)
        modelCoordinates = self.skein.getModelCoordinates( motionCoordinate )
        roundedXText = self.getRoundedRulingText( 3, modelCoordinates.real )
        yStart = self.canvas.canvasy( 0 )
        self.canvas.create_rectangle( x - 2, yStart, x + self.textBoxWidth, yStart + self.textBoxHeight + 5, fill = self.canvas['background'], tag = 'pointer')
        self.canvas.create_text( x, yStart + 5, anchor = settings.Tkinter.NW, tag = 'pointer', text = roundedXText )
        roundedYText = self.getRoundedRulingText( 3, modelCoordinates.imag )
        xStart = self.canvas.canvasx( 0 )
        self.canvas.create_rectangle( xStart, y - 2, xStart + self.textBoxWidth + 5, y + self.textBoxHeight, fill = self.canvas['background'], tag = 'pointer')
        self.canvas.create_text( xStart + 5, y, anchor = settings.Tkinter.NW, tag = 'pointer', text = roundedYText )
        xString = ''
        xString = 'X: ' + roundedXText
        self.xStringVar.set(xString)

    def relayXview( self, *args ):
        "Relay xview changes."
        # Keep the horizontal ruler scrolled in sync with the main canvas.
        self.canvas.xview( *args )
        self.horizontalRulerCanvas.xview( *args )

    def relayYview( self, *args ):
        "Relay yview changes."
        # Keep the vertical ruler scrolled in sync with the main canvas.
        self.canvas.yview( *args )
        self.verticalRulerCanvas.yview( *args )

    def update(self):
        "Update the window."
        if len( self.skeinPanes ) < 1:
            return
        self.limitIndexSetArrowMouseDeleteCanvas()
        for coloredLines in self.getUpdateSkeinPanes():
            for coloredLine in coloredLines:
                # Extrusion and travel threads are drawn with separate widths.
                if coloredLine.isExtrusionThread:
                    self.getDrawnColoredLineIfThick( coloredLine, self.repository.widthOfExtrusionThread.value )
                else:
                    self.getDrawnColoredLineIfThick( coloredLine, self.repository.widthOfTravelThread.value )
        self.setDisplayLayerIndex()


def main():
    "Display the skeinlayer dialog."
    if len(sys.argv) > 1:
        settings.startMainLoopFromWindow(getWindowAnalyzeFile(' '.join(sys.argv[1 :])))
    else:
        settings.startMainLoopFromConstructor(getNewRepository())

if __name__ == "__main__":
    main()
"""Persistent identifier minters.""" from __future__ import absolute_import, print_function from .providers import CDSRecordIdProvider def recid_minter(record_uuid, data): """Mint record identifiers.""" assert 'recid' not in data provider = CDSRecordIdProvider.create( object_type='rec', object_uuid=record_uuid) data['recid'] = int(provider.pid.pid_value) return provider.pid
import re import scipy from scipy import optimize from scipy import linalg from pylab import * def read_log(ac_id, filename, sensor): f = open(filename, 'r') pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_RAW (\S+) (\S+) (\S+)") list_meas = [] while 1: line = f.readline().strip() if line == '': break m=re.match(pattern, line) if m: list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4))]) return scipy.array(list_meas) def filter_meas(meas, window_size, noise_threshold): filtered_meas = [] filtered_idx = [] for i in range(window_size,len(meas)-window_size): noise = meas[i-window_size:i+window_size,:].std(axis=0) if linalg.norm(noise) < noise_threshold: filtered_meas.append(meas[i,:]) filtered_idx.append(i) return scipy.array(filtered_meas), filtered_idx def get_min_max_guess(meas, scale): max_meas = meas[:,:].max(axis=0) min_meas = meas[:,:].min(axis=0) n = (max_meas + min_meas) / 2 sf = 2*scale/(max_meas - min_meas) return scipy.array([n[0], n[1], n[2], sf[0], sf[1], sf[2]]) def scale_measurements(meas, p): l_comp = []; l_norm = []; for m in meas[:,]: sm = (m - p[0:3])*p[3:6] l_comp.append(sm) l_norm.append(linalg.norm(sm)) return scipy.array(l_comp), scipy.array(l_norm) def print_xml(p, sensor, res): print "" print "<define name=\""+sensor+"_X_NEUTRAL\" value=\""+str(int(round(p[0])))+"\"/>" print "<define name=\""+sensor+"_Y_NEUTRAL\" value=\""+str(int(round(p[1])))+"\"/>" print "<define name=\""+sensor+"_Z_NEUTRAL\" value=\""+str(int(round(p[2])))+"\"/>" print "<define name=\""+sensor+"_X_SENS\" value=\""+str(p[3]*2**res)+"\" integer=\"16\"/>" print "<define name=\""+sensor+"_Y_SENS\" value=\""+str(p[4]*2**res)+"\" integer=\"16\"/>" print "<define name=\""+sensor+"_Z_SENS\" value=\""+str(p[5]*2**res)+"\" integer=\"16\"/>" filename = 'log_accel_booz2_a2' ac_id = "151" if 1: sensor = "ACCEL" sensor_ref = 9.81 sensor_res = 10 noise_window = 20; noise_threshold = 40; else: sensor = "MAG" sensor_ref = 1. 
sensor_res = 11 noise_window = 10; noise_threshold = 1000; print "reading file "+filename+" for aircraft "+ac_id+" and sensor "+sensor measurements = read_log(ac_id, filename, sensor) print "found "+str(len(measurements))+" records" flt_meas, flt_idx = filter_meas(measurements, noise_window, noise_threshold) print "remaining "+str(len(flt_meas))+" after low pass" p0 = get_min_max_guess(flt_meas, sensor_ref) cp0, np0 = scale_measurements(flt_meas, p0) print "initial guess : "+str(np0.mean())+" "+str(np0.std()) print p0 def err_func(p,meas,y): cp, np = scale_measurements(meas, p) err = y*scipy.ones(len(meas)) - np return err p1, success = optimize.leastsq(err_func, p0[:], args=(flt_meas, sensor_ref)) cp1, np1 = scale_measurements(flt_meas, p1) print "optimized guess : "+str(np1.mean())+" "+str(np1.std()) print p1 print_xml(p1, sensor, sensor_res) subplot(3,1,1) plot(measurements[:,0]) plot(measurements[:,1]) plot(measurements[:,2]) plot(flt_idx, flt_meas[:,0], 'ro') plot(flt_idx, flt_meas[:,1], 'ro') plot(flt_idx, flt_meas[:,2], 'ro') subplot(3,2,3) plot(cp0[:,0]); plot(cp0[:,1]); plot(cp0[:,2]); plot(-sensor_ref*scipy.ones(len(flt_meas))); plot(sensor_ref*scipy.ones(len(flt_meas))); subplot(3,2,4) plot(np0); plot(sensor_ref*scipy.ones(len(flt_meas))); subplot(3,2,5) plot(cp1[:,0]); plot(cp1[:,1]); plot(cp1[:,2]); plot(-sensor_ref*scipy.ones(len(flt_meas))); plot(sensor_ref*scipy.ones(len(flt_meas))); subplot(3,2,6) plot(np1); plot(sensor_ref*scipy.ones(len(flt_meas))); show();
import os from setuptools import setup readme = os.path.join(os.path.dirname(__file__), 'README.md') setup(name = 'bottleneck', version = '0.1.0', description = 'performance report generator for OpenMP programs in GNU/Linux', long_description = open(readme).read(), author = 'Andres More', author_email='more.andres@gmail.com', url='https://github.com/moreandres/bottleneck.git', packages= [ 'bottleneck' ], entry_points = { 'console_scripts': [ 'bt = bottleneck.bottleneck:main' ] }, data_files = [ ( 'config', [ 'cfg/bt.cfg', 'cfg/bt.tex' ] ) ], classifiers = [ 'Development Status :: 1 - Planning', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 'Operating System :: POSIX', 'Natural Language :: English', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Information Analysis', 'Topic :: Software Development :: Quality Assurance', 'Topic :: System :: Benchmark', 'Topic :: Utilities', ], zip_safe = False, test_suite = 'tests', )
import os from PyQt4.QtCore import pyqtSignal from PyQt4.QtGui import QComboBox, QDoubleValidator from configmanager.editorwidgets.core import ConfigWidget from configmanager.editorwidgets.uifiles.ui_numberwidget_config import Ui_Form class NumberWidgetConfig(Ui_Form, ConfigWidget): description = 'Number entry widget' def __init__(self, parent=None): super(NumberWidgetConfig, self).__init__(parent) self.setupUi(self) self.minEdit.setValidator( QDoubleValidator() ) self.maxEdit.setValidator( QDoubleValidator() ) self.minEdit.textChanged.connect(self.widgetchanged) self.maxEdit.textChanged.connect(self.widgetchanged) self.prefixEdit.textChanged.connect(self.widgetchanged) self.suffixEdit.textChanged.connect(self.widgetchanged) def getconfig(self): config = {} config['max'] = self.maxEdit.text() config['min'] = self.minEdit.text() config['prefix'] = self.prefixEdit.text() config['suffix'] = self.suffixEdit.text() return config def setconfig(self, config): self.blockSignals(True) max = config.get('max', '') min = config.get('min', '') prefix = config.get('prefix', '') suffix = config.get('suffix', '') self.minEdit.setText(min) self.maxEdit.setText(max) self.prefixEdit.setText(prefix) self.suffixEdit.setText(suffix) self.blockSignals(False)
""" This is the main widget of the xc2424scan application This widget is self contained and can be included in any other Qt4 application. """ __all__ = ["ScanWidget"] from PyQt4.QtCore import QDir, QObject, QRect, Qt, SIGNAL from PyQt4.QtGui import QWidget, QFileDialog, QListWidgetItem, QPixmap, \ QIcon, QMessageBox, QInputDialog, QLineEdit, QPainter, \ QProgressDialog, QMessageBox, QSizePolicy, QDialog, \ QLabel, QVBoxLayout, QHBoxLayout, QSpacerItem, \ QSizePolicy, QPushButton import os from xc2424scan import config from xc2424scan.threadedscanlib import ThreadedXeroxC2424 from xc2424scan.scanlib import ProtectedError, SocketError, NoPreviewError from xc2424scan.ui.widgets.scanwidgetbase import Ui_ScanWidgetBase class ProgressFullDialog(QProgressDialog): def __init__(self, parent = None): QProgressDialog.__init__(self, parent) self.setWindowTitle(_("Downloading")) # Top level fixed size dialog self.setWindowModality(Qt.WindowModal) # Do not close when reaching 100% self.setAutoClose(False) self.setAutoReset(False) self.__nbr_pages_ = -1 def setNbrPages(self, nbr_pages): self.__nbr_pages_ = nbr_pages def newpage(self, current_page, file_size): if self.isVisible(): # Set progress value to 0 and range to file size self.setValue(0) self.setRange(0, file_size) # Set label text if self.__nbr_pages_ == 1: self.setLabelText(_("Getting page %d") % current_page) else: self.setLabelText(_("Getting page %d of %d") % \ (current_page, self.__nbr_pages_)) def progress(self, received_size): if self.isVisible(): self.setValue(self.value() + received_size) class ProgressDialog(QDialog): def __init__(self, parent = None): QDialog.__init__(self, parent) self.setWindowTitle(_("Downloading")) # Top level fixed size dialog self.setWindowModality(Qt.WindowModal) self.__page_ = QLabel(self) self.__progress_ = QLabel(self) self.__cancel_ = QPushButton(self) self.__downloaded_ = 0 self.__nbr_pages_ = 0 vboxlayout = QVBoxLayout(self) # Page status labellayout = QHBoxLayout() 
labellayout.addItem(QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)) labellayout.addWidget(self.__page_) labellayout.addItem(QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)) vboxlayout.addLayout(labellayout) # Progress status progresslayout = QHBoxLayout() progresslayout.addItem(QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)) progresslayout.addWidget(self.__progress_) progresslayout.addItem(QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)) vboxlayout.addLayout(progresslayout) # Cancel button cancellayout = QHBoxLayout() cancellayout.addItem(QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)) cancellayout.addWidget(self.__cancel_) vboxlayout.addLayout(cancellayout) self.__cancel_.setDefault(True) self.__cancel_.setText("Cancel") QObject.connect(self.__cancel_, SIGNAL("clicked()"), self.__ui_progress_canceled_) QObject.connect(self, SIGNAL("rejected()"), self.__ui_progress_canceled_) def __ui_progress_canceled_(self): self.emit(SIGNAL("canceled()")) def setLabelText(self, text): self.__page_.setText(text) def setValue(self, value): self.__downloaded_ = value self.progress(0) def setNbrPages(self, nbr_pages): self.__nbr_pages_ = nbr_pages def newpage(self, current_page, file_size = None): if self.isVisible(): # Set progress value to 0 self.setValue(0) # Set label text if self.__nbr_pages_ == 0: # Only happens when getting a pdf file self.__page_.setText(_("Getting file")) elif self.__nbr_pages_ == 1: self.__page_.setText(_("Getting page %d") % current_page) else: self.__page_.setText(_("Getting page %d of %d") % \ (current_page, self.__nbr_pages_)) def progress(self, received_size): self.__downloaded_ += received_size if self.isVisible(): size = self.__downloaded_ / 1024 if size > 1024: size = float(size) / 1024 self.__progress_.setText("Received %.3f mb" % size) else: self.__progress_.setText("Received %d kb" % size) class ProgressWrapper(QObject): def __init__(self, parent = None): 
QObject.__init__(self) self.__progress_full_ = ProgressFullDialog(parent) self.__progress_ = ProgressDialog(parent) self.__current_ = None QObject.connect(self.__progress_full_, SIGNAL("canceled()"), self.__ui_progress_canceled_) QObject.connect(self.__progress_, SIGNAL("canceled()"), self.__ui_progress_canceled_) def show(self, format, nbr_pages): if format in ["tiff", "bmp"]: self.__current_ = self.__progress_full_ else: self.__current_ = self.__progress_ self.__current_.setLabelText(_("Waiting for transfer to begin")) self.__current_.setValue(0) self.__current_.setNbrPages(nbr_pages) self.__current_.show() def __ui_progress_canceled_(self): self.emit(SIGNAL("canceled()")) def newpage(self, current_page, file_size): if self.__current_ is not None: self.__current_.newpage(current_page, file_size) def progress(self, received_size): if self.__current_ is not None: self.__current_.progress(received_size) def isVisible(self): if self.__current_ is not None: return self.__current_.isVisible() else: return False def hide(self): if self.__current_ is not None: self.__current_.hide() class ScanWidget(QWidget): """The main scanning widget""" def __init__(self, parent = None): """Create a new scanning widget @param parent: The parent widget @type parent: QWidget """ QWidget.__init__(self, parent) self.__basewidget_ = Ui_ScanWidgetBase() self.__basewidget_.setupUi(self) # The threaded scanner object self.__scanner_ = ThreadedXeroxC2424() # List of files available on the scanner self.__scanned_files_ = None # Last folder visited self.__old_folder_ = "Public" # Progress dialog self.__progress_ = ProgressWrapper(self) # UI: Buttons QObject.connect(self.__basewidget_.refresh, SIGNAL("clicked()"), self.__ui_refresh_clicked_) QObject.connect(self.__basewidget_.delete, SIGNAL("clicked()"), self.__ui_delete_clicked_) QObject.connect(self.__basewidget_.save, SIGNAL("clicked()"), self.__ui_save_clicked_) # UI: An option has been modified QObject.connect(self.__basewidget_.folder, 
SIGNAL("activated(const QString&)"), self.__ui_folder_currentChanged_) # UI: List widget QObject.connect(self.__basewidget_.imageList, SIGNAL("currentTextChanged(const QString&)"), self.__ui_imageList_currentChanged_) QObject.connect(self.__basewidget_.format, SIGNAL("currentIndexChanged(const QString&)"), self.__ui_format_currentChanged_) # Signals emited from threads QObject.connect(self.__scanner_, SIGNAL("foldersList()"), self.__foldersListReceived_) QObject.connect(self.__scanner_, SIGNAL("filesList()"), self.__filesListReceived_) QObject.connect(self.__scanner_, SIGNAL("folderSet(const QString&)"), self.__folderSetReceived_) QObject.connect(self.__scanner_, SIGNAL("folderProtected(const QString&)"), self.__folderProtectedReceived_) QObject.connect(self.__scanner_, SIGNAL("fileReceived(const QString&)"), self.__fileReceived_) QObject.connect(self.__scanner_, SIGNAL("previewReceived(const QString&)"), self.__previewReceived_) QObject.connect(self.__scanner_, SIGNAL("allPreviewReceived()"), self.__allPreviewReceived_) QObject.connect(self.__scanner_, SIGNAL("fileDeleted(const QString&)"), self.__fileDeletedReceived_) QObject.connect(self.__scanner_, SIGNAL("connectedToScanner()"), self.__connectedToScannerReceived_) QObject.connect(self.__scanner_, SIGNAL("scanlibError(const QString&)"), self.__scanlibErrorReceived) QObject.connect(self.__scanner_, SIGNAL("newPage(int, int)"), self.__progress_.newpage) QObject.connect(self.__scanner_, SIGNAL("progress(int)"), self.__progress_.progress) # Progress dialog QObject.connect(self.__progress_, SIGNAL("canceled()"), self.__ui_progress_canceled_) self.__lock_() # # Methods connected to thread signals # def __scanlibErrorReceived(self, text): """Called when there is an error in the scan library @param text: The text of the error @type text: str """ if self.__progress_.isVisible(): self.__progress_.hide() QMessageBox.critical(self, "Critical error", text) if self.__scanner_.connected: self.__unlock_() def 
__connectedToScannerReceived_(self): """Called when we are connected to a new scanner""" # Show the public directory if config.DEBUG_GUI: print "<-- Connected to scanner" # Clear the list of files and request the available folders self.__basewidget_.imageList.clear() self.__scanner_.getFolders() def __folderSetReceived_(self, folder): """Called when we have changed the current folder @param folder: The folder name @type folder: str """ if config.DEBUG_GUI: print "<-- Folder has been set:", str(folder) # Save old folder self.__old_folder_ = str(folder) # Refresh the contents of the folder self.__refreshPreviews_() def __folderProtectedReceived_(self, folder): """Called when we are trying to access a protected folder @param folder: The folder name @type folder: str """ if config.DEBUG_GUI: print "<-- Protected folder:", folder folder = str(folder) password, result = QInputDialog.getText(self, "Accessing a protected folder", "Please enter the password for the protected " \ "folder %s" % folder, QLineEdit.Password) if result is True: self.__scanner_.setFolder(folder, str(password)) else: folder_index = self.__basewidget_.folder.findText(self.__old_folder_) self.__basewidget_.folder.setCurrentIndex(folder_index) self.__unlock_() def __fileReceived_(self, filename): """Called when a file tranfert has been successfully completed @param filename: The file name @type filename: str """ if config.DEBUG_GUI: print "<-- File transfer finished for:", filename # Reset the progress dialog and unlock the widget self.__progress_.hide() self.__unlock_() def __allPreviewReceived_(self): """Received when we have received all previews""" if config.DEBUG_GUI: print "<-- All previews received" self.__unlock_() self.__basewidget_.imageList.setCurrentItem(self.__basewidget_.imageList.item(0)) def __previewReceived_(self, filename): """Received when a preview has been received @param filename: The filename of the preview @type filename: str """ if config.DEBUG_GUI: print "<-- Preview 
received:", filename filename = str(filename) preview = self.__scanner_.previews[filename] del self.__scanner_.previews[filename] # Create the pixmap item pixmap = QPixmap() if preview == None: pixmap.load(config.NO_PREVIEW_FILENAME) else: pixmap.loadFromData(preview) # Add a black border self.__add_black_border_(pixmap) # Add the new icon to the list items = self.__basewidget_.imageList.findItems(filename, Qt.MatchExactly) items[0].setIcon(QIcon(pixmap)) def __fileDeletedReceived_(self, filename): """Called when a file has been deleted @param filename: The name of the deleted file @type filename: str """ if config.DEBUG_GUI: print "<-- File deleted:", filename # Remove the deleted item from the list items = self.__basewidget_.imageList.findItems(filename, Qt.MatchExactly) item = self.__basewidget_.imageList.takeItem(self.__basewidget_.imageList.row(items[0])) del item # Unlock the widget self.__unlock_() def __foldersListReceived_(self): """Called when the folders listing has arrived""" if config.DEBUG_GUI: print "<-- Received folder listing" # Add the folders to the list of folders for folder in self.__scanner_.folders: self.__basewidget_.folder.addItem(folder) # Refresh the files of the current folder self.__refreshPreviews_() def __filesListReceived_(self): """Called when the files listing of the current folder has arrived""" if config.DEBUG_GUI: print "<-- Received files listing" self.__scanned_files_ = self.__scanner_.files # Add the files to the list and request their previews if len(self.__scanned_files_) != 0: # Sort by filename (wich is also by date) filenames = self.__scanned_files_.keys() filenames.sort() # Create the Waiting for preview pixmap pixmap = QPixmap() pixmap.load(config.WAITING_PREVIEW_FILENAME) self.__add_black_border_(pixmap) # Add the files to the list for filename in filenames: self.__basewidget_.imageList.addItem(QListWidgetItem(QIcon(pixmap), filename)) # Request the previews if config.DEBUG_GUI: print "--> Requesting previews" 
self.__scanner_.getPreviews(filenames) else: self.__unlock_() # # Methods connected to the UI # def __ui_refresh_clicked_(self): """Called when the user activates the refresh button This method clears the files list and request the current files list again """ # Refresh the folder contents self.__refreshPreviews_() def __ui_delete_clicked_(self): """Called when the user activates the delete button This method delete the current selected file """ if config.DEBUG_GUI: print "--> Deleting file" filename = self.currentFilename() if filename is not None: result = QMessageBox.question(self, "Confirmation of file deletion", "Do you really want to delete the file %s " \ "from the scanner?" % filename, QMessageBox.Yes, QMessageBox.No) if result == QMessageBox.Yes: self.__scanner_.deleteFile(filename) else: print "WARNING: No file selected (save), this should not happen" def __ui_save_clicked_(self): """Called when the user activates the save button This method ask for a filename and download the selected pages """ if config.DEBUG_GUI: print "--> Saving file" filename = self.currentFilename() # Check if a file has been selected if filename is not None: # Ask for filename save_filter = self.__get_format_filter_() default_save_filename = os.path.join(str(QDir.homePath()), "%s.%s" % (os.path.splitext(filename)[0], self.getFormat())) save_filename = str(QFileDialog.getSaveFileName(self, "Saving scanned file", default_save_filename, save_filter)) if save_filename != "": self.__lock_() # Add file format if not specified if os.path.splitext(save_filename)[1] == "": save_filename += ".%s" % self.getFormat() # Call the saving thread method format = self.getFormat() pages = self.getPages() dpi = self.getDpi() if dpi == None: dpi = self.__scanned_files_[filename]["dpi"] samplesize = self.getSamplesize() self.__scanner_.getFile(filename, save_filename, pages, format, dpi, samplesize) # Show the progress dialog self.__progress_.show(format, len(pages)) else: print "WARNING: No file 
selected (save), this should not happen" def __ui_folder_currentChanged_(self, folder): """Called when the current folder has been changed If the user has selected another directory, we need to list the contents of this directory """ if config.DEBUG_GUI: print "--> Changing folder" folder = str(folder) if folder != self.__old_folder_: self.__lock_() # Request the new folder self.__scanner_.setFolder(folder) def __ui_imageList_currentChanged_(self, filename): """Called when the user select an image in the image list @param filename: The file name of the selected file @type filename: str """ filename = str(filename) if config.DEBUG_GUI: print "--- Selected file: \"%s\"" % filename if filename == "": self.__basewidget_.info_nbPages.setText("") self.__basewidget_.info_dpi.setText("") self.__basewidget_.info_resolution.setText("") self.__clearOptions_() self.__basewidget_.delete.setEnabled(False) self.__basewidget_.save.setEnabled(False) self.__basewidget_.format.setEnabled(False) self.__basewidget_.page.setEnabled(False) self.__basewidget_.resolution.setEnabled(False) self.__basewidget_.color.setEnabled(False) else: file_infos = self.__scanned_files_[filename] # Show basic informations self.__basewidget_.info_nbPages.setText(str(file_infos["nbpages"])) self.__basewidget_.info_dpi.setText("%dx%d dpi" % \ (file_infos["dpi"][0], file_infos["dpi"][1])) self.__basewidget_.info_resolution.setText("%dx%d" % \ (file_infos["resolution"][0], file_infos["resolution"][1])) # Create file options self.__clearOptions_() # Add pages pages = [] if file_infos["nbpages"] > 1: pages.append("all") pages.extend([str(x) for x in range(1, file_infos["nbpages"] + 1)]) self.__basewidget_.page.addItems(pages) # Add dpi dpis = ["max"] dpis.extend(["%dx%d" % (x, x) for x in [100, 200, 300, 400, 600] if x <= file_infos["dpi"][0]]) self.__basewidget_.resolution.addItems(dpis) # Add samplesize if file_infos["samplesize"] == 24: self.__basewidget_.color.addItem("Color") if file_infos["samplesize"] >= 
8: self.__basewidget_.color.addItem("Grayscale") self.__basewidget_.color.addItem("Black & White") # Enable buttons self.__basewidget_.delete.setEnabled(True) self.__basewidget_.save.setEnabled(True) # Enable options self.__basewidget_.format.setEnabled(True) self.__basewidget_.resolution.setEnabled(True) self.__basewidget_.color.setEnabled(True) self.__ui_format_currentChanged_(self.__basewidget_.format.currentText()) def __ui_format_currentChanged_(self, format): """Called when file format has changed If the file format is pdf, we cannot select a page. If it is not pdf, we need to enable the page selector """ format = str(format).lower() if format == "pdf": self.__basewidget_.page.setCurrentIndex(0) self.__basewidget_.page.setEnabled(False) else: self.__basewidget_.page.setEnabled(True) def __ui_progress_canceled_(self): """Called when the user click on the progress cancel button""" if config.DEBUG_GUI: print "--- Canceled saving" self.__scanner_.stop() # # Other methods # def __get_format_filter_(self): format = self.getFormat() if format == "tiff": filter = _("TIFF images (*.tif *.tiff)") elif format == "gif": filter = _("GIF images (*.gif)") elif format == "jpeg": filter = _("JPEG images (*.jpg *.jpeg)") elif format == "bmp": filter = _("BMP images (*.bmp)") elif format == "pdf": filter = _("PDF files (*.pdf)") else: filter = "" return filter + ";;All files (*)" def __add_black_border_(self, pixmap): """Add a black border around a pixmap @param pixmap: The pixmap @type pixmap: QPixmap """ painter = QPainter() painter.begin(pixmap) painter.setPen(Qt.black); painter.drawRect(QRect(0, 0, pixmap.width() - 1, pixmap.height() - 1)) painter.end() def __refreshPreviews_(self): if config.DEBUG_GUI: print "--> Refreshing previews" self.__basewidget_.imageList.clear() self.__lock_() self.__scanner_.getFilesList() def __clearOptions_(self): self.__basewidget_.page.clear() self.__basewidget_.resolution.clear() self.__basewidget_.color.clear() def __lock_(self): 
self.__basewidget_.refresh.setEnabled(False) self.__basewidget_.folder.setEnabled(False) self.__basewidget_.imageList.setEnabled(False) self.__basewidget_.save.setEnabled(False) self.__basewidget_.delete.setEnabled(False) self.__basewidget_.format.setEnabled(False) self.__basewidget_.page.setEnabled(False) self.__basewidget_.resolution.setEnabled(False) self.__basewidget_.color.setEnabled(False) def __unlock_(self): self.__basewidget_.refresh.setEnabled(True) self.__basewidget_.folder.setEnabled(True) self.__basewidget_.imageList.setEnabled(True) if self.currentFilename() is not None: self.__basewidget_.save.setEnabled(True) self.__basewidget_.delete.setEnabled(True) self.__basewidget_.format.setEnabled(True) self.__basewidget_.page.setEnabled(True) self.__basewidget_.resolution.setEnabled(True) self.__basewidget_.color.setEnabled(True) # # API public # def currentFilename(self): currentItem = self.__basewidget_.imageList.currentItem() # Vérification inutile, car le bouton delete est activé seulement # s'il y a un item sélectionné, mais on ne sais jamais if currentItem is not None: return str(currentItem.text()) def currentFolder(self): return str(self.__basewidget_.folder.currentText()) def getFormat(self): return str(self.__basewidget_.format.currentText()).lower() def getDpi(self): dpi = str(self.__basewidget_.resolution.currentText()) if dpi == "max": return None elif dpi == "100x100": return [100, 100] elif dpi == "200x200": return [200, 200] elif dpi == "300x300": return [300, 300] elif dpi == "400x400": return [400, 400] elif dpi == "600x600": return [600, 600] def getPages(self): if self.getFormat() == "pdf": return [] if str(self.__basewidget_.page.currentText()) == "all": return [x for x in range(1, self.__scanned_files_[self.currentFilename()]["nbpages"] + 1)] else: return [int(str(self.__basewidget_.page.currentText()))] def getSamplesize(self): samplesize = str(self.__basewidget_.color.currentText()) # 24 bits color if samplesize == "Color": return 24 
# 8 tones grayscale elif samplesize == "Grayscale": return 8 # black and white else: return 1 def connectToScanner(self, host, port): if config.DEBUG_GUI: print "--> Connecting to scanner" self.__scanner_.connectToScanner(host, port) def disconnect(self): if config.DEBUG_GUI: print "--> Disconnecting from scanner" self.__scanner_.disconnect()
'''synthesize structurally interesting change history

This extension is useful for creating a repository with properties
that are statistically similar to an existing repository. During
analysis, a simple probability table is constructed from the history
of an existing repository. During synthesis, these properties are
reconstructed.

Properties that are analyzed and synthesized include the following:

- Lines added or removed when an existing file is modified
- Number and sizes of files added
- Number of files removed
- Line lengths
- Topological distance to parent changeset(s)
- Probability of a commit being a merge
- Probability of a newly added file being added to a new directory
- Interarrival time, and time zone, of commits
- Number of files in each directory

A few obvious properties that are not currently handled realistically:

- Merges are treated as regular commits with two parents, which is not
  realistic
- Modifications are not treated as operations on hunks of lines, but
  as insertions and deletions of randomly chosen single lines
- Committer ID (always random)
- Executability of files
- Symlinks and binary files are ignored
'''

from __future__ import absolute_import
import bisect
import collections
import itertools
import json
import os
import random
import sys
import time

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    nullrev,
    short,
)
from mercurial import (
    cmdutil,
    context,
    error,
    hg,
    patch,
    scmutil,
    util,
)

testedwith = 'internal'

cmdtable = {}
command = cmdutil.command(cmdtable)

# First six characters of git-diff header lines that indicate a new file
# (newly added, renamed, or copied).
newfile = set(('new fi', 'rename', 'copy f', 'copy t'))

def zerodict():
    # Counter-style dict: missing keys read as 0, so callers can do
    # ``d[key] += 1`` without initialization.
    return collections.defaultdict(lambda: 0)

def roundto(x, k):
    # Round x to the nearest multiple of k, but only once x is large
    # enough (> 2k); small values keep full precision so the model does
    # not lose detail at the low end.
    if x > k * 2:
        return int(round(x / float(k)) * k)
    return int(round(x))

def parsegitdiff(lines):
    # Parse a git-style diff (as a sequence of lines) and yield one
    # (filename, mar, lineadd, lineremove, binary) tuple per file, where
    # mar is 'm'odified/'a'dded/'r'emoved, lineadd is a histogram of
    # added-line lengths (rounded to 5), and lineremove is a count.
    filename, mar, lineadd, lineremove = None, None, zerodict(), 0
    binary = False
    for line in lines:
        start = line[:6]
        if start == 'diff -':
            if filename:
                # Flush the stats accumulated for the previous file.
                yield filename, mar, lineadd, lineremove, binary
            mar, lineadd, lineremove, binary = 'm', zerodict(), 0, False
            filename = patch.gitre.match(line).group(1)
        elif start in newfile:
            mar = 'a'
        elif start == 'GIT bi':
            binary = True
        elif start == 'delete':
            mar = 'r'
        elif start:
            s = start[0]
            # Skip the '---'/'+++' file headers; count only hunk lines.
            if s == '-' and not line.startswith('--- '):
                lineremove += 1
            elif s == '+' and not line.startswith('+++ '):
                lineadd[roundto(len(line) - 1, 5)] += 1
    if filename:
        yield filename, mar, lineadd, lineremove, binary

@command('analyze',
         [('o', 'output', '', _('write output to given file'), _('FILE')),
          ('r', 'rev', [], _('analyze specified revisions'), _('REV'))],
         _('hg analyze'), optionalrepo=True)
def analyze(ui, repo, *revs, **opts):
    '''create a simple model of a repository to use for later synthesis

    This command examines every changeset in the given range (or all
    of history if none are specified) and creates a simple statistical
    model of the history of the repository. It also measures the directory
    structure of the repository as checked out.

    The model is written out to a JSON file, and can be used by
    :hg:`synthesize` to create or augment a repository with synthetic
    commits that have a structure that is statistically similar to the
    analyzed repository.
    '''
    root = repo.root
    if not root.endswith(os.path.sep):
        root += os.path.sep

    revs = list(revs)
    revs.extend(opts['rev'])
    if not revs:
        revs = [':']

    output = opts['output']
    if not output:
        output = os.path.basename(root) + '.json'

    if output == '-':
        fp = sys.stdout
    else:
        fp = open(output, 'w')

    # Always obtain file counts of each directory in the given root directory.
    def onerror(e):
        ui.warn(_('error walking directory structure: %s\n') % e)

    dirs = {}
    rootprefixlen = len(root)
    for dirpath, dirnames, filenames in os.walk(root, onerror=onerror):
        dirpathfromroot = dirpath[rootprefixlen:]
        dirs[dirpathfromroot] = len(filenames)
        if '.hg' in dirnames:
            dirnames.remove('.hg')

    # Histograms (value -> observation count) for each modeled property.
    lineschanged = zerodict()
    children = zerodict()
    p1distance = zerodict()
    p2distance = zerodict()
    linesinfilesadded = zerodict()
    fileschanged = zerodict()
    filesadded = zerodict()
    filesremoved = zerodict()
    linelengths = zerodict()
    interarrival = zerodict()
    parents = zerodict()
    dirsadded = zerodict()
    tzoffset = zerodict()

    # If a mercurial repo is available, also model the commit history.
    if repo:
        revs = scmutil.revrange(repo, revs)
        revs.sort()

        progress = ui.progress
        _analyzing = _('analyzing')
        _changesets = _('changesets')
        _total = len(revs)

        for i, rev in enumerate(revs):
            progress(_analyzing, i, unit=_changesets, total=_total)
            ctx = repo[rev]
            pl = ctx.parents()
            pctx = pl[0]
            prev = pctx.rev()
            children[prev] += 1
            p1distance[rev - prev] += 1
            parents[len(pl)] += 1
            tzoffset[ctx.date()[1]] += 1
            if len(pl) > 1:
                p2distance[rev - pl[1].rev()] += 1
            if prev == rev - 1:
                lastctx = pctx
            else:
                lastctx = repo[rev - 1]
            if lastctx.rev() != nullrev:
                timedelta = ctx.date()[0] - lastctx.date()[0]
                interarrival[roundto(timedelta, 300)] += 1
            diff = sum((d.splitlines() for d in ctx.diff(pctx, git=True)), [])
            fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0
            for filename, mar, lineadd, lineremove, isbin in parsegitdiff(diff):
                if isbin:
                    continue
                added = sum(lineadd.itervalues(), 0)
                if mar == 'm':
                    if added and lineremove:
                        lineschanged[roundto(added, 5),
                                     roundto(lineremove, 5)] += 1
                        filechanges += 1
                elif mar == 'a':
                    fileadds += 1
                    if '/' in filename:
                        filedir = filename.rsplit('/', 1)[0]
                        if filedir not in pctx.dirs():
                            diradds += 1
                    linesinfilesadded[roundto(added, 5)] += 1
                elif mar == 'r':
                    fileremoves += 1
                for length, count in lineadd.iteritems():
                    linelengths[length] += count
            fileschanged[filechanges] += 1
            filesadded[fileadds] += 1
            dirsadded[diradds] += 1
            filesremoved[fileremoves] += 1

    # Invert children: map child-count -> number of revisions with that count.
    invchildren = zerodict()
    for rev, count in children.iteritems():
        invchildren[count] += 1

    if output != '-':
        ui.status(_('writing output to %s\n') % output)

    def pronk(d):
        # Serialize a histogram as (value, count) pairs, most frequent first.
        return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)

    json.dump({'revs': len(revs),
               'initdirs': pronk(dirs),
               'lineschanged': pronk(lineschanged),
               'children': pronk(invchildren),
               'fileschanged': pronk(fileschanged),
               'filesadded': pronk(filesadded),
               'linesinfilesadded': pronk(linesinfilesadded),
               'dirsadded': pronk(dirsadded),
               'filesremoved': pronk(filesremoved),
               'linelengths': pronk(linelengths),
               'parents': pronk(parents),
               'p1distance': pronk(p1distance),
               'p2distance': pronk(p2distance),
               'interarrival': pronk(interarrival),
               'tzoffset': pronk(tzoffset),
               },
              fp)
    fp.close()

@command('synthesize',
         [('c', 'count', 0, _('create given number of commits'), _('COUNT')),
          ('', 'dict', '', _('path to a dictionary of words'), _('FILE')),
          ('', 'initfiles', 0, _('initial file count to create'), _('COUNT'))],
         _('hg synthesize [OPTION].. DESCFILE'))
def synthesize(ui, repo, descpath, **opts):
    '''synthesize commits based on a model of an existing repository

    The model must have been generated by :hg:`analyze`. Commits will
    be generated randomly according to the probabilities described in
    the model. If --initfiles is set, the repository will be seeded with
    the given number files following the modeled repository's directory
    structure.

    When synthesizing new content, commit descriptions, and user
    names, words will be chosen randomly from a dictionary that is
    presumed to contain one word per line. Use --dict to specify the
    path to an alternate dictionary to use.
    '''
    try:
        fp = hg.openpath(ui, descpath)
    except Exception as err:
        # NOTE(review): err[0].strerror assumes a py2 EnvironmentError-style
        # args tuple; an arbitrary Exception may not index this way — verify.
        raise error.Abort('%s: %s' % (descpath, err[0].strerror))
    desc = json.load(fp)
    fp.close()

    def cdf(l):
        # Build a (values, cumulative-probabilities) pair from (value, count)
        # pairs, for weighted random sampling via pick() below.
        if not l:
            return [], []
        vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True))
        t = float(sum(probs, 0))
        s, cdfs = 0, []
        for v in probs:
            s += v
            cdfs.append(s / t)
        return vals, cdfs

    lineschanged = cdf(desc['lineschanged'])
    fileschanged = cdf(desc['fileschanged'])
    filesadded = cdf(desc['filesadded'])
    dirsadded = cdf(desc['dirsadded'])
    filesremoved = cdf(desc['filesremoved'])
    linelengths = cdf(desc['linelengths'])
    parents = cdf(desc['parents'])
    p1distance = cdf(desc['p1distance'])
    p2distance = cdf(desc['p2distance'])
    interarrival = cdf(desc['interarrival'])
    linesinfilesadded = cdf(desc['linesinfilesadded'])
    tzoffset = cdf(desc['tzoffset'])

    dictfile = opts.get('dict') or '/usr/share/dict/words'
    try:
        fp = open(dictfile, 'rU')
    except IOError as err:
        raise error.Abort('%s: %s' % (dictfile, err.strerror))
    words = fp.read().splitlines()
    fp.close()

    initdirs = {}
    if desc['initdirs']:
        for k, v in desc['initdirs']:
            # '.hg' is reserved by mercurial; rename any modeled dir that
            # contains it.
            initdirs[k.encode('utf-8').replace('.hg', '_hg')] = v
        initdirs = renamedirs(initdirs, words)
    initdirscdf = cdf(initdirs)

    def pick(cdf):
        # Weighted random choice from a (values, cumulative-probs) pair.
        return cdf[0][bisect.bisect_left(cdf[1], random.random())]

    def pickpath():
        return os.path.join(pick(initdirscdf), random.choice(words))

    def makeline(minimum=0):
        # Generate a random line of at least `minimum` characters, with a
        # length drawn from the modeled line-length distribution.
        total = max(minimum, pick(linelengths))
        c, l = 0, []
        while c < total:
            w = random.choice(words)
            c += len(w) + 1
            l.append(w)
        return ' '.join(l)

    wlock = repo.wlock()
    lock = repo.lock()

    nevertouch = set(('.hgsub', '.hgignore', '.hgtags'))

    progress = ui.progress
    _synthesizing = _('synthesizing')
    _files = _('initial files')
    _changesets = _('changesets')

    # Synthesize a single initial revision adding files to the repo according
    # to the modeled directory structure.
    initcount = int(opts['initfiles'])
    if initcount and initdirs:
        pctx = repo[None].parents()[0]
        dirs = set(pctx.dirs())
        files = {}

        def validpath(path):
            # Don't pick filenames which are already directory names.
            if path in dirs:
                return False
            # Don't pick directories which were used as file names.
            while path:
                if path in files:
                    return False
                path = os.path.dirname(path)
            return True

        for i in xrange(0, initcount):
            ui.progress(_synthesizing, i, unit=_files, total=initcount)

            path = pickpath()
            while not validpath(path):
                path = pickpath()
            data = '%s contents\n' % path
            files[path] = context.memfilectx(repo, path, data)
            dir = os.path.dirname(path)
            while dir and dir not in dirs:
                dirs.add(dir)
                dir = os.path.dirname(dir)

        def filectxfn(repo, memctx, path):
            return files[path]

        ui.progress(_synthesizing, None)
        message = 'synthesized wide repo with %d files' % (len(files),)
        mc = context.memctx(repo, [pctx.node(), nullid], message,
                            files.iterkeys(), filectxfn, ui.username(),
                            '%d %d' % util.makedate())
        initnode = mc.commit()
        if ui.debugflag:
            # NOTE(review): `hex` here resolves to the Python builtin, which
            # cannot format a 20-byte node string — this likely needs
            # `from mercurial.node import hex`; confirm against upstream.
            hexfn = hex
        else:
            hexfn = short
        ui.status(_('added commit %s with %d files\n')
                  % (hexfn(initnode), len(files)))

    # Synthesize incremental revisions to the repository, adding repo depth.
    count = int(opts['count'])
    heads = set(map(repo.changelog.rev, repo.heads()))
    for i in xrange(count):
        progress(_synthesizing, i, unit=_changesets, total=count)

        node = repo.changelog.node
        revs = len(repo)

        def pickhead(heads, distance):
            # Pick an existing head roughly `distance` revisions back,
            # snapping to the nearest head at or above that revision.
            if heads:
                lheads = sorted(heads)
                rev = revs - min(pick(distance), revs)
                if rev < lheads[-1]:
                    rev = lheads[bisect.bisect_left(lheads, rev)]
                else:
                    rev = lheads[-1]
                return rev, node(rev)
            return nullrev, nullid

        r1 = revs - min(pick(p1distance), revs)
        p1 = node(r1)

        # the number of heads will grow without bound if we use a pure
        # model, so artificially constrain their proliferation
        toomanyheads = len(heads) > random.randint(1, 20)
        if p2distance[0] and (pick(parents) == 2 or toomanyheads):
            r2, p2 = pickhead(heads.difference([r1]), p2distance)
        else:
            r2, p2 = nullrev, nullid

        pl = [p1, p2]
        pctx = repo[r1]
        mf = pctx.manifest()
        mfk = mf.keys()
        changes = {}
        if mfk:
            for __ in xrange(pick(fileschanged)):
                # Try up to 10 times to find an editable (non-binary,
                # non-symlink, non-metadata) file to modify.
                for __ in xrange(10):
                    fctx = pctx.filectx(random.choice(mfk))
                    path = fctx.path()
                    if not (path in nevertouch or fctx.isbinary() or
                            'l' in fctx.flags()):
                        break
                lines = fctx.data().splitlines()
                add, remove = pick(lineschanged)
                for __ in xrange(remove):
                    if not lines:
                        break
                    del lines[random.randrange(0, len(lines))]
                for __ in xrange(add):
                    lines.insert(random.randint(0, len(lines)), makeline())
                path = fctx.path()
                changes[path] = context.memfilectx(repo, path,
                                                   '\n'.join(lines) + '\n')
            for __ in xrange(pick(filesremoved)):
                path = random.choice(mfk)
                for __ in xrange(10):
                    path = random.choice(mfk)
                    if path not in changes:
                        # A value of None marks the file as removed.
                        changes[path] = None
                        break
        if filesadded:
            dirs = list(pctx.dirs())
            dirs.insert(0, '')
        for __ in xrange(pick(filesadded)):
            pathstr = ''
            while pathstr in dirs:
                path = [random.choice(dirs)]
                if pick(dirsadded):
                    path.append(random.choice(words))
                path.append(random.choice(words))
                pathstr = '/'.join(filter(None, path))
            data = '\n'.join(makeline()
                             for __ in xrange(pick(linesinfilesadded))) + '\n'
            changes[pathstr] = context.memfilectx(repo, pathstr, data)

        def filectxfn(repo, memctx, path):
            return changes[path]

        if not changes:
            continue
        if revs:
            date = repo['tip'].date()[0] + pick(interarrival)
        else:
            date = time.time() - (86400 * count)
        # dates in mercurial must be positive, fit in 32-bit signed integers.
        date = min(0x7fffffff, max(0, date))
        user = random.choice(words) + '@' + random.choice(words)
        mc = context.memctx(repo, pl, makeline(minimum=2),
                            sorted(changes.iterkeys()),
                            filectxfn, user, '%d %d' % (date, pick(tzoffset)))
        newnode = mc.commit()
        heads.add(repo.changelog.rev(newnode))
        heads.discard(r1)
        heads.discard(r2)

    lock.release()
    wlock.release()

def renamedirs(dirs, words):
    '''Randomly rename the directory names in the per-dir file count dict.'''
    wordgen = itertools.cycle(words)
    replacements = {'': ''}

    def rename(dirpath):
        '''Recursively rename the directory and all path prefixes.

        The mapping from path to renamed path is stored for all path prefixes
        as in dynamic programming, ensuring linear runtime and consistent
        renaming regardless of iteration order through the model.
        '''
        if dirpath in replacements:
            return replacements[dirpath]
        head, _ = os.path.split(dirpath)
        if head:
            head = rename(head)
        else:
            head = ''
        renamed = os.path.join(head, next(wordgen))
        replacements[dirpath] = renamed
        return renamed

    result = []
    for dirpath, count in dirs.iteritems():
        result.append([rename(dirpath.lstrip(os.sep)), count])
    return result
from Xml.Xslt import test_harness

# Stylesheet under test: a recursive named template ("do-the-rest") that
# emits the source items two per row via xsl:call-template.
sheet_str = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0"
>

  <xsl:template match="/">
    <root>
      <xsl:apply-templates/>
    </root>
  </xsl:template>

  <xsl:template name="do-the-rest">
    <xsl:param name="start"/>
    <xsl:param name="count"/>
    <tr>
    <xsl:for-each select="item[position()&gt;=$start and position()&lt;$start+$count]">
      <td>
        <xsl:value-of select="."/>
      </td>
    </xsl:for-each>
    </tr>
    <xsl:if test="$start + $count - 1 &lt; count(child::item)">
      <xsl:call-template name="do-the-rest">
        <xsl:with-param name="start" select="$start + $count"/>
        <xsl:with-param name="count" select="$count"/>
      </xsl:call-template>
    </xsl:if>
  </xsl:template>

  <xsl:template match="data">
    <xsl:call-template name="do-the-rest">
      <xsl:with-param name="start" select="1"/>
      <xsl:with-param name="count" select="2"/>
    </xsl:call-template>
  </xsl:template>

</xsl:stylesheet>
"""

# Input document: four items to be laid out two per table row.
source_str = """<?xml version = "1.0"?>
<data>
  <item>b</item>
  <item>a</item>
  <item>d</item>
  <item>c</item>
</data>
"""

# Expected serialized output for the transform above.
expected = """<?xml version='1.0' encoding='UTF-8'?>
<root><tr><td>b</td><td>a</td></tr><tr><td>d</td><td>c</td></tr></root>"""


def Test(tester):
    """Exercise xsl:call-template via the shared regression harness."""
    src_info = test_harness.FileInfo(string=source_str)
    sheet_info = test_harness.FileInfo(string=sheet_str)
    test_harness.XsltTest(tester, src_info, [sheet_info], expected,
                          title='xsl:call-template')
    return
# mockchain: build a chain of source RPMs with mock, feeding each successful
# build into a local repo so later packages can depend on earlier ones.
# NOTE: Python 2 script (print statements, `except X, e`, octal 0755).
import sys
import subprocess
import os
import optparse
import tempfile
import shutil
from urlgrabber import grabber
import time
import mockbuild.util

__VERSION__ = "unreleased_version"
SYSCONFDIR = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "..", "etc")
PYTHONDIR = os.path.dirname(os.path.realpath(sys.argv[0]))
PKGPYTHONDIR = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "mockbuild")
MOCKCONFDIR = os.path.join(SYSCONFDIR, "mock")

mockconfig_path='/etc/mock'

def createrepo(path):
    # Run createrepo over `path`; use --update when repo metadata already
    # exists to avoid a full rescan. Returns (stdout, stderr).
    if os.path.exists(path + '/repodata/repomd.xml'):
        comm = ['/usr/bin/createrepo', '--update', path]
    else:
        comm = ['/usr/bin/createrepo', path]
    cmd = subprocess.Popen(comm,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = cmd.communicate()
    return out, err

def parse_args(args):
    # Parse command-line options. `args` is expected to be the full sys.argv
    # (main() passes it through), so positional args include the script name.
    parser = optparse.OptionParser('\nmockchain -r mockcfg pkg1 [pkg2] [pkg3]')
    parser.add_option('-r', '--root', default=None, dest='chroot',
            help="chroot config name/base to use in the mock build")
    parser.add_option('-l', '--localrepo', default=None,
            help="local path for the local repo, defaults to making its own")
    parser.add_option('-c', '--continue', default=False, action='store_true',
            dest='cont',
            help="if a pkg fails to build, continue to the next one")
    parser.add_option('-a','--addrepo', default=[], action='append',
            dest='repos',
            help="add these repo baseurls to the chroot's yum config")
    parser.add_option('--recurse', default=False, action='store_true',
            help="if more than one pkg and it fails to build, try to build the rest and come back to it")
    parser.add_option('--log', default=None, dest='logfile',
            help="log to the file named by this option, defaults to not logging")
    parser.add_option('--tmp_prefix', default=None, dest='tmp_prefix',
            help="tmp dir prefix - will default to username-pid if not specified")
    #FIXME?
    # figure out how to pass other args to mock?

    opts, args = parser.parse_args(args)
    if opts.recurse:
        opts.cont = True

    if not opts.chroot:
        print "You must provide an argument to -r for the mock chroot"
        sys.exit(1)

    # NOTE(review): this inspects the raw sys.argv rather than the parsed
    # `args`, so e.g. `mockchain -r cfg` (3 argv entries, zero packages)
    # slips through; should likely test `len(args) < 2` — confirm intent.
    if len(sys.argv) < 3:
        print "You must specifiy at least 1 package to build"
        sys.exit(1)

    return opts, args

def add_local_repo(infile, destfile, baseurl, repoid=None):
    """take a mock chroot config and add a repo to it's yum.conf
       infile = mock chroot config file
       destfile = where to save out the result
       baseurl = baseurl of repo you wish to add"""
    global config_opts

    try:
        # execfile populates config_opts from the mock config (py2 only).
        execfile(infile)
        if not repoid:
            repoid=baseurl.split('//')[1].replace('/','_')
        localyumrepo="""
[%s]
name=%s
baseurl=%s
enabled=1
skip_if_unavailable=1
metadata_expire=30
cost=1
""" % (repoid, baseurl, baseurl)

        config_opts['yum.conf'] += localyumrepo
        br_dest = open(destfile, 'w')
        for k,v in config_opts.items():
            br_dest.write("config_opts[%r] = %r\n" % (k, v))
        br_dest.close()
        return True, ''
    except (IOError, OSError):
        return False, "Could not write mock config to %s" % destfile

    # NOTE(review): unreachable — both branches above already return.
    return True, ''

def do_build(opts, cfg, pkg):

    # returns 0, cmd, out, err = failure
    # returns 1, cmd, out, err  = success
    # returns 2, None, None, None = already built

    # Per-package result dir, marked with 'success'/'fail' sentinel files so
    # reruns can skip already-built packages.
    s_pkg = os.path.basename(pkg)
    pdn = s_pkg.replace('.src.rpm', '')
    resdir = '%s/%s' % (opts.local_repo_dir, pdn)
    resdir = os.path.normpath(resdir)
    if not os.path.exists(resdir):
        os.makedirs(resdir)

    success_file = resdir + '/success'
    fail_file = resdir + '/fail'

    if os.path.exists(success_file):
        return 2, None, None, None

    # clean it up if we're starting over :)
    if os.path.exists(fail_file):
        os.unlink(fail_file)

    mockcmd = ['/usr/bin/mock',
               '--configdir', opts.config_path,
               '--resultdir', resdir,
               '--uniqueext', opts.uniqueext,
               '-r', cfg, ]
    print 'building %s' % s_pkg
    mockcmd.append(pkg)
    cmd = subprocess.Popen(mockcmd,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE )
    out, err = cmd.communicate()
    if cmd.returncode == 0:
        open(success_file, 'w').write('done\n')
        ret = 1
    else:
        open(fail_file, 'w').write('undone\n')
        ret = 0

    return ret, cmd, out, err

def log(lf, msg):
    # Append a timestamped line to logfile `lf` (if set) and echo to stdout.
    if lf:
        now = time.time()
        try:
            open(lf, 'a').write(str(now) + ':' + msg + '\n')
        except (IOError, OSError), e:
            print 'Could not write to logfile %s - %s' % (lf, str(e))
    print msg

config_opts = {}

def main(args):
    global config_opts
    config_opts = mockbuild.util.setup_default_config_opts(os.getgid(), __VERSION__, PKGPYTHONDIR)
    opts, args = parse_args(args)
    # take mock config + list of pkgs
    cfg=opts.chroot
    pkgs=args[1:]
    mockcfg = mockconfig_path + '/' + cfg + '.cfg'

    if not os.path.exists(mockcfg):
        print "could not find config: %s" % mockcfg
        sys.exit(1)

    if not opts.tmp_prefix:
        try:
            opts.tmp_prefix = os.getlogin()
        except OSError, e:
            print "Could not find login name for tmp dir prefix add --tmp_prefix"
            sys.exit(1)
    pid = os.getpid()
    opts.uniqueext = '%s-%s' % (opts.tmp_prefix, pid)

    # create a tempdir for our local info
    if opts.localrepo:
        local_tmp_dir = os.path.abspath(opts.localrepo)
        if not os.path.exists(local_tmp_dir):
            os.makedirs(local_tmp_dir)
    else:
        pre = 'mock-chain-%s-' % opts.uniqueext
        local_tmp_dir = tempfile.mkdtemp(prefix=pre, dir='/var/tmp')
        os.chmod(local_tmp_dir, 0755)

    if opts.logfile:
        opts.logfile = os.path.join(local_tmp_dir, opts.logfile)
        if os.path.exists(opts.logfile):
            os.unlink(opts.logfile)
        log(opts.logfile, "starting logfile: %s" % opts.logfile)

    opts.local_repo_dir = os.path.normpath(local_tmp_dir + '/results/' + cfg + '/')

    if not os.path.exists(opts.local_repo_dir):
        os.makedirs(opts.local_repo_dir, mode=0755)

    local_baseurl="file://%s" % opts.local_repo_dir
    log(opts.logfile, "results dir: %s" % opts.local_repo_dir)
    opts.config_path = os.path.normpath(local_tmp_dir + '/configs/' + cfg + '/')

    if not os.path.exists(opts.config_path):
        os.makedirs(opts.config_path, mode=0755)

    log(opts.logfile, "config dir: %s" % opts.config_path)

    my_mock_config = opts.config_path + '/' + os.path.basename(mockcfg)

    # modify with localrepo
    res, msg = add_local_repo(mockcfg, my_mock_config, local_baseurl, 'local_build_repo')
    if not res:
        log(opts.logfile, "Error: Could not write out local config: %s" % msg)
        sys.exit(1)

    for baseurl in opts.repos:
        res, msg = add_local_repo(my_mock_config, my_mock_config, baseurl)
        if not res:
            log(opts.logfile, "Error: Could not add: %s to yum config in mock chroot: %s" % (baseurl, msg))
            sys.exit(1)

    # these files needed from the mock.config dir to make mock run
    for fn in ['site-defaults.cfg', 'logging.ini']:
        pth = mockconfig_path + '/' + fn
        shutil.copyfile(pth, opts.config_path + '/' + fn)

    # createrepo on it
    out, err = createrepo(opts.local_repo_dir)
    if err.strip():
        log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
        log(opts.logfile, "Err: %s" % err)
        sys.exit(1)

    download_dir = tempfile.mkdtemp()
    downloaded_pkgs = {}
    built_pkgs = []
    try_again = True
    to_be_built = pkgs
    # Retry loop: with --recurse, failed packages are re-attempted once more
    # in case a later success satisfied their build dependencies.
    while try_again:
        failed = []
        for pkg in to_be_built:
            if not pkg.endswith('.rpm'):
                log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
                failed.append(pkg)
                continue
            elif pkg.startswith('http://') or pkg.startswith('https://'):
                url = pkg
                cwd = os.getcwd()
                os.chdir(download_dir)
                try:
                    log(opts.logfile, 'Fetching %s' % url)
                    ug = grabber.URLGrabber()
                    fn = ug.urlgrab(url)
                    pkg = download_dir + '/' + fn
                except Exception, e:
                    log(opts.logfile, 'Error Downloading %s: %s' % (url, str(e)))
                    failed.append(url)
                    os.chdir(cwd)
                    continue
                else:
                    os.chdir(cwd)
                downloaded_pkgs[pkg] = url
            log(opts.logfile, "Start build: %s" % pkg)
            ret, cmd, out, err = do_build(opts, cfg, pkg)
            log(opts.logfile, "End build: %s" % pkg)
            if ret == 0:
                if opts.recurse:
                    failed.append(pkg)
                    log(opts.logfile, "Error building %s, will try again" % os.path.basename(pkg))
                else:
                    log(opts.logfile,"Error building %s" % os.path.basename(pkg))
                    log(opts.logfile,"See logs/results in %s" % opts.local_repo_dir)
                    if not opts.cont:
                        sys.exit(1)
            elif ret == 1:
                log(opts.logfile, "Success building %s" % os.path.basename(pkg))
                built_pkgs.append(pkg)
                # createrepo with the new pkgs
                out, err = createrepo(opts.local_repo_dir)
                if err.strip():
                    log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
                    log(opts.logfile, "Err: %s" % err)
            elif ret == 2:
                log(opts.logfile, "Skipping already built pkg %s" % os.path.basename(pkg))

        if failed:
            if len(failed) != len(to_be_built):
                # At least one package built this pass — retrying may unblock
                # the failures.
                to_be_built = failed
                try_again = True
                log(opts.logfile, 'Trying to rebuild %s failed pkgs' % len(failed))
            else:
                log(opts.logfile, "Tried twice - following pkgs could not be successfully built:")
                for pkg in failed:
                    msg = pkg
                    if pkg in downloaded_pkgs:
                        msg = downloaded_pkgs[pkg]
                    log(opts.logfile, msg)
                try_again = False
        else:
            try_again = False

    # cleaning up our download dir
    shutil.rmtree(download_dir, ignore_errors=True)

    log(opts.logfile, "Results out to: %s" % opts.local_repo_dir)
    log(opts.logfile, "Pkgs built: %s" % len(built_pkgs))
    log(opts.logfile, "Packages successfully built in this order:")
    for pkg in built_pkgs:
        log(opts.logfile, pkg)

if __name__ == "__main__":
    main(sys.argv)
    sys.exit(0)
# Keyboard-layout selection screen for the YALI installer.
# NOTE(review): this mixes Python 2 idioms (`ugettext`, `unicode`,
# `iteritems`) with PyQt5 imports — confirm which interpreter this targets.
import gettext
_ = gettext.translation('yali', fallback=True).ugettext

from PyQt5.Qt import QWidget, pyqtSignal, QVariant

import yali.util
import yali.localedata
import yali.postinstall
import yali.context as ctx
from yali.gui import ScreenWidget
from yali.gui.Ui.keyboardwidget import Ui_KeyboardWidget

class Widget(QWidget, ScreenWidget):
    # Screen identifier used by the installer's screen sequencer.
    name = "keyboardSetup"

    def __init__(self):
        QWidget.__init__(self)
        self.ui = Ui_KeyboardWidget()
        self.ui.setupUi(self)

        index = 0  # comboBox.addItem doesn't increase the currentIndex
        self.default_layout_index = None

        locales = sorted([(country, data) for country, data in yali.localedata.locales.items()])

        # Populate the combo box: one entry per plain layout, or one entry
        # per xkb variant when the locale defines variants. The full locale
        # dict rides along as item data for later retrieval.
        for country, data in locales:
            if data["xkbvariant"]:
                i = 0
                for variant in data["xkbvariant"]:
                    _d = dict(data)
                    _d["xkbvariant"] = variant[0]
                    _d["name"] = variant[1]
                    _d["consolekeymap"] = data["consolekeymap"][i]
                    self.ui.keyboard_list.addItem(_d["name"], QVariant(_d))
                    i += 1
            else:
                self.ui.keyboard_list.addItem(data["name"], QVariant(data))
            if ctx.consts.lang == country:
                if ctx.consts.lang == "tr":
                    # Presumably skips to the second Turkish variant as the
                    # preferred default — TODO confirm against locale data.
                    self.default_layout_index = index + 1
                else:
                    self.default_layout_index = index
            index += 1

        self.ui.keyboard_list.setCurrentIndex(self.default_layout_index)
        self.ui.keyboard_list.currentIndexChanged[int].connect(self.slotLayoutChanged)

    def shown(self):
        # Called by the screen framework when this screen becomes visible;
        # apply the currently selected layout immediately.
        self.slotLayoutChanged()

    def slotLayoutChanged(self):
        # Apply the layout selected in the combo box and stash it for the
        # post-install step.
        index = self.ui.keyboard_list.currentIndex()
        keymap = self.ui.keyboard_list.itemData(index)#.toMap()

        # Gökmen's converter
        keymap = dict(map(lambda x: (str(x[0]), unicode(x[1])), keymap.iteritems()))

        ctx.installData.keyData = keymap

        ctx.interface.informationWindow.hide()
        # Multiple comma-separated xkb layouts need a toggle hint.
        if "," in keymap["xkblayout"]:
            message = _("Use Alt-Shift to toggle between alternative keyboard layouts")
            ctx.interface.informationWindow.update(message, type="warning")
        else:
            ctx.interface.informationWindow.hide()

        yali.util.setKeymap(keymap["xkblayout"], keymap["xkbvariant"])

    def execute(self):
        # Called when the user advances past this screen.
        ctx.interface.informationWindow.hide()
        ctx.logger.debug("Selected keymap is : %s" % ctx.installData.keyData["name"])
        return True
""" urlresolver Kodi plugin Copyright (C) 2011 t0mm0 Updated by Gujal (C) 2016 This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import re from urlresolver import common from urlresolver.resolver import UrlResolver, ResolverError class NowvideoResolver(UrlResolver): name = "nowvideo" domains = ['nowvideo.eu', 'nowvideo.ch', 'nowvideo.sx', 'nowvideo.co', 'nowvideo.li', 'nowvideo.fo', 'nowvideo.at', 'nowvideo.ec'] pattern = '(?://|\.)(nowvideo\.(?:eu|ch|sx|co|li|fo|at|ec))/(?:video/|embed\.php\?\S*v=)([A-Za-z0-9]+)' def __init__(self): self.net = common.Net() def get_media_url(self, host, media_id): web_url = self.get_url(host, media_id) stream_url = '' html = self.net.http_GET(web_url).content try: r = re.search('flashvars.filekey=(.+?);', html) if r: r = r.group(1) try: filekey = re.compile('\s+%s="(.+?)"' % r).findall(html)[-1] except: filekey = r player_url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (filekey, media_id) html = self.net.http_GET(player_url).content r = re.search('url=(.+?)&', html) if r: stream_url = r.group(1) else: raise ResolverError('File Not Found or removed') except: print "no embedded urls found using first method" try: r = re.search('id="player".*?src="(.*?)"', html, re.DOTALL) if r: stream_url = r.group(1) except: print "no embedded urls found using second method" if stream_url: return '%s%s' % (stream_url, '|Referer=' + web_url) else: raise ResolverError('File Not 
Found or removed') def get_url(self, host, media_id): return 'http://embed.nowvideo.sx/embed/?v=%s' % media_id
"""Post-hoc analysis of cellular-automaton (CA) evolution runs.

Builds a directed influence graph from each individual's connection table,
computes structural (networkx) and information-theoretic (PyCUDA kernels)
metrics per cell and per individual, and stores the results in an HDF5 file.
"""
import sys, os
my_site = os.path.join(os.environ["HOME"], ".local/lib/python2.7/site-packages")
sys.path.insert(0, my_site)

import h5py
import networkx as nx
import numpy as np
import pycuda.driver as cuda
import scipy.stats as st
import sys

import aux
from consts import *


def to_graph(connections):
    """Build a directed graph where an edge (neighbor -> cell) means
    `neighbor` is an input of `cell` in the CA connection table."""
    graph = nx.DiGraph()
    ca_size = connections.shape[0]
    for cell in xrange(ca_size):
        for neighbor in connections[cell]:
            graph.add_edge(neighbor, cell)
        # Count the number of rewired connection this cell has, i.e. how many
        # inputs differ from the regular radius-3 ring lattice neighborhood.
        graph.node[cell]['rew'] = (connections[cell] !=
                                   (np.arange(cell - 3, cell + 4) %
                                    ca_size)).sum()
    return graph


class AnalysisIndividual:
    """Per-individual analysis: graph metrics, limit heuristic and
    information-theoretic measures computed over stored executions."""

    # Class-level flag/kernels: the CUDA module is compiled once and shared by
    # all instances (kernel sizes are baked in from the first instance).
    __cuda_module = False

    def __init__(self, individual, correct, executions, ca_size,
                 connection_radius, ca_iterations, ca_repeat, k_history,
                 save_executions=0):
        self.__ca_size = ca_size
        self.__connection_radius = connection_radius
        self.__n_connections = 2 * self.__connection_radius + 1
        self.__ca_iterations = ca_iterations
        self.__ca_repeat = ca_repeat
        self.__k_history = k_history
        # Number of distinct binary histories of length k.
        self.__n_possible_history = 2 ** self.__k_history
        self.__n_observations = self.__ca_repeat * \
            (self.__ca_iterations - self.__k_history + 1)
        self.__save_executions = save_executions
        self.__individual = individual
        self.__individual_number = self.__individual.number
        self.__rules = self.__individual.gene_rules
        self.__connections = self.__individual.connections
        self.__graph = to_graph(self.__connections)
        self.__executions = executions
        # Initial-configuration density of 1s, per repetition.
        density = np.mean(self.__executions[:, 0], axis=1)
        self.__majority = np.round(density).astype(np.uint32)
        # The closer the density is to .5 the harder the configuration is to
        # decide
        self.__difficult = 1 - np.abs(density - .5) / .5
        # Checking which repetitions converged to a single state
        self.__converged = np.all(self.__executions[:, -1] ==
                                  self.__executions[:, -1, 0].reshape(-1, 1),
                                  axis=1)
        # Checking how many cells in each repetition converged to the right
        # state
        self.__cells_correct = np.mean(self.__executions[:, -1] ==
                                       self.__majority.reshape(-1, 1), axis=1)
        self.__correct = correct
        self.__fitness = np.mean(self.__correct)
        # Lazily-computed caches (None == not computed yet).
        self.__gini = None
        self.__limits = None
        self.__entropy_rate = None
        self.__base_table = None
        self.__correlations = None
        # Initialize the CUDA module
        if not AnalysisIndividual.__cuda_module:
            AnalysisIndividual.__cuda_module = True
            cuda_module = aux.CudaModule('analysis.cu',
                                         (self.__ca_size,
                                          self.__ca_iterations,
                                          self.__ca_repeat,
                                          self.__connection_radius,
                                          self.__n_connections,
                                          self.__n_observations,
                                          self.__k_history,
                                          self.__n_possible_history))
            AnalysisIndividual.__kernel_calc_diffs = \
                cuda_module.get_function("kernel_calc_diffs")
            AnalysisIndividual.__kernel_probabilities = \
                cuda_module.get_function("kernel_probabilities")
            AnalysisIndividual.__kernel_active_storage = \
                cuda_module.get_function("kernel_active_storage")
            AnalysisIndividual.__kernel_entropy_rate = \
                cuda_module.get_function("kernel_entropy_rate")

    def __calculate_gini(self, values):
        # Calculate the Gini coefficient to measure the inequality in a
        # distribution of values
        cum_values = np.sort(values).cumsum()
        return 1 - (cum_values[0] + (cum_values[1:] + cum_values[:-1]).sum()) \
            / float(cum_values[-1] * cum_values.size)

    def __get_limits(self):
        # This function implements a heuristic to calculate how many times a
        # cell has the role of "limit" of a diffusion in a simulation.
        # The main idea here is that, usually, information in cellular automata
        # flows in a given direction at a constant speed. If we know this
        # direction and speed, we can check how many times a cell interrupts a
        # flow.
        sum_diffs = np.zeros(self.__ca_size, dtype=np.uint32)
        try:
            self.__kernel_calc_diffs(cuda.In(self.__majority),
                                     cuda.In(self.__executions),
                                     cuda.InOut(sum_diffs),
                                     block=(self.__ca_size, 1, 1),
                                     grid=(1,))
            cuda.Context.synchronize()
        except cuda.Error as e:
            sys.exit("CUDA: Execution failed ('%s')!" % e)
        # For all repetitions, calculate the ratio of total iterations each
        # cell acted as a "limit"
        self.__limits = sum_diffs / \
            float(self.__ca_repeat * self.__ca_iterations)

    def get_individual_info(self):
        """Compute (once) and return the per-individual summary table."""
        if self.__gini != None:
            # If all metrics are already computed, just return them!
            # NOTE(review): this early return references
            # self.__average_k_neigh, which is not assigned anywhere in this
            # method's computed path — verify it isn't dead/broken code.
            return self.__fitness, self.__gini, self.__prop_max_min, \
                self.__individual.epoch, self.__individual_number, \
                self.__clustering, self.__average_k_neigh, \
                self.__average_shortest_path, self.__diameter
        self.__get_limits()
        self.__gini = self.__calculate_gini(self.__limits)
        self.__prop_max_min = self.__limits.max() / self.__limits.min()
        # As clustering coefficient is not defined for directed graphs, we
        # convert the graph to its undirected version
        self.__clustering = nx.average_clustering(nx.Graph(self.__graph))
        self.__average_shortest_path = \
            nx.average_shortest_path_length(self.__graph)
        try:
            self.__diameter = nx.diameter(self.__graph)
        except nx.exception.NetworkXError:
            # Diameter is undefined for non-(strongly-)connected graphs.
            self.__diameter = float('nan')
        self.__convergence = np.mean(self.__converged)
        table_individual = {
            # Serial number
            "i_num": np.array([self.__individual_number], dtype=np.int),
            # Individual fitness
            "fit": np.array([self.__fitness], dtype=np.float),
            # Ratio of the repetitions that converged to a single state
            "conv": np.array([self.__convergence], dtype=np.float),
            # gini and max_min are metrics intended to measure the inequality
            # in the number of times each cell is a "limit"
            "gini": np.array([self.__gini], dtype=np.float),
            "max_min": np.array([self.__prop_max_min], dtype=np.float),
            # Epoch in the evolution
            "epoch": np.array([self.__individual.epoch], dtype=np.float),
            # Clustering coefficient
            "clust": np.array([self.__clustering], dtype=np.float),
            # Average shortests path between each pair of cells
            "short": np.array([self.__average_shortest_path],
                              dtype=np.float),
            # Maximum distance between any two cells
            "diam": np.array([self.__diameter], dtype=np.float)}
        return table_individual

    def __get_probs_entropy(self):
        # Calculate information theoretical metrics to evaluate the
        # computational role of each cell
        if self.__entropy_rate != None:
            # If all metrics are already computed, just return them!
            return self.__entropy_rate, self.__active_storage, \
                self.__cond_entropy
        # Joint/marginal probability tables over (cell, k-history, next state),
        # filled in by the CUDA kernel.
        p_joint_table = np.zeros((self.__ca_size, self.__n_possible_history,
                                  2), dtype=np.float32)
        p_prev_table = np.zeros((self.__ca_size, self.__n_possible_history),
                                dtype=np.float32)
        p_curr_table = np.zeros((self.__ca_size, 2), dtype=np.float32)
        try:
            self.__kernel_probabilities(cuda.In(self.__executions),
                                        cuda.InOut(p_joint_table),
                                        cuda.InOut(p_prev_table),
                                        cuda.InOut(p_curr_table),
                                        block=(self.__ca_size, 1, 1),
                                        grid=(self.__ca_repeat, 1, 1))
            cuda.Context.synchronize()
        except cuda.Error as e:
            sys.exit("CUDA: Execution failed!\n'%s'" % e)
        # The entropy rate is a measure of the uncertainty in a cell's state
        # given its past
        self.__entropy_rate = np.zeros(self.__ca_size, dtype=np.float32)
        # The active information storage is the amount of past information
        # currently in use by a cell, i.e., its memory
        self.__active_storage = np.zeros(self.__ca_size, dtype=np.float32)
        try:
            self.__kernel_entropy_rate(cuda.In(p_joint_table),
                                       cuda.In(p_prev_table),
                                       cuda.InOut(self.__entropy_rate),
                                       block=(self.__ca_size, 1, 1))
            cuda.Context.synchronize()
            # Accumulate active storage over every sliding window of
            # k_history+1 consecutive iterations.
            for i in xrange(self.__ca_iterations - self.__k_history):
                ca_aux = np.array(self.__executions[:,
                                                    i:i + self.__k_history + 1,
                                                    :])
                self.__kernel_active_storage(cuda.In(ca_aux),
                                             cuda.In(p_joint_table),
                                             cuda.In(p_prev_table),
                                             cuda.In(p_curr_table),
                                             cuda.InOut(self.__active_storage),
                                             block=(self.__ca_size, 1, 1),
                                             grid=(self.__ca_repeat, 1, 1))
                cuda.Context.synchronize()
        except cuda.Error as e:
            sys.exit("CUDA: Execution failed!\n'%s'" % e)
        # Conditional entropy H(X_t | history) from the probability tables.
        # NOTE(review): the local name `aux` shadows the imported module
        # `aux` for the rest of this method — consider renaming.
        aux = np.multiply(p_joint_table,
                          np.log2(np.divide(p_prev_table.
                                            reshape(p_prev_table.shape +
                                                    (1,)), p_joint_table)))
        # 0 * log(0/x) is defined as 0 here (zero-probability events).
        aux[p_joint_table == 0] = 0
        self.__cond_entropy = np.sum(aux, axis=(1, 2)) / self.__n_observations
        return self.__entropy_rate, self.__active_storage, self.__cond_entropy

    def get_cells_info(self):
        """Compute (caching the graph part) and return the per-cell table:
        limit ratios, information metrics and graph centralities."""
        self.__get_limits()
        self.__get_probs_entropy()
        full_data = {
            "lim": self.__limits,
            "ent_rt": self.__entropy_rate,
            "act_st": self.__active_storage,
            "cond_ent": self.__cond_entropy}
        if self.__base_table == None:
            # Calculate graph measures
            order = sorted(self.__graph.nodes())
            pagerank = nx.pagerank(self.__graph)
            pagerank = np.array([pagerank[k] for k in order], dtype=np.float)
            try:
                hubs, authorities = nx.hits(self.__graph, 1000)
                hubs = np.array([hubs[k] for k in order], dtype=np.float)
                authorities = np.array([authorities[k] for k in order],
                                       dtype=np.float)
            except nx.exception.NetworkXError:
                # HITS failed to converge: fill with NaNs.
                hubs = np.repeat(float('nan'),
                                 self.__ca_size).astype(np.float)
                authorities = hubs
            try:
                eccentricity = nx.eccentricity(self.__graph)
                eccentricity = np.array([eccentricity[k] for k in order],
                                        dtype=np.float)
            except nx.exception.NetworkXError:
                eccentricity = np.repeat(float('nan'), self.__ca_size). \
                    astype(np.float)
            closeness = nx.closeness_centrality(self.__graph)
            closeness = np.array([closeness[k] for k in order],
                                 dtype=np.float)
            # Closeness on the reversed graph (outgoing influence).
            closeness_reverse = nx.closeness_centrality(
                self.__graph.reverse(True))
            closeness_reverse = np.array([closeness_reverse[k]
                                          for k in order], dtype=np.float)
            betweenness = nx.betweenness_centrality(self.__graph)
            betweenness = np.array([betweenness[k] for k in order],
                                   dtype=np.float)
            try:
                eigenvector = nx.eigenvector_centrality(self.__graph, 1000)
                eigenvector = np.array([eigenvector[k] for k in order],
                                       dtype=np.float)
            except nx.exception.NetworkXError:
                eigenvector = np.repeat(float('nan'), self.__ca_size). \
                    astype(np.float)
            load = nx.load_centrality(self.__graph)
            load = np.array([load[k] for k in order], dtype=np.float)
            # Clustering is undefined on digraphs: use undirected copy.
            clustering = nx.clustering(nx.Graph(self.__graph))
            clustering = np.array([clustering[k] for k in order],
                                  dtype=np.float)
            in_degree = nx.in_degree_centrality(self.__graph)
            in_degree = np.array([in_degree[k] for k in order],
                                 dtype=np.float)
            out_degree = nx.out_degree_centrality(self.__graph)
            out_degree = np.array([out_degree[k] for k in order],
                                  dtype=np.float)
            rewires = np.array([self.__graph.node[k]['rew'] for k in order],
                               dtype=np.float)
            average_k_neigh = nx.average_neighbor_degree(self.__graph)
            average_k_neigh = np.array([average_k_neigh[k] for k in order],
                                       dtype=np.float)
            self.__base_table = {
                "epoch": np.repeat(self.__individual.epoch,
                                   self.__ca_size).astype(np.int),
                "i_num": np.repeat(self.__individual_number,
                                   self.__ca_size).astype(np.int),
                "pr": pagerank,
                "hub": hubs,
                "auth": authorities,
                "ecc": eccentricity,
                "cls": closeness,
                "cls_rev": closeness_reverse,
                "btw": betweenness,
                "eig": eigenvector,
                "load": load,
                "cltr": clustering,
                "ind": in_degree,
                "outd": out_degree,
                "rew": rewires,
                "kneigh": average_k_neigh}
        # Python 2 dict.items() returns lists, so '+' merges the two tables.
        return dict(full_data.items() + self.__base_table.items())

    def save_executions(self):
        # Save space-time diagrams of some executions
        for i in np.random.choice(range(self.__executions.shape[0]),
                                  self.__save_executions, replace=False):
            aux.save_as_image(self.__executions[i],
                              "images/i%04d" % self.__individual_number,
                              "execution-%06d.png" % i)


class Analysis:
    """Aggregates AnalysisIndividual results for a whole evolution run and
    persists them incrementally to an HDF5 file."""

    # Number of individuals added so far (class-wide counter).
    elems = 0

    def __init__(self, data_file, ca_size, ca_iterations, ca_repeat,
                 connection_radius, k_history, save_executions=0):
        self.__ca_size = ca_size
        self.__ca_iterations = ca_iterations
        self.__ca_repeat = ca_repeat
        self.__connection_radius = connection_radius
        self.__k_history = k_history
        self.__save_executions = save_executions
        # "w-" fails if the file already exists (no silent overwrite).
        self.__data_file = h5py.File(data_file, "w-")

    def add_individual(self, individual):
        """Analyze one individual and append its tables to the HDF5 file."""
        # Run simulations with densities uniformly distributed in [0, 1],
        # storing execution data for posterior analysis
        correct, executions = individual.get_execution_data(UNIFORM_RHO)
        # Perform individual analysis
        individual = AnalysisIndividual(individual, correct, executions,
                                        self.__ca_size,
                                        self.__connection_radius,
                                        self.__ca_iterations,
                                        self.__ca_repeat,
                                        self.__k_history,
                                        save_executions=self.__save_executions)
        Analysis.elems += 1
        table_cells = individual.get_cells_info()
        table_individual = individual.get_individual_info()
        individual.save_executions()
        # Free the (large) execution arrays before the next individual.
        del correct
        del executions
        del individual
        # Store the individual analysis in a HDF5 file
        group = self.__data_file.create_group("individual%d" %
                                              table_individual["i_num"])
        cells_grp = group.create_group("cells")
        for key, values in table_cells.iteritems():
            cells_grp.create_dataset(key, data=values, shape=values.shape,
                                     dtype=values.dtype)
        individuals_grp = group.create_group("individuals")
        for key, values in table_individual.iteritems():
            individuals_grp.create_dataset(key, data=values,
                                           shape=values.shape,
                                           dtype=values.dtype)
        self.__data_file.flush()

    def get_table(self):
        """Read every stored individual back and concatenate the per-key
        arrays into one table per group ('cells' / 'individuals')."""
        table = {
            "cells": {},
            "individuals": {}}
        for individual_grp in self.__data_file.values():
            for group in ["cells", "individuals"]:
                for key, values in individual_grp[group].iteritems():
                    try:
                        table[group][key].append(values.value)
                    except KeyError:
                        # First individual seen for this key.
                        table[group][key] = [values.value]
        for group_values in table.values():
            for key, values in group_values.iteritems():
                group_values[key] = np.concatenate(values)
        return table

    def get_correlations(self):
        """Spearman-correlate a few reference metrics against all others,
        both per cell and per individual."""
        table = self.get_table()
        correlations = {'cells': {}, 'individuals': {}}
        refs_cells = ['lim', 'cls_rev']
        for ref in refs_cells:
            correlations['cells'][ref] = {}
            ref_cell = table['cells'][ref]
            for key, values in table['cells'].iteritems():
                if key == ref:
                    continue
                correlations['cells'][ref][key] = \
                    st.spearmanr(ref_cell, values)
        refs_individuals = ['gini', 'max_min', 'short', 'fit']
        for ref in refs_individuals:
            correlations['individuals'][ref] = {}
            ref_individual = table['individuals'][ref]
            for key, values in table['individuals'].iteritems():
                if key == ref:
                    continue
                correlations['individuals'][ref][key] = \
                    st.spearmanr(ref_individual, values)
        return correlations
# coding=utf-8
"""Unit tests for import_resolver, a TypeScript import-dependency scanner."""
from __future__ import print_function, unicode_literals

__author__ = "Sally Wilsak"

import codecs
import os
import sys
import textwrap
import unittest

import import_resolver

# Re-wrap stdout/stderr so non-ASCII test data (e.g. Cyrillic paths) prints
# cleanly on terminals that are not already UTF-8.
if sys.stdout.encoding != 'utf8':
    sys.stdout = codecs.getwriter('utf8')(sys.stdout, 'strict')
if sys.stderr.encoding != 'utf8':
    sys.stderr = codecs.getwriter('utf8')(sys.stderr, 'strict')


def simple_normpath(path):
    """On Windows, normpath substitutes back slashes into the file path.
    This makes cross-platform testing difficult since we're checking string output.
    But the test cases have simple filepaths so we can substitute something simpler for the tests.
    """
    return path.replace("./", "")


def simple_join(path, *args):
    """ Make os.path.join work the same on Windows and Linux. Again this is ok because the test cases have simple paths """
    elements = [path]
    elements.extend(args)
    return "/".join(elements)


class TestImportResolver(unittest.TestCase):
    """Exercises extract_import_files, format_line and do_dependency_resolve."""

    def setUp(self):
        # Monkey-patch some path manipulations so we can string match with
        # Unix-style paths and Windows won't mess them up
        import_resolver.os.path.normpath = simple_normpath
        import_resolver.os.path.join = simple_join

    def test_line_extraction(self):
        # Degenerate inputs: empty text and non-TypeScript text yield nothing.
        self.assertEqual(import_resolver.extract_import_files(""), [])
        self.assertEqual(import_resolver.extract_import_files("This isn't TypeScript.\nBut it does have multiple lines."), [])
        self.assertEqual(import_resolver.extract_import_files("import thing = require('./thing.ts');"), ["./thing.ts"])
        import_statements = textwrap.dedent("""
            // Comments should get ignored, of course
            import first = require('./lib/first.ts');
            // Different amounts of whitespace should be ok
            import second=require('./second.ts') ; // so should other stuff at the end
            // Double quotes are also ok
            import _THIRD = require("./third.ts")
            // So is something that's not a ts file, but it gets .ts added
            import fourth = require("../fourth/file/path")
            // A Windows-style path doesn't match...
            import fifth = require("C:\\fifth.ts")
            // ...neither does an absolute Unix-style path...
            import sixth = require("/home/user6/sixth.ts")
            // ...but this mixed-up one does
            import seventh = require('./folder\\folder\\seventh.ts')
            // Capitalizing the keywords means it doesn't match
            Import eighth = Require('./eighth.ts')
            // Something that's not a file path doesn't match
            import ninth = require('ninth')
            // If it's not at the start of the line, it doesn't match
            some stuff import tenth = require('./tenth.ts')
            // And for good measure, a non-ASCII file path should work
            import eleventh = require('./одиннадцать.ts')
            """)
        expected_filenames = [
            "./lib/first.ts",
            "./second.ts",
            "./third.ts",
            "../fourth/file/path.ts",
            "./folder\\folder\\seventh.ts",
            "./одиннадцать.ts",
        ]
        self.assertEqual(import_resolver.extract_import_files(import_statements), expected_filenames)

    def test_format(self):
        # Spaces inside importer paths are escaped with a backslash.
        files = ["/badger/badger", "C:\\badger.ts", "/bad ger/snake.ts"]
        self.assertEqual(import_resolver.format_line("/file/name.ts", files), "/file/name.ts <- /badger/badger C:\\badger.ts /bad\\ ger/snake.ts")

    def test_circular_deps(self):
        # a -> b -> d -> a is a cycle; resolution must still terminate.
        circular_deps = {
            "/home/badger/a.ts": "import b = require('./b.ts');\nimport c = require('./c.ts');",
            "/home/badger/b.ts": "import d = require('./d.ts');",
            "/home/badger/c.ts": "",
            "/home/badger/d.ts": "import a = require('./a.ts');",
        }
        import_resolver.read_file = lambda x: circular_deps[x]
        expected_string = "\n".join([
            "/home/badger/c.ts <- /home/badger/a.ts",
            "/home/badger/d.ts <- /home/badger/b.ts",
            "/home/badger/a.ts <- /home/badger/d.ts",
            "/home/badger/b.ts <- /home/badger/a.ts",
        ])
        self.assertEqual(import_resolver.do_dependency_resolve(["/home/badger/a.ts"]), expected_string)

    def test_triangle_deps(self):
        # c is imported both directly (a -> c) and transitively (a -> b -> c).
        triangle_deps = {
            "/home/badger/a.ts": "import b = require('./b.ts');\nimport c = require('./c.ts');",
            "/home/badger/b.ts": "import c = require('./c.ts');",
            "/home/badger/c.ts": "",
        }
        import_resolver.read_file = lambda x: triangle_deps[x]
        expected_string = "\n".join([
            "/home/badger/c.ts <- /home/badger/a.ts /home/badger/b.ts",
            "/home/badger/a.ts <- ",
            "/home/badger/b.ts <- /home/badger/a.ts",
        ])
        self.assertEqual(import_resolver.do_dependency_resolve(["/home/badger/a.ts"]), expected_string)

    def test_inaccessible_deps(self):
        # An unreadable file is reported at the end rather than aborting.
        def inaccessible_deps(filename):
            if "a.ts" in filename:
                return "import b = require('./b.ts');"
            elif "b.ts" in filename:
                return "import c = require('./c.ts');"
            raise IOError
        import_resolver.read_file = inaccessible_deps
        expected_string = "\n".join([
            "/home/badger/c.ts <- /home/badger/b.ts",
            "/home/badger/a.ts <- ",
            "/home/badger/b.ts <- /home/badger/a.ts",
            "Cannot read file '/home/badger/c.ts'",
        ])
        self.assertEqual(import_resolver.do_dependency_resolve(["/home/badger/a.ts"]), expected_string)

    def test_lists(self):
        # Multiple importers of the same file are listed on one line.
        lists_deps = {
            "/home/badger/a.ts": "import b = require('./b.ts');\nimport c = require('./c.ts');\nimport d = require('./d.ts');",
            "/home/badger/b.ts": "import c = require('./c.ts');\nimport d = require('./d.ts');",
            "/home/badger/c.ts": "import d = require('./d.ts');",
            "/home/badger/d.ts": "",
        }
        import_resolver.read_file = lambda x: lists_deps[x]
        expected_string = "\n".join([
            "/home/badger/c.ts <- /home/badger/a.ts /home/badger/b.ts",
            "/home/badger/d.ts <- /home/badger/a.ts /home/badger/b.ts /home/badger/c.ts",
            "/home/badger/a.ts <- ",
            "/home/badger/b.ts <- /home/badger/a.ts",
        ])
        self.assertEqual(import_resolver.do_dependency_resolve(["/home/badger/a.ts"]), expected_string)
"""Poll a Twitter followers page in a loop, counting reads that contain the
marker string "eason"."""
from twitter_rec import Api
import time

# SECURITY(review): credentials are hard-coded in source control — move them
# to environment variables or a config file outside the repository.
USERNAME = "liaoyisheng89@sina.com"
PASSWD = "bigdata"

s = Api.Session(USERNAME, PASSWD, debug=False)
s.connect()
counter = 0
while True:
    # Fetch the followers page; `_` holds the raw page content.
    _ = s.read("/AllenboChina/followers")
    if "eason" in _:
        # Single-arg print() is valid and identical in Python 2 and 3.
        print(counter)
        counter += 1
    else:
        # Was `assert False`: asserts are stripped under `python -O`, which
        # would silently swallow the failure.  Fail loudly instead.
        raise RuntimeError("marker 'eason' missing from followers page")
# Minimal webcam viewer built on the legacy OpenCV `cv` API.
import cv

if __name__ == "__main__":
    # Device -1 picks the first available camera.
    cam = cv.CaptureFromCAM(-1)
    cv.NamedWindow("image")
    while True:
        # Show frames until the user presses Esc (keycode 27, masked to the
        # low byte to cope with platform-specific high bits).
        cv.ShowImage("image", cv.QueryFrame(cam))
        key = cv.WaitKey(10)
        if key % 256 == 27:
            break
    cv.DestroyWindow("image")
"""PyGTK pane that lists detected system problems ("cure objects") and lets
the user select and apply fixes."""
from __future__ import with_statement
import gtk, gobject, sys, os
import pango
from lib import *
from libu import *


class ComputerDoctorPane(gtk.VBox):
    """VBox holding a sortable checklist of cure objects plus
    Refresh/Apply buttons.

    Each liststore row is [to_apply(bool), cure_obj]; a cure object is
    expected to provide .type, .detail, .exists() and .cure()."""
    icon = D+'sora_icons/m_computer_doctor.png'
    text = _('Computer\nDoctor')

    def render_type_func(self, column, cell, model, iter):
        # Cell-data func: pick the icon from cure_obj.type
        # (0 = must-fix, 1 = suggestion, judging by the two pixbufs).
        cure_obj = model.get_value(iter, 1)
        pixbuf = [self.icon_must_fix, self.icon_suggestion][cure_obj.type]
        cell.set_property('pixbuf', pixbuf)

    def render_text_func(self, column, cell, model, iter):
        # Cell-data func: bold docstring as the title, detail underneath.
        cure_obj = model.get_value(iter, 1)
        markup = '<b>%s</b>' % cure_obj.__doc__
        if cure_obj.detail:
            markup += '\n' + cure_obj.detail
        cell.set_property('markup', markup)

    def toggled(self, render_toggle, path, sortedstore):
        # The toggle path refers to the sorted view; map it back to the
        # underlying liststore before flipping the checkbox.
        path = sortedstore.convert_path_to_child_path(path)
        self.liststore[path][0] = not self.liststore[path][0]
        # Enable "Apply" iff at least one row is checked.
        sensitive = False
        for row in self.liststore:
            to_apply = row[0]
            sensitive = sensitive or to_apply
        self.button_apply.set_sensitive(sensitive)

    def sort_by_type(self, model, iter1, iter2):
        # Sort func 1000: by cure type, then by title.
        obj1 = model.get_value(iter1, 1)
        obj2 = model.get_value(iter2, 1)
        if obj1 and obj2:
            return cmp(obj1.type, obj2.type) or cmp(obj1.__doc__, obj2.__doc__)
        else:
            return 0

    def sort_by_text(self, model, iter1, iter2):
        # Sort func 1001: by title only.
        obj1 = model.get_value(iter1, 1)
        obj2 = model.get_value(iter2, 1)
        if obj1 and obj2:
            return cmp(obj1.__doc__, obj2.__doc__)
        else:
            return 0

    def refresh(self):
        """Re-scan the cure objects and rebuild the list and summary text."""
        self.liststore.clear()
        for obj in self.cure_objs:
            if obj.exists():
                # Rows start unchecked.
                self.liststore.append([False, obj])
        self.sortedstore.set_sort_column_id(1000, gtk.SORT_ASCENDING)
        self.button_apply.set_sensitive(False)
        self.show_text('')
        must_fix = 0
        for row in self.liststore:
            obj = row[1]
            if obj.type == C.MUST_FIX:
                must_fix += 1
        text = ''
        if len(self.liststore):
            if must_fix:
                text += _('Found %s errors in your system.') % must_fix
                text += ' '
            text += _('There is a total of %s suggestions.') % len(self.liststore)
        else:
            text = _('Found no error :)')
        self.show_text(text)

    def apply(self):
        """Run .cure() on every checked row, then refresh and notify.

        NOTE(review): this method name shadows the builtin apply() and
        gtk.Widget method resolution — confirm nothing relies on either."""
        success = 0
        for row in self.liststore:
            apply = row[0]
            if apply:
                obj = row[1]
                try:
                    obj.cure()
                    success += 1
                except:
                    # Best-effort: log the failure and continue with the rest.
                    print_traceback()
        self.refresh()
        if success:
            notify(_('Computer doctor'), _('Successfully applied %s suggestions.') % success)

    def show_text(self, text):
        # The text column's header doubles as the status line.
        self.column_text.set_title(text)

    def __init__(self, main_view, cure_objs):
        self.cure_objs = cure_objs
        self.icon_must_fix = get_pixbuf(D+'sora_icons/c_must_fix.png', 24, 24)
        self.icon_suggestion = get_pixbuf(D+'sora_icons/c_suggestion.png', 24, 24)
        self.liststore = liststore = gtk.ListStore(bool, gobject.TYPE_PYOBJECT) # apply?, cure_object
        # Sorted wrapper: custom sort ids 1000 (type) and 1001 (text).
        self.sortedstore = sortedstore = gtk.TreeModelSort(liststore)
        sortedstore.set_sort_func(1000, self.sort_by_type)
        sortedstore.set_sort_func(1001, self.sort_by_text)
        render_toggle = gtk.CellRendererToggle()
        render_toggle.connect('toggled', self.toggled, sortedstore)
        render_type = gtk.CellRendererPixbuf()
        render_text = gtk.CellRendererText()
        render_text.set_property('ellipsize', pango.ELLIPSIZE_END)
        # Column 1: checkbox bound to liststore column 0.
        column_toggle = gtk.TreeViewColumn()
        column_toggle.pack_start(render_toggle, False)
        column_toggle.add_attribute(render_toggle, 'active', 0)
        column_toggle.set_sort_column_id(0)
        # Column 2: type icon.
        column_type = gtk.TreeViewColumn()
        column_type.pack_start(render_type, False)
        column_type.set_cell_data_func(render_type, self.render_type_func)
        column_type.set_sort_column_id(1000)
        # Column 3: title/detail markup; its header shows status text.
        self.column_text = column_text = gtk.TreeViewColumn()
        column_text.pack_start(render_text)
        column_text.set_cell_data_func(render_text, self.render_text_func)
        column_text.set_sort_column_id(1001)
        self.view = view = gtk.TreeView(sortedstore)
        view.set_rules_hint(True)
        view.append_column(column_toggle)
        view.append_column(column_type)
        view.append_column(column_text)
        scroll = gtk.ScrolledWindow()
        scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
        scroll.set_shadow_type(gtk.SHADOW_IN)
        scroll.add(view)
        button_refresh = image_stock_button(gtk.STOCK_REFRESH, _('Refresh'))
        button_refresh.connect('clicked', lambda *w: self.refresh())
        self.button_apply = button_apply = image_stock_button(gtk.STOCK_APPLY, _('Apply'))
        button_apply.connect('clicked', lambda *w: self.apply())
        button_apply.set_sensitive(False)
        button_box = gtk.HBox(False, 10)
        button_box.pack_start(button_refresh, False)
        button_box.pack_start(button_apply, False)
        gtk.VBox.__init__(self, False, 10)
        self.set_border_width(5)
        self.pack_start(button_box, False)
        self.pack_start(scroll)
        self.show_text(_('Please click "refresh" button.'))
        self.refresh()
import os import re import netifaces as ni from socket import * from Components.Console import Console from Components.PluginComponent import plugins from Plugins.Plugin import PluginDescriptor from boxbranding import getBoxType class Network: def __init__(self): self.ifaces = {} self.configuredNetworkAdapters = [] self.NetworkState = 0 self.DnsState = 0 self.nameservers = [] self.ethtool_bin = "/usr/sbin/ethtool" self.console = Console() self.linkConsole = Console() self.restartConsole = Console() self.deactivateInterfaceConsole = Console() self.activateInterfaceConsole = Console() self.resetNetworkConsole = Console() self.dnsConsole = Console() self.pingConsole = Console() self.config_ready = None self.friendlyNames = {} self.lan_interfaces = [] self.wlan_interfaces = [] self.remoteRootFS = None self.getInterfaces() def onRemoteRootFS(self): if self.remoteRootFS is None: import Harddisk for parts in Harddisk.getProcMounts(): if parts[1] == '/' and parts[2] == 'nfs': self.remoteRootFS = True break else: self.remoteRootFS = False return self.remoteRootFS def isBlacklisted(self, iface): return iface in ('lo', 'wifi0', 'wmaster0', 'sit0', 'tun0', 'sys0', 'p2p0') def getInterfaces(self, callback=None): self.configuredInterfaces = [] for device in self.getInstalledAdapters(): self.getAddrInet(device, callback) # helper function def regExpMatch(self, pattern, string): if string is None: return None try: return pattern.search(string).group() except AttributeError: return None # helper function to convert ips from a sring to a list of ints def convertIP(self, ip): return [int(n) for n in ip.split('.')] def getAddrInet(self, iface, callback): data = {'up': False, 'dhcp': False, 'preup': False, 'predown': False} try: data['up'] = int(open('/sys/class/net/%s/flags' % iface).read().strip(), 16) & 1 == 1 if data['up']: self.configuredInterfaces.append(iface) nit = ni.ifaddresses(iface) data['ip'] = self.convertIP(nit[ni.AF_INET][0]['addr']) # ipv4 data['netmask'] = 
self.convertIP(nit[ni.AF_INET][0]['netmask']) data['bcast'] = self.convertIP(nit[ni.AF_INET][0]['broadcast']) data['mac'] = nit[ni.AF_LINK][0]['addr'] # mac data['gateway'] = self.convertIP(ni.gateways()['default'][ni.AF_INET][0]) # default gw except: data['dhcp'] = True data['ip'] = [0, 0, 0, 0] data['netmask'] = [0, 0, 0, 0] data['gateway'] = [0, 0, 0, 0] self.ifaces[iface] = data self.loadNetworkConfig(iface, callback) def writeNetworkConfig(self): self.configuredInterfaces = [] fp = file('/etc/network/interfaces', 'w') fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n") fp.write("auto lo\n") fp.write("iface lo inet loopback\n\n") for ifacename, iface in self.ifaces.items(): if iface['up']: fp.write("auto " + ifacename + "\n") self.configuredInterfaces.append(ifacename) if iface['dhcp']: fp.write("iface " + ifacename + " inet dhcp\n") fp.write("udhcpc_opts -T1 -t9\n") if not iface['dhcp']: fp.write("iface " + ifacename + " inet static\n") if 'ip' in iface: print tuple(iface['ip']) fp.write(" address %d.%d.%d.%d\n" % tuple(iface['ip'])) fp.write(" netmask %d.%d.%d.%d\n" % tuple(iface['netmask'])) if 'gateway' in iface: fp.write(" gateway %d.%d.%d.%d\n" % tuple(iface['gateway'])) if "configStrings" in iface: fp.write(iface["configStrings"]) if iface["preup"] is not False and "configStrings" not in iface: fp.write(iface["preup"]) if iface["predown"] is not False and "configStrings" not in iface: fp.write(iface["predown"]) fp.write("\n") fp.close() self.configuredNetworkAdapters = self.configuredInterfaces self.writeNameserverConfig() def writeNameserverConfig(self): fp = file('/etc/resolv.conf', 'w') for nameserver in self.nameservers: fp.write("nameserver %d.%d.%d.%d\n" % tuple(nameserver)) fp.close() def loadNetworkConfig(self, iface, callback=None): interfaces = [] # parse the interfaces-file try: fp = file('/etc/network/interfaces', 'r') interfaces = fp.readlines() fp.close() except: print "[Network.py] interfaces - opening failed" 
ifaces = {} currif = "" for i in interfaces: split = i.strip().split(' ') if split[0] == "iface": currif = split[1] ifaces[currif] = {} if len(split) == 4 and split[3] == "dhcp": ifaces[currif]["dhcp"] = True else: ifaces[currif]["dhcp"] = False if currif == iface: #read information only for available interfaces if split[0] == "address": ifaces[currif]["address"] = map(int, split[1].split('.')) if "ip" in self.ifaces[currif]: if self.ifaces[currif]["ip"] != ifaces[currif]["address"] and ifaces[currif]["dhcp"] == False: self.ifaces[currif]["ip"] = map(int, split[1].split('.')) if split[0] == "netmask": ifaces[currif]["netmask"] = map(int, split[1].split('.')) if "netmask" in self.ifaces[currif]: if self.ifaces[currif]["netmask"] != ifaces[currif]["netmask"] and ifaces[currif]["dhcp"] == False: self.ifaces[currif]["netmask"] = map(int, split[1].split('.')) if split[0] == "gateway": ifaces[currif]["gateway"] = map(int, split[1].split('.')) if "gateway" in self.ifaces[currif]: if self.ifaces[currif]["gateway"] != ifaces[currif]["gateway"] and ifaces[currif]["dhcp"] == False: self.ifaces[currif]["gateway"] = map(int, split[1].split('.')) if split[0] == "pre-up": if "preup" in self.ifaces[currif]: self.ifaces[currif]["preup"] = i if split[0] in ("pre-down", "post-down"): if "predown" in self.ifaces[currif]: self.ifaces[currif]["predown"] = i for ifacename, iface in ifaces.items(): if ifacename in self.ifaces: self.ifaces[ifacename]["dhcp"] = iface["dhcp"] if not self.console.appContainers: # save configured interfacelist self.configuredNetworkAdapters = self.configuredInterfaces # load ns only once self.loadNameserverConfig() print "read configured interface:", ifaces print "self.ifaces after loading:", self.ifaces self.config_ready = True self.msgPlugins() if callback is not None: callback(True) def loadNameserverConfig(self): ipRegexp = "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}" nameserverPattern = re.compile("nameserver +" + ipRegexp) ipPattern = 
re.compile(ipRegexp) resolv = [] try: fp = file('/etc/resolv.conf', 'r') resolv = fp.readlines() fp.close() self.nameservers = [] except: print "[Network.py] resolv.conf - opening failed" for line in resolv: if self.regExpMatch(nameserverPattern, line) is not None: ip = self.regExpMatch(ipPattern, line) if ip: self.nameservers.append(self.convertIP(ip)) print "nameservers:", self.nameservers def getInstalledAdapters(self): return [x for x in os.listdir('/sys/class/net') if not self.isBlacklisted(x)] def getConfiguredAdapters(self): return self.configuredNetworkAdapters def getNumberOfAdapters(self): return len(self.ifaces) def getFriendlyAdapterName(self, x): if x in self.friendlyNames.keys(): return self.friendlyNames.get(x, x) self.friendlyNames[x] = self.getFriendlyAdapterNaming(x) return self.friendlyNames.get(x, x) # when we have no friendly name, use adapter name def getFriendlyAdapterNaming(self, iface): name = None if self.isWirelessInterface(iface): if iface not in self.wlan_interfaces: name = _("WLAN connection") if len(self.wlan_interfaces): name += " " + str(len(self.wlan_interfaces) + 1) self.wlan_interfaces.append(iface) else: if iface not in self.lan_interfaces: if iface == "eth1": name = _("VLAN connection") else: name = _("LAN connection") if len(self.lan_interfaces) and not iface == "eth1": name += " " + str(len(self.lan_interfaces) + 1) self.lan_interfaces.append(iface) return name def getFriendlyAdapterDescription(self, iface): if not self.isWirelessInterface(iface): return _('Ethernet network interface') moduledir = self.getWlanModuleDir(iface) if moduledir: name = os.path.basename(os.path.realpath(moduledir)) if name.startswith('ath') or name.startswith('carl'): name = 'Atheros' elif name.startswith('rt2') or name.startswith('rt3') or name.startswith('rt5') or name.startswith('rt6') or name.startswith('rt7'): name = 'Ralink' elif name.startswith('zd'): name = 'Zydas' elif name.startswith('rtl') or name.startswith('r8'): name = 'Realtek' elif 
name.startswith('smsc'): name = 'SMSC' elif name.startswith('peg'): name = 'Pegasus' elif name.startswith('rn'): name = 'RNDIS' elif name.startswith('mw') or name.startswith('libertas'): name = 'Marvel' elif name.startswith('p5'): name = 'Prism' elif name.startswith('as') or name.startswith('ax'): name = 'ASIX' elif name.startswith('dm'): name = 'Davicom' elif name.startswith('mcs'): name = 'MosChip' elif name.startswith('at'): name = 'Atmel' elif name.startswith('iwm'): name = 'Intel' elif name.startswith('brcm') or name.startswith('bcm'): name = 'Broadcom' elif os.path.isdir('/tmp/bcm/' + iface): name = 'Broadcom' else: name = _('Unknown') return name + ' ' + _('wireless network interface') def getAdapterName(self, iface): return iface def getAdapterList(self): return self.ifaces.keys() def getAdapterAttribute(self, iface, attribute): return self.ifaces.get(iface, {}).get(attribute) def setAdapterAttribute(self, iface, attribute, value): print "setting for adapter", iface, "attribute", attribute, " to value", value if iface in self.ifaces: self.ifaces[iface][attribute] = value def removeAdapterAttribute(self, iface, attribute): if iface in self.ifaces and attribute in self.ifaces[iface]: del self.ifaces[iface][attribute] def getNameserverList(self): if len(self.nameservers) == 0: return [[0, 0, 0, 0], [0, 0, 0, 0]] else: return self.nameservers def clearNameservers(self): self.nameservers = [] def addNameserver(self, nameserver): if nameserver not in self.nameservers: self.nameservers.append(nameserver) def removeNameserver(self, nameserver): if nameserver in self.nameservers: self.nameservers.remove(nameserver) def changeNameserver(self, oldnameserver, newnameserver): if oldnameserver in self.nameservers: for i in range(len(self.nameservers)): if self.nameservers[i] == oldnameserver: self.nameservers[i] = newnameserver def resetNetworkConfig(self, mode='lan', callback=None): self.commands = [] self.commands.append("/etc/init.d/avahi-daemon stop") for iface in 
self.ifaces.keys(): if iface != 'eth0' or not self.onRemoteRootFS(): self.commands.append("/sbin/ip addr flush dev " + iface + " scope global") self.commands.append("/etc/init.d/networking stop") self.commands.append("killall -9 udhcpc") self.commands.append("rm /var/run/udhcpc*") self.resetNetworkConsole.eBatch(self.commands, self.resetNetworkFinishedCB, [mode, callback], debug=True) def resetNetworkFinishedCB(self, extra_args): (mode, callback) = extra_args if not self.resetNetworkConsole.appContainers: self.writeDefaultNetworkConfig(mode, callback) def writeDefaultNetworkConfig(self, mode='lan', callback=None): fp = file('/etc/network/interfaces', 'w') fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n") fp.write("auto lo\n") fp.write("iface lo inet loopback\n\n") if mode == 'wlan': fp.write("auto wlan0\n") fp.write("iface wlan0 inet dhcp\n") if mode == 'wlan-mpci': fp.write("auto ath0\n") fp.write("iface ath0 inet dhcp\n") if mode == 'lan': fp.write("auto eth0\n") fp.write("iface eth0 inet dhcp\n") fp.write("\n") fp.close() self.commands = [] if mode == 'wlan': self.commands.append("/sbin/ifconfig eth0 down") self.commands.append("/sbin/ifconfig ath0 down") self.commands.append("/sbin/ifconfig wlan0 up") if mode == 'wlan-mpci': self.commands.append("/sbin/ifconfig eth0 down") self.commands.append("/sbin/ifconfig wlan0 down") self.commands.append("/sbin/ifconfig ath0 up") if mode == 'lan': self.commands.append("/sbin/ifconfig eth0 up") self.commands.append("/sbin/ifconfig wlan0 down") self.commands.append("/sbin/ifconfig ath0 down") self.commands.append("/etc/init.d/avahi-daemon start") self.resetNetworkConsole.eBatch(self.commands, self.resetNetworkFinished, [mode, callback], debug=True) def resetNetworkFinished(self, extra_args): (mode, callback) = extra_args if not self.resetNetworkConsole.appContainers: if callback is not None: callback(True, mode) def checkNetworkState(self, statecallback): self.NetworkState = 0 self.pingConsole = 
Console() for server in ("www.openpli.org", "www.google.nl", "www.google.com"): self.pingConsole.ePopen(("/bin/ping", "/bin/ping", "-c", "1", server), self.checkNetworkStateFinished, statecallback) def checkNetworkStateFinished(self, result, retval, extra_args): (statecallback) = extra_args if self.pingConsole is not None: if retval == 0: self.pingConsole = None statecallback(self.NetworkState) else: self.NetworkState += 1 if not self.pingConsole.appContainers: statecallback(self.NetworkState) def restartNetwork(self, callback=None): self.config_ready = False self.msgPlugins() self.commands = [] self.commands.append("/etc/init.d/avahi-daemon stop") for iface in self.ifaces.keys(): if iface != 'eth0' or not self.onRemoteRootFS(): self.commands.append(("/sbin/ifdown", "/sbin/ifdown", iface)) self.commands.append("/sbin/ip addr flush dev " + iface + " scope global") self.commands.append("/etc/init.d/networking stop") self.commands.append("killall -9 udhcpc") self.commands.append("rm /var/run/udhcpc*") self.commands.append("/etc/init.d/networking start") self.commands.append("/etc/init.d/avahi-daemon start") self.restartConsole.eBatch(self.commands, self.restartNetworkFinished, callback, debug=True) def restartNetworkFinished(self, extra_args): (callback) = extra_args if callback is not None: callback(True) def getLinkState(self, iface, callback): self.linkConsole.ePopen((self.ethtool_bin, self.ethtool_bin, iface), self.getLinkStateFinished, callback) def getLinkStateFinished(self, result, retval, extra_args): (callback) = extra_args if not self.linkConsole.appContainers: callback(result) def stopPingConsole(self): if self.pingConsole is not None: self.pingConsole.killAll() def stopLinkStateConsole(self): self.linkConsole.killAll() def stopDNSConsole(self): if self.dnsConsole is not None: self.dnsConsole.killAll() def stopRestartConsole(self): self.restartConsole.killAll() def stopGetInterfacesConsole(self): self.console.killAll() def 
stopDeactivateInterfaceConsole(self): self.deactivateInterfaceConsole.killAll() def stopActivateInterfaceConsole(self): self.activateInterfaceConsole.killAll() def checkforInterface(self, iface): if self.getAdapterAttribute(iface, 'up') is True: return True else: ret = os.system("ifconfig " + iface + " up") os.system("ifconfig " + iface + " down") if ret == 0: return True else: return False def checkDNSLookup(self, statecallback): self.DnsState = 0 self.dnsConsole = Console() for server in ("www.openpli.org", "www.google.nl", "www.google.com"): self.dnsConsole.ePopen(("/usr/bin/nslookup", "/usr/bin/nslookup", server), self.checkDNSLookupFinished, statecallback) def checkDNSLookupFinished(self, result, retval, extra_args): (statecallback) = extra_args if self.dnsConsole is not None: if retval == 0: self.dnsConsole = None statecallback(self.DnsState) else: self.DnsState += 1 if not self.dnsConsole.appContainers: statecallback(self.DnsState) def deactivateInterface(self, ifaces, callback=None): self.config_ready = False self.msgPlugins() commands = [] def buildCommands(iface): commands.append(("/sbin/ifdown", "/sbin/ifdown", "-f", iface)) commands.append(("/sbin/ip", "/sbin/ip", "addr", "flush", "dev", iface, "scope", "global")) #wpa_supplicant sometimes doesn't quit properly on SIGTERM if os.path.exists('/var/run/wpa_supplicant/' + iface): commands.append("wpa_cli -i" + iface + " terminate") if isinstance(ifaces, (list, tuple)): for iface in ifaces: if iface != 'eth0' or not self.onRemoteRootFS(): buildCommands(iface) else: if ifaces == 'eth0' and self.onRemoteRootFS(): if callback is not None: callback(True) return buildCommands(ifaces) self.deactivateInterfaceConsole.eBatch(commands, self.deactivateInterfaceFinished, (ifaces, callback), debug=True) def deactivateInterfaceFinished(self, extra_args): (ifaces, callback) = extra_args if not self.deactivateInterfaceConsole.appContainers: if callback is not None: callback(True) def activateInterface(self, iface, 
callback=None): if self.config_ready: self.config_ready = False self.msgPlugins() if iface == 'eth0' and self.onRemoteRootFS(): if callback is not None: callback(True) return commands = [] commands.append(("/sbin/ifup", "/sbin/ifup", iface)) self.activateInterfaceConsole.eBatch(commands, self.activateInterfaceFinished, callback, debug=True) def activateInterfaceFinished(self, extra_args): callback = extra_args if not self.activateInterfaceConsole.appContainers: if callback is not None: callback(True) def sysfsPath(self, iface): return '/sys/class/net/' + iface def isWirelessInterface(self, iface): if iface in self.wlan_interfaces: return True if os.path.isdir(self.sysfsPath(iface) + '/wireless'): return True # r871x_usb_drv on kernel 2.6.12 is not identifiable over /sys/class/net/'ifacename'/wireless so look also inside /proc/net/wireless device = re.compile('[a-z]{2,}[0-9]*:') ifnames = [] fp = open('/proc/net/wireless', 'r') for line in fp: try: ifnames.append(device.search(line).group()[:-1]) except AttributeError: pass if iface in ifnames: return True return False def getWlanModuleDir(self, iface=None): devicedir = self.sysfsPath(iface) + '/device' if not os.path.isdir(devicedir): return None moduledir = devicedir + '/driver/module' if os.path.isdir(moduledir): return moduledir # identification is not possible over default moduledir for x in os.listdir(devicedir): # rt3070 on kernel 2.6.18 registers wireless devices as usb_device (e.g. 
1-1.3:1.0) and identification is only possible over /sys/class/net/'ifacename'/device/1-xxx if x.startswith("1-"): moduledir = devicedir + '/' + x + '/driver/module' if os.path.isdir(moduledir): return moduledir # rt73, zd1211b, r871x_usb_drv on kernel 2.6.12 can be identified over /sys/class/net/'ifacename'/device/driver, so look also here moduledir = devicedir + '/driver' if os.path.isdir(moduledir): return moduledir return None def detectWlanModule(self, iface=None): if not self.isWirelessInterface(iface): return None devicedir = self.sysfsPath(iface) + '/device' if os.path.isdir(devicedir + '/ieee80211'): return 'nl80211' moduledir = self.getWlanModuleDir(iface) if moduledir: module = os.path.basename(os.path.realpath(moduledir)) if module in ('ath_pci', 'ath5k'): return 'madwifi' if module in ('rt73', 'rt73'): return 'ralink' if module == 'zd1211b': return 'zydas' if module == 'brcm-systemport': return 'brcm-wl' return 'wext' def calc_netmask(self, nmask): from struct import pack from socket import inet_ntoa mask = 1L << 31 xnet = (1L << 32) - 1 cidr_range = range(0, 32) cidr = long(nmask) if cidr not in cidr_range: print 'cidr invalid: %d' % cidr return None else: nm = ((1L << cidr) - 1) << (32 - cidr) netmask = str(inet_ntoa(pack('>L', nm))) return netmask def msgPlugins(self): if self.config_ready is not None: for p in plugins.getPlugins(PluginDescriptor.WHERE_NETWORKCONFIG_READ): p(reason=self.config_ready) def hotplug(self, event): interface = event['INTERFACE'] if self.isBlacklisted(interface): return action = event['ACTION'] if action == "add": print "[Network] Add new interface:", interface self.getAddrInet(interface, None) elif action == "remove": print "[Network] Removed interface:", interface try: del self.ifaces[interface] except KeyError: pass iNetwork = Network() def InitNetwork(): pass
class RC(object):
    """Process exit codes shared by the binaries in this project.

    Values below 10 are generic; values 10+ are specific to
    kano-feedback-cli. Consult ``source`` for full details.
    """

    # Generic outcomes.
    SUCCESS = 0            # everything went fine
    INCORRECT_ARGS = 1     # bad command-line usage
    NO_INTERNET = 2        # no network connectivity
    NO_KANO_WORLD_ACC = 3  # user has no Kano World account
    CANNOT_CREATE_FLAG = 4  # read-only fs?

    # kano-feedback-cli specific.
    ERROR_SEND_DATA = 10     # upload of feedback data failed
    ERROR_COPY_ARCHIVE = 11  # could not copy the report archive
    ERROR_CREATE_FLAG = 12   # could not write the completion flag
import sys, csv
import codecs
import os


def charStat(text):
    """Return a dict mapping each character of *text* to its occurrence count."""
    stat = {}
    for character in text:
        # retrieve current count (0 when unseen) and bump it
        stat[character] = stat.get(character, 0) + 1
    return stat


def main():
    """Read the file named on the command line and write per-character counts to s.txt."""
    numPara = len(sys.argv)
    if numPara < 2:
        print("invalid number of parameters: 1 filename required.")
        print("call for output on-screen: python %s " % sys.argv[0])
        print("call for file output: python %s > statistics.csv" % sys.argv[0])
        print("Exiting.")
        sys.exit(2)

    textfileName = sys.argv[1]

    # Sniff the first bytes for a UTF-8 BOM to choose the decoder.
    # NOTE: renamed from 'bytes' — the original shadowed the builtin.
    sample_size = min(32, os.path.getsize(textfileName))
    with open(textfileName, 'rb') as raw_file:
        raw = raw_file.read(sample_size)

    if raw.startswith(codecs.BOM_UTF8):
        encoding = 'utf-8-sig'
    else:
        # BUG FIX: 'chardet' was used without ever being imported, so every
        # BOM-less file crashed with NameError. Import it lazily and fall
        # back to UTF-8 when the (third-party) package is not installed.
        try:
            import chardet
            encoding = chardet.detect(raw)['encoding'] or 'utf-8'
        except ImportError:
            encoding = 'utf-8'

    with open(textfileName, "r", encoding=encoding) as fileHandle:
        data = fileHandle.read()

    statistics = charStat(data)
    lines = []
    for character, count in sorted(statistics.items()):
        # BUG FIX: count is an int; the original concatenated str + int
        # (TypeError). Also terminate each record with a newline so the
        # output is one CSV row per character instead of one long line.
        lines.append("%s,%d\n" % (character, count))

    with open("s.txt", "w", encoding=encoding) as out:
        out.writelines(lines)


if __name__ == "__main__":
    main()
import sys
import optparse
import socket


def main():
    """Connect to a local spreadsheet server and stream one million cell rows."""
    p = optparse.OptionParser()
    # BUG FIX: without type="int" a port given on the command line arrives
    # as a string and socket.connect() rejects it; only the default worked.
    p.add_option("--port", "-p", type="int", default=8888)
    p.add_option("--input", "-i", default="test.txt")
    options, arguments = p.parse_args()

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # NOTE(review): the input file is opened but never read; kept (it still
    # errors out early when the file is missing) but now properly closed.
    fp = open(options.input, "r")
    try:
        sock.connect(("localhost", options.port))
        # Header row, then one cell-update line per row index.
        # BUG FIX: sendall() requires bytes on Python 3; encode the payload.
        sock.sendall(b"^0^1^sheet1^1000000^3\n")
        for ii in range(1000000):
            sock.sendall(("^%d^0^sheet1^%d^0^^0\n" % (ii, ii)).encode("ascii"))
    finally:
        # Always release the socket and file, even if connect/send fails.
        fp.close()
        sock.close()


if __name__ == '__main__':
    main()
""" Entrance of ICM Desktop Agent """ import os import signal import sys import time import socket from twisted.internet import reactor from twisted.internet import task from umit.icm.agent.logger import g_logger from umit.icm.agent.BasePaths import * from umit.icm.agent.Global import * from umit.icm.agent.Version import VERSION from umit.icm.agent.rpc.message import * from umit.icm.agent.rpc.MessageFactory import MessageFactory from umit.icm.agent.I18N import _ import imp frozen = (hasattr(sys, "frozen") or # new py2exe hasattr(sys, "importers") # old py2exe or imp.is_frozen("__main__")) # tools/freeze del(imp) def main_is_frozen(): return frozen class Application(object): def __init__(self): pass def _init_components(self, aggregator): from umit.icm.agent.core.PeerInfo import PeerInfo self.peer_info = PeerInfo() from umit.icm.agent.core.PeerManager import PeerManager self.peer_manager = PeerManager() from umit.icm.agent.core.EventManager import EventManager self.event_manager = EventManager() from umit.icm.agent.core.TaskManager import TaskManager self.task_manager = TaskManager() from umit.icm.agent.core.ReportManager import ReportManager self.report_manager = ReportManager() from umit.icm.agent.core.ReportUploader import ReportUploader self.report_uploader = ReportUploader(self.report_manager) from umit.icm.agent.core.TaskScheduler import TaskScheduler self.task_scheduler = TaskScheduler(self.task_manager, self.report_manager) from umit.icm.agent.core.TaskAssignFetch import TaskAssignFetch self.task_assign = TaskAssignFetch(self.task_manager) from umit.icm.agent.core.TestSetsFetcher import TestSetsFetcher self.test_sets = TestSetsFetcher(self.task_manager, self.report_manager) from umit.icm.agent.secure.KeyManager import KeyManager self.key_manager = KeyManager() from umit.icm.agent.core.Statistics import Statistics self.statistics = Statistics() from umit.icm.agent.rpc.aggregator import AggregatorAPI self.aggregator = AggregatorAPI(aggregator) from 
umit.icm.agent.super.SuperBehaviourByManual import SuperBehaviourByManual self.speer_by_manual = SuperBehaviourByManual(self) self.quitting = False self.is_auto_login = False self.is_successful_login = False #fix the login failure, save DB problem def _load_from_db(self): """ """ self.peer_manager.load_from_db() # restore unsent reports self.report_manager.load_unsent_reports() # desktop agent stats saving self.statistics.load_from_db() def init_after_running(self, port=None, username=None, password=None, server_enabled=True, skip_server_check=False): """ """ ##################################################### # Create agent service(need to add the port confilct) if server_enabled: self.listen_port = port if port is not None else g_config.getint('network', 'listen_port') try: from umit.icm.agent.rpc.AgentService import AgentFactory self.factory = AgentFactory() g_logger.info("Listening on port %d.", self.listen_port) reactor.listenTCP(self.listen_port, self.factory) except Exception,info: #There can add more information self.quit_window_in_wrong(primary_text = _("The Listen Port has been used by other applications"), \ secondary_text = _("Please check the Port") ) ############################# # Create mobile agent service from umit.icm.agent.rpc.mobile import MobileAgentService self.ma_service = MobileAgentService() if self.use_gui: import gtk # Init GUI from umit.icm.agent.gui.GtkMain import GtkMain self.gtk_main = GtkMain() self.is_auto_login = g_config.getboolean('application', 'auto_login_swittch') ################################################################### #debug switch: It can show the gtkWindow without any authentication if g_config.getboolean('debug','debug_switch') and self.use_gui: self.login_simulate() ###################################### #check aggregator can be reached first if not skip_server_check: defer_ = self.aggregator.check_aggregator_website() defer_.addCallback(self.check_aggregator_success) 
defer_.addErrback(self.check_aggregator_failed) def check_aggregator_success(self,response): """ """ if response == True: self.login_window_show() else: self.speer_by_manual.peer_communication() def login_window_show(self): """ """ if self.is_auto_login and self.use_gui : ####################################################### #login with saved username or password, not credentials self.peer_info.load_from_db() ######################################## #Add more condition to check login legal self.login(self.peer_info.Username,self.peer_info.Password, True) else: if self.use_gui: self.gtk_main.show_login() else: self.login_without_gui() g_logger.info("Auto-login is disabled. You need to manually login.") def check_aggregator_failed(self,message): """ """ self.aggregator.available = False self.speer_by_manual.peer_communication() def login_without_gui(self): """ Users login without username or password """ username = False password = False if g_config.has_section("credentials"): username = g_config.get("credentials", "user") password = g_config.get("credentials", "password") if not username: username = raw_input("User Name:") if not password: password = raw_input("Password:") self.login(username, password, save_login=True) def check_software_auto(self): """ check software: according the time and other configurations """ from umit.icm.agent.core.Updater import auto_check_update ############################## #Software update automatically if g_config.getboolean('application','auto_update'): defer_ = auto_check_update(auto_upgrade=True) defer_.addErrback(self._handle_errback) else: ############################ #Detect update automatically if g_config.getboolean('update', 'update_detect'): #Here can set some update attributes defer_ = auto_check_update(auto_upgrade=False) defer_.addErrback(self._handle_errback) def register_agent(self, username, password): """ """ defer_ = self.aggregator.register(username, password) defer_.addCallback(self._handle_register) 
defer_.addErrback(self._handle_errback) return defer_ def _handle_register(self, result): if result: self.peer_info.ID = result['id'] self.peer_info.CipheredPublicKeyHash = result['hash'] self.peer_info.is_registered = True g_logger.debug("Register to Aggregator: %s" % result['id']) return result def _handle_errback(self, failure): """ """ failure.printTraceback() g_logger.error(">>> Failure from Application: %s" % failure) def login(self, username, password, save_login=False, login_only=False): """ """ if self.use_gui: self.gtk_main.set_to_logging_in() if self.is_auto_login and self.use_gui and self.check_username(username,password): #auto-login, select the credentials username and password from DB return self._login_after_register_callback(None, username, password, save_login, login_only) else: #manually login, we should check whether the username and password exists in database #If *NOT*, we should register the username and password to aggregator #IF *YES*, we will use credentials in DB g_config.set('application', 'auto_login_swittch', False) if self.check_username(username,password): return self._login_after_register_callback(None, username, password, save_login, login_only) else: self.peer_info.clear_db() deferred = self.register_agent(username, password) deferred.addCallback(self._login_after_register_callback, username, password, save_login, login_only) deferred.addErrback(self._handle_errback) return deferred def check_username(self,username="",password=""): """ check username and password in DB, the information is got from Login-Window """ rs = g_db_helper.select("select * from peer_info where username='%s' and \ password='%s'"%(username,password)) if not rs: g_logger.info("No matching peer info in db.\ icm-agent will register the username or password") return False else: g_logger.info("Match the username and password, \ we will change the default credentials") g_logger.debug(rs[0]) self.peer_info.ID = rs[0][0] self.peer_info.Username = rs[0][1] 
self.peer_info.Password = rs[0][2] self.peer_info.Email = rs[0][3] self.peer_info.CipheredPublicKeyHash = rs[0][4] self.peer_info.Type = rs[0][5] self.peer_info.is_registered = True return True def _login_after_register_callback(self, message, username, password, save_login, login_only): """ """ defer_ = self.aggregator.login(username, password) defer_.addCallback(self._handle_login, username, password, save_login, login_only) defer_.addErrback(self._handle_login_errback) return defer_ def _handle_login_errback(self,failure): """ """ print "------------------login failed!-------------------" failure.printTraceback() g_logger.error(">>> Failure from Application: %s" % failure) def _handle_login(self, result, username, password, save_login,login_only=False): """ """ #login successfully if result: self.peer_info.Username = username if username !="" and username != None else self.peer_info.Username self.peer_info.Password = password if password !="" and password != None else self.peer_info.Password #print self.peer_info.Username, self.peer_info.Password self.peer_info.is_logged_in = True #self.peer_info.clear_db() self.peer_info.save_to_db() g_logger.debug("Login Successfully :%s@%s" % (username,password)) if save_login: g_config.set('application', 'auto_login_swittch', True) else: g_config.set('application', 'auto_login_swittch', False) if self.use_gui: self.gtk_main.set_login_status(True) if login_only: return result #Load peers and reports from DB self._load_from_db() #check the new software(should appear after login successfully) self.check_software_auto() #mark login-successful self.is_successful_login = True #Task Looping manager self.task_loop_manager() return result def login_simulate(self): """ Only test GTK features """ #GTK show if self.use_gui == True: self.gtk_main.set_login_status(True) #Basic Information self.peer_info.load_from_db() self._load_from_db() #mark login-successful self.is_successful_login = True #TASK LOOP self.task_loop_manager() def 
task_loop_manager(self): """""" # Add looping calls if not hasattr(self, 'peer_maintain_lc'): self.peer_maintain_lc = task.LoopingCall(self.peer_manager.maintain) self.peer_maintain_lc.start(7200) if not hasattr(self, 'task_run_lc'): g_logger.info("Starting task scheduler looping ") self.task_run_lc = task.LoopingCall(self.task_scheduler.schedule) task_scheduler_text = g_config.get("Timer","task_scheduler_timer") if task_scheduler_text != "": indival = float(task_scheduler_text) else: indival = 30 self.task_run_lc.start(indival) if not hasattr(self, 'report_proc_lc'): g_logger.info("Starting report upload looping ") self.report_proc_lc = task.LoopingCall(self.report_uploader.process) report_uploade_text = g_config.get("Timer","send_report_timer") if report_uploade_text != "": indival = float(report_uploade_text) else: indival = 30 self.report_proc_lc.start(indival) if not hasattr(self,'task_assign_lc'): g_logger.info("Starting get assigned task from Aggregator") self.task_assgin_lc = task.LoopingCall(self.task_assign.fetch_task) task_assign_text = g_config.get("Timer","task_assign_timer") if task_assign_text != "": indival = float(task_assign_text) else: indival = 30 self.task_assgin_lc.start(indival) if not hasattr(self,'test_sets_fetch_lc'): g_logger.info("Starting get test sets from Aggregator") self.test_sets_fetch_lc = task.LoopingCall(self.test_sets.fetch_tests) test_fetch_text = g_config.get("Timer","test_fetch_timer") if test_fetch_text != "": indival = float(test_fetch_text) else: indival = 30 self.test_sets_fetch_lc.start(indival) def logout(self): defer_ = self.aggregator.logout() defer_.addCallback(self._handle_logout) return defer_ def _handle_logout(self, result): if self.use_gui: self.gtk_main.set_login_status(False) g_config.set('application', 'auto_login_swittch', False) return result def start(self, run_reactor=True, managed_mode=False, aggregator=None): """ The Main function """ g_logger.info("Starting ICM agent. 
Version: %s", VERSION) self._init_components(aggregator) reactor.addSystemEventTrigger('before', 'shutdown', self.on_quit) if not managed_mode: # This is necessary so the bot can take over and control the agent reactor.callWhenRunning(self.init_after_running) if run_reactor: # This is necessary so the bot can take over and control the agent reactor.run() def quit_window_in_wrong(self,primary_text = "",secondary_text = ""): """ """ #There can add more information from higwidgets.higwindows import HIGAlertDialog #print 'The exception is %s'%(info) alter = HIGAlertDialog(primary_text = primary_text,\ secondary_text = secondary_text) alter.show_all() result = alter.run() #cannot write return, if so the program cannot quit, and run in background self.terminate() def terminate(self): #print 'quit' reactor.callWhenRunning(reactor.stop) def on_quit(self): if hasattr(self, 'peer_info') and self.is_successful_login: g_logger.info("[quit]:save peer_info into DB") self.peer_info.save_to_db() if hasattr(self, 'peer_manager') and self.is_successful_login: g_logger.info("[quit]:save peer_manager into DB") self.peer_manager.save_to_db() if hasattr(self, 'statistics') and self.is_successful_login: g_logger.info("[quit]:save statistics into DB") self.statistics.save_to_db() if hasattr(self,'test_sets') and self.is_successful_login \ and os.path.exists(CONFIG_PATH): #store test_version id self.test_sets.set_test_version(self.test_sets.current_test_version) m = os.path.join(ROOT_DIR, 'umit', 'icm', 'agent', 'agent_restart_mark') if os.path.exists(m): os.remove(m) self.quitting = True g_logger.info("ICM Agent quit.") theApp = Application() if __name__ == "__main__": #theApp.start() pass
import ArtusConfigBase as base
import mc


def config():
    """Build the MC Artus configuration.

    Keeps only the pipelines whose name starts with 'all' and contains
    'CHS', and restricts every surviving pipeline to the ntuple consumer.
    """
    conf = mc.config()
    # Collect everything that should go: anything not matching
    # "starts with 'all' and mentions 'CHS'".
    doomed = [name for name in conf['Pipelines']
              if not (name.startswith('all') and 'CHS' in name)]
    for name in doomed:
        del conf['Pipelines'][name]
    for name in conf['Pipelines']:
        conf['Pipelines'][name]['Consumer'] = [
            #"muonntuple",
            "jetntuple",
        ]
    return conf
import os

import pytest
import testinfra.utils.ansible_runner

# Hosts under test come from the Molecule-generated Ansible inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')

# Packages expected on Debian/Ubuntu.
DEB_PACKAGES = ['percona-server-mongodb', 'percona-server-mongodb-server',
                'percona-server-mongodb-mongos', 'percona-server-mongodb-shell',
                'percona-server-mongodb-tools', 'percona-server-mongodb-dbg']

# Packages expected on RHEL/CentOS releases below 8.
RPM_PACKAGES = ['percona-server-mongodb', 'percona-server-mongodb-server',
                'percona-server-mongodb-mongos', 'percona-server-mongodb-shell',
                'percona-server-mongodb-tools', 'percona-server-mongodb-debuginfo']

# Packages expected on CentOS/RHEL 8+ (debuginfo is split per-binary there).
RPM_NEW_CENTOS_PACKAGES = ['percona-server-mongodb',
                           'percona-server-mongodb-mongos-debuginfo',
                           'percona-server-mongodb-server-debuginfo',
                           'percona-server-mongodb-shell-debuginfo',
                           'percona-server-mongodb-tools-debuginfo',
                           'percona-server-mongodb-debugsource']

# Binaries whose --version output must carry the expected release number.
BINARIES = ['mongo', 'mongod', 'mongos', 'bsondump', 'mongoexport',
            'mongofiles', 'mongoimport', 'mongorestore', 'mongotop',
            'mongostat']

# Expected PSMDB major.minor version string.
PSMDB42_VER = "4.2"


def test_package_script(host):
    # Run the shared package sanity-check script for the psmdb42 product.
    with host.sudo():
        result = host.run("/package-testing/package_check.sh psmdb42")
        print(result.stdout)
        print(result.stderr)
        assert result.rc == 0, result.stderr


def test_version_script(host):
    # Run the shared version-check script for the psmdb42 product.
    with host.sudo():
        result = host.run("/package-testing/version_check.sh psmdb42")
        print(result.stdout)
        print(result.stderr)
        assert result.rc == 0, result.stderr


@pytest.mark.parametrize("package", DEB_PACKAGES)
def test_deb_packages(host, package):
    # Each Debian package must be installed at the expected version.
    os = host.system_info.distribution
    if os.lower() in ["redhat", "centos", 'rhel']:
        pytest.skip("This test only for Debian based platforms")
    pkg = host.package(package)
    assert pkg.is_installed
    assert PSMDB42_VER in pkg.version


@pytest.mark.parametrize("package", RPM_PACKAGES)
def test_rpm_packages(host, package):
    # RPM package check for pre-8 RHEL/CentOS only.
    os = host.system_info.distribution
    if os in ["debian", "ubuntu"]:
        pytest.skip("This test only for RHEL based platforms")
    if float(host.system_info.release) >= 8.0:
        pytest.skip("Only for centos7 tests")
    pkg = host.package(package)
    assert pkg.is_installed
    assert PSMDB42_VER in pkg.version


@pytest.mark.parametrize("package", RPM_NEW_CENTOS_PACKAGES)
def test_rpm8_packages(host, package):
    # RPM package check for CentOS/RHEL 8+ (new debuginfo layout).
    os = host.system_info.distribution
    if os in ["debian", "ubuntu"]:
        pytest.skip("This test only for RHEL based platforms")
    if float(host.system_info.release) < 8.0:
        pytest.skip("Only for centos7 tests")
    pkg = host.package(package)
    assert pkg.is_installed
    assert PSMDB42_VER in pkg.version


@pytest.mark.parametrize("binary", BINARIES)
def test_binary_version(host, binary):
    # The first line of each binary's --version output must mention 4.2.
    cmd = '{} --version|head -n1|grep -c "{}"'.format(binary, PSMDB42_VER)
    result = host.run(cmd)
    assert result.rc == 0, result.stdout


def test_bats(host):
    # Exercise the init-script bats suite.
    cmd = "/usr/local/bin/bats /package-testing/bats/mongo-init-scripts.bats"
    with host.sudo():
        result = host.run(cmd)
        print(result.stdout)
        print(result.stderr)
        assert result.rc == 0, result.stdout


def test_service(host):
    # mongod must be running after installation.
    with host.sudo():
        assert host.service("mongod").is_running


def test_data_is_there(host):
    # Basic data smoke test against the running server.
    cmd = " /package-testing/scripts/mongo_check.sh"
    with host.sudo():
        result = host.run(cmd)
        print(result.stdout)
        print(result.stderr)
        assert result.rc == 0, result.stdout


def test_functional(host):
    # Full functional suite for PSMDB 4.2.
    with host.sudo():
        result = host.run("/package-testing/scripts/psmdb_test.sh 4.2")
        assert result.rc == 0, result.stderr
"""Unit tests for utility functions.""" from __future__ import absolute_import from invenio.testsuite import InvenioTestCase, make_test_suite, run_test_suite class HoldingPenUtilsTest(InvenioTestCase): """Test basic utility functions for Holding Pen.""" def test_get_previous_next_objects_empty(self): """Test the getting of prev, next object ids from the list.""" from invenio.modules.workflows.utils import get_previous_next_objects objects = [] self.assertEqual(get_previous_next_objects(objects, 1), (None, None)) def test_get_previous_next_objects_not_there(self): """Test the getting of prev, next object ids from the list.""" from invenio.modules.workflows.utils import get_previous_next_objects objects = [3, 4] self.assertEqual(get_previous_next_objects(objects, 42), (None, None)) def test_get_previous_next_objects_previous(self): """Test the getting of prev, next object ids from the list.""" from invenio.modules.workflows.utils import get_previous_next_objects objects = [3, 4] self.assertEqual(get_previous_next_objects(objects, 4), (3, None)) def test_get_previous_next_objects_next(self): """Test the getting of prev, next object ids from the list.""" from invenio.modules.workflows.utils import get_previous_next_objects objects = [3, 4] self.assertEqual(get_previous_next_objects(objects, 3), (None, 4)) def test_get_previous_next_objects_previous_next(self): """Test the getting of prev, next object ids from the list.""" from invenio.modules.workflows.utils import get_previous_next_objects objects = [3, 4, 5] self.assertEqual(get_previous_next_objects(objects, 4), (3, 5)) TEST_SUITE = make_test_suite(HoldingPenUtilsTest) if __name__ == "__main__": run_test_suite(TEST_SUITE)
# Version-control browser view (pygtk-era Meld).  Shows a repository tree,
# lets the user filter entries by VC state, and launches diffs/commits/etc.
# through pluggable VC back-ends from the `vc` package.
from __future__ import print_function

import atexit
import tempfile
import shutil
import os
import sys
from gettext import gettext as _

import gtk
import pango

from . import melddoc
from . import misc
from . import paths
from . import recent
from . import tree
from . import vc
from .ui import emblemcellrenderer
from .ui import gnomeglade


def _commonprefix(files):
    # Longest common directory of the given paths; a single file maps to its
    # containing directory (or "." for a bare filename).
    if len(files) != 1:
        workdir = misc.commonprefix(files)
    else:
        workdir = os.path.dirname(files[0]) or "."
    return workdir


def cleanup_temp():
    # atexit hook: best-effort removal of the temporary comparison
    # files/directories registered in _temp_files/_temp_dirs.  The asserts
    # guard against deleting anything outside the system temp directory.
    temp_location = tempfile.gettempdir()
    # The strings below will probably end up as debug log, and are deliberately
    # not marked for translation.
    for f in _temp_files:
        try:
            assert os.path.exists(f) and os.path.isabs(f) and \
                   os.path.dirname(f) == temp_location
            os.remove(f)
        except:
            # NOTE(review): deliberately bare except — cleanup at interpreter
            # exit must never raise; failures are only reported to stderr.
            except_str = "{0[0]}: \"{0[1]}\"".format(sys.exc_info())
            print("File \"{0}\" not removed due to".format(f), except_str,
                  file=sys.stderr)
    for f in _temp_dirs:
        try:
            assert os.path.exists(f) and os.path.isabs(f) and \
                   os.path.dirname(f) == temp_location
            shutil.rmtree(f, ignore_errors=1)
        except:
            except_str = "{0[0]}: \"{0[1]}\"".format(sys.exc_info())
            print("Directory \"{0}\" not removed due to".format(f), except_str,
                  file=sys.stderr)

_temp_dirs, _temp_files = [], []
atexit.register(cleanup_temp)


class CommitDialog(gnomeglade.Component):
    """Commit-message dialog for the files currently selected in a VcView."""

    def __init__(self, parent):
        gnomeglade.Component.__init__(self, paths.ui_dir("vcview.ui"),
                                      "commitdialog")
        self.parent = parent
        self.widget.set_transient_for( parent.widget.get_toplevel() )
        selected = parent._get_selected_files()
        topdir = _commonprefix(selected)
        # Show paths relative to the common top directory.
        selected = [ s[len(topdir):] for s in selected ]
        self.changedfiles.set_text( ("(in %s) "%topdir) + " ".join(selected) )
        self.widget.show_all()

    def run(self):
        # Run the dialog; on OK, issue the commit with the entered message and
        # remember the message in the previous-messages combo.
        self.previousentry.child.set_editable(False)
        self.previousentry.set_active(0)
        self.textview.grab_focus()
        buf = self.textview.get_buffer()
        buf.place_cursor( buf.get_start_iter() )
        buf.move_mark( buf.get_selection_bound(), buf.get_end_iter() )
        response = self.widget.run()
        msg = buf.get_text(buf.get_start_iter(), buf.get_end_iter(), 0)
        if response == gtk.RESPONSE_OK:
            self.parent._command_on_selected(
                self.parent.vc.commit_command(msg) )
        if len(msg.strip()):
            self.previousentry.prepend_text(msg)
        self.widget.destroy()

    def on_previousentry_activate(self, gentry):
        # Copy a previously used commit message into the text view.
        buf = self.textview.get_buffer()
        buf.set_text( gentry.child.get_text() )


# Extra string columns appended after the generic tree columns.
COL_LOCATION, COL_STATUS, COL_REVISION, COL_TAG, COL_OPTIONS, COL_END = \
        list(range(tree.COL_END, tree.COL_END+6))

class VcTreeStore(tree.DiffTreeStore):
    """Single-pane DiffTreeStore with five extra string columns (see above)."""
    def __init__(self):
        tree.DiffTreeStore.__init__(self, 1, [str] * 5)

# Predicates used by the state-filter toggle buttons below.
entry_modified = lambda x: (x.state >= tree.STATE_NEW) or (x.isdir and (x.state > tree.STATE_NONE))
entry_normal = lambda x: (x.state == tree.STATE_NORMAL)
entry_nonvc = lambda x: (x.state == tree.STATE_NONE) or (x.isdir and (x.state > tree.STATE_IGNORED))
entry_ignored = lambda x: (x.state == tree.STATE_IGNORED) or x.isdir

class VcView(melddoc.MeldDoc, gnomeglade.Component):
    """Version-control document: tree of working-copy files plus VC actions."""

    # Map action names to VC commands and required arguments list
    action_vc_cmds_map = {
        "VcCompare": ("diff_command", ()),
        "VcCommit": ("commit_command", ("",)),
        "VcUpdate": ("update_command", ()),
        "VcAdd": ("add_command", ()),
        "VcResolved": ("resolved_command", ()),
        "VcRemove": ("remove_command", ()),
        "VcRevert": ("revert_command", ()),
    }

    # Map preference keys to (action name, filter predicate or None).
    state_actions = {
        "flatten": ("VcFlatten", None),
        "modified": ("VcShowModified", entry_modified),
        "normal": ("VcShowNormal", entry_normal),
        "unknown": ("VcShowNonVC", entry_nonvc),
        "ignored": ("VcShowIgnored", entry_ignored),
    }

    def __init__(self, prefs):
        melddoc.MeldDoc.__init__(self, prefs)
        gnomeglade.Component.__init__(self, paths.ui_dir("vcview.ui"),
                                      "vcview")

        actions = (
            ("VcCompare", gtk.STOCK_DIALOG_INFO, _("_Compare"), None,
                _("Compare selected"), self.on_button_diff_clicked),
            ("VcCommit", "vc-commit-24", _("Co_mmit"), None, _("Commit"),
                self.on_button_commit_clicked),
            ("VcUpdate", "vc-update-24", _("_Update"), None, _("Update"),
                self.on_button_update_clicked),
            ("VcAdd", "vc-add-24", _("_Add"), None, _("Add to VC"),
                self.on_button_add_clicked),
            ("VcRemove", "vc-remove-24", _("_Remove"), None,
                _("Remove from VC"), self.on_button_remove_clicked),
            ("VcResolved", "vc-resolve-24", _("_Resolved"), None,
                _("Mark as resolved for VC"), self.on_button_resolved_clicked),
            ("VcRevert", gtk.STOCK_REVERT_TO_SAVED, None, None,
                _("Revert to original"), self.on_button_revert_clicked),
            ("VcDeleteLocally", gtk.STOCK_DELETE, None, None,
                _("Delete locally"), self.on_button_delete_clicked),
        )

        toggleactions = (
            ("VcFlatten", gtk.STOCK_GOTO_BOTTOM, _("_Flatten"), None,
                _("Flatten directories"), self.on_button_flatten_toggled,
                False),
            ("VcShowModified", "filter-modified-24", _("_Modified"), None,
                _("Show modified"), self.on_filter_state_toggled, False),
            ("VcShowNormal", "filter-normal-24", _("_Normal"), None,
                _("Show normal"), self.on_filter_state_toggled, False),
            ("VcShowNonVC", "filter-nonvc-24", _("Non _VC"), None,
                _("Show unversioned files"), self.on_filter_state_toggled,
                False),
            ("VcShowIgnored", "filter-ignored-24", _("Ignored"), None,
                _("Show ignored files"), self.on_filter_state_toggled, False),
        )
        self.ui_file = paths.ui_dir("vcview-ui.xml")
        self.actiongroup = gtk.ActionGroup('VcviewActions')
        self.actiongroup.set_translation_domain("meld")
        self.actiongroup.add_actions(actions)
        self.actiongroup.add_toggle_actions(toggleactions)
        for action in ("VcCompare", "VcFlatten", "VcShowModified",
                       "VcShowNormal", "VcShowNonVC", "VcShowIgnored"):
            self.actiongroup.get_action(action).props.is_important = True
        for action in ("VcCommit", "VcUpdate", "VcAdd", "VcRemove",
                       "VcShowModified", "VcShowNormal", "VcShowNonVC",
                       "VcShowIgnored", "VcResolved"):
            button = self.actiongroup.get_action(action)
            button.props.icon_name = button.props.stock_id
        self.model = VcTreeStore()
        self.widget.connect("style-set", self.model.on_style_set)
        self.treeview.set_model(self.model)
        selection = self.treeview.get_selection()
        selection.set_mode(gtk.SELECTION_MULTIPLE)
        selection.connect("changed", self.on_treeview_selection_changed)
        self.treeview.set_headers_visible(1)
        self.treeview.set_search_equal_func(self.treeview_search_cb)
        self.current_path, self.prev_path, self.next_path = None, None, None
        # Name column: emblem icon + styled file name.
        column = gtk.TreeViewColumn( _("Name") )
        renicon = emblemcellrenderer.EmblemCellRenderer()
        rentext = gtk.CellRendererText()
        column.pack_start(renicon, expand=0)
        column.pack_start(rentext, expand=1)
        col_index = self.model.column_index
        column.set_attributes(renicon,
                              icon_name=col_index(tree.COL_ICON, 0),
                              icon_tint=col_index(tree.COL_TINT, 0))
        column.set_attributes(rentext,
                              text=col_index(tree.COL_TEXT, 0),
                              foreground_gdk=col_index(tree.COL_FG, 0),
                              style=col_index(tree.COL_STYLE, 0),
                              weight=col_index(tree.COL_WEIGHT, 0),
                              strikethrough=col_index(tree.COL_STRIKE, 0))
        self.treeview.append_column(column)

        def addCol(name, num):
            # Helper: append a plain markup text column bound to model col num.
            column = gtk.TreeViewColumn(name)
            rentext = gtk.CellRendererText()
            column.pack_start(rentext, expand=0)
            column.set_attributes(rentext,
                                  markup=self.model.column_index(num, 0))
            self.treeview.append_column(column)
            return column

        self.treeview_column_location = addCol( _("Location"), COL_LOCATION)
        addCol(_("Status"), COL_STATUS)
        addCol(_("Rev"), COL_REVISION)
        addCol(_("Tag"), COL_TAG)
        addCol(_("Options"), COL_OPTIONS)

        # Restore the state filters saved in preferences.
        self.state_filters = []
        for s in self.state_actions:
            if s in self.prefs.vc_status_filters:
                action_name = self.state_actions[s][0]
                self.state_filters.append(s)
                self.actiongroup.get_action(action_name).set_active(True)

        class ConsoleStream(object):
            # File-like object that appends writes to the console text view
            # and keeps it scrolled to the end.
            def __init__(self, textview):
                self.textview = textview
                b = textview.get_buffer()
                self.mark = b.create_mark("END", b.get_end_iter(), 0)
            def write(self, s):
                if s:
                    b = self.textview.get_buffer()
                    b.insert(b.get_end_iter(), s)
                    self.textview.scroll_mark_onscreen( self.mark )
        self.consolestream = ConsoleStream(self.consoleview)
        self.location = None
        self.treeview_column_location.set_visible(
            self.actiongroup.get_action("VcFlatten").get_active())
        if not self.prefs.vc_console_visible:
            self.on_console_view_toggle(self.console_hide_box)
        self.vc = None
        self.valid_vc_actions = tuple()
        # VC ComboBox
        self.combobox_vcs = gtk.ComboBox()
        # `lock` suppresses on_vc_change while the model is being (re)built.
        self.combobox_vcs.lock = True
        self.combobox_vcs.set_model(gtk.ListStore(str, object, bool))
        cell = gtk.CellRendererText()
        self.combobox_vcs.pack_start(cell, False)
        self.combobox_vcs.add_attribute(cell, 'text', 0)
        self.combobox_vcs.add_attribute(cell, 'sensitive', 2)
        self.combobox_vcs.lock = False
        self.hbox2.pack_end(self.combobox_vcs, expand=False)
        self.combobox_vcs.show()
        self.combobox_vcs.connect("changed", self.on_vc_change)

    def on_container_switch_in_event(self, ui):
        melddoc.MeldDoc.on_container_switch_in_event(self, ui)
        # Refresh prev/next-diff state when this tab becomes active.
        self.scheduler.add_task(self.on_treeview_cursor_changed)

    def update_actions_sensitivity(self):
        """Disable actions that use not implemented VC plugin methods
        """
        valid_vc_actions = ["VcDeleteLocally"]
        for action_name, (meth_name, args) in self.action_vc_cmds_map.items():
            action = self.actiongroup.get_action(action_name)
            try:
                # Probe the plugin: unimplemented commands raise.
                getattr(self.vc, meth_name)(*args)
                action.props.sensitive = True
                valid_vc_actions.append(action_name)
            except NotImplementedError:
                action.props.sensitive = False
        self.valid_vc_actions = tuple(valid_vc_actions)

    def choose_vc(self, vcs):
        """Display VC plugin(s) that can handle the location"""
        self.combobox_vcs.lock = True
        self.combobox_vcs.get_model().clear()
        tooltip_texts = [_("Choose one Version Control"),
                         _("Only one Version Control in this directory")]
        default_active = -1
        valid_vcs = []
        # Try to keep the same VC plugin active on refresh()
        for idx, avc in enumerate(vcs):
            # See if the necessary version control command exists.  If so,
            # make sure what we're diffing is a valid respository.  If either
            # check fails don't let the user select the that version control
            # tool and display a basic error message in the drop-down menu.
            err_str = ""
            if vc._vc.call(["which", avc.CMD]):
                # TRANSLATORS: this is an error message when a version control
                # application isn't installed or can't be found
                # NOTE(review): the string is formatted *before* gettext
                # lookup, so translations keyed on "%s Not Installed" never
                # match — should be _("%s Not Installed") % avc.CMD.
                err_str = _("%s Not Installed" % avc.CMD)
            elif not avc.valid_repo():
                # TRANSLATORS: this is an error message when a version
                # controlled repository is invalid or corrupted
                err_str = _("Invalid Repository")
            else:
                valid_vcs.append(idx)
                if (self.vc is not None and
                        self.vc.__class__ == avc.__class__):
                    default_active = idx
            if err_str:
                self.combobox_vcs.get_model().append( \
                    [_("%s (%s)") % (avc.NAME, err_str), avc, False])
            else:
                self.combobox_vcs.get_model().append([avc.NAME, avc, True])

        if valid_vcs and default_active == -1:
            default_active = min(valid_vcs)
        # bool indexes tooltip_texts: 0 = several VCs, 1 = exactly one.
        self.combobox_vcs.set_tooltip_text(tooltip_texts[len(vcs) == 1])
        self.combobox_vcs.set_sensitive(len(vcs) > 1)
        self.combobox_vcs.lock = False
        self.combobox_vcs.set_active(default_active)

    def on_vc_change(self, cb):
        if not cb.lock:
            self.vc = cb.get_model()[cb.get_active_iter()][1]
            self._set_location(self.vc.location)
            self.update_actions_sensitivity()

    def set_location(self, location):
        # Discover applicable VC plugins; the combobox change then triggers
        # _set_location via on_vc_change.
        self.choose_vc(vc.get_vcs(os.path.abspath(location or ".")))

    def _set_location(self, location):
        self.location = location
        self.current_path = None
        self.model.clear()
        self.fileentry.set_filename(location)
        self.fileentry.prepend_history(location)
        it = self.model.add_entries( None, [location] )
        self.treeview.grab_focus()
        self.treeview.get_selection().select_iter(it)
        self.model.set_path_state(it, 0, tree.STATE_NORMAL, isdir=1)
        self.recompute_label()
        self.scheduler.remove_all_tasks()

        # If the user is just diffing a file (ie not a directory), there's no
        # need to scan the rest of the repository
        if os.path.isdir(self.vc.location):
            root = self.model.get_iter_root()
            self.scheduler.add_task(self._search_recursively_iter(root))
            self.scheduler.add_task(self.on_treeview_cursor_changed)

    def get_comparison(self):
        return recent.TYPE_VC, [self.location]

    def recompute_label(self):
        self.label_text = os.path.basename(self.location)
        # TRANSLATORS: This is the location of the directory the user is diffing
        self.tooltip_text = _("%s: %s") % (_("Location"), self.location)
        self.label_changed()

    def _search_recursively_iter(self, iterstart):
        # Generator run by the scheduler: incrementally populates the tree
        # below `iterstart`, yielding progress strings between directories.
        yield _("[%s] Scanning %s") % (self.label_text,"")
        rootpath = self.model.get_path( iterstart )
        rootname = self.model.value_path( self.model.get_iter(rootpath), 0 )
        prefixlen = 1 + len( self.model.value_path(
            self.model.get_iter_root(), 0 ) )
        todo = [ (rootpath, rootname) ]

        active_action = lambda a: self.actiongroup.get_action(a).get_active()
        filters = [a[1] for a in self.state_actions.values() if \
                   active_action(a[0]) and a[1]]

        def showable(entry):
            # True if any active state filter accepts the entry.
            for f in filters:
                if f(entry):
                    return 1
        recursive = self.actiongroup.get_action("VcFlatten").get_active()
        self.vc.cache_inventory(rootname)
        while len(todo):
            todo.sort() # depth first
            path, name = todo.pop(0)
            if path:
                it = self.model.get_iter( path )
                root = self.model.value_path( it, 0 )
            else:
                it = self.model.get_iter_root()
                root = name
            yield _("[%s] Scanning %s") % (self.label_text, root[prefixlen:])

            entries = [f for f in self.vc.listdir(root) if showable(f)]
            differences = 0
            for e in entries:
                differences |= (e.state != tree.STATE_NORMAL)
                if e.isdir and recursive:
                    # Flattened view: queue subdirs under the root, not nested.
                    todo.append( (None, e.path) )
                    continue
                child = self.model.add_entries(it, [e.path])
                self._update_item_state( child, e, root[prefixlen:] )
                if e.isdir:
                    todo.append( (self.model.get_path(child), None) )
            if not recursive: # expand parents
                if len(entries) == 0:
                    self.model.add_empty(it, _("(Empty)"))
                if differences or len(path)==1:
                    self.treeview.expand_to_path(path)
            else: # just the root
                self.treeview.expand_row( (0,), 0)
        self.vc.uncache_inventory()

    def on_fileentry_activate(self, fileentry):
        path = fileentry.get_full_path()
        self.set_location(path)

    def on_delete_event(self, appquit=0):
        self.scheduler.remove_all_tasks()
        return gtk.RESPONSE_OK

    def on_row_activated(self, treeview, path, tvc):
        # Double-click: toggle directories, diff files.
        it = self.model.get_iter(path)
        if self.model.iter_has_child(it):
            if self.treeview.row_expanded(path):
                self.treeview.collapse_row(path)
            else:
                self.treeview.expand_row(path,0)
        else:
            path = self.model.value_path(it, 0)
            self.run_diff( [path] )

    def run_diff_iter(self, path_list):
        # Generator: fetch a VC diff, extract originals from the patch and
        # open comparisons.  Falls back to the external diff mechanism once
        # if applying the patch fails and the plugin supports it.
        silent_error = hasattr(self.vc, 'switch_to_external_diff')
        retry_diff = True
        while retry_diff:
            retry_diff = False

            yield _("[%s] Fetching differences") % self.label_text
            diffiter = self._command_iter(self.vc.diff_command(), path_list, 0)
            diff = None
            # The command generator yields 1 until it finally yields the
            # (workdir, output) tuple.
            while type(diff) != type(()):
                diff = next(diffiter)
                yield 1
            prefix, patch = diff[0], diff[1]

            try:
                patch = self.vc.clean_patch(patch)
            except AttributeError:
                # Plugin has no clean_patch hook; use the raw patch.
                pass

            yield _("[%s] Applying patch") % self.label_text
            if patch:
                applied = self.show_patch(prefix, patch, silent=silent_error)
                if not applied and silent_error:
                    silent_error = False
                    self.vc.switch_to_external_diff()
                    retry_diff = True
            else:
                for path in path_list:
                    self.emit("create-diff", [path])

    def run_diff(self, path_list):
        try:
            # Preferred: ask the plugin for a read-only copy of the repository
            # version and diff against it directly.
            for path in path_list:
                comp_path = self.vc.get_path_for_repo_file(path)
                os.chmod(comp_path, 0o444)
                _temp_files.append(comp_path)
                self.emit("create-diff", [comp_path, path])
        except NotImplementedError:
            # Fallback: reconstruct originals from the textual diff.
            for path in path_list:
                self.scheduler.add_task(self.run_diff_iter([path]), atfront=1)

    def on_treeview_popup_menu(self, treeview):
        time = gtk.get_current_event_time()
        self.popup_menu.popup(None, None, None, 0, time)
        return True

    def on_button_press_event(self, treeview, event):
        # Right-click: select the row under the pointer (unless it is already
        # part of the selection) and show the context menu.
        if event.button == 3:
            path = treeview.get_path_at_pos(int(event.x), int(event.y))
            if path is None:
                return False
            selection = treeview.get_selection()
            model, rows = selection.get_selected_rows()

            if path[0] not in rows:
                selection.unselect_all()
                selection.select_path(path[0])
                treeview.set_cursor(path[0])

            self.popup_menu.popup(None, None, None, event.button, event.time)
            return True
        return False

    def on_button_flatten_toggled(self, button):
        action = self.actiongroup.get_action("VcFlatten")
        self.treeview_column_location.set_visible(action.get_active())
        self.on_filter_state_toggled(button)

    def on_filter_state_toggled(self, button):
        # Recompute the active filter set; refresh only if it changed.
        active_action = lambda a: self.actiongroup.get_action(a).get_active()
        active_filters = [a for a in self.state_actions if \
                          active_action(self.state_actions[a][0])]

        if set(active_filters) == set(self.state_filters):
            return

        self.state_filters = active_filters
        self.prefs.vc_status_filters = active_filters
        self.refresh()

    def on_treeview_selection_changed(self, selection):
        model, rows = selection.get_selected_rows()
        have_selection = bool(rows)
        for action in self.valid_vc_actions:
            self.actiongroup.get_action(action).set_sensitive(have_selection)

    def _get_selected_files(self):
        model, rows = self.treeview.get_selection().get_selected_rows()
        sel = [self.model.value_path(self.model.get_iter(r), 0) for r in rows]
        # Remove empty entries and trailing slashes
        return [x[-1] != "/" and x or x[:-1] for x in sel if x is not None]

    def _command_iter(self, command, files, refresh):
        """Run 'command' on 'files'. Return a tuple of the directory the
           command was executed in and the output of the command.
        """
        msg = misc.shelljoin(command)
        yield "[%s] %s" % (self.label_text, msg.replace("\n", "\t"))
        def relpath(pbase, p):
            # Strip `pbase` (plus separator) from `p`; "." for pbase itself.
            kill = 0
            if len(pbase) and p.startswith(pbase):
                kill = len(pbase) + 1
            return p[kill:] or "."
        if len(files) == 1 and os.path.isdir(files[0]):
            workdir = self.vc.get_working_directory(files[0])
        else:
            workdir = self.vc.get_working_directory( _commonprefix(files) )
        files = [ relpath(workdir, f) for f in files ]
        r = None
        self.consolestream.write(
            misc.shelljoin(command+files) + " (in %s)\n" % workdir)
        readiter = misc.read_pipe_iter(command + files, self.consolestream,
                                       workdir=workdir)
        try:
            # Pump the pipe, echoing output to the console, until it yields
            # the final result.
            while r is None:
                r = next(readiter)
                self.consolestream.write(r)
                yield 1
        except IOError as e:
            misc.run_dialog("Error running command.\n'%s'\n\nThe error was:\n%s"
                            % ( misc.shelljoin(command), e),
                            parent=self, messagetype=gtk.MESSAGE_ERROR)
        if refresh:
            self.refresh_partial(workdir)
        yield workdir, r

    def _command(self, command, files, refresh=1):
        """Run 'command' on 'files'.
        """
        self.scheduler.add_task(self._command_iter(command, files, refresh))

    def _command_on_selected(self, command, refresh=1):
        files = self._get_selected_files()
        if len(files):
            self._command(command, files, refresh)

    def on_button_update_clicked(self, obj):
        self._command_on_selected(self.vc.update_command())

    def on_button_commit_clicked(self, obj):
        CommitDialog(self).run()

    def on_button_add_clicked(self, obj):
        self._command_on_selected(self.vc.add_command())

    def on_button_remove_clicked(self, obj):
        self._command_on_selected(self.vc.remove_command())

    def on_button_resolved_clicked(self, obj):
        self._command_on_selected(self.vc.resolved_command())

    def on_button_revert_clicked(self, obj):
        self._command_on_selected(self.vc.revert_command())

    def on_button_delete_clicked(self, obj):
        # Delete the selected files/dirs from disk (with confirmation for
        # directories), then rescan the affected subtree.
        files = self._get_selected_files()
        for name in files:
            try:
                if os.path.isfile(name):
                    os.remove(name)
                elif os.path.isdir(name):
                    if misc.run_dialog(
                            _("'%s' is a directory.\nRemove recursively?")
                            % os.path.basename(name),
                            parent = self,
                            buttonstype=gtk.BUTTONS_OK_CANCEL
                            ) == gtk.RESPONSE_OK:
                        shutil.rmtree(name)
            except OSError as e:
                misc.run_dialog(_("Error removing %s\n\n%s.") % (name,e),
                                parent = self)
        workdir = _commonprefix(files)
        self.refresh_partial(workdir)

    def on_button_diff_clicked(self, obj):
        files = self._get_selected_files()
        if len(files):
            self.run_diff(files)

    def open_external(self):
        self._open_files(self._get_selected_files())

    def show_patch(self, prefix, patch, silent=False):
        # Recreate the original files from `patch` in a temp dir (by reverse-
        # applying it) and open comparisons against the working copies.
        # Returns True on success; on failure shows an error unless `silent`.
        if vc._vc.call(["which", "patch"]):
            primary = _("Patch tool not found")
            secondary = _("Meld needs the <i>patch</i> tool to be installed "
                          "to perform comparisons in %s repositories. Please "
                          "install <i>patch</i> and try again.") % self.vc.NAME
            msgarea = self.msgarea_mgr.new_from_text_and_icon(
                        gtk.STOCK_DIALOG_ERROR, primary, secondary)
            msgarea.add_button(_("Hi_de"), gtk.RESPONSE_CLOSE)
            msgarea.connect("response",
                            lambda *args: self.msgarea_mgr.clear())
            msgarea.show_all()
            return False

        tmpdir = tempfile.mkdtemp("-meld")
        _temp_dirs.append(tmpdir)

        diffs = []
        for fname in self.vc.get_patch_files(patch):
            destfile = os.path.join(tmpdir,fname)
            destdir = os.path.dirname( destfile )

            if not os.path.exists(destdir):
                os.makedirs(destdir)
            pathtofile = os.path.join(prefix, fname)
            try:
                shutil.copyfile( pathtofile, destfile)
            except IOError: # it is missing, create empty file
                open(destfile,"w").close()
            diffs.append( (destfile, pathtofile) )

        patchcmd = self.vc.patch_command(tmpdir)
        try:
            result = misc.write_pipe(patchcmd, patch, error=misc.NULL)
        except OSError:
            result = 1

        if result == 0:
            for d in diffs:
                os.chmod(d[0], 0o444)
                self.emit("create-diff", d)
            return True
        elif not silent:
            primary = _("Error fetching original comparison file")
            secondary = _("Meld couldn't obtain the original version of your "
                          "comparison file. If you are using the most recent "
                          "version of Meld, please report a bug, including as "
                          "many details as possible.")

            msgarea = self.msgarea_mgr.new_from_text_and_icon(
                        gtk.STOCK_DIALOG_ERROR, primary, secondary)
            msgarea.add_button(_("Hi_de"), gtk.RESPONSE_CLOSE)
            msgarea.add_button(_("Report a bug"), gtk.RESPONSE_OK)

            def patch_error_cb(msgarea, response):
                if response == gtk.RESPONSE_OK:
                    bug_url = "https://bugzilla.gnome.org/enter_bug.cgi?" + \
                              "product=meld"
                    misc.open_uri(bug_url)
                else:
                    self.msgarea_mgr.clear()

            msgarea.connect("response", patch_error_cb)
            msgarea.show_all()

        return False

    def refresh(self):
        self.set_location(
            self.model.value_path( self.model.get_iter_root(), 0 ) )

    def refresh_partial(self, where):
        # Rescan only the subtree rooted at `where` by replacing its row;
        # in flattened mode a full refresh is required.
        if not self.actiongroup.get_action("VcFlatten").get_active():
            it = self.find_iter_by_name( where )
            if it:
                newiter = self.model.insert_after( None, it)
                self.model.set_value(newiter,
                                     self.model.column_index(tree.COL_PATH, 0),
                                     where)
                self.model.set_path_state(newiter, 0, tree.STATE_NORMAL, True)
                self.model.remove(it)
                self.scheduler.add_task(
                    self._search_recursively_iter(newiter))
        else:
            # XXX fixme
            self.refresh()

    def _update_item_state(self, it, vcentry, location):
        # Copy a VC entry's state and metadata into the model row `it`.
        e = vcentry
        self.model.set_path_state(it, 0, e.state, e.isdir)
        def setcol(col, val):
            self.model.set_value(it, self.model.column_index(col, 0), val)
        setcol(COL_LOCATION, location)
        setcol(COL_STATUS, e.get_status())
        setcol(COL_REVISION, e.rev)
        setcol(COL_TAG, e.tag)
        setcol(COL_OPTIONS, e.options)

    def on_file_changed(self, filename):
        it = self.find_iter_by_name(filename)
        if it:
            path = self.model.value_path(it, 0)
            self.vc.update_file_state(path)
            files = self.vc.lookup_files(
                [], [(os.path.basename(path), path)])[1]
            for e in files:
                if e.path == path:
                    prefixlen = 1 + len( self.model.value_path(
                        self.model.get_iter_root(), 0 ) )
                    self._update_item_state( it, e, e.parent[prefixlen:])
                    return

    def find_iter_by_name(self, name):
        # Walk down from the root, following the child whose path is a prefix
        # of `name`, until an exact match (or None).
        it = self.model.get_iter_root()
        path = self.model.value_path(it, 0)
        while it:
            if name == path:
                return it
            elif name.startswith(path):
                child = self.model.iter_children( it )
                while child:
                    path = self.model.value_path(child, 0)
                    if name == path:
                        return child
                    elif name.startswith(path):
                        break
                    else:
                        child = self.model.iter_next( child )
                it = child
            else:
                break
        return None

    def on_console_view_toggle(self, box, event=None):
        if box == self.console_hide_box:
            self.prefs.vc_console_visible = 0
            self.console_hbox.hide()
            self.console_show_box.show()
        else:
            self.prefs.vc_console_visible = 1
            self.console_hbox.show()
            self.console_show_box.hide()

    def on_consoleview_populate_popup(self, text, menu):
        # Prepend a "Clear" item (plus separator) to the console context menu.
        item = gtk.ImageMenuItem(gtk.STOCK_CLEAR)
        def activate(*args):
            buf = text.get_buffer()
            buf.delete( buf.get_start_iter(), buf.get_end_iter() )
        item.connect("activate", activate)
        item.show()
        menu.insert( item, 0 )
        item = gtk.SeparatorMenuItem()
        item.show()
        menu.insert( item, 1 )

    def on_treeview_cursor_changed(self, *args):
        cursor_path, cursor_col = self.treeview.get_cursor()
        if not cursor_path:
            self.emit("next-diff-changed", False, False)
            self.current_path = cursor_path
            return

        # If invoked directly rather than through a callback, we always check
        if not args:
            skip = False
        else:
            try:
                old_cursor = self.model.get_iter(self.current_path)
            except (ValueError, TypeError):
                # An invalid path gives ValueError; None gives a TypeError
                skip = False
            else:
                # We can skip recalculation if the new cursor is between
                # the previous/next bounds, and we weren't on a changed row
                state = self.model.get_state(old_cursor, 0)
                if state not in (tree.STATE_NORMAL, tree.STATE_EMPTY):
                    skip = False
                else:
                    if self.prev_path is None and self.next_path is None:
                        skip = True
                    elif self.prev_path is None:
                        skip = cursor_path < self.next_path
                    elif self.next_path is None:
                        skip = self.prev_path < cursor_path
                    else:
                        skip = self.prev_path < cursor_path < self.next_path

        if not skip:
            prev, next = self.model._find_next_prev_diff(cursor_path)
            self.prev_path, self.next_path = prev, next
            have_next_diffs = (prev is not None, next is not None)
            self.emit("next-diff-changed", *have_next_diffs)
        self.current_path = cursor_path

    def next_diff(self, direction):
        if direction == gtk.gdk.SCROLL_UP:
            path = self.prev_path
        else:
            path = self.next_path
        if path:
            self.treeview.expand_to_path(path)
            self.treeview.set_cursor(path)

    def on_reload_activate(self, *extra):
        self.on_fileentry_activate(self.fileentry)

    def on_find_activate(self, *extra):
        self.treeview.emit("start-interactive-search")

    def treeview_search_cb(self, model, column, key, it):
        """Callback function for searching in VcView treeview"""
        path = model.get_value(it, tree.COL_PATH)

        # if query text contains slash, search in full path
        if key.find('/') >= 0:
            lineText = path
        else:
            lineText = os.path.basename(path)

        # Perform case-insensitive matching if query text is all lower-case
        if key.islower():
            lineText = lineText.lower()

        if lineText.find(key) >= 0:
            # line matches
            return False
        else:
            return True
""" blank screen to stop game being played out-of-hours """ import random import os import pygame import pygame.locals from ctime_common import go_fullscreen class BlankScreen(): """ a blank screen with no controls """ def __init__(self, ctime, screen_width, screen_height, log): log.info('Time for bed said Zeberdee') self.screen_size = {'width': screen_width, 'height': screen_height} self.screen = pygame.display.get_surface() self.screen.fill(pygame.Color(0, 0, 0, 0), (0, 0, screen_width, screen_height), 0) log.info('Lights out') ctime.button_power.rpi_power() go_fullscreen()
""" Asynchronous communication to phone. Mostly you should use only L{GammuWorker} class, others are only helpers which are used by this class. """ import queue import threading import gammu class InvalidCommand(Exception): """ Exception indicating invalid command. """ def __init__(self, value): """ Initializes exception. @param value: Name of wrong command. @type value: string """ super().__init__() self.value = value def __str__(self): """ Returns textual representation of exception. """ return f'Invalid command: "{self.value}"' def check_worker_command(command): """ Checks whether command is valid. @param command: Name of command. @type command: string """ if hasattr(gammu.StateMachine, command): return raise InvalidCommand(command) class GammuCommand: """ Storage of single command for gammu. """ def __init__(self, command, params=None, percentage=100): """ Creates single command instance. """ check_worker_command(command) self._command = command self._params = params self._percentage = percentage def get_command(self): """ Returns command name. """ return self._command def get_params(self): """ Returns command params. """ return self._params def get_percentage(self): """ Returns percentage of current task. """ return self._percentage def __str__(self): """ Returns textual representation. """ if self._params is not None: return f"{self._command} {self._params}" else: return f"{self._command} ()" class GammuTask: """ Storage of taks for gammu. """ def __init__(self, name, commands): """ Creates single command instance. @param name: Name of task. @type name: string @param commands: List of commands to execute. 
@type commands: list of tuples or strings """ self._name = name self._list = [] self._pointer = 0 for i in range(len(commands)): if isinstance(commands[i], tuple): cmd = commands[i][0] try: params = commands[i][1] except IndexError: params = None else: cmd = commands[i] params = None percents = round(100 * (i + 1) / len(commands)) self._list.append(GammuCommand(cmd, params, percents)) def get_next(self): """ Returns next command to be executed as L{GammuCommand}. """ result = self._list[self._pointer] self._pointer += 1 return result def get_name(self): """ Returns task name. """ return self._name def gammu_pull_device(state_machine): state_machine.ReadDevice() class GammuThread(threading.Thread): """ Thread for phone communication. """ def __init__(self, queue, config, callback, pull_func=gammu_pull_device): """ Initialises thread data. @param queue: Queue with events. @type queue: queue.Queue object. @param config: Gammu configuration, same as L{StateMachine.SetConfig} accepts. @type config: hash @param callback: Function which will be called upon operation completing. @type callback: Function, needs to accept four params: name of completed operation, result of it, error code and percentage of overall operation. This callback is called from different thread, so please take care of various threading issues in other modules you use. """ super().__init__() self._kill = False self._terminate = False self._sm = gammu.StateMachine() self._callback = callback self._queue = queue self._sm.SetConfig(0, config) self._pull_func = pull_func def _do_command(self, name, cmd, params, percentage=100): """ Executes single command on phone. 
""" func = getattr(self._sm, cmd) error = "ERR_NONE" result = None try: if params is None: result = func() elif isinstance(params, dict): result = func(**params) else: result = func(*params) except gammu.GSMError as info: errcode = info.args[0]["Code"] error = gammu.ErrorNumbers[errcode] self._callback(name, result, error, percentage) def run(self): """ Thread body, which handles phone communication. This should not be used from outside. """ start = True while not self._kill: try: if start: task = GammuTask("Init", ["Init"]) start = False else: # Wait at most ten seconds for next command task = self._queue.get(True, 10) try: while True: cmd = task.get_next() self._do_command( task.get_name(), cmd.get_command(), cmd.get_params(), cmd.get_percentage(), ) except IndexError: try: if task.get_name() != "Init": self._queue.task_done() except (AttributeError, ValueError): pass except queue.Empty: if self._terminate: break # Read the device to catch possible incoming events try: self._pull_func(self._sm) except Exception as ex: self._callback("ReadDevice", None, ex, 0) def kill(self): """ Forces thread end without emptying queue. """ self._kill = True def join(self, timeout=None): """ Terminates thread and waits for it. """ self._terminate = True super().join(timeout) class GammuWorker: """ Wrapper class for asynchronous communication with Gammu. It spaws own thread and then passes all commands to this thread. When task is done, caller is notified via callback. """ def __init__(self, callback, pull_func=gammu_pull_device): """ Initializes worker class. @param callback: See L{GammuThread.__init__} for description. """ self._thread = None self._callback = callback self._config = {} self._lock = threading.Lock() self._queue = queue.Queue() self._pull_func = pull_func def enqueue_command(self, command, params): """ Enqueues command. @param command: Command(s) to execute. Each command is tuple containing function name and it's parameters. 
@type command: tuple of list of tuples @param params: Parameters to command. @type params: tuple or string """ self._queue.put(GammuTask(command, [(command, params)])) def enqueue_task(self, command, commands): """ Enqueues task. @param command: Command(s) to execute. Each command is tuple containing function name and it's parameters. @type command: tuple of list of tuples @param commands: List of commands to execute. @type commands: list of tuples or strings """ self._queue.put(GammuTask(command, commands)) def enqueue(self, command, params=None, commands=None): """ Enqueues command or task. @param command: Command(s) to execute. Each command is tuple containing function name and it's parameters. @type command: tuple of list of tuples @param params: Parameters to command. @type params: tuple or string @param commands: List of commands to execute. When this is not none, params are ignored and command is taken as task name. @type commands: list of tuples or strings """ if commands is not None: self.enqueue_task(command, commands) else: self.enqueue_command(command, params) def configure(self, config): """ Configures gammu instance according to config. @param config: Gammu configuration, same as L{StateMachine.SetConfig} accepts. @type config: hash """ self._config = config def abort(self): """ Aborts any remaining operations. """ raise NotImplementedError def initiate(self): """ Connects to phone. """ self._thread = GammuThread( self._queue, self._config, self._callback, self._pull_func ) self._thread.start() def terminate(self, timeout=None): """ Terminates phone connection. """ self.enqueue("Terminate") self._thread.join(timeout) self._thread = None
import os
import sys
# merge_utils supplies etree, re, GitTree and all the merge step classes
# (SyncDir, SyncFiles, InsertEbuilds, ...) used below.
from merge_utils import *

# Collector for per-package metadata; serialized to packages.xml at the end.
xml_out = etree.Element("packages")
# Destination (write) tree: the funtoo-staging work tree that all steps run
# against.  Not pulled -- we generate it.
funtoo_staging_w = GitTree("funtoo-staging", "master", "repos@localhost:ports/funtoo-staging.git", root="/var/git/dest-trees/funtoo-staging", pull=False, xml_out=xml_out)
xmlfile="/home/ports/public_html/packages.xml"
nopush=False

funtoo_overlay = GitTree("funtoo-overlay", "master", "repos@localhost:funtoo-overlay.git", pull=True)

# Optional pin: if funtoo-overlay ships a commit-staged file, its first line
# names the gentoo-staging commit to merge against; otherwise use HEAD.
p = os.path.join(funtoo_overlay.root,"funtoo/scripts/commit-staged")
if os.path.exists(p):
    a = open(p,"r")
    commit = a.readlines()[0].strip()
    print("Using commit: %s" % commit)
else:
    commit = None

# Source (read) tree of upstream Gentoo ebuilds, optionally pinned above.
gentoo_staging_r = GitTree("gentoo-staging", "master", "repos@localhost:ports/gentoo-staging.git", commit=commit, pull=True)

# Category shards pinned to specific known-good commits.
shards = {
    "perl" : GitTree("gentoo-perl-shard", "1fc10379b04cb4aaa29e824288f3ec22badc6b33", "repos@localhost:gentoo-perl-shard.git", pull=True),
    "kde" : GitTree("gentoo-kde-shard", "cd4e1129ddddaa21df367ecd4f68aab894e57b31", "repos@localhost:gentoo-kde-shard.git", pull=True),
    "gnome" : GitTree("gentoo-gnome-shard", "ffabb752f8f4e23a865ffe9caf72f950695e2f26", "repos@localhost:ports/gentoo-gnome-shard.git", pull=True),
    "x11" : GitTree("gentoo-x11-shard", "12c1bdf9a9bfd28f48d66bccb107c17b5f5af577", "repos@localhost:ports/gentoo-x11-shard.git", pull=True),
    "office" : GitTree("gentoo-office-shard", "9a702057d23e7fa277e9626344671a82ce59442f", "repos@localhost:ports/gentoo-office-shard.git", pull=True),
    "core" : GitTree("gentoo-core-shard", "56e5b9edff7dc27e828b71010d019dcbd8e176fd", "repos@localhost:gentoo-core-shard.git", pull=True)
}

# Funtoo-maintained overlays, always tracked at branch/commit given.
funtoo_overlays = {
    "funtoo_media" : GitTree("funtoo-media", "master", "repos@localhost:funtoo-media.git", pull=True),
    "plex_overlay" : GitTree("funtoo-plex", "master", "https://github.com/Ghent/funtoo-plex.git", pull=True),
    #"gnome_fixups" : GitTree("gnome-3.16-fixups", "master", "repos@localhost:ports/gnome-3.16-fixups.git", pull=True),
    "gnome_fixups" : GitTree("gnome-3.20-fixups", "master", "repos@localhost:ports/gnome-3.20-fixups.git", pull=True),
    "funtoo_toolchain" : GitTree("funtoo-toolchain", "b97787318b7ffcfeaacde82cd21ddd5e207ad1f4", "repos@localhost:funtoo-toolchain-overlay.git", pull=True),
    "ldap_overlay" : GitTree("funtoo-ldap", "master", "repos@localhost:funtoo-ldap-overlay.git", pull=True),
    "deadbeef_overlay" : GitTree("deadbeef-overlay", "master", "https://github.com/damex/deadbeef-overlay.git", pull=True),
    "gambas_overlay" : GitTree("gambas-overlay", "master", "https://github.com/damex/gambas-overlay.git", pull=True),
    "wmfs_overlay" : GitTree("wmfs-overlay", "master", "https://github.com/damex/wmfs-overlay.git", pull=True),
    "flora" : GitTree("flora", "master", "repos@localhost:flora.git", pull=True),
}

# Third-party overlays we cherry-pick ebuilds from.
other_overlays = {
    "foo_overlay" : GitTree("foo-overlay", "master", "https://github.com/slashbeast/foo-overlay.git", pull=True),
    "bar_overlay" : GitTree("bar-overlay", "master", "git://github.com/adessemond/bar-overlay.git", pull=True),
    "squeezebox_overlay" : GitTree("squeezebox", "master", "git://anongit.gentoo.org/user/squeezebox.git", pull=True),
    "pantheon_overlay" : GitTree("pantheon", "master", "https://github.com/pimvullers/elementary.git", pull=True),
    "pinsard_overlay" : GitTree("pinsard", "master", "https://github.com/apinsard/sapher-overlay.git", pull=True),
    "sabayon_for_gentoo" : GitTree("sabayon-for-gentoo", "master", "git://github.com/Sabayon/for-gentoo.git", pull=True),
    "tripsix_overlay" : GitTree("tripsix", "master", "https://github.com/666threesixes666/tripsix.git", pull=True),
    "faustoo_overlay" : GitTree("faustoo", "master", "https://github.com/fmoro/faustoo.git", pull=True),
    "wltjr_overlay" : GitTree("wltjr", "master", "https://github.com/Obsidian-StudiosInc/os-xtoo", pull=True),
    "vmware_overlay" : GitTree("vmware", "master", "git://anongit.gentoo.org/proj/vmware.git", pull=True)
}

# True when any input tree changed since the last run.
# NOTE(review): computed but not read below -- presumably used to decide
# whether to regenerate in other variants of this script; verify.
funtoo_changes = False
if funtoo_overlay.changes:
    funtoo_changes = True
elif gentoo_staging_r.changes:
    funtoo_changes = True
else:
    for fo in funtoo_overlays:
        if funtoo_overlays[fo].changes:
            funtoo_changes = True
            break

pull = True
if nopush:
    push = False
else:
    push = "master"

# Start from upstream Gentoo, minus cache and Funtoo-owned files.
base_steps = [
    GitCheckout("master"),
    SyncFromTree(gentoo_staging_r, exclude=[
        "/metadata/cache/**",
        "ChangeLog",
        "dev-util/metro",
        "skel.ChangeLog",
    ]),
]

# Build the Funtoo profile tree, mapping selected upstream mask/use files
# into Funtoo's profiles/funtoo/1.0/linux-gnu layout.
profile_steps = [
    SyncDir(funtoo_overlay.root, "profiles", "profiles", exclude=["categories", "updates"]),
    CopyAndRename("profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/subarch", "profiles/funtoo/1.0/linux-gnu/arch/pure64/subarch", lambda x: os.path.basename(x) + "-pure64"),
    SyncFiles(gentoo_staging_r.root, {
        "profiles/package.mask":"profiles/package.mask/00-gentoo",
        "profiles/arch/amd64/package.use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/package.use.mask/01-gentoo",
        "profiles/features/multilib/package.use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/package.use.mask/02-gentoo",
        "profiles/arch/amd64/use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/use.mask/01-gentoo",
        "profiles/arch/x86/package.use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-32bit/package.use.mask/01-gentoo",
        "profiles/arch/x86/use.mask":"profiles/funtoo/1.0/linux-gnu/arch/x86-32bit/use.mask/01-gentoo",
        "profiles/default/linux/package.use.mask":"profiles/funtoo/1.0/linux-gnu/package.use.mask/01-gentoo",
        "profiles/default/linux/use.mask":"profiles/funtoo/1.0/linux-gnu/use.mask/01-gentoo",
        "profiles/arch/amd64/no-multilib/package.use.mask":"profiles/funtoo/1.0/linux-gnu/arch/pure64/package.use.mask/01-gentoo",
        "profiles/arch/amd64/no-multilib/package.mask":"profiles/funtoo/1.0/linux-gnu/arch/pure64/package.mask/01-gentoo",
        "profiles/arch/amd64/no-multilib/use.mask":"profiles/funtoo/1.0/linux-gnu/arch/pure64/use.mask/01-gentoo"
    }),
    SyncFiles(funtoo_overlays["deadbeef_overlay"].root, {
        "profiles/package.mask":"profiles/package.mask/deadbeef-mask"
    }),
    SyncFiles(funtoo_overlays["wmfs_overlay"].root, {
        "profiles/package.mask":"profiles/package.mask/wmfs-mask"
    })
]

profile_steps += [
    SyncFiles(funtoo_overlays["funtoo_toolchain"].root, {
        "profiles/package.mask/funtoo-toolchain":"profiles/funtoo/1.0/linux-gnu/build/current/package.mask/funtoo-toolchain",
    }),
    SyncFiles(funtoo_overlays["funtoo_toolchain"].root, {
        "profiles/package.mask/funtoo-toolchain":"profiles/funtoo/1.0/linux-gnu/build/stable/package.mask/funtoo-toolchain",
        "profiles/package.mask/funtoo-toolchain-experimental":"profiles/funtoo/1.0/linux-gnu/build/experimental/package.mask/funtoo-toolchain",
    }),
    # Strip Gentoo's python target defaults; Funtoo manages these itself.
    RunSed(["profiles/base/make.defaults"], ["/^PYTHON_TARGETS=/d", "/^PYTHON_SINGLE_TARGET=/d"]),
]

# Whole-overlay ebuild imports (no replacement of existing Funtoo ebuilds
# unless noted).
ebuild_additions = [
    InsertEbuilds(other_overlays["bar_overlay"], select="all", skip=["app-emulation/qemu"], replace=False),
    InsertEbuilds(other_overlays["squeezebox_overlay"], select="all", skip=None, replace=False),
    InsertEbuilds(funtoo_overlays["deadbeef_overlay"], select="all", skip=None, replace=False),
    InsertEbuilds(funtoo_overlays["gambas_overlay"], select="all", skip=None, replace=False),
    InsertEbuilds(funtoo_overlays["wmfs_overlay"], select="all", skip=None, replace=False),
    InsertEbuilds(funtoo_overlays["flora"], select="all", skip=None, replace=True, merge=True),
]

# Targeted replacements: these overlays override specific packages.
ebuild_modifications = [
    InsertEbuilds(other_overlays["vmware_overlay"], select=[ "app-emulation/vmware-modules" ], skip=None, replace=True, merge=True),
    InsertEbuilds(other_overlays["pantheon_overlay"], select=[ "x11-libs/granite", "x11-libs/bamf", "x11-themes/plank-theme-pantheon", "pantheon-base/plank", "x11-wm/gala"], skip=None, replace=True, merge=True),
    InsertEbuilds(other_overlays["faustoo_overlay"], select="all", skip=None, replace=True, merge=True),
    InsertEbuilds(other_overlays["foo_overlay"], select="all", skip=["sys-fs/mdev-bb", "sys-fs/mdev-like-a-boss", "media-sound/deadbeef", "media-video/handbrake"], replace=["app-shells/rssh"]),
    InsertEbuilds(funtoo_overlays["plex_overlay"], select=[ "media-tv/plex-media-server" ], skip=None, replace=True),
    InsertEbuilds(other_overlays["sabayon_for_gentoo"], select=["app-admin/equo", "app-admin/matter", "sys-apps/entropy", "sys-apps/entropy-server", "sys-apps/entropy-client-services","app-admin/rigo", "sys-apps/rigo-daemon", "sys-apps/magneto-core", "x11-misc/magneto-gtk", "x11-misc/magneto-gtk3", "x11-themes/numix-icon-theme", "kde-misc/magneto-kde", "app-misc/magneto-loader", "media-video/kazam" ], replace=True),
    InsertEbuilds(other_overlays["tripsix_overlay"], select=["media-sound/rakarrack"], skip=None, replace=True, merge=False),
    InsertEbuilds(other_overlays["pinsard_overlay"], select=["app-portage/chuse", "dev-python/iwlib", "media-sound/pytify", "x11-wm/qtile"], skip=None, replace=True, merge=True),
    InsertEbuilds(other_overlays["wltjr_overlay"], select=["mail-filter/assp", "mail-mta/netqmail"], skip=None, replace=True, merge=False),
]

ebuild_modifications += [
    InsertEbuilds(funtoo_overlays["funtoo_media"], select="all", skip=None, replace=True),
    InsertEbuilds(funtoo_overlays["ldap_overlay"], select="all", skip=["net-nds/openldap"], replace=True),
]

eclass_steps = [
    SyncDir(funtoo_overlays["deadbeef_overlay"].root,"eclass"),
]

treeprep_steps = [
    SyncDir(funtoo_overlays["plex_overlay"].root,"licenses"),
]

# Layer the category shards, fixups, toolchain and finally funtoo-overlay
# itself on top of the base tree.  Order matters: later steps win.
master_steps = [
    InsertEbuilds(shards["perl"], select="all", skip=None, replace=True),
    InsertEclasses(shards["perl"], select=re.compile(".*\.eclass")),
    InsertEbuilds(shards["x11"], select="all", skip=None, replace=True),
    InsertEbuilds(shards["office"], select="all", skip=None, replace=True),
    InsertEbuilds(shards["kde"], select="all", skip=None, replace=True),
    InsertEclasses(shards["kde"], select=re.compile(".*\.eclass")),
    InsertEbuilds(shards["gnome"], select="all", skip=None, replace=True),
    InsertEbuilds(funtoo_overlays["gnome_fixups"], select="all", skip=None, replace=True),
    InsertEbuilds(shards["core"], select="all", skip=None, replace=True),
    InsertEclasses(shards["core"], select=re.compile(".*\.eclass")),
    InsertEbuilds(funtoo_overlays["funtoo_toolchain"], select="all", skip=None, replace=True, merge=False),
    InsertEbuilds(funtoo_overlay, select="all", skip=None, replace=True),
    SyncDir(funtoo_overlay.root, "eclass"),
    SyncDir(funtoo_overlay.root,"licenses"),
    SyncDir(funtoo_overlay.root,"metadata"),
    SyncFiles(funtoo_overlay.root, {
        "COPYRIGHT.txt":"COPYRIGHT.txt",
        "LICENSE.txt":"LICENSE.txt",
        "README.rst":"README.rst",
        "header.txt":"header.txt",
    }),
]

# Final tree cleanup: apply updates, masks, mirror rewrites, and rebrand the
# repository as "gentoo" so portage treats it as the main tree.
treeprep_steps += [
    MergeUpdates(funtoo_overlay.root),
    AutoGlobMask("dev-lang/python", "python*_pre*", "funtoo-python_pre"),
    ThirdPartyMirrors(),
    ProfileDepFix(),
    Minify(),
    # Set name of repository as "gentoo". Unset masters.
    RunSed(["metadata/layout.conf"], ["s/^repo-name = .*/repo-name = gentoo/", "/^masters =/d"]),
    RunSed(["profiles/repo_name"], ["s/.*/gentoo/"])
]

all_steps = [ base_steps, profile_steps, ebuild_additions, eclass_steps, master_steps, ebuild_modifications, treeprep_steps ]

for step in all_steps:
    funtoo_staging_w.run(step)

funtoo_staging_w.gitCommit(message="glorious funtoo updates",branch=push)

# Write the accumulated package metadata for the website.
if xmlfile:
    a=open(xmlfile,"wb")
    etree.ElementTree(xml_out).write(a, encoding='utf-8', xml_declaration=True, pretty_print=True)
    a.close()

print("merge-funtoo-staging.py completed successfully.")

sys.exit(0)
""" binlog_purge_rpl test for ms test and BUG#22543517 running binlogpurge on second master added to slave replication channels """ import replicate_ms from mysql.utilities.exception import MUTLibError _CHANGE_MASTER = ("CHANGE MASTER TO MASTER_HOST = 'localhost', " "MASTER_USER = 'rpl', MASTER_PASSWORD = 'rpl', " "MASTER_PORT = {0}, MASTER_AUTO_POSITION=1 " "FOR CHANNEL 'master-{1}'") def flush_server_logs_(server, times=5): """Flush logs on a server server[in] the instance server where to flush logs on times[in] number of times to flush the logs. """ # Flush master binary log server.exec_query("SET sql_log_bin = 0") for _ in range(times): server.exec_query("FLUSH LOCAL BINARY LOGS") server.exec_query("SET sql_log_bin = 1") class test(replicate_ms.test): """test binlog purge Utility This test runs the mysqlbinlogpurge utility on a known topology. """ master_datadir = None slaves = None mask_ports = [] def check_prerequisites(self): if not self.servers.get_server(0).check_version_compat(5, 7, 6): raise MUTLibError("Test requires server version 5.7.6 or later") return self.check_num_servers(1) def setup(self): self.res_fname = "result.txt" res = super(test, self).setup() if not res: return False # Setup multiple channels for slave m1_dict = self.get_connection_values(self.server2) m2_dict = self.get_connection_values(self.server3) for master in [self.server2, self.server3]: master.exec_query("SET SQL_LOG_BIN= 0") master.exec_query("GRANT REPLICATION SLAVE ON *.* TO 'rpl'@'{0}' " "IDENTIFIED BY 'rpl'".format(self.server1.host)) master.exec_query("SET SQL_LOG_BIN= 1") self.server1.exec_query("SET GLOBAL relay_log_info_repository = " "'TABLE'") self.server1.exec_query(_CHANGE_MASTER.format(m1_dict[3], 1)) self.server1.exec_query(_CHANGE_MASTER.format(m2_dict[3], 2)) self.server1.exec_query("START SLAVE") return True def run(self): test_num = 0 master1_conn = self.build_connection_string(self.server2).strip(' ') master2_conn = 
self.build_connection_string(self.server3).strip(' ') cmd_str = "mysqlbinlogpurge.py --master={0} ".format(master1_conn) cmd_opts = ("--discover-slaves={0} --dry-run " "".format(master1_conn.split('@')[0])) test_num += 1 comment = ("Test case {0} - mysqlbinlogpurge: with discover " "and verbose options - master 1".format(test_num)) cmds = ("{0} {1} {2} -vv" "").format(cmd_str, cmd_opts, "binlog_purge{0}.log".format(1)) res = self.run_test_case(0, cmds, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) flush_server_logs_(self.server1) cmd_str = "mysqlbinlogpurge.py --master={0} ".format(master2_conn) test_num += 1 comment = ("Test case {0} - mysqlbinlogpurge: with discover " "and verbose options - master 2".format(test_num)) cmds = ("{0} {1} {2} -vv" "").format(cmd_str, cmd_opts, "binlog_purge{0}.log".format(2)) res = self.run_test_case(0, cmds, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) flush_server_logs_(self.server1) super(test, self).reset_ms_topology() return True def get_result(self): # If run method executes successfully without throwing any exceptions, # then test was successful return True, None def record(self): # Not a comparative test return True def cleanup(self): return super(test, self).cleanup()
from abc import ABCMeta, abstractmethod
from my_hue import *


def trigger_factory(trigger_type):
    """Build a Trigger instance for the given trigger type.

    Currently a stub: no trigger types are wired up yet, so every call
    returns None.  Callers must handle the None case.
    """
    # TODO: dispatch trigger_type to a concrete Trigger subclass
    # (e.g. IClickerTrigger) once the mapping is defined.
    return None


class Trigger(object):
    """Abstract base for event triggers.

    Subclasses implement action(); the base constructor invokes it once
    immediately, so subclasses must have all state action() needs in
    place before delegating to this __init__.
    """
    __metaclass__ = ABCMeta

    def __init__(self):
        # Fire the trigger's effect at construction time.
        self.action()

    @abstractmethod
    def action(self):
        """Perform the trigger's effect.  Must be overridden."""
        pass


class IClickerTrigger(Trigger):
    """Trigger fired by an i>clicker response.

    Fix: this class previously inherited from ``object`` even though it
    implements the Trigger contract and chains to super().__init__();
    it now derives from Trigger.  Attributes are assigned *before* the
    super() call because Trigger.__init__ immediately runs action(),
    which reads self.response_info.
    """

    def __init__(self, clicker_id, response_info, time_of_trigger, sequence_number):
        # clicker_id:      identifier of the physical clicker device
        # response_info:   payload describing the response (shown by action)
        # time_of_trigger: when the response was received
        # sequence_number: ordering counter for responses
        self.clicker_id = clicker_id
        self.response_info = response_info
        self.time_of_trigger = time_of_trigger
        self.sequence_number = sequence_number
        # Runs action() -- the attributes above must already be set.
        super(IClickerTrigger, self).__init__()

    def action(self):
        # print() with a single argument is valid on Python 2 and 3
        # (replaces the old Python-2-only "print x" statement).
        print(self.response_info)
        # Placeholder for button-specific handling; 'a' is a stub value.
        button = 'a'
        if button == 'a':
            pass
from __future__ import absolute_import from unittest import TestCase from datetime import datetime, timedelta from voeventdb.server.tests.fixtures import fake, packetgen class TestBasicRoutines(TestCase): def setUp(self): self.start = datetime(2015, 1, 1) self.interval = timedelta(minutes=15) def test_timerange(self): n_interval_added = 5 times = [t for t in packetgen.timerange(self.start, self.start+self.interval*n_interval_added, self.interval)] self.assertEqual(n_interval_added, len(times)) self.assertEqual(self.start, times[0]) def test_heartbeat(self): n_interval = 4*6 packets = fake.heartbeat_packets(self.start, self.interval, n_interval) self.assertEqual(n_interval, len(packets))
import sqlite3
import os.path
import sys
import random

def makeDatabase(databaseName):
    # Create an empty SQLite database file, appending ".db" if missing.
    if databaseName[-3:] != ".db":
        databaseName = databaseName + ".db"
    conn = sqlite3.connect(databaseName)
    conn.commit()
    conn.close()

def listToString(list):
    # Serialize a list as a single tab-separated string (inverse of
    # stringToList).  NOTE(review): shadows the builtin name "list".
    string = ""
    for i in list:
        string += str(i)+"\t"
    return string[:-1]

def stringToList(string):
    # Split a tab-separated string back into a list of strings.
    list = [str(line) for line in string.split('\t')]
    return list

class SqliteDB:
    #connects to the database, alters its name if named incorrectly
    def __init__(self, databaseName):
        if databaseName[-3:] != ".db":
            databaseName = databaseName + ".db"
        if os.path.isfile(databaseName):
            self.databaseName = databaseName;
            self.conn = sqlite3.connect(self.databaseName)
            self.cursor = self.conn.cursor()
        else:
            #sees if database name is unique, so it doesn't overwrite anything
            sys.exit("This database does not exist, use the makeDatabase(databaseName) to create it")

    def createTables(self):
        #creates tables if they do not exist
        self.cursor.execute("CREATE TABLE IF NOT EXISTS students (wID text, email text, UNIQUE(wID, email) ON CONFLICT ABORT)")
        self.cursor.execute("CREATE TABLE IF NOT EXISTS submissions (labNumber int, wID text, URL text, metadata text, URLsToGrade text)")
        self.cursor.execute("CREATE TABLE IF NOT EXISTS uniqueStudentURL (labNumber int, wID text, URL text, UNIQUE(URL) ON CONFLICT ABORT)")
        self.cursor.execute("CREATE TABLE IF NOT EXISTS experts (labNumber int, URL text, grade text, hidden int, PRIMARY KEY(labNumber, URL, hidden))")
        self.cursor.execute("CREATE TABLE IF NOT EXISTS responses (labNumber int, URL text, wID text, response text, practice boolean, PRIMARY KEY(labNumber, URL, response))")
        self.cursor.execute("CREATE TABLE IF NOT EXISTS questions (labNumber int, questionNumber int, questionWebassignNumber int, practice boolean)")
        # Build columns weight1..weight6 for the BIBI weights table.
        weightString = ''
        for i in range(6):
            weightString += ', weight'+str(i+1)+' num'
        self.cursor.execute("CREATE TABLE IF NOT EXISTS weightsBIBI (labNumber int, wID text"+weightString+", weightSum num)")
        self.cursor.execute("CREATE TABLE IF NOT EXISTS rubrics (labNumber int, itemIndex int, itemType text, itemValues text, graded boolean, itemPrompt text)")
        self.cursor.execute("CREATE TABLE IF NOT EXISTS grades(labNumber int, wID text, URL text, finalGrade number, finalGradeVector text, rawGrade number, rawGradeVector text)")
        ##check to see if the tables have already been created
        #creates columns in tables for each lab specified
        self.conn.commit()

    #adds a person into the database, works for both new users and existing ones
    def addEntry(self, wID, URL, labNumber, metadata = None):
        if self.databaseName != None and self.conn != None and self.cursor !=None:
            #If the student did not submit a URL (aka the inputted URL is '')
            if URL == '':
                self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
            #try putting the student and its URL into the uniqueStudentURL database to check if the URL is unique
            else:
                try:
                    self.cursor.execute("INSERT INTO uniqueStudentURL VALUES (?,?,?)", [labNumber, wID, URL])
                    #if there is no error in inserting to a table where URL has to be unique, put it in the actual student database
                    self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
                #if the try fails, that means that the URL is already in the db, duplicate URL found!
                except:
                    self.cursor.execute("SELECT wID FROM uniqueStudentURL WHERE URL=?", [URL])
                    print "URL: " + URL + " was initially submitted by: " + self.cursor.fetchall()[0][0]
                    URL = "DUPLICATEURL"
                    self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [labNumber, wID, URL,metadata,''])
            self.conn.commit()

    def addEmail(self, wID, email):
        # NOTE(review): the students table has 2 columns but this INSERT uses
        # 3 placeholders with only 2 values -- it will always raise and fall
        # into the except branch.  Verify intent before relying on this.
        try:
            self.cursor.execute("INSERT INTO students VALUES (?,?,?)", [wID, email])
        except:
            print "wID: " + wID + " or email: " + email + " already in database."

    #retrieves URL for a specific student and specific lab number
    def getURL(self, wID, labNumber):
        self.cursor.execute("SELECT URL FROM submissions WHERE labNumber=? AND wID=?", [labNumber, wID])
        URL = self.cursor.fetchone();
        if URL is not None:
            return (URL[0])
        else:
            return None

    def addExpertURL(self, labNumber, URL, grade, hidden):
        # Register an expert-graded video; "hidden" experts are calibration
        # videos mixed into every student's grading list.
        self.cursor.execute("SELECT * FROM experts WHERE URL = ?", [URL])
        #adds in a user if not in database already
        presentURL = self.cursor.fetchone()
        if presentURL == None:
            self.cursor.execute("INSERT INTO experts VALUES (?, ?, ?, ?)", [labNumber, URL, listToString(grade), hidden])
            self.conn.commit()
        elif presentURL == URL:
            print "The URL " + URL + " is already in the expert database"
        else:
            sys.exit("Trying to overrite")
        ##find a way to make seperate expert tables for each lab, and then join them together to prevent the staggaring of grades in the excel sheet
        #self.cursor.execute("SELECT * FROM expert WHERE Lab1Grade")
        #print self.cursor.fetchall()
        #query = ("SELECT {0} FROM expert WHERE wID

    def getExpertURLs(self, labNumber):
        # Return {URL: grade-vector} for every expert entry of this lab.
        self.cursor.execute("SElECT URL, grade FROM experts where labNumber=?", [labNumber])
        URLsAndGrades = {}
        for d in self.cursor.fetchall():
            URLsAndGrades[str(d[0])] = stringToList(str(d[1]))
        return URLsAndGrades

    def finalize(self, labNumber, seed, N, MOOC=False):
        # Assign each student N+1 peer URLs to grade (plus the hidden expert
        # URLs), rotating through a shuffled pool so every URL is graded a
        # similar number of times.  Students whose video is expert-graded,
        # who submitted no URL, or who submitted a duplicate get a
        # "pseudo" assignment drawn from the next entry in the pool.
        ##randomize the youtube URLs
        #for each wID
        #put that into the databse under the student ID
        self.cursor.execute("SELECT URL FROM experts WHERE labNumber=? and hidden=0", [labNumber])
        expertURL = [str(d[0]) for d in self.cursor.fetchall()]
        # find all the hidden expert videos
        self.cursor.execute("SELECT URL FROM experts WHERE labNumber=? and hidden=1", [labNumber])
        hiddenURL = [str(d[0]) for d in self.cursor.fetchall()]
        #get all the studnet URLs
        self.cursor.execute("SELECT URL from submissions WHERE labNumber=?", [labNumber])
        data = [str(d[0]) for d in self.cursor.fetchall()]
        #assign the students whos videos are designated expert graded URLs to grade, and remove them from the URL pool retrieved above
        if len(expertURL) + N + 1 <= len(data):
            pseudoURL = {}
            for d in expertURL:
                #if the expertURL is not in the data list, then it is a video that is not submitted by a student this sem
                #semester, in which case, we skip it
                if d in data:
                    self.cursor.execute("SELECT wID FROM submissions WHERE URL=?", [d])
                    indice = (data.index(d) + 1) % len(data)
                    while data[indice] in expertURL or data[indice] in hiddenURL:
                        indice = (indice + 1) % len(data)
                    pseudoURL[d] = data[indice]
                    data.remove(d)
            for d in hiddenURL:
                if d in data:
                    indice = (data.index(d) + 1) % len(data)
                    while data[indice] in expertURL or data[indice] in hiddenURL:
                        indice = (indice + 1) % len(data)
                    pseudoURL[d] = data[indice]
                    data.remove(d)
            # Students with an empty URL still need something to grade
            # (unless running as a MOOC, where they are dropped).
            self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=? and URL is ''", [labNumber])
            noURLSubmitted = [str(d[0]) for d in self.cursor.fetchall()]
            wIDPseudoURL = {}
            if(data.count('') > 0) and not MOOC:
                for d in noURLSubmitted:
                    indice = (data.index('') + 1) % len(data)
                    while data[indice] == '':
                        indice = (indice + 1) % len(data)
                    wIDPseudoURL[d] = data[indice]
                    data.remove('')
            else:
                while '' in data:
                    data.remove('')
            # Same treatment for students whose URL was flagged as duplicate.
            self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=? AND URL=?", [labNumber, "DUPLICATEURL"])
            noURLSubmitted = [str(d[0]) for d in self.cursor.fetchall()]
            if(data.count("DUPLICATEURL") > 0) and not MOOC:
                for d in noURLSubmitted:
                    indice = (data.index("DUPLICATEURL") + 1) % len(data)
                    while data[indice] == "DUPLICATEURL":
                        indice = (indice + 1) % len(data)
                    wIDPseudoURL[d] = data[indice]
                    data.remove("DUPLICATEURL")
            else:
                while '' in data:
                    data.remove('')
            #self.cursor.execute(query)
            # Shuffled circular pool: wrap-around is handled by appending the
            # first N + len(expertURL) + 1 entries.
            random.shuffle(data)
            selectFrom = data + data[:N + len(expertURL) + 1]
            if len(pseudoURL.keys()) > 0:
                # params = ("Lab" + str(labNumber) + "URLSToGrade", "Lab" + str(labNumber) + "URL")
                for key in pseudoURL.keys():
                    startIndex = selectFrom.index(pseudoURL[key])
                    URLSToGrade = selectFrom[startIndex: startIndex+N+1]
                    for i in hiddenURL:
                        URLSToGrade.append(i)
                    random.shuffle(URLSToGrade)
                    self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE URL=?", [listToString(expertURL + URLSToGrade), key])
                    self.conn.commit()
            if len(wIDPseudoURL.keys()) > 0:
                for key in wIDPseudoURL.keys():
                    startIndex = selectFrom.index(wIDPseudoURL[key])
                    URLSToGrade = selectFrom[startIndex: startIndex+N+1]
                    for i in hiddenURL:
                        URLSToGrade.append(i)
                    random.shuffle(URLSToGrade)
                    self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE wID=?", [listToString(expertURL + URLSToGrade), key])
                    self.conn.commit()
            if len(data) > N:
                for d in data:
                    startIndex = selectFrom.index(d)
                    URLSToGrade = selectFrom[startIndex:startIndex+N+1]
                    for i in hiddenURL:
                        URLSToGrade.append(i)
                    random.shuffle(URLSToGrade)
                    # params = ("Lab" + str(labNumber) + "URLSToGrade", "Lab" + str(labNumber) + "URL")
                    self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE URL=? and labNumber=?", [listToString(expertURL + URLSToGrade), d, labNumber])
                    self.conn.commit()

    def getURLsToGrade(self, wID, labNumber):
        # Return the student's assigned URL list, or False if no row exists.
        self.cursor.execute("Select URLsToGrade FROM submissions WHERE wID=? and labNumber=?", [wID, labNumber])
        dbExtract = self.cursor.fetchone()
        if dbExtract == None:
            return False
        else:
            return [i for i in stringToList(dbExtract[0])]

    def addGrade(self, wID, labNumber, URL, grade , practice = False):
        # Record a grade only if the URL was actually assigned to this grader.
        URLsToGrade = self.getURLsToGrade(wID, labNumber)
        if URLsToGrade != False:
            if URL in URLsToGrade:
                self.cursor.execute("INSERT INTO responses VALUES(?, ?, ?, ?, ?)", [labNumber, URL, wID, listToString(grade), practice])
                self.conn.commit()
            else:
                print "wID: " + wID + " was not assigned to grade URL: " + URL
        else:
            print("wID: " + wID + " not in the submissions table")

    def wIDGradesSubmitted(self, wID, labNumber):
        # NOTE(review): queries a "grade" column on the grades table, which
        # createTables defines with finalGrade/rawGrade columns only --
        # confirm which schema this is meant to run against.
        URLsToGrade = self.getURLsToGrade(wID, labNumber)
        gradesSubmitted = {}
        for URL in URLsToGrade:
            self.cursor.execute("SElECT grade FROM grades WHERE wID = ? AND URL = ?",[wID, URL])
            dbExtract = self.cursor.fetchall()
            #if they did not grade the URL assigned to them
            if dbExtract!=[]:
                gradesSubmitted[URL] = stringToList(str(dbExtract[0][0]))
            else:
                gradesSubmitted[URL] = None
        return gradesSubmitted

    def compareToExpert(self, wID, labNumber):
        # Print expert vs. student grade vectors for every expert URL the
        # student actually graded (diagnostic helper).
        expertURLsAndGrades = self.getExpertURLs(labNumber)
        userSubmittedGrades = self.wIDGradesSubmitted(wID, labNumber)
        URLsGraded = userSubmittedGrades.keys()
        for key in expertURLsAndGrades.keys():
            if key in URLsGraded:
                print expertURLsAndGrades[key]
                print userSubmittedGrades[key]

    def getGrades(self, wID, labNumber):
        # Return {graderWID: grade} for the grades given to this student's URL.
        URL = self.getURL(wID, labNumber)
        self.cursor.execute("SELECT grade,wID FROM grades WHERE URL=?", [URL])
        grades = {}
        for d in self.cursor.fetchall():
            grades[str(d[1])] = str(d[0])
        return grades

    def check(self, labNumber):
        # NOTE(review): URLList is a single joined string, so URLList[i] is a
        # character and stringToList(URLList[i]) cannot contain a URL -- this
        # consistency check looks broken as written; verify before use.
        # params = ("Lab" + str(labNumber) + "URL", "Lab" + str(labNumber) + "URLsToGrade", None)
        self.cursor.execute("Select URL, URLsToGrade FROM submissions WHERE URL!= ''")
        fetch = self.cursor.fetchall()
        individualURL = [str(d[0]) for d in fetch]
        URLList = listToString([str(d[1]) for d in fetch])
        for i in range(1, len(individualURL)-1):
            if individualURL[i] not in stringToList(URLList[i]):
                print individualURL[i]
                return False
        return True

# Ad-hoc smoke test, disabled by default.
if False:
    os.remove("test.db")
    makeDatabase("test.db")
    sqldb = SqliteDB("test.db")
    sqldb.createTables()
    sqldb.addEntry("1", "1lkjsdf", 1)
    sqldb.addEntry("2", "1lkjsdf", 1)
    sqldb.addEntry("3", "1lkjsdf", 1)
    sqldb.addEntry("4", "4lkjsdf", 1)
    # sqldb.addEntry("4a",None , 2)
    sqldb.addEntry("5", "5lkjsdf", 1)
    sqldb.addEntry("6", "6lkjsdf", 1)
    sqldb.addEntry("7", "7lkjsdf", 1)
    sqldb.getURL("1", 1)
    sqldb.getURL("2", 1)
    sqldb.addExpertURL(1, "5lkjsdf",[1, 2, 3, 4, 5, 6, 7], 0)
    sqldb.addExpertURL(1, "2lkjsdf", [1, 7, 3, 1, 6, 3], 0)
    # sqldb.addEntry("8", None, 2)
    sqldb.addEntry("8", '', 1)
    sqldb.addEntry(9, "hidden", 1)
    sqldb.addExpertURL(1, "hidden", [1, 2, 3], 1)
    print "testing below"
    sqldb.finalize(1, 1, 3)
    print sqldb.getURLsToGrade("1", 1)
    sqldb.addGrade("1",1, "5lkjsdf", [1, 2, 3, 4])
    sqldb.addGrade("12",1, "asdf", 1)
    sqldb.addGrade("1", 1, "2kjla", 1)
    sqldb.addGrade("2", "1", "5lkjsdf", [4, 3, 2, 1])
    sqldb.wIDGradesSubmitted("1", 1)
    sqldb.getGrades("5", 1)
    sqldb.getExpertURLs(1)
    sqldb.compareToExpert("1",1)
    sqldb.check(1)
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.preprocessing import LabelEncoder
# NOTE(review): sklearn.externals.six, sklearn.cross_validation and
# sklearn.grid_search (used below) were removed in modern scikit-learn;
# this module targets an old (<0.20) release.
from sklearn.externals import six
from sklearn.base import clone
from sklearn.pipeline import _name_estimators
import numpy as np
import operator


class MajorityVoteClassifier(BaseEstimator,ClassifierMixin):
    """ A majority vote ensemble classifier

    Parameters
    ----------
    classifiers : array-like, shape = [n_classifiers]
        Different classifiers for the ensemble

    vote : str, {'classlabel', 'probability'}
        Default: 'classlabel'
        If 'classlabel' the prediction is based on the argmax of class
        labels. Else if 'probability', the argmax of the sum of
        probabilities is used to predict the class label
        (recommended for calibrated classifiers).

    weights : array-like, shape = [n_classifiers]
        Optional, default: None
        If a list of `int` or `float` values are provided, the
        classifiers are weithed by importance; Uses uniform weights
        if 'weights = None'
    """
    def __init__(self, classifiers,vote='classlabel', weights=None):
        self.classifiers = classifiers
        # Map auto-generated estimator names -> estimator, for get_params.
        self.named_classifiers = {key: value for key, value in _name_estimators(classifiers)}
        self.vote = vote
        self.weights = weights

    def fit(self, X, y):
        """ Fit classifiers.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Matrix of training samples.

        y : array-like, shape = [n_samples]
            Vector of target class labels.

        Returns
        -------
        self : object
        """
        # Use LabelEncoder to ensure class labels start
        # with 0, which is important for np.argmax
        # call in self.predict
        self.lablenc_ = LabelEncoder()
        self.lablenc_.fit(y)
        self.classes_ = self.lablenc_.classes_
        # Fit a cloned copy of each base classifier on the encoded labels.
        self.classifiers_ = []
        for clf in self.classifiers:
            fitted_clf = clone(clf).fit(X,self.lablenc_.transform(y))
            self.classifiers_.append(fitted_clf)
        return self

    def predict(self, X):
        """ Predict class labels for X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, Shape = [n_samples, n_features]
            Matrix of training samples

        Returns
        ----------
        maj_vote : array-like, shape = [n_samples]
            Predicted class labels.
        """
        if self.vote == 'probability':
            maj_vote = np.argmax(self.predict_proba(X),axis=1)
        else: # 'classlabel' vote
            # Collect results from clf.predict calls
            predictions = np.asarray([clf.predict(X) for clf in self.classifiers_]).T
            # Weighted majority vote per sample via bincount over the
            # encoded labels of each row.
            maj_vote = np.apply_along_axis(lambda x: np.argmax(np.bincount(x,weights=self.weights)),axis=1,arr=predictions)
        # Map encoded winners back to the original label space.
        maj_vote = self.lablenc_.inverse_transform(maj_vote)
        return maj_vote

    def predict_proba(self, X):
        """ Predict class probabilities for X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        ----------
        avg_proba : array-like, shape = [n_samples, n_classes]
            Weighted average probability for each class per sample.
        """
        probas = np.asarray([clf.predict_proba(X) for clf in self.classifiers_])
        avg_proba = np.average(probas,axis=0, weights=self.weights)
        return avg_proba

    def get_params(self, deep=True):
        """ Get classifier parameter names for GridSearch"""
        if not deep:
            return super(MajorityVoteClassifier,self).get_params(deep=False)
        else:
            # Expose nested parameters as "<name>__<param>" so GridSearchCV
            # can address each base classifier's settings.
            out = self.named_classifiers.copy()
            for name, step in six.iteritems(self.named_classifiers):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out


# ---- demo script: compare base classifiers with the majority-vote
# ensemble on the Iris data (classes 1 and 2, two features) ----
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder

iris = datasets.load_iris()
X,y = iris.data[50:,[1,2]],iris.target[50:]
le = LabelEncoder()
y = le.fit_transform(y)
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.5,random_state = 1)

from sklearn.cross_validation import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
import numpy as np

# Three heterogeneous base learners; LR and KNN are scale-sensitive, so
# they are wrapped in standardizing pipelines.
clf1 = LogisticRegression(penalty = 'l2',C = 0.001,random_state = 0)
clf2 = DecisionTreeClassifier(max_depth = 1,criterion = 'entropy',random_state = 0)
clf3 = KNeighborsClassifier(n_neighbors = 1,p=2,metric = 'minkowski')
pipe1 = Pipeline([['sc',StandardScaler()],['clf',clf1]])
pipe3 = Pipeline([['sc',StandardScaler()],['clf',clf3]])
clf_labels = ['Logistic Regression','Decision Tree','KNN']
print('10-fold cross validation:\n')
for clf,label in zip([pipe1,clf2,pipe3],clf_labels):
    scores = cross_val_score(estimator = clf, X=X_train, y=y_train, cv=10, scoring = 'roc_auc')
    print ("ROC AUC: %0.2f (+/- %0.2f) [%s]" % (scores.mean(),scores.std(),label))

mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])
clf_labels += ['Majority Voting']
all_clf = [pipe1, clf2, pipe3, mv_clf]
for clf, label in zip(all_clf, clf_labels):
    scores = cross_val_score(estimator=clf,X=X_train,y=y_train,cv=10,scoring='roc_auc')
    print("Accuracy: %0.2f (+/- %0.2f) [%s]"% (scores.mean(), scores.std(), label))

# ROC curves for all four classifiers on the held-out test split.
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import matplotlib.pyplot as plt

colors = ['black','orange','blue','green']
linestyles = [':', '--', '-.', '-']
for clf,label,clr,ls in zip(all_clf,clf_labels,colors,linestyles):
    #assuming the label of the positive class is 1
    y_pred = clf.fit(X_train,y_train).predict_proba(X_test)[:,1]
    fpr,tpr,thresholds = roc_curve(y_true = y_test,y_score = y_pred)
    roc_auc = auc(x= fpr,y=tpr)
    plt.plot(fpr,tpr,color = clr,linestyle = ls,label = '%s (auc = %0.2f)' % (label,roc_auc))

plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],linestyle='--',color='gray',linewidth=2)
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.grid()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()

# Grid-search over nested parameters of the ensemble (addressed via the
# names produced by get_params).
from sklearn.grid_search import GridSearchCV
params = {'decisiontreeclassifier__max_depth':[1,2],'pipeline-1__clf__C':[0.001,0.1,100.0]}
grid = GridSearchCV(estimator = mv_clf,param_grid=params,cv = 10,scoring = 'roc_auc')
grid.fit(X_train,y_train)

for params,mean_score,scores in grid.grid_scores_:
    print('%0.3f +/- %0.2f %r' % (mean_score,scores.std()/2,params))

print('Best parameters : %s' % grid.best_params_)
print('Accuracy: %.2f' % grid.best_score_)
# enigma2 skin engine: parses skin XML files (colors, fonts, window styles,
# screen/widget layouts) and applies the resulting attributes to GUI objects.
from Tools.Profile import profile
profile("LOAD:ElementTree")
import xml.etree.cElementTree
import os
profile("LOAD:enigma_skin")
from enigma import eSize, ePoint, eRect, gFont, eWindow, eLabel, ePixmap, eWindowStyleManager, addFont, gRGB, eWindowStyleSkinned, getDesktop
from Components.config import ConfigSubsection, ConfigText, config
from Components.Converter.Converter import Converter
from Components.Sources.Source import Source, ObsoleteSource
from Tools.Directories import resolveFilename, SCOPE_SKIN, SCOPE_FONTS, SCOPE_CURRENT_SKIN, SCOPE_CONFIG, fileExists, SCOPE_SKIN_IMAGE
from Tools.Import import my_import
from Tools.LoadPixmap import LoadPixmap
from Components.RcModel import rc_model
from Components.SystemInfo import SystemInfo

# name -> gRGB, filled from <colors> elements by loadSingleSkinData().
colorNames = {}
# Font aliases: name -> (face, size, height, width); height/width are used by
# the 'h'/'w' tokens of the coordinate mini-language in parseCoordinate().
fonts = {
	"Body": ("Regular", 18, 22, 16),
	"ChoiceList": ("Regular", 20, 24, 18),
}
# Free-form layout parameters from <parameters> elements; int or list of int.
parameters = {}

def dump(x, i=0):
	# Debug helper: recursively print a node tree with indentation.
	print " " * i + str(x)
	try:
		for n in x.childNodes:
			dump(n, i + 1)
	except:
		# NOTE(review): swallows everything (e.g. nodes without childNodes);
		# 'None' here is a no-op expression, not a return.
		None

class SkinError(Exception):
	# Raised for any malformed or unresolvable skin content; the message
	# includes the active primary skin so users can report the right skin.
	def __init__(self, message):
		self.msg = message

	def __str__(self):
		return "{%s}: %s. Please contact the skin's author!" % (config.skin.primary_skin.value, self.msg)

# List of (path_prefix, parsed skin DOM root), in load order.
dom_skins = [ ]

def addSkin(name, scope = SCOPE_SKIN):
	# read the skin
	# Parse a skin XML file and append its DOM to dom_skins.
	# Returns True on success, False if missing or unparsable.
	filename = resolveFilename(scope, name)
	if fileExists(filename):
		mpath = os.path.dirname(filename) + "/"
		try:
			dom_skins.append((mpath, xml.etree.cElementTree.parse(filename).getroot()))
		except:
			print "[SKIN ERROR] error in %s" % filename
			return False
		else:
			return True
	return False

def skin_user_skinname():
	# Returns the per-skin user override filename ("skin_user_<dir>.xml")
	# if it exists in the config scope, else None.
	name = "skin_user_" + config.skin.primary_skin.value[:config.skin.primary_skin.value.rfind('/')] + ".xml"
	filename = resolveFilename(SCOPE_CONFIG, name)
	if fileExists(filename):
		return name
	return None

# Pick a default skin appropriate for the hardware, with SD fallbacks.
config.skin = ConfigSubsection()
DEFAULT_SKIN = SystemInfo["HasFullHDSkinSupport"] and "PLi-FullNightHD/skin.xml" or "PLi-HD/skin.xml"
if not fileExists(resolveFilename(SCOPE_SKIN, DEFAULT_SKIN)):
	# in that case, fallback to Magic (which is an SD skin)
	DEFAULT_SKIN = "Magic/skin.xml"
if not fileExists(resolveFilename(SCOPE_SKIN, DEFAULT_SKIN)):
	DEFAULT_SKIN = "skin.xml"
config.skin.primary_skin = ConfigText(default=DEFAULT_SKIN)

# Module import time side effect: load user overrides, auxiliary skins, the
# configured primary skin (with fallback on failure), then the default skin.
profile("LoadSkin")
res = None
name = skin_user_skinname()
if name:
	res = addSkin(name, SCOPE_CONFIG)
if not name or not res:
	addSkin('skin_user.xml', SCOPE_CONFIG)
addSkin('skin_box.xml')
addSkin('skin_second_infobar.xml')
display_skin_id = 1
addSkin('skin_display.xml')
addSkin('skin_text.xml')
addSkin('skin_subtitles.xml')
try:
	if not addSkin(config.skin.primary_skin.value):
		raise SkinError, "primary skin not found"
except Exception, err:
	print "SKIN ERROR:", err
	skin = DEFAULT_SKIN
	if config.skin.primary_skin.value == skin:
		skin = 'skin.xml'
	print "defaulting to standard skin...", skin
	config.skin.primary_skin.value = skin
	addSkin(skin)
	del skin
addSkin('skin_default.xml')
profile("LoadSkinDefaultDone")

def parseCoordinate(s, e, size=0, font=None):
	# Evaluate one coordinate of the skin mini-language.
	#   s: coordinate string; e: extent of the parent along this axis;
	#   size: size of the widget (for "center"); font: alias for 'w'/'h'.
	# Tokens: "center", '*' (-> None), 'e' = extent, 'c' = e/2,
	# 'w'/'h' = font char width/height, '%' = e/100; anything non-trivial
	# falls through to eval() of the substituted arithmetic expression.
	# NOTE(review): eval() on skin-provided text — skins are trusted input here.
	s = s.strip()
	if s == "center": # for speed, can be common case
		val = (e - size)/2
	elif s == '*':
		return None
	else:
		try:
			val = int(s) # for speed
		except:
			# 't' occurs in "center", so this detects an embedded "center" token.
			if 't' in s:
				s = s.replace("center", str((e-size)/2.0))
			if 'e' in s:
				s = s.replace("e", str(e))
			if 'c' in s:
				s = s.replace("c", str(e/2.0))
			if 'w' in s:
				s = s.replace("w", "*" + str(fonts[font][3]))
			if 'h' in s:
				s = s.replace("h", "*" + str(fonts[font][2]))
			if '%' in s:
				s = s.replace("%", "*" + str(e/100.0))
			try:
				val = int(s) # for speed
			except:
				val = eval(s)
	if val < 0:
		return 0
	return int(val) # make sure an integer value is returned

def getParentSize(object, desktop):
	# Size of the widget's parent, or the desktop size when there is no parent.
	size = eSize()
	if object:
		parent = object.getParent()
		# For some widgets (e.g. ScrollLabel) the skin attributes are applied to
		# a child widget, instead of to the widget itself. In that case, the parent
		# we have here is not the real parent, but it is the main widget.
		# We have to go one level higher to get the actual parent.
		# We can detect this because the 'parent' will not have a size yet
		# (the main widget's size will be calculated internally, as soon as the child
		# widget has parsed the skin attributes)
		if parent and parent.size().isEmpty():
			parent = parent.getParent()
		if parent:
			size = parent.size()
		elif desktop:
			#widget has no parent, use desktop size instead for relative coordinates
			size = desktop.size()
	return size

def parseValuePair(s, scale, object = None, desktop = None, size = None):
	# Parse "x,y" into a scaled integer pair, resolving relative tokens
	# against the parent size when needed.
	x, y = s.split(',')
	parentsize = eSize()
	if object and ('c' in x or 'c' in y or 'e' in x or 'e' in y or '%' in x or '%' in y): # need parent size for ce%
		parentsize = getParentSize(object, desktop)
	xval = parseCoordinate(x, parentsize.width(), size and size.width() or 0)
	yval = parseCoordinate(y, parentsize.height(), size and size.height() or 0)
	return (xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1])

def parsePosition(s, scale, object = None, desktop = None, size = None):
	(x, y) = parseValuePair(s, scale, object, desktop, size)
	return ePoint(x, y)

def parseSize(s, scale, object = None, desktop = None):
	(x, y) = parseValuePair(s, scale, object, desktop)
	return eSize(x, y)

def parseFont(s, scale):
	# Resolve a font spec: either an alias from the 'fonts' table
	# or an explicit "Name;size" string; size is scaled by scale[0].
	try:
		f = fonts[s]
		name = f[0]
		size = f[1]
	except:
		name, size = s.split(';')
	return gFont(name, int(size) * scale[0][0] / scale[0][1])

def parseColor(s):
	# "#aarrggbb" hex or a name previously registered in colorNames.
	if s[0] != '#':
		try:
			return colorNames[s]
		except:
			raise SkinError("color '%s' must be #aarrggbb or valid named color" % s)
	return gRGB(int(s[1:], 0x10))

def collectAttributes(skinAttributes, node, context, skin_path_prefix=None, ignore=(), filenames=frozenset(("pixmap", "pointer", "seek_pointer", "backgroundPixmap", "selectionPixmap", "sliderPixmap", "scrollbarbackgroundPixmap"))):
	# Gather (attribute, value) pairs from an XML node into skinAttributes,
	# resolving filename attributes and parsing position/size via the layout
	# context so they become concrete coordinate tuples.
	# walk all attributes
	size = None
	pos = None
	font = None
	for attrib, value in node.items():
		if attrib not in ignore:
			if attrib in filenames:
				value = resolveFilename(SCOPE_CURRENT_SKIN, value, path_prefix=skin_path_prefix)
			# Bit of a hack this, really. When a window has a flag (e.g. wfNoBorder)
			# it needs to be set at least before the size is set, in order for the
			# window dimensions to be calculated correctly in all situations.
			# If wfNoBorder is applied after the size has been set, the window will fail to clear the title area.
			# Similar situation for a scrollbar in a listbox; when the scrollbar setting is applied after
			# the size, a scrollbar will not be shown until the selection moves for the first time
			if attrib == 'size':
				size = value.encode("utf-8")
			elif attrib == 'position':
				pos = value.encode("utf-8")
			elif attrib == 'font':
				font = value.encode("utf-8")
				skinAttributes.append((attrib, font))
			else:
				skinAttributes.append((attrib, value.encode("utf-8")))
	if pos is not None:
		pos, size = context.parse(pos, size, font)
		skinAttributes.append(('position', pos))
	if size is not None:
		skinAttributes.append(('size', size))

def morphRcImagePath(value):
	# Substitute the model-specific remote-control image for the default one.
	if rc_model.rcIsDefault() is False:
		if value == '/usr/share/enigma2/skin_default/rc.png' or value == '/usr/share/enigma2/skin_default/rcold.png':
			value = rc_model.getRcImg()
	return value

def loadPixmap(path, desktop):
	# Load a pixmap; a trailing "#option" suffix in the path is stripped.
	option = path.find("#")
	if option != -1:
		path = path[:option]
	ptr = LoadPixmap(morphRcImagePath(path), desktop)
	if ptr is None:
		raise SkinError("pixmap file %s not found!" % path)
	return ptr

class AttributeParser:
	# Applies parsed skin attributes to a GUI object. Each attribute name maps
	# to a method of the same name; unknown attributes are reported, not fatal.
	def __init__(self, guiObject, desktop, scale=((1,1),(1,1))):
		self.guiObject = guiObject
		self.desktop = desktop
		self.scaleTuple = scale
	def applyOne(self, attrib, value):
		# Dispatch by attribute name; AttributeError means "not implemented".
		try:
			getattr(self, attrib)(value)
		except AttributeError:
			print "[Skin] Attribute not implemented:", attrib, "value:", value
		except SkinError, ex:
			print "[Skin] Error:", ex
	def applyAll(self, attrs):
		for attrib, value in attrs:
			self.applyOne(attrib, value)
	def conditional(self, value):
		# Handled during screen processing; nothing to apply here.
		pass
	def position(self, value):
		# Pre-parsed tuples come from collectAttributes(); strings are parsed now.
		if isinstance(value, tuple):
			self.guiObject.move(ePoint(*value))
		else:
			self.guiObject.move(parsePosition(value, self.scaleTuple, self.guiObject, self.desktop, self.guiObject.csize()))
	def size(self, value):
		if isinstance(value, tuple):
			self.guiObject.resize(eSize(*value))
		else:
			self.guiObject.resize(parseSize(value, self.scaleTuple, self.guiObject, self.desktop))
	def title(self, value):
		self.guiObject.setTitle(_(value))
	def text(self, value):
		self.guiObject.setText(_(value))
	def font(self, value):
		self.guiObject.setFont(parseFont(value, self.scaleTuple))
	def zPosition(self, value):
		self.guiObject.setZPosition(int(value))
	def itemHeight(self, value):
		# NOTE(review): defined twice in this class (see below); the later
		# definition wins at class creation time. Both bodies are identical.
		self.guiObject.setItemHeight(int(value))
	def pixmap(self, value):
		ptr = loadPixmap(value, self.desktop)
		self.guiObject.setPixmap(ptr)
	def backgroundPixmap(self, value):
		ptr = loadPixmap(value, self.desktop)
		self.guiObject.setBackgroundPicture(ptr)
	def selectionPixmap(self, value):
		ptr = loadPixmap(value, self.desktop)
		self.guiObject.setSelectionPicture(ptr)
	def sliderPixmap(self, value):
		ptr = loadPixmap(value, self.desktop)
		self.guiObject.setSliderPicture(ptr)
	def scrollbarbackgroundPixmap(self, value):
		ptr = loadPixmap(value, self.desktop)
		self.guiObject.setScrollbarBackgroundPicture(ptr)
	def alphatest(self, value):
		self.guiObject.setAlphatest(
			{ "on": 1,
			  "off": 0,
			  "blend": 2,
			}[value])
	def scale(self, value):
		self.guiObject.setScale(1)
	def orientation(self, value): # used by eSlider
		try:
			self.guiObject.setOrientation(*
				{ "orVertical": (self.guiObject.orVertical, False),
				  "orTopToBottom": (self.guiObject.orVertical, False),
				  "orBottomToTop": (self.guiObject.orVertical, True),
				  "orHorizontal": (self.guiObject.orHorizontal, False),
				  "orLeftToRight": (self.guiObject.orHorizontal, False),
				  "orRightToLeft": (self.guiObject.orHorizontal, True),
				}[value])
		except KeyError:
			# NOTE(review): "oprientation" typo is in the original message.
			print "oprientation must be either orVertical or orHorizontal!"
	def valign(self, value):
		try:
			self.guiObject.setVAlign(
				{ "top": self.guiObject.alignTop,
				  "center": self.guiObject.alignCenter,
				  "bottom": self.guiObject.alignBottom
				}[value])
		except KeyError:
			print "valign must be either top, center or bottom!"
	def halign(self, value):
		try:
			self.guiObject.setHAlign(
				{ "left": self.guiObject.alignLeft,
				  "center": self.guiObject.alignCenter,
				  "right": self.guiObject.alignRight,
				  "block": self.guiObject.alignBlock
				}[value])
		except KeyError:
			print "halign must be either left, center, right or block!"
	def textOffset(self, value):
		x, y = value.split(',')
		self.guiObject.setTextOffset(ePoint(int(x) * self.scaleTuple[0][0] / self.scaleTuple[0][1], int(y) * self.scaleTuple[1][0] / self.scaleTuple[1][1]))
	def flags(self, value):
		# Comma-separated eWindow flag names, e.g. "wfNoBorder".
		flags = value.split(',')
		for f in flags:
			try:
				fv = eWindow.__dict__[f]
				self.guiObject.setFlag(fv)
			except KeyError:
				print "illegal flag %s!" % f
	def backgroundColor(self, value):
		self.guiObject.setBackgroundColor(parseColor(value))
	def backgroundColorSelected(self, value):
		self.guiObject.setBackgroundColorSelected(parseColor(value))
	def foregroundColor(self, value):
		self.guiObject.setForegroundColor(parseColor(value))
	def foregroundColorSelected(self, value):
		self.guiObject.setForegroundColorSelected(parseColor(value))
	def shadowColor(self, value):
		self.guiObject.setShadowColor(parseColor(value))
	def selectionDisabled(self, value):
		self.guiObject.setSelectionEnable(0)
	def transparent(self, value):
		self.guiObject.setTransparent(int(value))
	def borderColor(self, value):
		self.guiObject.setBorderColor(parseColor(value))
	def borderWidth(self, value):
		self.guiObject.setBorderWidth(int(value))
	def scrollbarMode(self, value):
		self.guiObject.setScrollbarMode(getattr(self.guiObject, value))
		#	{ "showOnDemand": self.guiObject.showOnDemand,
		#		"showAlways": self.guiObject.showAlways,
		#		"showNever": self.guiObject.showNever,
		#		"showLeft": self.guiObject.showLeft
		#	}[value])
	def enableWrapAround(self, value):
		self.guiObject.setWrapAround(True)
	def itemHeight(self, value):
		# Duplicate of the earlier itemHeight; this one is the effective binding.
		self.guiObject.setItemHeight(int(value))
	def pointer(self, value):
		(name, pos) = value.split(':')
		pos = parsePosition(pos, self.scaleTuple)
		ptr = loadPixmap(name, self.desktop)
		self.guiObject.setPointer(0, ptr, pos)
	def seek_pointer(self, value):
		(name, pos) = value.split(':')
		pos = parsePosition(pos, self.scaleTuple)
		ptr = loadPixmap(name, self.desktop)
		self.guiObject.setPointer(1, ptr, pos)
	def shadowOffset(self, value):
		self.guiObject.setShadowOffset(parsePosition(value, self.scaleTuple))
	def noWrap(self, value):
		self.guiObject.setNoWrap(1)

def applySingleAttribute(guiObject, desktop, attrib, value, scale = ((1,1),(1,1))):
	# Someone still using applySingleAttribute?
	AttributeParser(guiObject, desktop, scale).applyOne(attrib, value)

def applyAllAttributes(guiObject, desktop, attributes, scale):
	AttributeParser(guiObject, desktop, scale).applyAll(attributes)

def loadSingleSkinData(desktop, skin, path_prefix):
	"""loads skin data like colors, windowstyle etc."""
	assert skin.tag == "skin", "root element in skin must be 'skin'!"
	# <output id=0>: framebuffer resolution; also seeds layout 'parameters'
	# with HD defaults when the vertical resolution is Full HD or better.
	for c in skin.findall("output"):
		id = c.attrib.get('id')
		if id:
			id = int(id)
		else:
			id = 0
		if id == 0: # framebuffer
			for res in c.findall("resolution"):
				get_attr = res.attrib.get
				xres = get_attr("xres")
				if xres:
					xres = int(xres)
				else:
					xres = 720
				yres = get_attr("yres")
				if yres:
					yres = int(yres)
				else:
					yres = 576
				bpp = get_attr("bpp")
				if bpp:
					bpp = int(bpp)
				else:
					bpp = 32
				#print "Resolution:", xres,yres,bpp
				from enigma import gMainDC
				gMainDC.getInstance().setResolution(xres, yres)
				desktop.resize(eSize(xres, yres))
				if bpp != 32:
					# load palette (not yet implemented)
					pass
				if yres >= 1080:
					parameters["FileListName"] = (68,4,1000,34)
					parameters["FileListIcon"] = (7,4,52,37)
					parameters["FileListMultiName"] = (90,3,1000,32)
					parameters["FileListMultiIcon"] = (45, 4, 30, 30)
					parameters["FileListMultiLock"] = (2,0,36,36)
					parameters["ChoicelistDash"] = (0,3,1000,30)
					parameters["ChoicelistName"] = (68,3,1000,30)
					parameters["ChoicelistIcon"] = (7,0,52,38)
					parameters["PluginBrowserName"] = (180,8,38)
					parameters["PluginBrowserDescr"] = (180,42,25)
					parameters["PluginBrowserIcon"] = (15,8,150,60)
					parameters["PluginBrowserDownloadName"] = (120,8,38)
					parameters["PluginBrowserDownloadDescr"] = (120,42,25)
					parameters["PluginBrowserDownloadIcon"] = (15,0,90,76)
					parameters["ServiceInfo"] = (0,0,450,50)
					parameters["ServiceInfoLeft"] = (0,0,450,45)
					parameters["ServiceInfoRight"] = (450,0,1000,45)
					parameters["SelectionListDescr"] = (45,3,1000,32)
					parameters["SelectionListLock"] = (0,2,36,36)
					parameters["ConfigListSeperator"] = 300
					parameters["VirtualKeyboard"] = (68,68)
					parameters["PartnerBoxEntryListName"] = (8,2,225,38)
					parameters["PartnerBoxEntryListIP"] = (180,2,225,38)
					parameters["PartnerBoxEntryListPort"] = (405,2,150,38)
					parameters["PartnerBoxEntryListType"] = (615,2,150,38)
					parameters["PartnerBoxTimerServicename"] = (0,0,45)
					parameters["PartnerBoxTimerName"] = (0,42,30)
					parameters["PartnerBoxE1TimerTime"] = (0,78,255,30)
					parameters["PartnerBoxE1TimerState"] = (255,78,255,30)
					parameters["PartnerBoxE2TimerTime"] = (0,78,225,30)
					parameters["PartnerBoxE2TimerState"] = (225,78,225,30)
					parameters["PartnerBoxE2TimerIcon"] = (1050,8,20,20)
					parameters["PartnerBoxE2TimerIconRepeat"] = (1050,38,20,20)
					parameters["PartnerBoxBouquetListName"] = (0,0,45)
					parameters["PartnerBoxChannelListName"] = (0,0,45)
					parameters["PartnerBoxChannelListTitle"] = (0,42,30)
					parameters["PartnerBoxChannelListTime"] = (0,78,225,30)
					parameters["HelpMenuListHlp"] = (0,0,900,42)
					parameters["HelpMenuListExtHlp0"] = (0,0,900,39)
					parameters["HelpMenuListExtHlp1"] = (0,42,900,30)
					parameters["AboutHddSplit"] = 1
					parameters["DreamexplorerName"] = (62,0,1200,38)
					parameters["DreamexplorerIcon"] = (15,4,30,30)
					parameters["PicturePlayerThumb"] = (30,285,45,300,30,25)
					parameters["PlayListName"] = (38,2,1000,34)
					parameters["PlayListIcon"] = (7,7,24,24)
					parameters["SHOUTcastListItem"] = (30,27,35,96,35,33,60,32)
	# <include filename=...>: recursively load another skin file.
	for skininclude in skin.findall("include"):
		filename = skininclude.attrib.get("filename")
		if filename:
			skinfile = resolveFilename(SCOPE_CURRENT_SKIN, filename, path_prefix=path_prefix)
			if not fileExists(skinfile):
				skinfile = resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix)
			if fileExists(skinfile):
				print "[SKIN] loading include:", skinfile
				loadSkin(skinfile)
	# <colors>: register named colors for parseColor().
	for c in skin.findall("colors"):
		for color in c.findall("color"):
			get_attr = color.attrib.get
			name = get_attr("name")
			color = get_attr("value")
			if name and color:
				colorNames[name] = parseColor(color)
				#print "Color:", name, color
			else:
				raise SkinError("need color and name, got %s %s" % (name, color))
	# <fonts>: register font files and font aliases.
	for c in skin.findall("fonts"):
		for font in c.findall("font"):
			get_attr = font.attrib.get
			filename = get_attr("filename", "<NONAME>")
			name = get_attr("name", "Regular")
			scale = get_attr("scale")
			if scale:
				scale = int(scale)
			else:
				scale = 100
			is_replacement = get_attr("replacement") and True or False
			render = get_attr("render")
			if render:
				render = int(render)
			else:
				render = 0
			resolved_font = resolveFilename(SCOPE_FONTS, filename, path_prefix=path_prefix)
			if not fileExists(resolved_font): #when font is not available look at current skin path
				skin_path = resolveFilename(SCOPE_CURRENT_SKIN, filename)
				if fileExists(skin_path):
					resolved_font = skin_path
			addFont(resolved_font, name, scale, is_replacement, render)
			#print "Font: ", resolved_font, name, scale, is_replacement
		for alias in c.findall("alias"):
			get = alias.attrib.get
			try:
				name = get("name")
				font = get("font")
				size = int(get("size"))
				height = int(get("height", size)) # to be calculated some day
				width = int(get("width", size))
				global fonts
				fonts[name] = (font, size, height, width)
			except Exception, ex:
				print "[SKIN] bad font alias", ex
	# <parameters>: arbitrary skin layout parameters (int or comma list).
	for c in skin.findall("parameters"):
		for parameter in c.findall("parameter"):
			get = parameter.attrib.get
			try:
				name = get("name")
				value = get("value")
				parameters[name] = "," in value and map(int, value.split(",")) or int(value)
			except Exception, ex:
				print "[SKIN] bad parameter", ex
	# <subtitles>: per-style subtitle fonts and colors.
	for c in skin.findall("subtitles"):
		from enigma import eSubtitleWidget
		scale = ((1,1),(1,1))
		for substyle in c.findall("sub"):
			get_attr = substyle.attrib.get
			font = parseFont(get_attr("font"), scale)
			col = get_attr("foregroundColor")
			if col:
				foregroundColor = parseColor(col)
				haveColor = 1
			else:
				foregroundColor = gRGB(0xFFFFFF)
				haveColor = 0
			col = get_attr("borderColor")
			if col:
				borderColor = parseColor(col)
			else:
				borderColor = gRGB(0)
			borderwidth = get_attr("borderWidth")
			if borderwidth is None:
				# default: use a subtitle border
				borderWidth = 3
			else:
				borderWidth = int(borderwidth)
			face = eSubtitleWidget.__dict__[get_attr("name")]
			eSubtitleWidget.setFontStyle(face, font, haveColor, foregroundColor, borderColor, borderWidth)
	# <windowstyle>: title font/offset, border pixmaps and style colors.
	for windowstyle in skin.findall("windowstyle"):
		style = eWindowStyleSkinned()
		style_id = windowstyle.attrib.get("id")
		if style_id:
			style_id = int(style_id)
		else:
			style_id = 0
		# defaults
		font = gFont("Regular", 20)
		offset = eSize(20, 5)
		for title in windowstyle.findall("title"):
			get_attr = title.attrib.get
			offset = parseSize(get_attr("offset"), ((1,1),(1,1)))
			font = parseFont(get_attr("font"), ((1,1),(1,1)))
		style.setTitleFont(font); style.setTitleOffset(offset)
		#print "  ", font, offset
		for borderset in windowstyle.findall("borderset"):
			bsName = str(borderset.attrib.get("name"))
			for pixmap in borderset.findall("pixmap"):
				get_attr = pixmap.attrib.get
				bpName = get_attr("pos")
				filename = get_attr("filename")
				if filename and bpName:
					png = loadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, filename, path_prefix=path_prefix), desktop)
					style.setPixmap(eWindowStyleSkinned.__dict__[bsName], eWindowStyleSkinned.__dict__[bpName], png)
				#print "  borderset:", bpName, filename
		for color in windowstyle.findall("color"):
			get_attr = color.attrib.get
			colorType = get_attr("name")
			color = parseColor(get_attr("color"))
			try:
				style.setColor(eWindowStyleSkinned.__dict__["col" + colorType], color)
			except:
				raise SkinError("Unknown color %s" % colorType)
				#pass
			#print "  color:", type, color
		x = eWindowStyleManager.getInstance()
		x.setStyle(style_id, style)
	# <margin>: per-display safe-area margins.
	for margin in skin.findall("margin"):
		style_id = margin.attrib.get("id")
		if style_id:
			style_id = int(style_id)
		else:
			style_id = 0
		r = eRect(0,0,0,0)
		v = margin.attrib.get("left")
		if v:
			r.setLeft(int(v))
		v = margin.attrib.get("top")
		if v:
			r.setTop(int(v))
		v = margin.attrib.get("right")
		if v:
			r.setRight(int(v))
		v = margin.attrib.get("bottom")
		if v:
			r.setBottom(int(v))
		# the "desktop" parameter is hardcoded to the UI screen, so we must ask
		# for the one that this actually applies to.
		getDesktop(style_id).setMargins(r)

# screen name -> (XML element, skin path prefix)
dom_screens = {}

def loadSkin(name, scope = SCOPE_SKIN):
	# Now a utility for plugins to add skin data to the screens
	global dom_screens, display_skin_id
	filename = resolveFilename(scope, name)
	if fileExists(filename):
		path = os.path.dirname(filename) + "/"
		for elem in xml.etree.cElementTree.parse(filename).getroot():
			if elem.tag == 'screen':
				name = elem.attrib.get('name', None)
				if name:
					sid = elem.attrib.get('id', None)
					if sid and (sid != display_skin_id):
						# not for this display
						elem.clear()
						continue
					if name in dom_screens:
						print "loadSkin: Screen already defined elsewhere:", name
						elem.clear()
					else:
						dom_screens[name] = (elem, path)
				else:
					elem.clear()
			else:
				elem.clear()

def loadSkinData(desktop):
	# Kinda hackish, but this is called once by mytest.py
	global dom_skins
	skins = dom_skins[:]
	skins.reverse()
	for (path, dom_skin) in skins:
		loadSingleSkinData(desktop, dom_skin, path)
		for elem in dom_skin:
			if elem.tag == 'screen':
				name = elem.attrib.get('name', None)
				if name:
					sid = elem.attrib.get('id', None)
					if sid and (sid != display_skin_id):
						# not for this display
						elem.clear()
						continue
					if name in dom_screens:
						# Kill old versions, save memory
						dom_screens[name][0].clear()
					dom_screens[name] = (elem, path)
				else:
					# without name, it's useless!
					elem.clear()
			else:
				# non-screen element, no need for it any longer
				elem.clear()
	# no longer needed, we know where the screens are now.
	del dom_skins

class additionalWidget:
	# Simple record for eLabel/ePixmap widgets declared directly in a skin.
	pass

# Helper to convert a value to a smart tuple ("x,y" <-> (x, y)).
class SizeTuple(tuple):
	def split(self, *args):
		return (str(self[0]), str(self[1]))
	def strip(self, *args):
		return '%s,%s' % self
	def __str__(self):
		return '%s,%s' % self

class SkinContext:
	# A rectangular layout region; parse() carves "top"/"bottom"/"left"/
	# "right"/"fill" areas out of it, shrinking the remaining space.
	def __init__(self, parent=None, pos=None, size=None, font=None):
		if parent is not None:
			if pos is not None:
				pos, size = parent.parse(pos, size, font)
				self.x, self.y = pos
				self.w, self.h = size
			else:
				self.x = None
				self.y = None
				self.w = None
				self.h = None
	def __str__(self):
		return "Context (%s,%s)+(%s,%s) " % (self.x, self.y, self.w, self.h)
	def parse(self, pos, size, font):
		if pos == "fill":
			pos = (self.x, self.y)
			size = (self.w, self.h)
			self.w = 0
			self.h = 0
		else:
			w,h = size.split(',')
			w = parseCoordinate(w, self.w, 0, font)
			h = parseCoordinate(h, self.h, 0, font)
			if pos == "bottom":
				pos = (self.x, self.y + self.h - h)
				size = (self.w, h)
				self.h -= h
			elif pos == "top":
				pos = (self.x, self.y)
				size = (self.w, h)
				self.h -= h
				self.y += h
			elif pos == "left":
				pos = (self.x, self.y)
				size = (w, self.h)
				self.x += w
				self.w -= w
			elif pos == "right":
				pos = (self.x + self.w - w, self.y)
				size = (w, self.h)
				self.w -= w
			else:
				size = (w, h)
				pos = pos.split(',')
				pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + parseCoordinate(pos[1], self.h, size[1], font))
		return (SizeTuple(pos), SizeTuple(size))

class SkinContextStack(SkinContext):
	# A context that stacks things instead of aligning them
	# (same placement rules as SkinContext, but the region is not consumed).
	def parse(self, pos, size, font):
		if pos == "fill":
			pos = (self.x, self.y)
			size = (self.w, self.h)
		else:
			w,h = size.split(',')
			w = parseCoordinate(w, self.w, 0, font)
			h = parseCoordinate(h, self.h, 0, font)
			if pos == "bottom":
				pos = (self.x, self.y + self.h - h)
				size = (self.w, h)
			elif pos == "top":
				pos = (self.x, self.y)
				size = (self.w, h)
			elif pos == "left":
				pos = (self.x, self.y)
				size = (w, self.h)
			elif pos == "right":
				pos = (self.x + self.w - w, self.y)
				size = (w, self.h)
			else:
				size = (w, h)
				pos = pos.split(',')
				pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + parseCoordinate(pos[1], self.h, size[1], font))
		return (SizeTuple(pos), SizeTuple(size))

def readSkin(screen, skin, names, desktop):
	# Resolve and apply the skin for 'screen': look the name(s) up in
	# dom_screens, fall back to an embedded skin on the screen object, then
	# walk all widget/applet/panel elements and attach parsed attributes.
	if not isinstance(names, list):
		names = [names]
	# try all skins, first existing one have priority
	global dom_screens
	for n in names:
		myscreen, path = dom_screens.get(n, (None,None))
		if myscreen is not None:
			# use this name for debug output
			name = n
			break
	else:
		name = "<embedded-in-'%s'>" % screen.__class__.__name__
	# otherwise try embedded skin
	if myscreen is None:
		myscreen = getattr(screen, "parsedSkin", None)
	# try uncompiled embedded skin
	if myscreen is None and getattr(screen, "skin", None):
		skin = screen.skin
		print "[SKIN] Parsing embedded skin", name
		if isinstance(skin, tuple):
			for s in skin:
				candidate = xml.etree.cElementTree.fromstring(s)
				if candidate.tag == 'screen':
					sid = candidate.attrib.get('id', None)
					if (not sid) or (int(sid) == display_skin_id):
						myscreen = candidate
						break;
			else:
				print "[SKIN] Hey, no suitable screen!"
		else:
			myscreen = xml.etree.cElementTree.fromstring(skin)
		if myscreen:
			screen.parsedSkin = myscreen
	if myscreen is None:
		print "[SKIN] No skin to read..."
		myscreen = screen.parsedSkin = xml.etree.cElementTree.fromstring("<screen></screen>")
	screen.skinAttributes = [ ]
	skin_path_prefix = getattr(screen, "skin_path", path)
	# Outer context is the full desktop; the screen's own position/size
	# attributes then define the inner context for its children.
	context = SkinContextStack()
	s = desktop.bounds()
	context.x = s.left()
	context.y = s.top()
	context.w = s.width()
	context.h = s.height()
	del s
	collectAttributes(screen.skinAttributes, myscreen, context, skin_path_prefix, ignore=("name",))
	context = SkinContext(context, myscreen.attrib.get('position'), myscreen.attrib.get('size'))
	screen.additionalWidgets = [ ]
	screen.renderer = [ ]
	visited_components = set()
	# now walk all widgets and stuff
	def process_none(widget, context):
		pass
	def process_widget(widget, context):
		get_attr = widget.attrib.get
		# ok, we either have 1:1-mapped widgets ('old style'), or 1:n-mapped
		# widgets (source->renderer).
		wname = get_attr('name')
		wsource = get_attr('source')
		if wname is None and wsource is None:
			print "widget has no name and no source!"
			return
		if wname:
			#print "Widget name=", wname
			visited_components.add(wname)
			# get corresponding 'gui' object
			try:
				attributes = screen[wname].skinAttributes = [ ]
			except:
				raise SkinError("component with name '" + wname + "' was not found in skin of screen '" + name + "'!")
			# assert screen[wname] is not Source
			collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('name',))
		elif wsource:
			# get corresponding source
			#print "Widget source=", wsource
			while True: # until we found a non-obsolete source
				# parse our current "wsource", which might specifiy a "related screen" before the dot,
				# for example to reference a parent, global or session-global screen.
				scr = screen
				# resolve all path components
				path = wsource.split('.')
				while len(path) > 1:
					scr = screen.getRelatedScreen(path[0])
					if scr is None:
						#print wsource
						#print name
						raise SkinError("specified related screen '" + wsource + "' was not found in screen '" + name + "'!")
					path = path[1:]
				# resolve the source.
				source = scr.get(path[0])
				if isinstance(source, ObsoleteSource):
					# however, if we found an "obsolete source", issue warning, and resolve the real source.
					print "WARNING: SKIN '%s' USES OBSOLETE SOURCE '%s', USE '%s' INSTEAD!" % (name, wsource, source.new_source)
					print "OBSOLETE SOURCE WILL BE REMOVED %s, PLEASE UPDATE!" % (source.removal_date)
					if source.description:
						print source.description
					wsource = source.new_source
				else:
					# otherwise, use that source.
					break
			if source is None:
				raise SkinError("source '" + wsource + "' was not found in screen '" + name + "'!")
			wrender = get_attr('render')
			if not wrender:
				raise SkinError("you must define a renderer with render= for source '%s'" % wsource)
			for converter in widget.findall("convert"):
				ctype = converter.get('type')
				assert ctype, "'convert'-tag needs a 'type'-attribute"
				#print "Converter:", ctype
				try:
					parms = converter.text.strip()
				except:
					parms = ""
				#print "Params:", parms
				converter_class = my_import('.'.join(("Components", "Converter", ctype))).__dict__.get(ctype)
				# Reuse an existing converter with identical arguments if the
				# source already has one downstream; otherwise chain a new one.
				c = None
				for i in source.downstream_elements:
					if isinstance(i, converter_class) and i.converter_arguments == parms:
						c = i
				if c is None:
					c = converter_class(parms)
					c.connect(source)
				source = c
			renderer_class = my_import('.'.join(("Components", "Renderer", wrender))).__dict__.get(wrender)
			renderer = renderer_class() # instantiate renderer
			renderer.connect(source) # connect to source
			attributes = renderer.skinAttributes = [ ]
			collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('render', 'source'))
			screen.renderer.append(renderer)
	def process_applet(widget, context):
		# <applet type="onLayoutFinish">: compile embedded code to run after layout.
		try:
			codeText = widget.text.strip()
			widgetType = widget.attrib.get('type')
			code = compile(codeText, "skin applet", "exec")
		except Exception, ex:
			raise SkinError("applet failed to compile: " + str(ex))
		if widgetType == "onLayoutFinish":
			screen.onLayoutFinish.append(code)
		else:
			raise SkinError("applet type '%s' unknown!" % widgetType)
	def process_elabel(widget, context):
		w = additionalWidget()
		w.widget = eLabel
		w.skinAttributes = [ ]
		collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',))
		screen.additionalWidgets.append(w)
	def process_epixmap(widget, context):
		w = additionalWidget()
		w.widget = ePixmap
		w.skinAttributes = [ ]
		collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',))
		screen.additionalWidgets.append(w)
	def process_screen(widget, context):
		# Process all children; a 'conditional' attribute lists component names,
		# at least one of which must exist on the screen for the child to apply.
		for w in widget.getchildren():
			conditional = w.attrib.get('conditional')
			if conditional and not [i for i in conditional.split(",") if i in screen.keys()]:
				continue
			p = processors.get(w.tag, process_none)
			try:
				p(w, context)
			except SkinError, e:
				print "[Skin] SKIN ERROR in screen '%s' widget '%s':" % (name, w.tag), e
	def process_panel(widget, context):
		# <panel name=...> embeds another screen; layout="stack" overlays
		# children instead of consuming space.
		n = widget.attrib.get('name')
		if n:
			try:
				s = dom_screens[n]
			except KeyError:
				print "[SKIN] Unable to find screen '%s' referred in screen '%s'" % (n, name)
			else:
				process_screen(s[0], context)
		layout = widget.attrib.get('layout')
		if layout == 'stack':
			cc = SkinContextStack
		else:
			cc = SkinContext
		try:
			c = cc(context, widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'))
		except Exception, ex:
			raise SkinError("Failed to create skincontext (%s,%s,%s) in %s: %s" % (widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'), context, ex) )
		process_screen(widget, c)
	processors = {
		None: process_none,
		"widget": process_widget,
		"applet": process_applet,
		"eLabel": process_elabel,
		"ePixmap": process_epixmap,
		"panel": process_panel
	}
	try:
		context.x = 0 # reset offsets, all components are relative to screen
		context.y = 0 # coordinates.
		process_screen(myscreen, context)
	except Exception, e:
		print "[Skin] SKIN ERROR in %s:" % name, e
	from Components.GUIComponent import GUIComponent
	nonvisited_components = [x for x in set(screen.keys()) - visited_components if isinstance(x, GUIComponent)]
	assert not nonvisited_components, "the following components in %s don't have a skin entry: %s" % (name, ', '.join(nonvisited_components))
	# This may look pointless, but it unbinds 'screen' from the nested scope. A better
	# solution is to avoid the nested scope above and use the context object to pass
	# things around.
	screen = None
	visited_components = None
import logging
import tornado.escape
import tornado.ioloop
import tornado.web
import tornado.options
import tornado.websocket
import tornado.httpserver
import os.path
from tornado.concurrent import Future
from tornado import gen
from tornado.options import define, options, parse_command_line
import socket
import fcntl
import struct
import random

define("port", default=8888, help="run on the given port", type=int)
define("debug", default=False, help="run in debug mode")

import multiprocessing
import controle
import time
import os
import signal
import subprocess
import sys
from platform import uname

# Browser command used for the kiosk UI (fullscreen Midori).
NAVEGADOR = 'midori -e Fullscreen -a'
# Period, in milliseconds, of the HTML refresh callback.
TEMPO_MS_ATUALIZACAO_HTML = 500

# Currently connected websocket clients.
clients = []
# One queue per telemetry channel, filled by the control process and
# drained by the periodic HTML update task below.
queue_joyx = multiprocessing.Queue()
queue_joyy = multiprocessing.Queue()
queue_joyz = multiprocessing.Queue()
queue_velocidade = multiprocessing.Queue()
queue_direcao = multiprocessing.Queue()
queue_distancia = multiprocessing.Queue()
queue_joy_botoes = multiprocessing.Queue()

def inicia_navegador():
    # Launch the kiosk browser pointed at this server, in its own session
    # (os.setsid) so it can be signalled independently.
    navegador = subprocess.Popen([NAVEGADOR+' 192.168.42.1:8888'], \
                                 stdout=subprocess.PIPE, \
                                 shell=True, preexec_fn=os.setsid)

def fecha_navegador():
    # Kill every browser process reported by pgrep.
    # NOTE(review): pgrep is handed the full NAVEGADOR command line, but it
    # matches on process name -- verify this actually finds midori.
    processos = subprocess.Popen(['pgrep', NAVEGADOR], stdout=subprocess.PIPE)
    print 'PID dos processos', processos.stdout
    for pid in processos.stdout:
        os.kill(int(pid), signal.SIGTERM)
        try:
            time.sleep(3)
            # Signal 0 only checks for existence of the process.
            os.kill(int(pid), 0)
            # NOTE(review): pid here is the raw text line; '%d' on a str
            # would raise TypeError if this branch is ever reached -- confirm.
            print u'erro: o processo %d ainda existe' % pid
        except OSError as ex:
            continue

def get_ip_address():
    # Return the IP address of the first working interface (eth0, then
    # wlan0), falling back to loopback.  Seen at:
    # http://code.activestate.com/recipes/439094-get-the-ip-address-associated-with-a-network-inter/
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        ifname = 'eth0'
        return socket.inet_ntoa(fcntl.ioctl( \
            s.fileno(), \
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', ifname[:15]) \
            )[20:24])
    except:
        try:
            ifname = 'wlan0'
            return socket.inet_ntoa(fcntl.ioctl( \
                s.fileno(), \
                0x8915,  # SIOCGIFADDR
                struct.pack('256s', ifname[:15]) \
                )[20:24])
        except:
            return "127.0.0.1"

def get_ip_address_interface(ifname):
    # Return the IP address of the network interface <ifname>.  Seen at:
    # http://code.activestate.com/recipes/439094-get-the-ip-address-associated-with-a-network-inter/
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        return socket.inet_ntoa(fcntl.ioctl( \
            s.fileno(), \
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', ifname[:15]) \
            )[20:24])
    except:
        return "0.0.0.0"

class MainHandler(tornado.web.RequestHandler):
    # Serves GET/POST for the main page.
    def get(self):
        # The HTML template can be rendered with useful values passed as
        # arguments; the template variables must use these keyword names.
        self.render("index.html", title="LAVAGEM A SECO", \
                    ip_host=get_ip_address()+":"+str(options.port), \
                    msg_status="LIGADO")

class WebSocketHandler(tornado.websocket.WebSocketHandler):
    # Each client connects to the websocket server itself.  When a new
    # connection arrives, the client is recorded.
    def open(self):
        print 'tornado: websocket: aviso: nova conexão de um cliente'
        clients.append(self)
        self.write_message("connected")

    # Called when a client sends a message; the queue put/get calls that
    # forward it to the control process belong here.
    def on_message(self, message):
        print 'tornado: websocket: aviso: nova mensagem: %s' % message
        q = self.application.settings.get('queue')
        q.put(message)

    # To avoid sending data to clients that no longer exist, drop them
    # from the list on disconnect, then restart the kiosk browser.
    def on_close(self):
        print 'tornado: websocket: aviso: conexão finalizada/perdida'
        clients.remove(self)
        fecha_navegador()
        inicia_navegador()

def envia_cmd_websocket(cmd, arg):
    # Helper for the repetitive job of messaging every client: broadcast a
    # command and its argument to all connected clients.
    for c in clients:
        c.write_message(cmd+";"+arg)

def tarefa_atualizacao_html():
    # Invoked periodically; refreshes the updatable elements of the HTML
    # page from the telemetry queues.
    envia_cmd_websocket("lan", get_ip_address())
    envia_cmd_websocket("random", str(random.randint(0,1000)))
    # Only send a channel when its queue has something in it.
    if not queue_joyx.empty():
        resultado = queue_joyx.get()
        envia_cmd_websocket("joyx", str(resultado)[:6])
    if not queue_joyy.empty():
        resultado = queue_joyy.get()
        envia_cmd_websocket("joyy", str(resultado)[:6])
    if not queue_joyz.empty():
        resultado = queue_joyz.get()
        envia_cmd_websocket("joyz", str(resultado)[:6])
    if not queue_joy_botoes.empty():
        resultado = queue_joy_botoes.get()
        envia_cmd_websocket("b", str(resultado))
    if not queue_velocidade.empty():
        resultado = queue_velocidade.get()
        envia_cmd_websocket("v", str(resultado))
    if not queue_direcao.empty():
        resultado = queue_direcao.get()
        envia_cmd_websocket("d", str(resultado))
    if not queue_distancia.empty():
        resultado = queue_distancia.get()
        envia_cmd_websocket("x", str(resultado)[:6])

def main():
    print u"Iniciando o servidor Tornado"
    fecha_navegador()
    tarefa_controle = multiprocessing.Queue()
    # This loop reads joystick data and sends it on; without it no
    # joystick response is handled at all.
    controle_loop = controle.ControleLavagem(tarefa_controle, \
                                             queue_joyx, \
                                             queue_joyy, \
                                             queue_joyz, \
                                             queue_joy_botoes, \
                                             queue_velocidade, \
                                             queue_direcao, \
                                             queue_distancia)
    controle_loop.daemon = True
    controle_loop.start()
    # Wait a little so the task is really ready.
    # NOTE(review): would explicit synchronisation be preferable to sleep?
    time.sleep(1)
    tarefa_controle.put("Testando Tarefa :)")
    parse_command_line()
    app = tornado.web.Application(
        [
            (r"/", MainHandler),
            (r"/ws", WebSocketHandler)
        ],
        cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        xsrf_cookies=True,
        debug=options.debug,
        autoreload=True,
        queue=tarefa_controle,
        )
    # Port the server will listen on.
    app.listen(options.port)
    # Create the server loop but do not start it yet.
    main_loop = tornado.ioloop.IOLoop.instance()
    # The main periodic task of the system: reading and actuation.
    tarefa_atualizacao_html_loop = tornado.ioloop.PeriodicCallback(tarefa_atualizacao_html,\
                                                                   TEMPO_MS_ATUALIZACAO_HTML, \
                                                                   io_loop = main_loop)
    print u"aviso: tornado: start"
    tarefa_atualizacao_html_loop.start()
    inicia_navegador()
    # The server loop must come last, since it is not a daemon.
    main_loop.start()

if __name__ == "__main__":
    main()
import os from shutil import copyfile import subprocess from save_embedded_graph27 import main_binary as embed_main from spearmint_ghsom import main as ghsom_main import numpy as np import pickle from time import time def save_obj(obj, name): with open(name + '.pkl', 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name): with open(name + '.pkl', 'rb') as f: return pickle.load(f) os.chdir("C:\Miniconda3\Jupyter\GHSOM_simplex_dsd") dir = os.path.abspath("parameter_tests_edges") num_repeats = 30 N = 64 if not os.path.isdir(dir): os.mkdir(dir) os.chdir(dir) network = "network.dat" first_level = "community.dat" labels = 'firstlevelcommunity' mu = 0.1 num_edges_ls = [256, 512, 1024] parameter_settings = [0.5, 0.6, 0.7, 0.8, 0.9, 1][::-1] overall_nmi_scores = np.zeros((len(num_edges_ls), len(parameter_settings))) for i in range(len(num_edges_ls)): #number of edges num_edges = num_edges_ls[i] #create directory dir_string = os.path.join(dir, str(num_edges)) if not os.path.isdir(dir_string): os.mkdir(dir_string) #change working directory os.chdir(dir_string) for j in range(len(parameter_settings)): #setting fo e_sg p = parameter_settings[j] #ghsom parameters params = {'w': 0.0001, 'eta': 0.0001, 'sigma': 1, 'e_sg': p, 'e_en': 0.8} #create directory dir_string_p = os.path.join(dir_string, str(p)) if not os.path.isdir(dir_string_p): os.mkdir(dir_string_p) #change working directory os.chdir(dir_string_p) if os.path.isfile('nmi_scores.csv'): print 'already completed {}/{}, loading scores and continuing'.format(k1, p) nmi_scores = np.genfromtxt('nmi_scores.csv', delimiter=',') overall_nmi_scores[i,j] = np.mean(nmi_scores, axis=0) continue #copy executable ex = "benchmark.exe" if not os.path.isfile(ex): source = "C:\\Users\\davem\\Documents\\PhD\\Benchmark Graph Generators\\binary_networks\\benchmark.exe" copyfile(source, ex) #record NMI scores if not os.path.isfile('nmi_scores.pkl'): print 'creating new nmi scores array' nmi_scores = np.zeros(num_repeats) 
else: print 'loading nmi score progress' nmi_scores = load_obj('nmi_scores') #record running times if not os.path.isfile('running_times.pkl'): print 'creating new running time array' running_times = np.zeros(num_repeats) else: print 'loading running time progress' running_times = load_obj('running_times') print #generate networks for r in range(1, num_repeats+1): #number of communities num_communities = np.random.randint(1,5) #number of nodes in micro community minc = np.floor(float(N) / num_communities) maxc = np.ceil(float(N) / num_communities) #average number of edges k = float(num_edges) / N #max number of edges maxk = 2 * k #make benchmark parameter file filename = "benchmark_flags_{}_{}_{}.dat".format(num_edges,p,r) if not os.path.isfile(filename): print 'number of edges: {}'.format(num_edges) print 'number of communities: {}'.format(num_communities) print '-N {} -k {} -maxk {} -minc {} -maxc {} -mu {}'.format(N, k, maxk, minc, maxc, mu) with open(filename,"w") as f: f.write("-N {} -k {} -maxk {} -minc {} -maxc {} -mu {}".format(N, k, maxk, minc, maxc, mu)) print 'written flag file: {}'.format(filename) #cmd strings change_dir_cmd = "cd {}".format(dir_string_p) generate_network_cmd = "benchmark -f {}".format(filename) #output of cmd output_file = open("cmd_output.out", 'w') network_rename = "{}_{}".format(r,network) first_level_rename = "{}_{}".format(r,first_level) gml_filename = 'embedded_network_{}.gml'.format(r) if not os.path.isfile(network_rename): process = subprocess.Popen(change_dir_cmd + " && " + generate_network_cmd, stdout=output_file, stderr=output_file, shell=True) process.wait() print 'generated graph {}'.format(r) os.rename(network, network_rename) os.rename(first_level, first_level_rename) print 'renamed graph {}'.format(r) if not os.path.isfile(gml_filename): ##embed graph embed_main(network_rename, first_level_rename) print 'embedded graph {} as {} in {}'.format(r, gml_filename, os.getcwd()) ##score for this network if not 
np.all(nmi_scores[r-1]): start_time = time() print 'starting ghsom for: {}/{}/{}'.format(num_edges, p, gml_filename) nmi_score, communities_detected = ghsom_main(params, gml_filename, labels) nmi_scores[r-1] = nmi_score running_time = time() - start_time print 'running time of algorithm: {}'.format(running_time) running_times[r-1] = running_time #save save_obj(nmi_scores, 'nmi_scores') save_obj(running_times, 'running_times') print 'saved nmi score for network {}: {}'.format(gml_filename, nmi_score) print ##output nmi scores to csv file print 'writing nmi scores and running times to file' np.savetxt('nmi_scores.csv',nmi_scores,delimiter=',') np.savetxt('running_times.csv',running_times,delimiter=',') print #odd to overall list overall_nmi_scores[i,j] = np.mean(nmi_scores, axis=0) print 'DONE' print 'OVERALL NMI SCORES' print overall_nmi_scores for scores in overall_nmi_scores: print scores idx = np.argsort(scores)[::-1] print parameter_settings[idx[0]]
from .base import *

# Local-memory cache backend: per-process and non-persistent, so it is
# suitable for development/testing; production settings should override it.
CACHES = {
    'default' : {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
    }
}
from unittest import TestCase

from pyage.core import inject
from pyage_forams.solutions.foram import Foram


class TestForam(TestCase):
    """Smoke test: a Foram can be constructed under the dummy config."""

    def test_step(self):
        # Point the dependency injector at the dummy configuration before
        # any Foram attributes are resolved.
        inject.config = "pyage_forams.conf.dummy_conf"
        foram = Foram(10)
        # NOTE(review): step() is disabled, so only construction is
        # exercised -- confirm whether stepping should be re-enabled.
        # foram.step()
""" SuperPython - Teste de Funcionalidade Web Verifica a funcionalidade do servidor web. """ __author__ = 'carlo' import unittest import sys import bottle import os import sys import os project_server = os.path.dirname(os.path.abspath(__file__)) project_server = os.path.join(project_server, '../src/') sys.path.insert(0, project_server) templates_dir = os.path.join(project_server, 'server/views/') if templates_dir not in bottle.TEMPLATE_PATH: bottle.TEMPLATE_PATH.insert(0, templates_dir) if sys.version_info[0] == 2: from mock import MagicMock, patch else: from unittest.mock import MagicMock, patch, ANY from webtest import TestApp from server.control import application as appbottle import server.modelo_redis as cs import server.control as ct class FunctionalWebTest(unittest.TestCase): def setUp(self): cs.DBF = '/tmp/redis_test.db' pass def test_default_page(self): """ test_default_page """ app = TestApp(appbottle) response = app.get('/static/index.html') self.assertEqual('200 OK', response.status) self.assertTrue('<title>Jogo Eica - Cadastro</title>' in response.text, response.text[:1000]) def test_default_redirect(self): """test_default_redirect """ app = TestApp(appbottle) response = app.get('/') self.assertEqual('302 Found', response.status) def test_register(self): """test_register """ # app = TestApp(appbottle) # response = app.get('/static/register?doc_id="10000001"&module=projeto2222') rec_id, response = self._get_id('3333') self.assertEqual('200 OK', response.status) self.assertTrue(rec_id in response, str(response)) # rec_id = str(response).split('ver = main("')[1].split('e0cb4e39e071")')[0] + 'e0cb4e39e071' expected_record = "{'module': 'projeto2222', 'user': 'projeto2222-lastcodename', 'idade': '00015'," received_record = cs.DRECORD.get(rec_id) assert expected_record in str(received_record),\ "{}: {}".format(rec_id, received_record) def _get_id(self, ref_id='e0cb4e39e071', url='/static/register?doc_id="10000001"&module=projeto2222'): """test_store """ app 
= TestApp(appbottle) user, idade, ano, sexo = 'projeto2222-lastcodename', '00015', '0009', 'outro' user_data = dict(doc_id=ref_id, user=user, idade=idade, ano=ano, sexo=sexo) response = app.get(url, params=user_data) return str(response).split('ver = main("')[1].split('")')[0], response def test_store(self): """test_store """ app = TestApp(appbottle) # response = app.get('/static/register?doc_id="10000001"&module=projeto2222') # rec_id = str(response).split('ver = main("')[1].split('e0cb4e39e071")')[0] + 'e0cb4e39e071' rec_id, _ = self._get_id() response = app.post('/record/store', self._pontua(rec_id)) self.assertEqual('200 OK', response.status) self.assertTrue('", "tempo": "20' in response, str(response)) # self.assertTrue('{"module": "projeto2222", "jogada": [{"carta": "2222",' in str(response), str(response)) expected_record = "{'module': 'projeto2222', 'user': 'projeto2222-lastcodename', 'idade': '00015'," received_record = str(response) assert expected_record.replace("'", '"') in received_record,\ "{}: {}".format(rec_id, received_record) def _pontua(self, ref_id): ct.LAST = ref_id jogada = {"doc_id": ref_id, "carta": 2222, "casa": 2222, "move": 2222, "ponto": 2222, "tempo": 2222, "valor": 2222} return jogada def test_pontos(self): rec_id, response = self._get_id() app = TestApp(appbottle) app.post('/record/store', self._pontua(rec_id)) ct.LAST = rec_id response = app.get('/pontos') self.assertEqual('200 OK', response.status) self.assertTrue('projeto2222-lastcodename' in response, str(response)) self.assertTrue('<h3>Idade: 10 Genero: outro Ano Escolar: 9</h3>' in response, str(response)) self.assertTrue('<td><span>2222<span></td>' in response, str(response)) if __name__ == '__main__': unittest.main()
import codecs def funct(f_name): """Remove leading and trailing whitespace from file.""" f_read = codecs.open(f_name, 'r') f_lines = f_read.readlines() out_lines = map(str.strip, f_lines) f_read.close() while True: o_write = raw_input("Create new file (c) or overwrite existing (o): ") if o_write.lower() == 'o': # f_name stays the same break elif o_write.lower() == 'c': f_name = raw_input("What is new file name? ") break f_write = codecs.open(f_name, 'w') for line in out_lines: f_write.write(line + '\n') print '"{}" has been written with no leading or trailing \ whitespace.'.format(f_name) def funct_comp(f_name): """Remove leading and trailing whitespace from file w/ comprehension.""" f_read = codecs.open(f_name, 'r') f_lines = f_read.readlines() print f_lines # out_lines = map(str.strip, f_lines) out_lines = [line.strip() for line in f_lines] print out_lines f_read.close() while True: o_write = raw_input("Create new file (c) or overwrite existing (o): ") if o_write.lower() == 'o': # f_name stays the same break elif o_write.lower() == 'c': f_name = raw_input("What is new file name? ") break f_write = codecs.open(f_name, 'w') for line in out_lines: f_write.write(line + '\n') print '"{}" has been written with no leading or trailing \ whitespace.'.format(f_name)
from __future__ import absolute_import

from testutil.dott import feature, sh, testtmp  # noqa: F401

# The test creates a symlink, so skip on platforms without symlink support.
feature.require(["symlink"])

# Build a repo: commit 0 adds a (bad) symlink, commit 1 removes it.
sh % "hg init repo"
sh % "cd repo"
sh % "ln -s foo link"
sh % "hg add link"
sh % "hg ci -mbad link"
sh % "hg rm link"
sh % "hg ci -mok"
# Export the symlink-removing change as a git-style patch.
sh % "hg diff -g -r '0:1'" > "bad.patch"

sh % "hg up 0" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"

# Applying the patch without committing must mark the link removed; the
# '==' right-hand sides are the expected command outputs checked by dott.
sh % "hg import --no-commit bad.patch" == "applying bad.patch"

sh % "hg status" == r"""
    R link
    ? bad.patch"""
import pytest

from cfme.cloud.provider.azure import AzureProvider
from cfme.markers.env_markers.provider import ONE_PER_CATEGORY
from cfme.networks.views import BalancerView
from cfme.networks.views import CloudNetworkView
from cfme.networks.views import FloatingIpView
from cfme.networks.views import NetworkPortView
from cfme.networks.views import NetworkRouterView
from cfme.networks.views import SecurityGroupView
from cfme.networks.views import SubnetView
from cfme.utils.appliance.implementations.ui import navigate_to

pytestmark = [
    pytest.mark.usefixtures('setup_provider'),
    pytest.mark.provider([AzureProvider], selector=ONE_PER_CATEGORY, scope='module')
]

# Appliance collection names whose entities get tag-visibility tested.
network_collections = [
    'network_providers',
    'cloud_networks',
    'network_subnets',
    'network_ports',
    'network_security_groups',
    'network_routers',
    'network_floating_ips'
]

# (relationship label on the provider Details page, view class it opens)
network_test_items = [
    ("Cloud Networks", CloudNetworkView),
    ("Cloud Subnets", SubnetView),
    ("Network Routers", NetworkRouterView),
    ("Security Groups", SecurityGroupView),
    ("Floating IPs", FloatingIpView),
    ("Network Ports", NetworkPortView),
    ("Load Balancers", BalancerView)
]


def child_visibility(appliance, network_provider, relationship, view):
    # Open the provider's Details page, follow the given relationship and
    # report whether any entities are visible in the resulting view.
    network_provider_view = navigate_to(network_provider, 'Details')
    if network_provider_view.entities.relationships.get_text_of(relationship) == "0":
        pytest.skip("There are no relationships for {}".format(relationship))
    network_provider_view.entities.relationships.click_at(relationship)
    relationship_view = appliance.browser.create_view(view)
    try:
        # Floating IPs expose ids instead of names.
        if relationship != "Floating IPs":
            assert relationship_view.entities.entity_names
        else:
            assert relationship_view.entities.entity_ids
        actual_visibility = True
    except AssertionError:
        actual_visibility = False
    return actual_visibility


@pytest.mark.parametrize("relationship,view", network_test_items,
                         ids=[rel[0] for rel in network_test_items])
def test_tagvis_network_provider_children(provider, appliance, request, relationship, view,
                                          tag, user_restricted):
    """Tagged provider children are visible to admin, hidden from a
    restricted user.

    Polarion:
        assignee: anikifor
        initialEstimate: 1/8h
        casecomponent: Tagging
    """
    collection = appliance.collections.network_providers.filter({'provider': provider})
    network_provider = collection.all()[0]
    network_provider.add_tag(tag=tag)
    request.addfinalizer(lambda: network_provider.remove_tag(tag=tag))
    actual_visibility = child_visibility(appliance, network_provider, relationship, view)
    assert actual_visibility
    with user_restricted:
        actual_visibility = child_visibility(appliance, network_provider, relationship, view)
    assert not actual_visibility


@pytest.fixture(params=network_collections, scope='module')
def entity(request, appliance):
    # Yield the first available entity of the parametrized collection, or
    # skip when the appliance has none.
    collection_name = request.param
    item_collection = getattr(appliance.collections, collection_name)
    items = item_collection.all()
    if items:
        return items[0]
    else:
        pytest.skip("No content found for test")


@pytest.mark.parametrize('visibility', [True, False], ids=['visible', 'notVisible'])
def test_network_tagvis(check_item_visibility, entity, visibility):
    """Tests network provider and its items honors tag visibility.

    Prerequisites:
        Catalog, tag, role, group and restricted user should be created

    Steps:
        1. As admin add tag
        2. Login as restricted user, item is visible for user
        3. As admin remove tag
        4. Login as restricted user, item is not visible for user

    Polarion:
        assignee: anikifor
        initialEstimate: 1/4h
        casecomponent: Tagging
    """
    check_item_visibility(entity, visibility)
import unittest
from mock import Mock, patch
from expyrimenter import Executor
from expyrimenter.runnable import Runnable
from subprocess import CalledProcessError
from concurrent.futures import ThreadPoolExecutor
import re


class TestExecutor(unittest.TestCase):
    """Unit tests for Executor: result collection, resource cleanup,
    shutdown behaviour and error logging."""

    # Canned outputs used by the mocked Runnable.run / helper function.
    output = 'TestExecutor output'
    outputs = ['TestExecutor 1', 'TestExecutor 2']

    def test_runnable_output(self):
        # A single runnable's output must appear in executor.results.
        executor = Executor()
        with patch.object(Runnable, 'run', return_value=TestExecutor.output):
            executor.run(Runnable())
            executor.wait()
            results = executor.results
        self.assertEqual(1, len(results))
        self.assertEqual(TestExecutor.output, results[0])

    def test_runnable_outputs(self):
        # Several runs collect their outputs in submission order.
        executor = Executor()
        runnable = Runnable()
        with patch.object(Runnable, 'run', side_effect=TestExecutor.outputs):
            executor.run(runnable)
            executor.run(runnable)
            executor.wait()
            results = executor.results
        self.assertListEqual(TestExecutor.outputs, results)

    def test_function_output(self):
        # run_function executes a plain callable in the background.
        executor = Executor()
        executor.run_function(background_function)
        executor.wait()
        output = executor.results[0]
        self.assertEqual(TestExecutor.output, output)

    def test_function_outputs(self):
        # NOTE(review): this duplicates test_runnable_outputs and never
        # calls run_function; it looks like a copy-paste -- confirm intent.
        executor = Executor()
        runnable = Runnable()
        with patch.object(Runnable, 'run', side_effect=TestExecutor.outputs):
            executor.run(runnable)
            executor.run(runnable)
            executor.wait()
            results = executor.results
        self.assertListEqual(TestExecutor.outputs, results)

    def test_against_runnable_memory_leak(self):
        # Finished futures must not accumulate inside the executor.
        executor = Executor()
        with patch.object(Runnable, 'run'):
            executor.run(Runnable())
            executor.wait()
        self.assertEqual(0, len(executor._future_runnables))

    def test_against_function_memory_leak(self):
        executor = Executor()
        executor.run_function(background_function)
        executor.wait()
        self.assertEqual(0, len(executor._function_titles))

    def test_if_shutdown_shutdowns_executor(self):
        # NOTE(review): 'called_once_with' (without assert_) is a no-op on
        # a Mock -- this probably meant assert_called_once_with; confirm.
        executor = Executor()
        executor._executor = Mock()
        executor.shutdown()
        executor._executor.shutdown.called_once_with()

    def test_if_shutdown_clears_function_resources(self):
        executor = Executor()
        executor._function_titles = Mock()
        executor.shutdown()
        executor._function_titles.clear.assert_called_once_with()

    def test_if_shutdown_clears_runnable_resources(self):
        executor = Executor()
        executor._future_runnables = Mock()
        executor.shutdown()
        executor._future_runnables.clear.assert_called_once_with()

    def test_exception_logging(self):
        # A runnable that raises must be logged exactly once.
        executor = Executor()
        executor._log = Mock()
        with patch.object(Runnable, 'run', side_effect=Exception):
            executor.run(Runnable)
            executor.wait()
        self.assertEqual(1, executor._log.error.call_count)

    @patch.object(ThreadPoolExecutor, '__init__', return_value=None)
    def test_specified_max_workers(self, pool_mock):
        # The worker count must be forwarded to the thread pool.
        max = 42
        Executor(max)
        pool_mock.assert_called_once_with(42)

    def test_calledprocesserror_logging(self):
        executor = Executor()
        executor._log = Mock()
        exception = CalledProcessError(returncode=1, cmd='command')
        with patch.object(Runnable, 'run', side_effect=exception):
            executor.run(Runnable)
            executor.wait()
        self.assertEqual(1, executor._log.error.call_count)

    def test_if_logged_title_is_hidden_if_it_equals_command(self):
        # When the runnable's title equals its command, the log message
        # must not contain a separate "Title" section.
        command = 'command'
        runnable = Runnable()
        runnable.title = command
        exception = CalledProcessError(returncode=1, cmd=command)
        runnable.run = Mock(side_effect=exception)
        executor = Executor()
        executor._log = Mock()
        executor.run(runnable)
        executor.wait()
        executor._log.error.assert_called_once_with(Matcher(has_not_title))

    def test_logged_title_when_it_differs_from_command(self):
        command, title = 'command', 'title'
        runnable = Runnable()
        runnable.title = title
        exception = CalledProcessError(returncode=1, cmd=command)
        runnable.run = Mock(side_effect=exception)
        executor = Executor()
        executor._log = Mock()
        executor.run(runnable)
        executor.wait()
        executor._log.error.assert_called_once_with(Matcher(has_title))


def has_title(msg):
    # True when the log message contains a "Title" section.
    return re.match("(?ims).*Title", msg) is not None


def has_not_title(msg):
    return re.match("(?ims).*Title", msg) is None


class Matcher:
    """Equality adapter so mock assertions can match by predicate."""

    def __init__(self, compare):
        self.compare = compare

    def __eq__(self, msg):
        return self.compare(msg)


def background_function():
    # Runs in the executor's pool during the function tests.
    return TestExecutor.output


if __name__ == '__main__':
    unittest.main()
from django.contrib import admin

from .models import Environment,EnvironmentAdmin,Component,ComponentAdmin,Environment_property,Environment_propertyAdmin,Component_attribute,Component_attributeAdmin

# Pair each model with its companion ModelAdmin and register them with the
# Django admin site, preserving the original registration order.
_REGISTRATIONS = (
    (Environment, EnvironmentAdmin),
    (Component, ComponentAdmin),
    (Environment_property, Environment_propertyAdmin),
    (Component_attribute, Component_attributeAdmin),
)

for _model, _model_admin in _REGISTRATIONS:
    admin.site.register(_model, _model_admin)
"HDL Checker installation script" import setuptools # type: ignore import versioneer LONG_DESCRIPTION = open("README.md", "rb").read().decode(encoding='utf8', errors='replace') CLASSIFIERS = """\ Development Status :: 5 - Production/Stable Environment :: Console Intended Audience :: Developers License :: OSI Approved :: GNU General Public License v3 (GPLv3) Operating System :: Microsoft :: Windows Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Topic :: Software Development Topic :: Scientific/Engineering :: Electronic Design Automation (EDA) Topic :: Text Editors :: Integrated Development Environments (IDE) """ setuptools.setup( name = 'hdl_checker', version = versioneer.get_version(), description = 'HDL code checker', long_description = LONG_DESCRIPTION, long_description_content_type = "text/markdown", author = 'Andre Souto', author_email = 'andre820@gmail.com', url = 'https://github.com/suoto/hdl_checker', license = 'GPLv3', keywords = 'VHDL Verilog SystemVerilog linter LSP language server protocol vimhdl vim-hdl', platforms = 'any', packages = setuptools.find_packages(), install_requires = ['argcomplete', 'argparse', 'backports.functools_lru_cache; python_version<"3.2"', 'bottle>=0.12.9', 'enum34>=1.1.6; python_version<"3.3"', 'future>=0.14.0', 'futures; python_version<"3.2"', 'prettytable>=0.7.2', 'pygls==0.9.1', 'requests>=2.20.0', 'six>=1.10.0', 'tabulate>=0.8.5', 'typing>=3.7.4', 'waitress>=0.9.0', ], cmdclass = versioneer.get_cmdclass(), entry_points = { 'console_scripts' : ['hdl_checker=hdl_checker.server:main', ] }, classifiers=CLASSIFIERS.splitlines(), )
import html.parser


class PyllageParser(html.parser.HTMLParser):
    """HTML parser that collects each significant tag with its attributes
    and text content into a numbered stack."""

    def __init__(self):
        super().__init__()
        # stack maps an insertion counter to {"tag", "attrs", "data"};
        # slot 1 starts as a placeholder that is pruned if it stays empty.
        self.counter = 1
        self.stack = {1: {"tag": "", "attrs": "", "data": []}}

    def handle_previous_tag(self):
        """Checks whether previously handled tag was significant."""
        previous_tag = self.stack[self.counter]
        if not (previous_tag["attrs"] or previous_tag["data"]):
            # No attributes and no text: drop the entry entirely.
            del self.stack[self.counter]
            self.counter -= 1

    def handle_starttag(self, tag, attrs):
        self.handle_previous_tag()
        self.counter += 1
        # Render attribute pairs as "name=value | name=value".
        attrs_string = " | ".join("{}={}".format(*attr) for attr in attrs)
        self.stack[self.counter] = {"tag": tag, "attrs": attrs_string, "data": []}

    def handle_data(self, data):
        data = data.strip()
        if data:
            self.stack[self.counter]["data"].append(data)

    def handle_entityref(self, name):
        # Only invoked when convert_charrefs=False.  BUG FIX: the original
        # called self.unescape(), which was removed from HTMLParser in
        # Python 3.9; html.unescape() is the supported replacement.
        self.stack[self.counter]["data"].append(html.unescape("&{};".format(name)))

    def handle_charref(self, name):
        # Numeric character reference; same fix as handle_entityref.
        self.stack[self.counter]["data"].append(html.unescape("&#{};".format(name)))

    def freeze_data(self):
        """Converts all data lists into string."""
        self.handle_previous_tag()
        for key in self.stack:
            self.stack[key]["data"] = "".join(self.stack[key]["data"])


def parse(html):
    """Instantiate a parser to process html, return the stack."""
    parser = PyllageParser()
    parser.feed(html)
    parser.freeze_data()
    return parser.stack
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import datetime
import signal
import sys
import termios
import time
import tty

from os import (
    getpgrp,
    isatty,
    tcgetpgrp,
)

from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import PY3
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display

display = Display()

try:
    import curses

    # Nest the try except since curses.error is not available if curses did not import
    try:
        curses.setupterm()
        HAS_CURSES = True
    except curses.error:
        HAS_CURSES = False
except ImportError:
    HAS_CURSES = False

# Terminal control sequences used to wipe the current line; fall back to
# hard-coded ANSI codes when curses/terminfo is unavailable.
if HAS_CURSES:
    MOVE_TO_BOL = curses.tigetstr('cr')
    CLEAR_TO_EOL = curses.tigetstr('el')
else:
    MOVE_TO_BOL = b'\r'
    CLEAR_TO_EOL = b'\x1b[K'


class AnsibleTimeoutExceeded(Exception):
    # Raised by the SIGALRM handler when the pause duration expires.
    pass


def timeout_handler(signum, frame):
    raise AnsibleTimeoutExceeded


def clear_line(stdout):
    # Move the cursor to the beginning of the line, then clear to its end.
    stdout.write(b'\x1b[%s' % MOVE_TO_BOL)
    stdout.write(b'\x1b[%s' % CLEAR_TO_EOL)


def is_interactive(fd=None):
    # True only when fd refers to a TTY owned by the foreground process group.
    if fd is None:
        return False

    if isatty(fd):
        # Compare the current process group to the process group associated
        # with terminal of the given file descriptor to determine if the process
        # is running in the background.
        return getpgrp() == tcgetpgrp(fd)
    else:
        return False


class ActionModule(ActionBase):
    ''' pauses execution for a length or time, or until input is received '''

    # Pause runs once for the whole play, not per host.
    BYPASS_HOST_LOOP = True
    _VALID_ARGS = frozenset(('echo', 'minutes', 'prompt', 'seconds'))

    def run(self, tmp=None, task_vars=None):
        ''' run the pause action module '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        duration_unit = 'minutes'
        prompt = None
        seconds = None
        echo = True
        echo_prompt = ''
        result.update(dict(
            changed=False,
            rc=0,
            stderr='',
            stdout='',
            start=None,
            stop=None,
            delta=None,
            echo=echo
        ))

        # Should keystrokes be echoed to stdout?
        if 'echo' in self._task.args:
            try:
                echo = boolean(self._task.args['echo'])
            except TypeError as e:
                result['failed'] = True
                result['msg'] = to_native(e)
                return result

            # Add a note saying the output is hidden if echo is disabled
            if not echo:
                echo_prompt = ' (output is hidden)'

        # Is 'prompt' a key in 'args'?
        if 'prompt' in self._task.args:
            prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), self._task.args['prompt'], echo_prompt)
        else:
            # If no custom prompt is specified, set a default prompt
            prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)

        # Are 'minutes' or 'seconds' keys that exist in 'args'?
        if 'minutes' in self._task.args or 'seconds' in self._task.args:
            try:
                if 'minutes' in self._task.args:
                    # The time() command operates in seconds so we need to
                    # recalculate for minutes=X values.
                    seconds = int(self._task.args['minutes']) * 60
                else:
                    seconds = int(self._task.args['seconds'])
                duration_unit = 'seconds'
            except ValueError as e:
                result['failed'] = True
                result['msg'] = u"non-integer value given for prompt duration:\n%s" % to_text(e)
                return result

        ########################################################################
        # Begin the hard work!

        start = time.time()
        result['start'] = to_text(datetime.datetime.now())
        result['user_input'] = b''

        stdin_fd = None
        old_settings = None
        try:
            if seconds is not None:
                if seconds < 1:
                    seconds = 1

                # setup the alarm handler
                signal.signal(signal.SIGALRM, timeout_handler)
                signal.alarm(seconds)

                # show the timer and control prompts
                display.display("Pausing for %d seconds%s" % (seconds, echo_prompt))
                display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"),

                # show the prompt specified in the task
                if 'prompt' in self._task.args:
                    display.display(prompt)
            else:
                display.display(prompt)

            # save the attributes on the existing (duped) stdin so
            # that we can restore them later after we set raw mode
            stdin_fd = None
            stdout_fd = None
            try:
                if PY3:
                    stdin = self._connection._new_stdin.buffer
                    stdout = sys.stdout.buffer
                else:
                    stdin = self._connection._new_stdin
                    stdout = sys.stdout
                stdin_fd = stdin.fileno()
                stdout_fd = stdout.fileno()
            except (ValueError, AttributeError):
                # ValueError: someone is using a closed file descriptor as stdin
                # AttributeError: someone is using a null file descriptor as stdin on windoze
                stdin = None
            interactive = is_interactive(stdin_fd)
            if interactive:
                # grab actual Ctrl+C sequence
                try:
                    intr = termios.tcgetattr(stdin_fd)[6][termios.VINTR]
                except Exception:
                    # unsupported/not present, use default
                    intr = b'\x03'  # value for Ctrl+C

                # get backspace sequences
                try:
                    backspace = termios.tcgetattr(stdin_fd)[6][termios.VERASE]
                except Exception:
                    backspace = [b'\x7f', b'\x08']

                old_settings = termios.tcgetattr(stdin_fd)
                tty.setraw(stdin_fd)

                # Only set stdout to raw mode if it is a TTY. This is needed when redirecting
                # stdout to a file since a file cannot be set to raw mode.
                if isatty(stdout_fd):
                    tty.setraw(stdout_fd)

                # Only echo input if no timeout is specified
                if not seconds and echo:
                    new_settings = termios.tcgetattr(stdin_fd)
                    new_settings[3] = new_settings[3] | termios.ECHO
                    termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)

                # flush the buffer to make sure no previous key presses
                # are read in below
                termios.tcflush(stdin, termios.TCIFLUSH)

            while True:
                if not interactive:
                    if seconds is None:
                        display.warning("Not waiting for response to prompt as stdin is not interactive")
                    if seconds is not None:
                        # Give the signal handler enough time to timeout
                        time.sleep(seconds + 1)
                    break

                try:
                    key_pressed = stdin.read(1)

                    if key_pressed == intr:  # value for Ctrl+C
                        clear_line(stdout)
                        raise KeyboardInterrupt

                    # read key presses and act accordingly
                    if key_pressed in (b'\r', b'\n'):
                        # Enter finishes input collection.
                        clear_line(stdout)
                        break
                    elif key_pressed in backspace:
                        # delete a character if backspace is pressed
                        result['user_input'] = result['user_input'][:-1]
                        clear_line(stdout)
                        if echo:
                            stdout.write(result['user_input'])
                        stdout.flush()
                    else:
                        result['user_input'] += key_pressed

                except KeyboardInterrupt:
                    # Ctrl+C: cancel the alarm and ask whether to continue
                    # the play or abort it.
                    signal.alarm(0)
                    display.display("Press 'C' to continue the play or 'A' to abort \r"),
                    if self._c_or_a(stdin):
                        clear_line(stdout)
                        break

                    clear_line(stdout)
                    raise AnsibleError('user requested abort!')

        except AnsibleTimeoutExceeded:
            # this is the exception we expect when the alarm signal
            # fires, so we simply ignore it to move into the cleanup
            pass
        finally:
            # cleanup and save some information
            # restore the old settings for the duped stdin stdin_fd
            if not(None in (stdin_fd, old_settings)) and isatty(stdin_fd):
                termios.tcsetattr(stdin_fd, termios.TCSADRAIN, old_settings)

            duration = time.time() - start
            result['stop'] = to_text(datetime.datetime.now())
            result['delta'] = int(duration)

            if duration_unit == 'minutes':
                duration = round(duration / 60.0, 2)
            else:
                duration = round(duration, 2)
            result['stdout'] = "Paused for %s %s" % (duration, duration_unit)

        result['user_input'] = to_text(result['user_input'], errors='surrogate_or_strict')
        return result

    def _c_or_a(self, stdin):
        # Block until the user answers: 'c' -> continue (True),
        # 'a' -> abort (False).
        while True:
            key_pressed = stdin.read(1)
            if key_pressed.lower() == b'a':
                return False
            elif key_pressed.lower() == b'c':
                return True
import os
import unittest

from vsg.rules import package
from vsg import vhdlFile
from vsg.tests import utils

sTestDir = os.path.dirname(__file__)

lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_001_test_input.vhd'))

dIndentMap = utils.read_indent_file()

# Expected fixed output: a leading blank line followed by the fixed file contents.
lExpected = ['']
utils.read_file(os.path.join(sTestDir, 'rule_001_test_input.fixed.vhd'), lExpected)


class test_package_rule(unittest.TestCase):
    """Exercises package rule 001 against the canned VHDL test input."""

    def setUp(self):
        """Build a fresh vhdlFile object for each test."""
        self.oFile = vhdlFile.vhdlFile(lFile)
        self.assertIsNone(eError)
        self.oFile.set_indent_map(dIndentMap)

    def test_rule_001(self):
        """The rule identifies itself correctly and flags line 6."""
        oRule = package.rule_001()
        self.assertTrue(oRule)
        self.assertEqual(oRule.name, 'package')
        self.assertEqual(oRule.identifier, '001')

        oRule.analyze(self.oFile)
        lViolationLines = utils.extract_violation_lines_from_violation_object(oRule.violations)
        self.assertEqual([6], lViolationLines)

    def test_fix_rule_001(self):
        """Fixing produces the expected file and clears all violations."""
        oRule = package.rule_001()
        oRule.fix(self.oFile)

        self.assertEqual(lExpected, self.oFile.get_lines())

        oRule.analyze(self.oFile)
        self.assertEqual([], oRule.violations)
import requests
import xml.etree.ElementTree as ET
from time import sleep


class QualysAPI:
    """Simplify the making and handling of API calls to the Qualys platform.

    Class Members
    =============
    server      : str  : FQDN of the API server (with https:// prefix)
    user        : str  : username of an API user in the subscription
    password    : str  : password of the API user
    proxy       : str  : FQDN of the proxy server used for connections (with https:// prefix)
    debug       : bool : if True, output debug information to the console during execution
    enableProxy : bool : if True, force connections via the proxy defined in 'proxy'
    callCount   : int  : number of completed API calls made during the life of this object

    Example
    =======
        api = QualysAPI(svr='https://qualysapi.qualys.com', usr='username', passwd='password',
                        proxy='https://proxy.internal', enableProxy=True, debug=False)
        fullurl = '%s/full/path/to/api/call' % api.server
        api.makeCall(url=fullurl, payload='', headers={'X-Requested-With': 'python3'})
    """

    server: str
    user: str
    password: str
    proxy: str
    debug: bool
    enableProxy: bool
    callCount: int
    headers = {}
    sess: requests.Session

    def __init__(self, svr="", usr="", passwd="", proxy="", enableProxy=False, debug=False):
        """Store connection settings and create the authenticated session.

        svr         : API server FQDN with https:// prefix (default "")
        usr         : API username (default "")
        passwd      : API password (default "")
        proxy       : proxy FQDN with https:// prefix (default "")
        enableProxy : route requests through 'proxy' when True (default False)
        debug       : print response headers for each call when True (default False)
        """
        self.server = svr
        self.user = usr
        self.password = passwd
        self.proxy = proxy
        self.enableProxy = enableProxy
        self.debug = debug
        self.callCount = 0

        # One shared session so keep-alive and basic-auth are reused across calls
        self.sess = requests.session()
        self.sess.auth = (self.user, self.password)

        # Default X-Requested-With header (most API calls require it, it
        # doesn't hurt to have it in all calls)
        self.headers = {'X-Requested-With': 'python3/requests'}

    def makeCall(self, url, payload="", headers=None, retryCount=0):
        """Make a Qualys API call; return the response parsed as an
        xml.etree.ElementTree.Element.

        url        : full URL of the API request including URL-encoded parameters (required)
        payload    : request body (default "")
        headers    : dict of extra HTTP request headers (default None)
        retryCount : attempts made so far; used internally for rate/concurrency
                     limit handling, not intended for use by callers (default 0)

        Retries recursively (with growing waits) when the Qualys concurrency or
        rate limit response headers indicate the call should be re-attempted.
        """
        # NOTE(review): extra headers are merged into the *shared* session's
        # header dict, so they persist for later calls on this session.
        rheaders = self.sess.headers
        if headers is not None:
            for h in headers.keys():
                rheaders[h] = headers[h]

        # Build and prepare the request on the shared session
        r = requests.Request('POST', url, data=payload, headers=rheaders)
        prepped_req = self.sess.prepare_request(r)

        # Send via the proxy only when explicitly enabled
        if self.enableProxy:
            resp = self.sess.send(prepped_req, proxies={'https': self.proxy})
        else:
            resp = self.sess.send(prepped_req)

        if self.debug:
            print("QualysAPI.makeCall: Headers...")
            print("%s" % str(resp.headers))

        # Handle concurrency limit failures
        if 'X-Concurrency-Limit-Limit' in resp.headers.keys() and 'X-Concurrency-Limit-Running' in resp.headers.keys():
            climit = int(resp.headers['X-Concurrency-Limit-Limit'])
            crun = int(resp.headers['X-Concurrency-Limit-Running'])
            # If crun > climit we have hit the concurrency limit; wait for a
            # number of seconds that grows with the number of retries so far
            if crun > climit:
                print("QualysAPI.makeCall: Concurrency limit hit. %s/%s running calls" % (crun, climit))
                retryCount = retryCount + 1
                if retryCount > 15:
                    print("QualysAPI.makeCall: Retry count > 15, waiting 60 seconds")
                    waittime = 60
                elif retryCount > 5:
                    print("QualysAPI.makeCall: Retry count > 5, waiting 30 seconds")
                    waittime = 30
                else:
                    print("QualysAPI.makeCall: Waiting 15 seconds")
                    waittime = 15
                sleep(waittime)
                print("QualysAPI.makeCall: Retrying (retryCount = %s)" % str(retryCount))
                # BUGFIX: return the retried call's result directly. The original
                # assigned the parsed Element back to 'resp' and fell through to
                # code expecting a requests.Response (resp.headers / resp.text),
                # which would raise AttributeError.
                return self.makeCall(url=url, payload=payload, headers=headers, retryCount=retryCount)

        # Handle rate limit failures
        if 'X-RateLimit-ToWait-Sec' in resp.headers.keys():
            # BUGFIX: HTTP header values are strings; the original compared
            # str > int, which raises TypeError on Python 3.
            if int(resp.headers['X-RateLimit-ToWait-Sec']) > 0:
                # We know we have to wait some time, so first increment the retryCount
                retryCount = retryCount + 1
                # Base wait comes from the response header; add more the more we retry
                waittime = int(resp.headers['X-RateLimit-ToWait-Sec'])
                print("QualysAPI.makeCall: Rate limit reached, suggested wait time: %s seconds" % waittime)
                if retryCount > 15:
                    print("QualysAPI.makeCall: Retry Count > 15, adding 60 seconds to wait time")
                    waittime = waittime + 60
                elif retryCount > 5:
                    print("QualysAPI.makeCall: Retry Count > 5, adding 30 seconds to wait time")
                    waittime = waittime + 30
                sleep(waittime)
                print("QualysAPI.makeCall: Retrying (retryCount = %s)" % str(retryCount))
                # BUGFIX: return directly for the same reason as above
                return self.makeCall(url=url, payload=payload, headers=headers, retryCount=retryCount)

        # Increment the API call count (failed calls are not included in the count)
        self.callCount = self.callCount + 1

        # Return the response as an ElementTree XML object
        return ET.fromstring(resp.text)
__author__ = 'Patrick Michl'
__email__ = 'frootlab@gmail.com'
__license__ = 'GPLv3'

import nemoa
import numpy


class Links:
    """Common attribute calculations shared by ANN link layers."""

    params = {}

    def __init__(self):
        pass

    @staticmethod
    def energy(dSrc, dTgt, src, tgt, links, calc = 'mean'):
        """Return link energy as numpy array."""
        unit_class = src['class']
        if unit_class == 'gauss':
            # Gaussian sources scale the (negative) effective weights by the
            # source-layer standard deviation, exp(lvar) ** 0.5.
            weights = -links['A'] * links['W'] / numpy.sqrt(numpy.exp(src['lvar'])).T
        elif unit_class == 'sigmoid':
            weights = -links['A'] * links['W']
        else:
            raise ValueError('unsupported unit class')
        # Outer product of source/target activations against the weights:
        # result[i, j, k] = dSrc[i, j] * dTgt[i, k] * weights[j, k]
        return numpy.einsum('ij,ik,jk->ijk', dSrc, dTgt, weights)

    @staticmethod
    def get_updates(data, model):
        """Return weight updates of a link layer."""
        norm = float(data[1].size)
        observed = numpy.dot(data[0].T, data[1]) / norm
        expected = numpy.dot(model[0].T, model[1]) / norm
        return {'W': observed - expected}

    @staticmethod
    def get_updates_delta(data, delta):
        # Backprop-style update: negative correlation of inputs with deltas.
        return {'W': -numpy.dot(data.T, delta) / float(data.size)}
"""Render audio previews for Tidal code snippets embedded in a built website.

Scans the generated HTML pages for <div class="render"> blocks, hashes each
code snippet, and (if not already rendered) pipes the snippet into the
external './runpattern' helper to produce an .mp3 named after the digest.

NOTE(review): this is Python 2 code (print statements, md5 of a str).
"""
import glob
import os.path
import re
import hashlib
from bs4 import BeautifulSoup
from subprocess import call, Popen, PIPE, STDOUT

# Root of the statically-built site to scan for renderable patterns
root = "/home/alex/tidalcycles.github.io/_site/"

# Strips a leading "d1 $", "d2 $", ... connection prefix from a pattern
dnmatcher = re.compile(r'^\s*d[0-9]\s*(\$\s*)?')
# Trim leading/trailing whitespace (incl. newlines) around the snippet
crmatcherpre = re.compile(r'^[\s\n\r]*')
crmatcherpost = re.compile(r'[\s\n\r]*$')
# Bare 'size' must be qualified to avoid a name clash when run in context
sizematcher = re.compile(r'\bsize\b')
outpath = "../patterns/"

for fn in glob.glob(os.path.join(root, "*.html")):
    soup = BeautifulSoup(open(fn), 'lxml')
    patterns = soup.find_all("div", "render")
    if len(patterns) > 0:
        print(fn + " (" + str(len(patterns)) +")")
        for pattern in patterns:
            code = pattern.get_text()
            code = crmatcherpre.sub('', code)
            code = crmatcherpost.sub('', code)
            # Digest is taken BEFORE the 'size' rewrite so filenames track
            # the snippet as it appears on the page
            digest = hashlib.md5(code).hexdigest()
            code = sizematcher.sub('Sound.Tidal.Context.size', code)
            outfn = outpath + digest + ".mp3"
            # Only render snippets we have not rendered before
            if (not os.path.exists(outfn)):
                print "building outfn: " + outfn
                print "digest:" + digest
                print "code >>" + code + "<<"
                code = dnmatcher.sub('', code)
                # Feed the cleaned snippet to the renderer on stdin;
                # stderr is folded into stdout for one combined transcript
                p = Popen(["./runpattern", outfn], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
                tidalout = p.communicate(input=code)[0]
                print(tidalout)
                if p.returncode == 0:
                    print "worked> " + outfn
                else:
                    print "did not work."
"""Usage: codetrawl.dump PATTERN FILE [FILE...] where PATTERN is a Python format string like "{raw_url}", with allowed keys: - service - query - repo - path - raw_url - content """ import sys import docopt from .read import read_matches if __name__ == "__main__": args = docopt.docopt(__doc__) for match in read_matches(args["FILE"]): sys.stdout.write(args["PATTERN"].format(**match)) sys.stdout.write("\n")
import platform

from PyQt5.QtGui import QIcon, QFont, QFontDatabase
from PyQt5.QtCore import QSize


class StyleDB(object):
    """Central store for frame-style constants, colors, and platform fonts."""

    def __init__(self):

        # ---- frame styles (see the QFrame shape/shadow table below) ----
        self.frame = 22
        self.HLine = 52
        self.VLine = 53
        self.sideBarWidth = 275

        # ---- colors ----
        self.red = '#C83737'
        self.lightgray = '#E6E6E6'
        self.rain = '#0000CC'
        self.snow = '0.7'
        self.wlvl = '#0000CC'  # '#000099'

        # Pick the font family for the current platform.  On any other
        # platform the font attributes are deliberately left unset, exactly
        # as in the original implementation.
        family = {'Windows': 'Segoe UI',   # Calibri, Cambria
                  'Linux': 'Ubuntu'}.get(platform.system())
        if family is not None:
            self.font1 = QFont(family, 11)
            self.font_console = QFont(family, 9)
            self.font_menubar = QFont(family, 10)
            self.fontfamily = family

        # 17 = QtGui.QFrame.Box | QtGui.QFrame.Plain
        # 22 = QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain
        # 20 = QtGui.QFrame.HLine | QtGui.QFrame.Plain
        # 52 = QtGui.QFrame.HLine | QtGui.QFrame.Sunken
        # 53 = QtGui.QFrame.VLine | QtGui.QFrame.Sunken
import unittest

import wire


class TestSQLString(unittest.TestCase):

    def setUp(self):
        '''Binds the SQL string generator under test'''
        self.sql = wire.SQLString

    def test_pragma(self):
        '''Tests the PRAGMA SQL generation'''
        self.assertEqual("PRAGMA INTEGRITY_CHECK(10)", self.sql.pragma("INTEGRITY_CHECK(10)"))
        self.assertEqual("PRAGMA INTEGRITY_CHECK(5)", self.sql.checkIntegrity(5))

    def test_createTable(self):
        '''Tests the CREATE TABLE SQL generation'''
        # Column order from kwargs is not guaranteed, so accept either ordering.
        table_outputs = [
            "CREATE TABLE test (id INT NOT NULL,username VARCHAR(255) DEFAULT 'default_user')",
            "CREATE TABLE test (username VARCHAR(255) DEFAULT 'default_user',id INT NOT NULL)",
        ]
        temp_table_outputs = [
            "CREATE TEMPORARY TABLE test_temp (value REAL DEFAULT 0.0,time TIMESTAMP DEFAULT CURRENT_TIMESTAMP)",
            "CREATE TEMPORARY TABLE test_temp (time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,value REAL DEFAULT 0.0)",
        ]
        created = self.sql.createTable("test", False, id = "INT", username = ["VARCHAR(255)", "'default_user'"])
        self.assertIn(created, table_outputs)
        # Temp table test (False --> True)
        created_temp = self.sql.createTable("test_temp", True, value = ["REAL", 0.0], time = ["TIMESTAMP", "CURRENT_TIMESTAMP"])
        self.assertIn(created_temp, temp_table_outputs)

    def test_dropTable(self):
        '''Tests the DROP TABLE SQL generation'''
        for table in ("table_drop", "some_other_table"):
            self.assertEqual("DROP TABLE " + table, self.sql.dropTable(table))

    def test_renameTable(self):
        '''Tests the ALTER TABLE RENAME SQL generation'''
        self.assertEqual("ALTER TABLE orig_table RENAME TO new_table",
                         self.sql.rename("orig_table", "new_table"))


if __name__ == '__main__':
    unittest.main()
import SOAP

import supybot.utils as utils
from supybot.commands import *
import supybot.callbacks as callbacks


class UrbanDict(callbacks.Plugin):
    threaded = True
    server = SOAP.SOAPProxy('http://api.urbandictionary.com/soap')

    def _licenseCheck(self, irc):
        # Abort (via Raise=True) with a helpful message when no key is configured.
        key = self.registryValue('licenseKey')
        if not key:
            irc.error('You must have a free UrbanDictionary API license key '
                      'in order to use this command. You can get one at '
                      '<http://www.urbandictionary.com/api.php>. Once you '
                      'have one, you can set it with the command '
                      '"config supybot.plugins.UrbanDict.licenseKey <key>".',
                      Raise=True)
        return key

    def urbandict(self, irc, msg, args, words):
        """<phrase>

        Returns the definition and usage of <phrase> from
        UrbanDictionary.com.
        """
        key = self._licenseCheck(irc)
        definitions = self.server.lookup(key, ' '.join(words))
        if len(definitions) == 0:
            irc.error('No definition found.', Raise=True)
        word = definitions[0].word
        formatted = ['%s (%s)' % (d.definition, d.example) for d in definitions]
        irc.reply(utils.web.htmlToText('%s: %s' % (word, '; '.join(formatted))))
    urbandict = wrap(urbandict, [many('something')])

    def _define(self, irc, getDefinition, license):
        # Shared formatter for the single-definition commands below.
        definition = getDefinition(license)
        word = definition.word
        formatted = ['%s (%s)' % (definition.definition, definition.example)]
        irc.reply(utils.web.htmlToText('%s: %s' % (word, '; '.join(formatted))))

    def daily(self, irc, msg, args):
        """takes no arguments

        Returns the definition and usage of the daily phrase from
        UrbanDictionary.com.
        """
        key = self._licenseCheck(irc)
        self._define(irc, self.server.get_daily_definition, key)
    daily = wrap(daily)

    def random(self, irc, msg, args):
        """takes no arguments

        Returns the definition and usage of a random phrase from
        UrbanDictionary.com.
        """
        key = self._licenseCheck(irc)
        self._define(irc, self.server.get_random_definition, key)
    random = wrap(random)


Class = UrbanDict
""" """ from DIRAC import S_OK, S_ERROR from DIRAC.Core.Utilities.CFG import CFG from DIRAC.Core.Utilities import List from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations from DIRAC.Core.Utilities.JDL import loadJDLAsCFG, dumpCFGAsJDL from DIRAC.WorkloadManagementSystem.Agent.SiteDirector import getSubmitPools class JobManifest(object): def __init__(self, manifest=""): self.__manifest = CFG() self.__dirty = False self.__ops = False if manifest: result = self.load(manifest) if not result['OK']: raise Exception(result['Message']) def isDirty(self): return self.__dirty def setDirty(self): self.__dirty = True def clearDirty(self): self.__dirty = False def load(self, dataString): """ Auto discover format type based on [ .. ] of JDL """ dataString = dataString.strip() if dataString[0] == "[" and dataString[-1] == "]": return self.loadJDL(dataString) else: return self.loadCFG(dataString) def loadJDL(self, jdlString): """ Load job manifest from JDL format """ result = loadJDLAsCFG(jdlString.strip()) if not result['OK']: self.__manifest = CFG() return result self.__manifest = result['Value'][0] return S_OK() def loadCFG(self, cfgString): """ Load job manifest from CFG format """ try: self.__manifest.loadFromBuffer(cfgString) except Exception as e: return S_ERROR("Can't load manifest from cfg: %s" % str(e)) return S_OK() def dumpAsCFG(self): return str(self.__manifest) def getAsCFG(self): return self.__manifest.clone() def dumpAsJDL(self): return dumpCFGAsJDL(self.__manifest) def __getCSValue(self, varName, defaultVal=None): if not self.__ops: self.__ops = Operations(group=self.__manifest['OwnerGroup'], setup=self.__manifest['DIRACSetup']) if varName[0] != "/": varName = "JobDescription/%s" % varName return self.__ops.getValue(varName, defaultVal) def __checkNumericalVar(self, varName, defaultVal, minVal, maxVal): """ Check a numerical var """ initialVal = False if varName not in self.__manifest: varValue = self.__getCSValue("Default%s" % varName, 
defaultVal) else: varValue = self.__manifest[varName] initialVal = varValue try: varValue = long(varValue) except BaseException: return S_ERROR("%s must be a number" % varName) minVal = self.__getCSValue("Min%s" % varName, minVal) maxVal = self.__getCSValue("Max%s" % varName, maxVal) varValue = max(minVal, min(varValue, maxVal)) if initialVal != varValue: self.__manifest.setOption(varName, varValue) return S_OK(varValue) def __checkChoiceVar(self, varName, defaultVal, choices): """ Check a choice var """ initialVal = False if varName not in self.__manifest: varValue = self.__getCSValue("Default%s" % varName, defaultVal) else: varValue = self.__manifest[varName] initialVal = varValue if varValue not in self.__getCSValue("Choices%s" % varName, choices): return S_ERROR("%s is not a valid value for %s" % (varValue, varName)) if initialVal != varValue: self.__manifest.setOption(varName, varValue) return S_OK(varValue) def __checkMultiChoice(self, varName, choices): """ Check a multi choice var """ initialVal = False if varName not in self.__manifest: return S_OK() else: varValue = self.__manifest[varName] initialVal = varValue choices = self.__getCSValue("Choices%s" % varName, choices) for v in List.fromChar(varValue): if v not in choices: return S_ERROR("%s is not a valid value for %s" % (v, varName)) if initialVal != varValue: self.__manifest.setOption(varName, varValue) return S_OK(varValue) def __checkMaxInputData(self, maxNumber): """ Check Maximum Number of Input Data files allowed """ varName = "InputData" if varName not in self.__manifest: return S_OK() varValue = self.__manifest[varName] if len(List.fromChar(varValue)) > maxNumber: return S_ERROR('Number of Input Data Files (%s) greater than current limit: %s' % (len(List.fromChar(varValue)), maxNumber)) return S_OK() def __contains__(self, key): """ Check if the manifest has the required key """ return key in self.__manifest def setOptionsFromDict(self, varDict): for k in sorted(varDict): self.setOption(k, 
varDict[k]) def check(self): """ Check that the manifest is OK """ for k in ['OwnerName', 'OwnerDN', 'OwnerGroup', 'DIRACSetup']: if k not in self.__manifest: return S_ERROR("Missing var %s in manifest" % k) # Check CPUTime result = self.__checkNumericalVar("CPUTime", 86400, 100, 500000) if not result['OK']: return result result = self.__checkNumericalVar("Priority", 1, 0, 10) if not result['OK']: return result allowedSubmitPools = getSubmitPools(self.__manifest['OwnerGroup']) result = self.__checkMultiChoice("SubmitPools", list(set(allowedSubmitPools))) if not result['OK']: return result result = self.__checkMultiChoice("PilotTypes", ['private']) if not result['OK']: return result maxInputData = Operations().getValue("JobDescription/MaxInputData", 500) result = self.__checkMaxInputData(maxInputData) if not result['OK']: return result operation = Operations(group=self.__manifest['OwnerGroup']) allowedJobTypes = operation.getValue("JobDescription/AllowedJobTypes", ['User', 'Test', 'Hospital']) transformationTypes = operation.getValue("Transformations/DataProcessing", []) result = self.__checkMultiChoice("JobType", allowedJobTypes + transformationTypes) if not result['OK']: return result return S_OK() def createSection(self, secName, contents=False): if secName not in self.__manifest: if contents and not isinstance(contents, CFG): return S_ERROR("Contents for section %s is not a cfg object" % secName) self.__dirty = True return S_OK(self.__manifest.createNewSection(secName, contents=contents)) return S_ERROR("Section %s already exists" % secName) def getSection(self, secName): self.__dirty = True if secName not in self.__manifest: return S_ERROR("%s does not exist" % secName) sec = self.__manifest[secName] if not sec: return S_ERROR("%s section empty" % secName) return S_OK(sec) def setSectionContents(self, secName, contents): if contents and not isinstance(contents, CFG): return S_ERROR("Contents for section %s is not a cfg object" % secName) self.__dirty = True if 
secName in self.__manifest: self.__manifest[secName].reset() self.__manifest[secName].mergeWith(contents) else: self.__manifest.createNewSection(secName, contents=contents) def setOption(self, varName, varValue): """ Set a var in job manifest """ self.__dirty = True levels = List.fromChar(varName, "/") cfg = self.__manifest for l in levels[:-1]: if l not in cfg: cfg.createNewSection(l) cfg = cfg[l] cfg.setOption(levels[-1], varValue) def remove(self, opName): levels = List.fromChar(opName, "/") cfg = self.__manifest for l in levels[:-1]: if l not in cfg: return S_ERROR("%s does not exist" % opName) cfg = cfg[l] if cfg.deleteKey(levels[-1]): self.__dirty = True return S_OK() return S_ERROR("%s does not exist" % opName) def getOption(self, varName, defaultValue=None): """ Get a variable from the job manifest """ cfg = self.__manifest return cfg.getOption(varName, defaultValue) def getOptionList(self, section=""): """ Get a list of variables in a section of the job manifest """ cfg = self.__manifest.getRecursive(section) if not cfg or 'value' not in cfg: return [] cfg = cfg['value'] return cfg.listOptions() def isOption(self, opName): """ Check if it is a valid option """ return self.__manifest.isOption(opName) def getSectionList(self, section=""): """ Get a list of sections in the job manifest """ cfg = self.__manifest.getRecursive(section) if not cfg or 'value' not in cfg: return [] cfg = cfg['value'] return cfg.listSections()
import os
import argparse
from logger import HoneyHornetLogger
from threading import BoundedSemaphore
import threading
import logging
from datetime import date, datetime
from termcolor import colored
import http.client
import re
import time


class ViewChecker(HoneyHornetLogger):
    # Grabs the HTTP <title> banner from hosts (assumed to be IP cameras) to
    # determine camera make & model. Work is fanned out across threads, with
    # a bounded semaphore capping concurrent connections.

    def __init__(self, config=None):
        # config: optional configuration object; stored but not interpreted here.
        HoneyHornetLogger.__init__(self)
        self.config = config
        self.verbose = False
        self.banner = False
        MAX_CONNECTIONS = 20  # max threads that can be created
        self.CONNECTION_LOCK = BoundedSemaphore(value=MAX_CONNECTIONS)
        self.TIMER_DELAY = 3  # timer delay used for Telnet testing
        # Logs live in <parent-of-cwd>/logs/<date>_DEBUG.log
        self.default_filepath = os.path.dirname(os.getcwd())
        log_name = str(date.today()) + "_DEBUG.log"
        log_name = os.path.join(self.default_filepath, "logs", log_name)
        logging.basicConfig(filename=log_name, format='%(asctime)s %(levelname)s: %(message)s', level=logging.DEBUG)

    def determine_camera_model(self, vulnerable_host, https=False, retry=False):
        """ simple banner grab with http.client """
        # vulnerable_host: host object with .ip and .ports on the first pass;
        #                  a bare host string on the HTTPS retry pass.
        # https: use HTTPSConnection instead of HTTPConnection.
        # retry: True on the second (fallback) attempt for a host.
        ports = []
        self.CONNECTION_LOCK.acquire()
        service = "DETERMINE-CAMERA-MODEL"
        if retry is False:
            try:
                host = vulnerable_host.ip
                ports_to_check = set(vulnerable_host.ports)
            except vulnerable_host.DoesNotExist:
                # NOTE(review): this fallback path looks broken: 'ports' is a
                # list (no .split), and set(...).strip() would be called in the
                # wrong order anyway. Presumably it was meant to parse a
                # comma-separated port string — confirm intent before fixing.
                host = str(vulnerable_host)
                ports_to_check = set(ports.split(',').strip())
        elif retry is True:
            # NOTE(review): on retry, ports_to_check is NOT reassigned; the
            # loop below would raise NameError if retry is the first call.
            host = vulnerable_host
        if self.verbose:
            print("[*] Checking camera make & model of {0}".format(host))
        logging.info('{0} set for {1} service'.format(host, service))
        try:
            for port in ports_to_check:
                if https is True:
                    conn = http.client.HTTPSConnection(host, port)
                else:
                    conn = http.client.HTTPConnection(host, port)
                conn.request("GET", "/")
                http_r1 = conn.getresponse()
                camera_check = http_r1.read(1024)
                headers = http_r1.getheaders()
                if self.verbose:
                    print(http_r1.status, http_r1.reason)
                # NOTE(review): duplicate of the verbose print above — this
                # one runs unconditionally; possibly left over from debugging.
                print(http_r1.status, http_r1.reason)
                # The page <title> is used as the camera make/model banner
                results = re.findall(r"<title>(?P<camera_title>.*)</title>", str(camera_check))
                if results:
                    print(results)
                else:
                    print("No match for <Title> tag found.")
                # puts banner into the class instance of the host
                # vulnerable_host.put_banner(port, banner_txt, http_r1.status, http_r1.reason, headers)
                # banner_grab_filename = str(date.today()) + "_banner_grabs.log"
                # banner_grab_filename = os.path.join(self.default_filepath, "logs", banner_grab_filename)
                # with open(banner_grab_filename, 'a') as banner_log:
                #     banner_to_log = "host={0}, http_port={1},\nheaders={2},\nbanner={3}\n".format(host, port,
                #                                                                                  headers, banner_txt)
                #     banner_log.write(banner_to_log)
        except http.client.HTTPException:
            # Plain-HTTP failure: retry the host once over HTTPS
            try:
                self.determine_camera_model(host, https=True, retry=True)
            except Exception as error:
                logging.exception("{0}\t{1}\t{2}\t{3}".format(host, port, service, error))
        except Exception as error:
            # NOTE(review): Python 3 exceptions are not indexable; error[0]
            # raises TypeError here. Likely a Python 2 leftover meant to test
            # errno == 104 (connection reset) — use error.errno / args instead.
            if error[0] == 104:
                self.determine_camera_model(host, https=True, retry=True)
            logging.exception("{0}\t{1}\t{2}\t{3}".format(host, port, service, error))
        except KeyboardInterrupt:
            exit(0)
        self.CONNECTION_LOCK.release()

    def run_view_checker(self, hosts_to_check):
        """
        Function tests hosts for default credentials on open 'admin' ports
        Utilizes threading to greatly speed up the scanning
        """
        # hosts_to_check: iterable of host objects with .ip and .ports
        service = "building_threads"
        logging.info("Building threads.")
        logging.info("Verbosity set to {0}".format(self.verbose))
        threads = []
        print("[*] Testing vulnerable host ip addresses...")
        try:
            # One thread per host that has at least one port recorded
            for vulnerable_host in hosts_to_check:
                if self.verbose:
                    print('[*] checking >> {0}'.format(vulnerable_host.ip))
                if set(vulnerable_host.ports):
                    t0 = threading.Thread(target=self.determine_camera_model, args=(vulnerable_host, ))
                    threads.append(t0)
            logging.info("Starting {0} threads.".format(len(threads)))
            for thread in threads:
                thread.start()
            # Cap each join at 120 s so a hung connection can't stall forever
            for thread in threads:
                thread.join(120)
        except KeyboardInterrupt:
            exit(0)
        except threading.ThreadError as error:
            logging.exception("{0}\t{1}".format(service, error))
        except Exception as e:
            logging.exception(e)
import os
import subprocess
import scipy.io as sio
import bet.sampling.basicSampling as bsam


def lb_model(input_data):
    """Run the external serial model on input_data via a shared .mat file.

    The samples are written to 'io_file', serial_model.py is invoked on it,
    and the model's 'output' array is read back from the same file.
    """
    io_file_name = "io_file"

    # Hand the input to the model through the .mat exchange file
    sio.savemat(io_file_name, {'input': input_data})

    # Run the model as a separate process
    subprocess.call(['python', 'serial_model.py', io_file_name])

    # Pull the model's result back out of the exchange file
    return sio.loadmat(io_file_name)['output']


my_sampler = bsam.sampler(lb_model)

my_discretization = my_sampler.create_random_discretization(
    sample_type='r',
    input_obj=4,
    savefile="serial_serial_example",
    num_samples=100)
"""Almanac data This module can optionally use PyEphem, which offers high quality astronomical calculations. See http://rhodesmill.org/pyephem. """ import time import sys import math import weeutil.Moon import weewx.units try: import ephem except ImportError: import weeutil.Sun class Almanac(): """Almanac data. ATTRIBUTES. As a minimum, the following attributes are available: sunrise: Time (local) upper limb of the sun rises above the horizon, formatted using the format 'timeformat'. sunset: Time (local) upper limb of the sun sinks below the horizon, formatted using the format 'timeformat'. moon_phase: A description of the moon phase(eg. "new moon", Waxing crescent", etc.) moon_fullness: Percent fullness of the moon (0=new moon, 100=full moon) If the module 'ephem' is used, them many other attributes are available. Here are a few examples: sun.rise: Time upper limb of sun will rise above the horizon today in unix epoch time sun.transit: Time of transit today (sun over meridian) in unix epoch time sun.previous_sunrise: Time of last sunrise in unix epoch time sun.az: Azimuth (in degrees) of sun sun.alt: Altitude (in degrees) of sun mars.rise: Time when upper limb of mars will rise above horizon today in unix epoch time mars.ra: Right ascension of mars etc. 
EXAMPLES (note that these will only work in the Pacific Time Zone) >>> t = 1238180400 >>> print timestamp_to_string(t) 2009-03-27 12:00:00 PDT (1238180400) >>> almanac = Almanac(t, 46.0, -122.0) Test backwards compatibility with attribute 'moon_fullness': >>> print "Fullness of the moon (rounded) is %.2f%% [%s]" % (almanac.moon_fullness, almanac.moon_phase) Fullness of the moon (rounded) is 2.00% [new (totally dark)] Now get a more precise result for fullness of the moon: >>> print "Fullness of the moon (more precise) is %.2f%%" % almanac.moon.moon_phase Fullness of the moon (more precise) is 1.70% Test backwards compatibility with attributes 'sunrise' and 'sunset' >>> print "Sunrise, sunset:", almanac.sunrise, almanac.sunset Sunrise, sunset: 06:56 19:30 Get sunrise, sun transit, and sunset using the new 'ephem' syntax: >>> print "Sunrise, sun transit, sunset:", almanac.sun.rise, almanac.sun.transit, almanac.sun.set Sunrise, sun transit, sunset: 06:56 13:13 19:30 Do the same with the moon: >>> print "Moon rise, transit, set:", almanac.moon.rise, almanac.moon.transit, almanac.moon.set Moon rise, transit, set: 06:59 14:01 21:20 Exercise equinox, solstice routines >>> print almanac.next_vernal_equinox 20-Mar-2010 10:32 >>> print almanac.next_autumnal_equinox 22-Sep-2009 14:18 >>> print almanac.next_summer_solstice 20-Jun-2009 22:45 >>> print almanac.previous_winter_solstice 21-Dec-2008 04:03 >>> print almanac.next_winter_solstice 21-Dec-2009 09:46 Exercise moon state routines >>> print almanac.next_full_moon 09-Apr-2009 07:55 >>> print almanac.next_new_moon 24-Apr-2009 20:22 >>> print almanac.next_first_quarter_moon 02-Apr-2009 07:33 Now location of the sun and moon >>> print "Solar azimuth, altitude = (%.2f, %.2f)" % (almanac.sun.az, almanac.sun.alt) Solar azimuth, altitude = (154.14, 44.02) >>> print "Moon azimuth, altitude = (%.2f, %.2f)" % (almanac.moon.az, almanac.moon.alt) Moon azimuth, altitude = (133.55, 47.89) Try the pyephem "Naval Observatory" example. 
>>> t = 1252252800 >>> print timestamp_to_gmtime(t) 2009-09-06 16:00:00 UTC (1252252800) >>> atlanta = Almanac(t, 33.8, -84.4, pressure=0, horizon=-34.0/60.0) >>> # Print it in GMT, so it can easily be compared to the example: >>> print timestamp_to_gmtime(atlanta.sun.previous_rising.raw) 2009-09-06 11:14:56 UTC (1252235696) >>> print timestamp_to_gmtime(atlanta.moon.next_setting.raw) 2009-09-07 14:05:29 UTC (1252332329) Now try the civil twilight examples: >>> print timestamp_to_gmtime(atlanta(horizon=-6).sun(use_center=1).previous_rising.raw) 2009-09-06 10:49:40 UTC (1252234180) >>> print timestamp_to_gmtime(atlanta(horizon=-6).sun(use_center=1).next_setting.raw) 2009-09-07 00:21:22 UTC (1252282882) """ def __init__(self, time_ts, lat, lon, altitude=None, # Use 'None' in case a bad value is passed in temperature=None, # " pressure=None, # " horizon=None, # " moon_phases=weeutil.Moon.moon_phases, formatter=weewx.units.Formatter()): """Initialize an instance of Almanac time_ts: A unix epoch timestamp with the time of the almanac. If None, the present time will be used. lat, lon: Observer's location altitude: Observer's elevation in **meters**. [Optional. Default is 0 (sea level)] temperature: Observer's temperature in **degrees Celsius**. [Optional. Default is 15.0] pressure: Observer's atmospheric pressure in **mBars**. [Optional. Default is 1010] horizon: Angle of the horizon in degrees [Optional. Default is zero] moon_phases: An array of 8 strings with descriptions of the moon phase. [optional. If not given, then weeutil.Moon.moon_phases will be used] formatter: An instance of weewx.units.Formatter() with the formatting information to be used. 
""" self.time_ts = time_ts if time_ts else time.time() self.time_djd = timestamp_to_djd(self.time_ts) self.lat = lat self.lon = lon self.altitude = altitude if altitude is not None else 0.0 self.temperature = temperature if temperature is not None else 15.0 self.pressure = pressure if pressure is not None else 1010.0 self.horizon = horizon if horizon is not None else 0.0 self.moon_phases = moon_phases self.formatter = formatter (y,m,d) = time.localtime(self.time_ts)[0:3] (self.moon_index, self._moon_fullness) = weeutil.Moon.moon_phase(y, m, d) self.moon_phase = self.moon_phases[self.moon_index] # Check to see whether the user has module 'ephem'. if 'ephem' in sys.modules: self.hasExtras = True else: # No ephem package. Use the weeutil algorithms, which supply a minimum of functionality (sunrise_utc, sunset_utc) = weeutil.Sun.sunRiseSet(y, m, d, self.lon, self.lat) # The above function returns its results in UTC hours. Convert # to a local time tuple sunrise_tt = weeutil.weeutil.utc_to_local_tt(y, m, d, sunrise_utc) sunset_tt = weeutil.weeutil.utc_to_local_tt(y, m, d, sunset_utc) self._sunrise = time.strftime("%H:%M", sunrise_tt) self._sunset = time.strftime("%H:%M", sunset_tt) self.hasExtras = False # Shortcuts, used for backwards compatibility @property def sunrise(self): return self.sun.rise if self.hasExtras else self._sunrise @property def sunset(self): return self.sun.set if self.hasExtras else self._sunset @property def moon_fullness(self): return int(self.moon.moon_phase+0.5) if self.hasExtras else self._moon_fullness # What follows is a bit of Python wizardry to allow syntax such as: # almanac(horizon=-0.5).sun.rise def __call__(self, **kwargs): """Call an almanac object as a functor. This allows overriding the values used when the Almanac instance was initialized. Named arguments: Any named arguments will be passed on to the initializer of the ObserverBinder, overriding any default values. 
These are all optional: almanac_time: The observer's time in unix epoch time. lat: The observer's latitude in degrees lon: The observer's longitude in degrees altitude: The observer's altitude in meters horizon: The horizon angle in degrees temperature: The observer's temperature (used to calculate refraction) pressure: The observer's pressure (used to calculate refraction) """ # Using an encapsulated class allows easy access to the default values class ObserverBinder(object): # Use the default values provided by the outer class (Almanac): def __init__(self, almanac_time=self.time_ts, lat=self.lat, lon=self.lon, altitude=self.altitude, horizon=self.horizon, temperature=self.temperature, pressure=self.pressure, formatter=self.formatter): # Build an ephem Observer object self.observer = ephem.Observer() self.observer.date = timestamp_to_djd(almanac_time) self.observer.lat = math.radians(lat) self.observer.long = math.radians(lon) self.observer.elev = altitude self.observer.horizon = math.radians(horizon) self.observer.temp = temperature self.observer.pressure= pressure self.formatter = formatter def __getattr__(self, body): """Return a BodyWrapper that binds the observer to a heavenly body. If there is no such body an exception of type AttributeError will be raised. body: A heavenly body. Examples, 'sun', 'moon', 'jupiter' Returns: An instance of a BodyWrapper. It will bind together the heavenly body (an instance of something like ephem.Jupiter) and the observer (an instance of ephem.Observer) """ # Find the module used by pyephem. For example, the module used for # 'mars' is 'ephem.Mars'. If there is no such module, an exception # of type AttributeError will get thrown. 
ephem_module = getattr(ephem, body.capitalize()) # Now, together with the observer object, return an # appropriate BodyWrapper return BodyWrapper(ephem_module, self.observer, self.formatter) # This will override the default values with any explicit parameters in kwargs: return ObserverBinder(**kwargs) def __getattr__(self, attr): if not self.hasExtras: # If the Almanac does not have extended capabilities, we can't # do any of the following. Raise an exception. raise AttributeError, "Unknown attribute %s" % attr # We do have extended capability. Check to see if the attribute is a calendar event: elif attr in ['previous_equinox', 'next_equinox', 'previous_solstice', 'next_solstice', 'previous_autumnal_equinox', 'next_autumnal_equinox', 'previous_vernal_equinox', 'next_vernal_equinox', 'previous_winter_solstice', 'next_winter_solstice', 'previous_summer_solstice', 'next_summer_solstice', 'previous_new_moon', 'next_new_moon', 'previous_first_quarter_moon', 'next_first_quarter_moon', 'previous_full_moon', 'next_full_moon', 'previous_last_quarter_moon', 'next_last_quarter_moon']: # This is how you call a function on an instance when all you have # is the function's name as a string djd = getattr(ephem, attr)(self.time_djd) return weewx.units.ValueHelper((djd, "dublin_jd", "group_time"), context="ephem_year", formatter=self.formatter) else: # It's not a calendar event. The attribute must be a heavenly body # (such as 'sun', or 'jupiter'). Create an instance of # ObserverBinder by calling the __call__ function in Almanac, but # with no parameters binder = self() # Now try getting the body as an attribute. If successful, an # instance of BodyWrapper will be returned. If not, an exception of # type AttributeError will be raised. return getattr(binder, attr) fn_map = {'rise' : 'next_rising', 'set' : 'next_setting', 'transit' : 'next_transit'} class BodyWrapper(object): """This class wraps a celestial body. 
It returns results in degrees (instead of radians) and percent (instead of fractions). For times, it returns the results as a ValueHelper. It also deals with the unfortunate design decision in pyephem to change the state of the celestial body when using it as an argument in certain functions.""" def __init__(self, body_factory, observer, formatter): """Initialize a wrapper body_factory: A function that returns an instance of the body to be wrapped. Example would be ephem.Sun observer: An instance of ephem.Observer, containing the observer's lat, lon, time, etc. formatter: An instance of weewx.units.Formatter(), containing the formatting to be used for times. """ self.body_factory = body_factory self.observer = observer self.formatter = formatter self.body = body_factory(observer) self.use_center = False # Calculate and store the start-of-day in Dublin Julian Days: (y,m,d) = time.localtime(djd_to_timestamp(observer.date))[0:3] self.sod_djd = timestamp_to_djd(time.mktime((y,m,d,0,0,0,0,0,-1))) def __call__(self, use_center=False): self.use_center = use_center return self def __getattr__(self, attr): if attr in ['az', 'alt', 'a_ra', 'a_dec', 'g_ra', 'ra', 'g_dec', 'dec', 'elong', 'radius', 'hlong', 'hlat', 'sublat', 'sublong']: # Return the results in degrees rather than radians return math.degrees(getattr(self.body, attr)) elif attr=='moon_phase': # Return the result in percent return 100.0 * self.body.moon_phase elif attr in ['next_rising', 'next_setting', 'next_transit', 'next_antitransit', 'previous_rising', 'previous_setting', 'previous_transit', 'previous_antitransit']: # These functions have the unfortunate side effect of changing the state of the body # being examined. 
So, create a temporary body and then throw it away temp_body = self.body_factory() time_djd = getattr(self.observer, attr)(temp_body, use_center=self.use_center) return weewx.units.ValueHelper((time_djd, "dublin_jd", "group_time"), context="ephem_day", formatter=self.formatter) elif attr in fn_map: # These attribute names have to be mapped to a different function name. Like the # attributes above, they also have the side effect of changing the state of the body. # Finally, they return the time of the event anywhere in the day (not just the next # event), so they take a second argument in the function call. temp_body = self.body_factory(self.observer) # Look up the function to be called for this attribute (eg, call 'next_rising' for 'rise') fn = fn_map[attr] # Call the function, with a second argument giving the start-of-day time_djd = getattr(self.observer, fn)(temp_body, self.sod_djd) return weewx.units.ValueHelper((time_djd, "dublin_jd", "group_time"), context="ephem_day", formatter=self.formatter) else: # Just return the result unchanged. return getattr(self.body, attr) def timestamp_to_djd(time_ts): """Convert from a unix time stamp to the number of days since 12/31/1899 12:00 UTC (aka "Dublin Julian Days")""" # The number 25567.5 is the start of the Unix epoch (1/1/1970). Just add on the # number of days since then return 25567.5 + time_ts/86400.0 def djd_to_timestamp(djd): """Convert from number of days since 12/31/1899 12:00 UTC ("Dublin Julian Days") to unix time stamp""" return (djd-25567.5) * 86400.0 if __name__ == '__main__': import doctest from weeutil.weeutil import timestamp_to_string, timestamp_to_gmtime #@UnusedImport doctest.testmod()
import tornado.httpserver import tornado.ioloop import tornado.options import tornado.web import tornado.httpclient import lcs from urllib import quote from urllib import unquote from tornado import gen import ttlrcdump listen_port = 38439 def ChooiseItem(xml, artist): #print '===============================================' #print xml.decode('utf-8').encode('gbk') n = xml.find('<?xml') if n == -1: return False artist = ttlrcdump.FilterSearchStr(artist) #remove item if artist != artist n = 0 pos = 0 t = xml.count('id=') for n in range(0, t): begin = xml.find('artist="', pos) end = xml.find('" title', begin) _artist = ttlrcdump.FilterSearchStr(xml[begin+8:end]) pos = end n += 1 arith = lcs.arithmetic() samelen = len(arith.lcs(_artist,artist)) #print samelen if samelen < 5 and samelen < len(artist)/3 : begin = xml.rfind('<lrc',0 ,pos) end = xml.find('lrc>', pos) xml = xml[:begin] + xml[end + 4:] pos = begin n -= 1 t -= 1 #print xml.decode('utf-8').encode('gbk') #print '===============================================' n = xml.find('id=') if n == -1: return False #remove item if artist != artist n = 0 begin = xml.find('artist="', n) end = xml.find('" title', n) n = end _artist = ttlrcdump.FilterSearchStr(xml[begin+10:end]) strs = ('动新','動新','动基','对照','對照','中日','中英','修正','假名') for _str in strs: n = xml.find(_str) if n != -1: break if n == -1: n = xml.find('<lrc') else: n = xml.rfind('<lrc', 0, n) if n > -1: begin = xml.find('id="', n) + 4 end = xml.find('"', begin) #print xml[begin:end] id = xml[begin:end] begin = xml.find('artist="', n) + 8 end = xml.find('"', begin ) #print quote(xml[begin:end]) artist = xml[begin:end].replace('&amp;','&').replace('&apos;',"'").replace('&quot;','"').replace('&lt;','<').replace('&gt;','>') begin = xml.find('title="', n) + 7 end = xml.find('"', begin) #print quote(xml[begin + 7:end]) title = xml[begin:end].replace('&amp;','&').replace('&apos;',"'").replace('&quot;','"').replace('&lt;','<').replace('&gt;','>') #ret = 
"id=%s&artist=%s&title=%s" % (id, quote(artist), quote(title)) #print ret data = {'id':id, 'artist':artist, 'title':title} return data return False def get_arg(req, arg): begin = req.find('%s=' % arg) if begin != -1: begin += len(arg) + 1 end = req.find('&', begin) if end != -1: return req[begin:end] else: return req[begin:] @gen.coroutine def handle_request(request): if request.uri.startswith('/lrc'): try: id = get_arg(request.uri, 'id') artist = unquote(get_arg(request.uri, 'artist')) title = unquote(get_arg(request.uri, 'title')) ttservernum = int(get_arg(request.uri, 'ttservernum')) #print id.decode('utf-8').encode('gbk') #print artist.decode('utf-8').encode('gbk') #print title.decode('utf-8').encode('gbk') print str(ttservernum) http_client = tornado.httpclient.AsyncHTTPClient() #print ttlrcdump.GetDownloadLrcReq(id, artist, title) req = tornado.httpclient.HTTPRequest(ttlrcdump.GetDownloadLrcReq(ttservernum, id, artist, title)) res = yield http_client.fetch(req) lrc = res.body.replace('>', '】') lrc = lrc.replace('<', '【') lrc = lrc.replace('\r\n', '<br />') lrc = lrc.replace('\n', '<br />') lrc = lrc.replace('\r', '<br />') context = '<script type="text/javascript" src="/templates/ddjs/lrc_content_inner_1.js"></script></div>%s</li>' context = context.replace('%s',lrc, 1) #print context request.write('HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s' % (len(context), context)) except tornado.httpclient.HTTPError, code: print 'HTTPError except Code' + str(code) except Exception,e: print e finally: request.finish() elif (request.uri.find('/?keyword=') != -1): uri = request.uri.decode('gbk').replace('%20',' ') if uri.find('&') != -1: keyword = uri[10:uri.find('&')] else:keyword = uri[10:] #print repr(keyword) keyword = keyword.encode('gbk') #print repr(keyword) keyword = keyword.decode('utf-8') #print repr(keyword) keyword = eval(repr(keyword)[1:]) #print repr(keyword) keyword = keyword.decode('gbk').encode('utf-8') #print keyword.decode('utf-8').encode('gbk') 
#print repr(keyword) try: if keyword.count(' ') == 0: keyword += ' ' n = 0 ttservernum = 0 cnt = keyword.count(' ') for i in range(0, cnt): #try to prase art and title n = keyword.find(' ', n) + 1 artist = keyword[0:n-1] title = keyword[n:] #print 'title %s' % title if title.startswith( '(') and i < cnt - 1: #歌名一般不可能以括号开头 continue #print 'guess art=%s' % artist.decode('utf-8').encode('gbk') #print 'guess tit=%s' % title.decode('utf-8').encode('gbk') trycnt = 0 if artist.find('and') == -1 and title.find('and') == -1: trycnt = 1 while True: reqartist = '' reqtitle = '' if trycnt == 0: reqartist = artist.replace('and', '') reqtitle = title.replace('and', '') elif trycnt == 1: reqartist = artist reqtitle = title http_client = tornado.httpclient.AsyncHTTPClient() #print ttlrcdump.GetSearchLrcReq(ttservernum, artist, title) ttservernum = ttlrcdump.GetServerNum() req = tornado.httpclient.HTTPRequest(ttlrcdump.GetSearchLrcReq(ttservernum, reqartist, reqtitle)) res = yield http_client.fetch(req) ret = ChooiseItem(res.body, artist) if ret != False or trycnt > 0: break trycnt += 1 if ret != False: break if ret != False: context = '<div class="newscont mb15" style="line-height:160%;margin-top:10px">' \ '歌手:<a class="mr">%s</a><br>' \ '专辑:<a class="mr"></a>' \ '歌曲:<a class="mr ">%s<span class="highlighter">a</span></a><br>' \ '查看:<a class="mr"href="%s" target="_blank">LRC' \ '<div style="clear:both;"></div>' \ '<div class="page wid f14">' context = context.replace('%s', artist, 1) uni_title = title.decode('utf-8') strrep = '' for i in range(0, len(uni_title)): strrep += '<span class="highlighter">%s</span>' % uni_title[i].encode('utf-8') context = context.replace('%s', strrep, 1) context = context.replace('%s', "/lrc/?id=%s&artist=%s&title=%s&ttservernum=%s" % (str(ret['id']), quote(str(ret['artist'])), quote(str(ret['title'])), str(ttservernum))) #print context.decode('utf-8').encode('gbk') else: context = 'Lrc Not Found' request.write('HTTP/1.1 200 OK\r\nContent-Length: 
%d\r\n\r\n%s' % (len(context), context)) except tornado.httpclient.HTTPError, code: print 'HTTPError except Code' + str(code) except Exception,e: print e finally: request.finish() else: #print 'Unknow Request:%s' % request.uri context = '<head><meta http-equiv="refresh" content="0;url=http://foottlrc.mengsky.net/"></head>' request.write('HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s' % (len(context), context)) request.finish() def main(): http_server = tornado.httpserver.HTTPServer(handle_request) http_server.listen(listen_port) tornado.ioloop.IOLoop.instance().start() if __name__ == "__main__": main()
from edu.umd.rhsmith.diads.tools.sentiment import ISentimentAnalyzer
import pickle
import re
import os
import sys
import time
import traceback
import nltk
from nltk.corpus import stopwords


class SentimentAnalyzerP(ISentimentAnalyzer, object):
    '''
    Sentiment Analyzer Utility.

    Loads a pickled NLTK classifier plus a master feature list and scores
    tweet text in [-1.0, +1.0] (negative..positive).  Implements the Java
    ISentimentAnalyzer interface, so this module runs under Jython 2.x
    (hence the Python 2 print statements and unicode() calls).
    '''

    def __init__(self, classifierFilename, featuresFilename):
        """Compile the tweet-cleanup regexes and unpickle the classifier.

        classifierFilename: path to a pickled NLTK classifier.
        featuresFilename:   path to a pickled master feature collection.
        Exits the process (status 1) if either pickle fails to load.
        """
        ##### CODE FOR FEATURE EXTRACTION FROM TWEET TEXT
        self.punc_reducer = re.compile(r'(\W)\1+')
        self.repeat_reducer = re.compile(r'(\w)\1{2,}')
        self.punc_breaker_1 = re.compile(r'(\w{2,})(\W\s)')
        self.punc_breaker_2 = re.compile(r'(\s\W)(\w{2,})')
        self.punc_breaker_3 = re.compile(r'(\w{3,})(\W{2}\s)')
        self.punc_breaker_4 = re.compile(r'(\s\W{2})(\w{3,})')
        self.quote_replacer = re.compile(r'&quot;')
        self.amp_replacer = re.compile(r'&amp;')
        self.gt_replacer = re.compile(r'&gt;')
        self.lt_replacer = re.compile(r'&lt;')
        self.mention_replacer = re.compile(r'@\w+')
        self.link_replacer = re.compile(r'\b(([\w-]+://?|www[.])[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|/)))')
        #self.link_replacer = re.compile(r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))')
        self.caps_finder = re.compile(r'(\b[A-Z]{4,})\b')
        self.lol_reducer = re.compile(r'\b[aeo]*h[aeo]+(h+[aeo]*)*\b|\bl(o+l+)+s*z*(e?d)?\b|\brofls*z*(e?d)?\b|\blu+l+s*z*(e?d)?\b|\blmf+a+o+\b')
        # NLTK data (stopword corpus) is expected under <cwd>/py/nltk_data.
        nltk.data.path.append("%s/py/nltk_data" % os.getcwd())
        # NOTE(review): this builds a *list of (word, True) tuples*, yet
        # featurify() tests plain token strings against it with `in`, which
        # can never match a tuple — so stopword removal appears ineffective.
        # Confirm intent before "fixing": the classifier was presumably
        # trained with the same (broken) filtering.
        self.stopwords_dict = [(x, True) for x in stopwords.words()]
        ##### IMPORT THE SENTIMENT CLASSIFIER #####
        try:
            print "Trying to import sentiment classifier; could take a couple minutes..."
            sys.stdout.flush()
            f = open(classifierFilename, 'r')
            self.classifier = pickle.load(f)
            f.close()
            f = open(featuresFilename, 'r')
            self.masterfeats = pickle.load(f)
            f.close()
            print "Sentiment classifier import succeeded!"
            sys.stdout.flush()
        except Exception:
            # Unusable without the classifier; abort the process.
            print "Sentiment classifier import failed!"
            print traceback.format_exc()
            sys.exit(1)

    def featurify(self, text, master = None):
        """Turn raw tweet text into an NLTK feature dict {token: True}.

        text:   the raw status text (str).
        master: optional master feature collection; when given, only tokens
                present in it are kept.
        Returns a dict mapping each surviving token to True.
        """
        ext_tokens = []
        # replace "&quot;" with a double quote, and undo other HTML escapes
        text = self.quote_replacer.sub(r'"', text)
        text = self.amp_replacer.sub(r'&', text)
        text = self.gt_replacer.sub(r'>', text)
        text = self.lt_replacer.sub(r'<', text)
        #print text
        # replace mentions with a dummy string
        (text, num) = self.mention_replacer.subn(r'', text)
        if num > 0:
            ext_tokens.append("<MENTION>")
        # replace links with a dummy string
        (text, num) = self.link_replacer.subn(r'', text)
        if num > 0:
            ext_tokens.append("<LINK>")
        # find words in all caps and add a dummy string to note that
        (text, num) = self.caps_finder.subn(r'\1', text)
        if num > 0:
            ext_tokens.append("<CAPS>")
        # find laughter ("haha", "lol", "rofl", ...) and replace with a dummy string
        (text, num) = self.lol_reducer.subn(r'', text)
        if num > 0:
            ext_tokens.append("<LULZ>")
        # lowercase everything
        text = text.lower()
        # isolates and reduces long spans of repeated punctuation to a single item (like "...." / " !!!! " / "????")
        text = self.punc_reducer.sub(r' \1 ', text)
        # shorten long spans of repeated word chars to three ("soooooooo" ==> "sooo")
        text = self.repeat_reducer.sub(r'\1\1\1', text)
        # break single-character punctuation off of words of size 2 or more (quotes, exclaims, periods)
        text = self.punc_breaker_1.sub(r' \1 \2 ', text)
        text = self.punc_breaker_2.sub(r' \1 \2 ', text)
        # break double-character punctuation off of words of size 3 or more (quote-period, question-exclaim)
        text = self.punc_breaker_3.sub(r' \1 \2 ', text)
        text = self.punc_breaker_4.sub(r' \1 \2 ', text)
        # split on all whitespace
        tokens = re.split(r'\s+', text)
        # remove stopwords and blanks
        # NOTE(review): see __init__ — stopwords_dict holds tuples, so the
        # `x not in self.stopwords_dict` test only ever removes blanks here.
        tokens = [x for x in tokens if len(x) > 0 and x not in self.stopwords_dict]
        # add in manual extra tokens
        tokens += ext_tokens
        #print tokens
        #print
        if master == None:
            feats = dict([(word, True) for word in tokens])
        else:
            feats = dict([(word, True) for word in tokens if word in master])
        # make the feature data structure
        return feats

    def process(self, text):
        """Score one status text; returns a float in [-1, +1].

        Returns 0.0 when no features survive featurify(); returns None
        (implicitly) if an unexpected exception is caught and logged below.
        """
        try:
            # hack to skip statuses that have weird non-unicode text in them;
            # these can cause problems down the line for the regexes in featurify()
            try:
                unicode(text, "ascii", "strict")
            except UnicodeDecodeError:
                #print "Unicode error on status %i; stripping." % row['id']
                #sys.stdout.flush()
                try:
                    text = unicode(text, "utf-8").encode("ascii", "ignore")
                except UnicodeDecodeError:
                    print "Unicode error on status; skipping."
                    sys.stdout.flush()
            # featurify the text, using only the features in the master list
            statfeat = {}
            #try:
            #    with time_limit(10):
            statfeat = self.featurify(text, self.masterfeats)
            #except TimeoutException:
            #    print "Featurify timed out for status_id %i" % row['id']
            if len(statfeat) > 0:
                result = self.classifier.prob_classify(statfeat)
                probs = dict([(x, result.prob(x)) for x in result.samples()])
                # calculate a score in [-1, +1]
                score = probs['pos'] * 2.0 - 1.0
            else:
                # skip classification b/c there are no features!
                score = 0.0
            return score
        except Exception:
            print "Problem processing queries:"
            print traceback.format_exc()
            sys.stdout.flush()
import unittest

from cron_status import *


class TestChangeDetection(unittest.TestCase):
    """Test if the change detection is operational."""
    # Please note that status_history_list is backwards,
    # i.e., newest entry first.

    def test_all_okay(self):
        # A history of nothing but OKAY: no change, overall status OKAY.
        status_history_list = [
            {'foo': (ContainerStatus.OKAY, 'no msg')}
        ] * (STATUS_HISTORY_LENGTH + 1)
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertFalse(changed)
        self.assertEqual(changed, status[0].changed)  # because there is only 1 container
        self.assertEqual(status[0].overall_status, ContainerStatus.OKAY)
        self.assertEqual(status[0].current_status, ContainerStatus.OKAY)
        self.assertTrue(status[0].container_name in status_history_list[0])
        self.assertEqual(status[0].current_msg,
                         status_history_list[0][status[0].container_name][1])

    def test_all_failed(self):
        # A history of nothing but FAILED: steady state, no change reported.
        status_history_list = [
            {'foo': (ContainerStatus.FAILED, 'no msg')}
        ] * (STATUS_HISTORY_LENGTH + 1)
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertFalse(changed)
        self.assertEqual(changed, status[0].changed)  # because there is only 1 container
        self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
        self.assertEqual(status[0].current_status, ContainerStatus.FAILED)

    def test_failed_after_starting_short(self):
        # Newest entry FAILED after a STARTING streak shorter than the
        # window (oldest entry was OKAY): reported as a change to FAILED.
        status_history_list = [{'foo': (ContainerStatus.FAILED, 'no msg')}]
        status_history_list += [
            {'foo': (ContainerStatus.STARTING, 'no msg')}
        ] * (STATUS_HISTORY_LENGTH - 1)
        status_history_list += [{'foo': (ContainerStatus.OKAY, 'no msg')}]
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertTrue(changed)
        self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)

    def test_failed_after_starting_very_long(self):
        # Newest entry FAILED after STARTING for the whole window:
        # still a change to FAILED.
        status_history_list = [{'foo': (ContainerStatus.FAILED, 'no msg')}]
        status_history_list += [
            {'foo': (ContainerStatus.STARTING, 'no msg')}
        ] * STATUS_HISTORY_LENGTH
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertTrue(changed)
        self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)

    def test_okay_after_failed(self):
        # Recovery: newest OKAY after a long FAILED streak is a change.
        status_history_list = [
            {'foo': (ContainerStatus.OKAY, 'no msg')}
        ]
        status_history_list += [
            {'foo': (ContainerStatus.FAILED, 'no msg')}
        ] * STATUS_HISTORY_LENGTH
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertTrue(changed)
        self.assertEqual(status[0].overall_status, ContainerStatus.OKAY)

    def test_failed_after_okay(self):
        # Regression: newest FAILED after a long OKAY streak is a change.
        status_history_list = [
            {'foo': (ContainerStatus.FAILED, 'no msg')}
        ]
        status_history_list += [
            {'foo': (ContainerStatus.OKAY, 'no msg')}
        ] * STATUS_HISTORY_LENGTH
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertTrue(changed)
        self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)

    def test_missing_data(self):
        # History one entry shorter than the window: no change reported,
        # overall status follows the newest entries (FAILED).
        status_history_list = [
            {'foo': (ContainerStatus.FAILED, 'no msg')}
        ] * (STATUS_HISTORY_LENGTH - 1)
        status_history_list += [{'foo': (ContainerStatus.OKAY, 'no msg')}]
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertFalse(changed)
        self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)

    def test_too_much_data(self):
        # Entries beyond the window (the trailing FAILED) must be ignored.
        status_history_list = [
            {'foo': (ContainerStatus.OKAY, 'no msg')}
        ] * (STATUS_HISTORY_LENGTH + 1)
        status_history_list += [{'foo': (ContainerStatus.FAILED, 'no msg')}]
        changed, status = detect_flapping_and_changes(status_history_list)
        self.assertFalse(changed)
        self.assertEqual(status[0].overall_status, ContainerStatus.OKAY)


if __name__ == '__main__':
    unittest.main()
from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Class_year', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('class_year_text', models.CharField(default='Class Year', editable=False, max_length=10)), ], ), migrations.CreateModel( name='Name', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name_text', models.CharField(default='Name', editable=False, max_length=4)), ], ), migrations.CreateModel( name='Resume', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('resume_text', models.CharField(default='Resume', editable=False, max_length=6)), ], ), migrations.CreateModel( name='Wpi_email', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('wpi_email_text', models.CharField(default='WPI Email', editable=False, max_length=9)), ], ), migrations.AddField( model_name='resume', name='identifier', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='resumedrop.Wpi_email'), ), migrations.AddField( model_name='name', name='identifier', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='resumedrop.Wpi_email'), ), migrations.AddField( model_name='class_year', name='identifier', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='resumedrop.Wpi_email'), ), ]
import os import sys def usage(): print "{0} <feed>".format(os.path.basename(__file__)) if __name__ == '__main__': kmotion_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) sys.path.append(kmotion_dir) from core.camera_lost import CameraLost feed = '' if len(sys.argv) > 1: feed = sys.argv[1] cam_lost = CameraLost(kmotion_dir, feed) if cam_lost.reboot_camera(): sys.exit() else: usage() sys.exit(1)
__version__="1.0.1"
"""
Root finding methods
====================

Routines in this module:

bisection(f, a, b, eps=1e-5, display=False)
newton1(f, df, x0, eps=1e-5, display=False)
newtonn(f, J, x0, eps=1e-5, display=False)
secant(f, x0, x1, eps=1e-5, display=False)
inv_cuadratic_interp(f, a, b, c, eps=1e-5, display=False)
lin_fracc_interp(f, a, b, c, eps=1e-5, display=False)
broyden(f, x0, B0, eps=1e-5, display=False)
brent(f, a, b, eps=1e-5, display=False)
"""
import numpy as np

'''
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
Copyright (C) 4/24/17 Carlos Brito

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
'''

# 'broyden' was defined but missing from __all__; added for consistency.
__all__ = ['bisection', 'newton1', 'secant', 'newtonn',
           'inv_cuadratic_interp', 'lin_fracc_interp', 'broyden', 'brent']


def bisection(f, a, b, eps=1e-5, display=False):
    """Find a root of f on [a, b] by bisection.

    Parameters
    ----------
    f : function
        Function we want to find the root of.
    a : float
        Lower bound (swapped with b if given out of order).
    b : float
        High bound.
    eps : float
        Tolerance on the bracket width.
    display : bool
        Print the iterate at each step.

    Returns
    -------
    m : float
        Approximate root of f.
    iterations : int
        Number of iterations taken to find the root.
    """
    iterations = 0
    if a > b:
        a, b = b, a
    # Fix: precompute the midpoint so m is defined even when the bracket is
    # already narrower than eps and the loop body never runs (the original
    # raised NameError in that case).  Double precision replaces the
    # original's needless np.float32 round-off.
    m = a + (b - a) / 2.0
    while (b - a) > eps:
        m = a + (b - a) / 2.0
        # Keep the half-interval whose endpoints bracket the sign change.
        if np.sign(f(a)) == np.sign(f(m)):
            a = m
        else:
            b = m
        if display:
            print('iteration', iterations)
            print('m:', m)
        iterations += 1
    return m, iterations


def newton1(f, df, x0, eps=1e-5, display=False):
    """Find a root of f using Newton's method in one variable.

    Parameters
    ----------
    f : function
        Function we want to find the root of.
    df : function
        Derivative of f.
    x0 : float
        Starting point for the iteration.
    eps : float
        Tolerance on the step size.
    display : bool
        Print the iterate at each step.

    Returns
    -------
    root : float
        Approximate root of f (np.nan if the derivative vanished).
    iterations : int
        Number of iterations taken.
    """
    iterations = 0
    x_new = float(x0)
    while True:
        try:
            x_old = x_new
            x_new = x_old - f(x_old) / df(x_old)
        except ZeroDivisionError:
            # Fix: the original returned a bare scalar here, unlike every
            # other exit path; keep the (root, iterations) contract.
            return np.nan, iterations
        if display:
            print('iteration', iterations)
            print('x:', x_new)
        iterations += 1
        if abs(x_old - x_new) <= eps:
            break
    return x_new, iterations


def secant(f, x0, x1, eps=1e-5, display=False):
    """Find a root of f using the secant method.

    Parameters
    ----------
    f : function
        Function we want to find the root of.
    x0 : float
        First initial value "close" to the root of f.
    x1 : float
        Second initial value "close" to the root of f.
    eps : float
        Tolerance on the step size.
    display : bool
        Print the iterate at each step.

    Returns
    -------
    root : float
        Approximate root of f.
    iterations : int
        Number of iterations taken.
    """
    iterations = 0
    x_old_0 = x0
    x_old_1 = x1
    # First secant step from the two seeds.
    x_new = x0 - f(x0) * (x1 - x0) / (f(x1) - f(x0))
    while True:
        x_old_0 = x_old_1
        x_old_1 = x_new
        x_new = x_old_1 - f(x_old_1) * \
            ((x_old_1 - x_old_0) / (f(x_old_1) - f(x_old_0)))
        if display:
            print('iteration', iterations)
            print('x:', x_new)
        iterations += 1
        if abs(x_old_1 - x_new) < eps:
            break
    return x_new, iterations


def inv_cuadratic_interp(f, a, b, c, eps=1e-5, display=False):
    """Find a root of f by inverse quadratic interpolation.

    Parameters
    ----------
    f : function
        Function we want to find the root of.
    a, b, c : float
        Three initial values.
    eps : float
        Tolerance on |f(x)|.
    display : bool
        Print the iterate at each step.

    Returns
    -------
    root : float
        Approximate root of f.
    iterations : int
        Number of iterations taken.
    """
    iterations = 0
    while True:
        u = f(b) / f(c)
        v = f(b) / f(a)
        w = f(a) / f(c)
        p = v * (w * (u - w) * (c - b) - (1 - u) * (b - a))
        q = (w - 1) * (u - 1) * (v - 1)
        x_new = b + p / q
        # Shift the three-point window toward the new estimate.
        a = b
        b = c
        c = x_new
        if display:
            print('iteration', iterations)
            print('x:', x_new)
        iterations += 1
        if abs(f(x_new)) < eps:
            break
    return x_new, iterations


def lin_fracc_interp(f, a, b, c, eps=1e-5, display=False):
    """Find a root of f by linear fractional interpolation.

    Parameters
    ----------
    f : function
        Function we want to find the root of.
    a, b, c : float
        Three initial values.
    eps : float
        Tolerance on |f(x)|.
    display : bool
        Print the iterate at each step.

    Returns
    -------
    root : float
        Approximate root of f.
    iterations : int
        Number of iterations taken.
    """
    iterations = 0
    while True:
        numerator = (a - c) * (b - c) * (f(a) - f(b)) * f(c)
        denominator = (a - c) * (f(c) - f(b)) * f(a) - \
            (b - c) * (f(c) - f(a)) * f(b)
        h = numerator / denominator
        x_new = c + h
        a = b
        b = c
        c = x_new
        if display:
            print('iteration', iterations)
            print('x:', x_new)
        iterations += 1
        if abs(f(x_new)) < eps:
            break
    return x_new, iterations


def broyden(f, x0, B0, eps=1e-5, display=False):
    """Find a root of a k-dimensional function using Broyden's method.

    Parameters
    ----------
    f : function which takes an array_like matrix and returns an array_like matrix
        Function we want to find the root of.
    x0 : array_like
        Initial point.
    B0 : array_like
        Jacobian (approximation) of the function at x0.
    eps : float
        Error tolerance (component-wise on the step).
    display : bool
        Print the iterate at each step.

    Returns
    -------
    root : array_like
        Approximate root of the function.
    iterations : int
        Number of iterations taken.
    """
    iterations = 0
    x_new = x0
    B_new = B0
    while True:
        x_old = x_new
        B_old = B_new
        s = np.dot(np.linalg.inv(B_old), -f(x_old).T)  # solve for the step s
        x_new = x_old + s
        y = f(x_new) - f(x_old)
        # Broyden rank-one update of the Jacobian approximation.
        B_new = B_old + (np.dot((y - np.dot(B_old, s)), s.T)
                         ) / (np.dot(s.T, s))
        if display:
            print('iteration', iterations)
            print('x:', x_new)
            print('B', B_new)
        iterations += 1
        # convergence check
        if np.all(np.abs(x_old - x_new) <= eps):
            break
    return x_new, iterations


def newtonn(f, J, x0, eps=1e-5, display=False):
    """Find a root of a k-dimensional function using Newton's method.

    Parameters
    ----------
    f : function which takes an array_like matrix and returns an array_like matrix
        Function we want to find the root of.
    J : function returning an array_like matrix
        Jacobian of the function.
    x0 : array_like
        Initial point.
    eps : float
        Error tolerance (component-wise on the step).
    display : bool
        Print the iterate at each step.

    Returns
    -------
    root : array_like
        Approximate root, or None if the Jacobian became singular
        (preserved behavior — callers may test for None).
    iterations : int
        Number of iterations taken.
    """
    iterations = 0
    x_new = x0
    try:
        while True:
            x_old = x_new
            x_new = x_old - np.dot(np.linalg.inv(J(x_old)), f(x_old))
            if display:
                print('iteration', iterations)
                print('x:', x_new)
            iterations += 1
            # convergence check
            if np.all(np.abs(x_old - x_new) <= eps):
                break
    except np.linalg.LinAlgError:
        print('Error during iteration. Matrix is probably singular')
        return None
    return x_new, iterations


def brent(f, a, b, eps=1e-5, display=False):
    """Find a root of a one-dimensional function using Brent's method.

    Combines inverse quadratic interpolation, the secant method, and
    bisection, keeping the root bracketed at every step.

    Parameters
    ----------
    f : function
        Function we want to find the root of.
    a : float
        Low bound of the bracketing interval.
    b : float
        High bound of the bracketing interval.
    eps : float
        Tolerance.
    display : bool
        Print the iterate at each step.

    Returns
    -------
    root : float
        Approximate root of f.
    iterations : int
        Number of iterations taken.

    Raises
    ------
    ValueError
        If f(a) and f(b) do not bracket a sign change.
    """
    iterations = 0
    mflag = False
    d = 0.
    if f(a) * f(b) >= 0:
        raise ValueError('root is not bracketed')
    if abs(f(a)) < abs(f(b)):
        a, b = b, a  # swap values so b is the better estimate
    c = a
    mflag = True
    while True:
        if f(a) != f(c) and f(b) != f(c):
            # inverse quadratic interpolation
            s = (a * f(b) * f(c)) / ((f(a) - f(b)) * (f(a) - f(c))) + \
                (b * f(a) * f(c)) / ((f(b) - f(a)) * (f(b) - f(c))) + \
                (c * f(a) * f(b)) / ((f(c) - f(a)) * (f(c) - f(b)))
        else:
            # secant method
            s = b - f(b) * (b - a) / (f(b) - f(a))
        tmp1 = (3. * a + b) / 4.
        tmp2 = b
        if tmp1 > tmp2:
            tmp1, tmp2 = tmp2, tmp1
        # Fall back to bisection whenever the interpolated point is
        # out of range or converging too slowly.
        if not (tmp1 < s < tmp2) or \
           mflag and (abs(s - b)) >= (abs(c - d) / 2.) or \
           not mflag and (abs(s - b)) >= (abs(c - d) / 2.) or \
           mflag and (abs(b - c)) < abs(eps) or \
           not mflag and (abs(c - d)) < abs(eps):
            # bisection method
            s = (a + b) / 2.
            mflag = True
        else:
            mflag = False
        d = c
        c = b
        # Keep the sign change bracketed between a and b.
        if f(a) * f(s) < 0:
            b = s
        else:
            a = s
        if abs(f(a)) < abs(f(b)):
            a, b = b, a
        if display:
            print('iteration:', iterations)
            print('x:', s)
        iterations += 1
        # convergence check
        if f(b) == 0 or f(s) == 0 or (abs(b - a) < eps):
            break
    return s, iterations
'''
Copyright (C) 2014 Janina Mass

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>
'''
import sys
import getopt
import subprocess
import threading
import os
import shutil
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
from distutils import spawn

global GAP
GAP = "-"


class Alignment(object):
    """A multiple sequence alignment read from a (aligned) fasta file.

    Per-column match/mismatch/gap statistics are computed on construction
    and per-sequence gap/insertion bookkeeping is pushed into the member
    Sequence objects.
    """

    def __init__(self, id=None, fasta=None, members=None):
        # BUG FIX: the original used the mutable default `members=[]`
        # (shared across instances); the argument was ignored anyway.
        self.id = id
        self.fasta = fasta
        self.members = []
        self.gapPos = []        # columns containing at least one gap
        self.mismatchPos = []   # gap-free columns with >1 distinct residue
        self.matchPos = []      # gap-free columns with a single residue
        self.matchGapPos = []   # columns: one residue + gaps, >2 residues present
        self.attachSequences()
        self.calcNumbers()

    def __repr__(self):
        ids = self.members
        return("Alignment:{},{}".format(self.id, ids))

    def __len__(self):
        # Alignment length == length of any member sequence.
        try:
            return(len(self.members[0].sequence))
        except TypeError as e:
            # BUG FIX: stderr.write() needs a str, not an exception object.
            sys.stderr.write(str(e))
            sys.stderr.write("attachSequences first")
            return(0)

    def getStats(self):
        """Return csv string: matches, matches+gaps, mismatches, nogap, gaps, length."""
        res = ""
        res += "{},{},{},{},{},{}".format(
            len(self.matchPos), len(self.matchGapPos), len(self.mismatchPos),
            len(self) - len(self.gapPos), len(self.gapPos), len(self))
        return(res)

    def attachSequences(self):
        """Read self.fasta and wrap each record in a Sequence."""
        fp = FastaParser()
        print("FASTA:", self.fasta)
        for f in fp.read_fasta(self.fasta):
            newSeq = Sequence(id=f[0], sequence=f[1])
            self.members.append(newSeq)

    def calcNumbers(self):
        """Scan every column once, filling alignment- and sequence-level stats."""
        for i in range(0, len(self)):
            curpos = [m.sequence[i] for m in self.members]
            if GAP in curpos:
                # dynamic penalty:
                tmp = "".join(curpos)
                gappyness = tmp.count(GAP) / float(len(self.members))
                half = len(self.members) / 2.0
                # NOTE(review): gappyness is a fraction in [0,1] while `half`
                # is a sequence count (n/2), so for n > 2 the first branch can
                # never fire.  Preserved as-is — confirm the intended scale.
                if gappyness > half:
                    # punish the inserters (minority without gaps)
                    toPunish = [m for m in self.members if m.sequence[i] != GAP]
                    for t in toPunish:
                        t._dynamicPenalty += gappyness
                elif gappyness < half:
                    # punish gappers
                    toPunish = [m for m in self.members if m.sequence[i] == GAP]
                    for t in toPunish:
                        t._dynamicPenalty += 1 - gappyness
                else:
                    pass
                # /dyn penalty
                self.gapPos.append(i)
                # sequences that cause gaps:
                gappers = [m for m in self.members if m.sequence[i] == GAP]
                for m in gappers:
                    m.gapsCaused.append(i)
                    # unique gaps caused:
                    if len(gappers) == 1:
                        m.uniqueGapsCaused.append(i)
                # insertions
                inserters = [m for m in self.members if m.sequence[i] != GAP]
                for m in inserters:
                    m.insertionsCaused.append(i)
                    # unique insertions caused:
                    if len(inserters) == 1:
                        m.uniqueInsertionsCaused.append(i)
            nongap = [c for c in curpos if c != GAP]
            cpset = set(curpos)
            if (len(cpset) > 1 and GAP not in cpset):
                self.mismatchPos.append(i)
                for m in self.members:
                    m.mismatchShared.append(i)
            elif (len(cpset) == 1 and GAP not in cpset):
                self.matchPos.append(i)
                for m in self.members:
                    m.matchShared.append(i)
            elif (len(cpset) == 2 and GAP in cpset and len(nongap) > 2):
                self.matchGapPos.append(i)

    def showAlignment(self, numbers=False):
        """Return the alignment column-wise as text; prefix column index if `numbers`."""
        res = []
        mmPos = []
        alignmentLength = len(self.members[0].sequence)
        for i in range(0, alignmentLength):
            curpos = [m.sequence[i] for m in self.members]
            if numbers:
                res.append(str(i) + " " + " ".join(curpos))
            else:
                res.append(" ".join(curpos))
        return("\n".join(res))


class Sequence():
    """One aligned sequence plus the gap/insertion statistics its columns caused."""

    def __init__(self, id="", sequence=None, isForeground=False):
        self.id = id
        self.sequence = sequence
        self.isForeground = isForeground
        self.insertionsCaused = []        # positions
        self.uniqueInsertionsCaused = []
        self.gapsCaused = []              # positions
        self.uniqueGapsCaused = []
        self.matchShared = []
        self.mismatchShared = []
        self._penalty = None
        # penalize by site:
        # > n/2 gaps (@site): penalyze inserts by gaps/n
        # < n/2 gaps (@site): penalyze gaps by inserts/n
        self._dynamicPenalty = 0

    def setForeground(self, bool=True):
        self.isForeground = bool

    def __repr__(self):
        return("Sequence: {}".format(self.id))

    @property
    def penalty(self, uniqueGapPenalty=10, uniqueInsertPenalty=10,
                gapPenalty=1, insertPenalty=1):
        """Static penalty: weighted sum of (unique) gaps and insertions caused.

        BUG FIX: the original assigned the sum to ``self.penalty``, which
        raises AttributeError because ``penalty`` is a read-only property;
        it also weighted unique insertions with the *gap* penalty.
        """
        total = sum([
            len(self.insertionsCaused) * insertPenalty,
            len(self.uniqueInsertionsCaused) * uniqueInsertPenalty,
            len(self.gapsCaused) * gapPenalty,
            len(self.uniqueGapsCaused) * uniqueGapPenalty])
        return(total)

    def summary(self):
        """One-line human-readable statistics for logging."""
        s = ""
        s += self.id
        s += "insertionsCaused:{},uniqueInsertionsCaused:{}, gapsCaused:{}, uniqueGapsCaused:{}, penalty:{}, dynPenalty:{}".format(
            len(self.insertionsCaused), len(self.uniqueInsertionsCaused),
            len(self.gapsCaused), len(self.uniqueGapsCaused),
            self.penalty, self._dynamicPenalty)
        return(s)

    def getCustomPenalty(self, gapPenalty, uniqueGapPenalty, insertionPenalty,
                         uniqueInsertionPenalty, mismatchPenalty, matchReward):
        """Penalty with caller-supplied weights; non-unique counts exclude unique ones."""
        res = (len(self.gapsCaused) - len(self.uniqueGapsCaused)) * gapPenalty\
            + len(self.uniqueGapsCaused) * uniqueGapPenalty\
            + (len(self.insertionsCaused) - len(self.uniqueInsertionsCaused)) * insertionPenalty\
            + len(self.uniqueInsertionsCaused) * uniqueInsertionPenalty\
            + len(self.mismatchShared) * mismatchPenalty\
            + len(self.matchShared) * matchReward
        return(res)


class FastaParser(object):
    def read_fasta(self, fasta, delim=None, asID=0):
        """read from fasta fasta file 'fasta'
        and split sequence id at 'delim' (if set)\n
        example:\n
        >idpart1|idpart2\n
        ATGTGA\n
        and 'delim="|"' returns ("idpart1", "ATGTGA")
        """
        name = ""
        fasta = open(fasta, "r")
        while True:
            # `name` carries over the header line read by the inner loop.
            line = name or fasta.readline()
            if not line:
                break
            seq = []
            while True:
                name = fasta.readline()
                name = name.rstrip()
                if not name or name.startswith(">"):
                    break
                else:
                    seq.append(name)
            joinedSeq = "".join(seq)
            line = line[1:]
            if delim:
                line = line.split(delim)[asID]
            yield (line.rstrip(), joinedSeq.rstrip())
        fasta.close()


def usage():
    """Print CLI help and exit with status 2."""
    print("""
    ######################################
    # pysickle.py v0.1.1
    ######################################
    usage:
        pysickle.py -f multifasta alignment
    options:
        -f, --fasta=FILE    multifasta alignment (eg. "align.fas")
        OR
        -F, --fasta_dir=DIR directory with multifasta files (needs -s SUFFIX)
        -s, --suffix=SUFFIX will try to work with files that end with SUFFIX (eg ".fas")
        -a, --msa_tool=STR  supported: "mafft" [default:"mafft"]
        -i, --max_iterations=NUM    force stop after NUM iterations
        -n, --num_threads=NUM   max number of threads to be executed in parallel [default: 1]
        -m, --mode=MODE set strategy to remove outlier sequences [default: "Sites"]
            available modes (not case sensitive):
            "Sites", "Gaps", "uGaps","Insertions",
            "uInsertions","uInstertionsGaps", "custom"
        -l, --log   write logfile
        -h, --help  prints this

        only for mode "custom":
        -g, --gap_penalty=NUM set gap penalty [default: 1.0]
        -G, --unique_gap_penalty=NUM set unique gap penalty [default: 10.0]
        -j, --insertion_penalty=NUM set insertion penalty [default:1.0]
        -J, --unique_insertion_penalty=NUM set insertion penalty [default:1.0]
        -M, --mismatch_penalty=NUM set mismatch penalty [default:1.0]
        -r, --match_reward=NUM set match reward [default: -10.0]
    """)
    sys.exit(2)


def checkPath(progname):
    """Verify `progname` is supported and on PATH; raise otherwise."""
    # TODO extend
    avail = ["mafft"]
    if progname.lower() not in avail:
        raise Exception("Program not supported. Only {} allowed.".format(",".join(avail)))
    path = spawn.find_executable(progname)
    # BUG FIX: the original printed "Found X in None" before checking and
    # had an unreachable sys.exit(127) after the raise.
    if not path:
        raise Exception("Could not find {} on your system! Exiting. Available options:{}\n".format(progname, ",".join(avail)))
    print("Found {} in {}\n".format(progname, path))


def checkMode(mode):
    """Raise if `mode` (lowercased) is not a known strategy."""
    avail = ["sites", "gaps", "ugaps", "insertions", "uinsertions",
             "uinsertionsgaps", "custom"]
    if mode not in avail:
        raise Exception("Mode {} not available. Only {} allowed\n".format(mode, ",".join(avail)))


class TooFewSequencesException(Exception):
    pass


def adjustDir(dirname, mode):
    """Suffix output dir with a short mode tag so different modes don't collide."""
    # BUG FIX: the original tested the misspelled "unisertionsgaps", so the
    # "uig" tag was never used for mode "uinsertionsgaps".
    if mode == "uinsertionsgaps":
        abbr = "uig"
    else:
        abbr = mode[0:2]
    return(dirname + "_" + abbr)


def getSeqToKeep(alignment, mode, gap_penalty, unique_gap_penalty,
                 insertion_penalty, unique_insertion_penalty,
                 mismatch_penalty, match_reward):
    """Pick the survivors for the next iteration according to `mode`.

    Every strategy falls back to the dynamic (site) penalty when it could
    not discriminate (returned an empty list).
    """
    # BUG FIX: in all fallback branches the original discarded the result of
    # removeDynamicPenalty() instead of assigning it to toKeep.
    if mode == "sites":
        toKeep = removeDynamicPenalty(alignment)
    elif mode == "gaps":
        toKeep = removeCustomPenalty(alignment, gapPenalty=1, uniqueGapPenalty=1,
                                     insertionPenalty=0, uniqueInsertionPenalty=0,
                                     mismatchPenalty=0, matchReward=0)
        if not toKeep:
            toKeep = removeDynamicPenalty(alignment)
    elif mode == "ugaps":
        toKeep = removeMaxUniqueGappers(alignment)
        if not toKeep:
            toKeep = removeDynamicPenalty(alignment)
    elif mode == "insertions":
        toKeep = removeCustomPenalty(alignment, gapPenalty=0, uniqueGapPenalty=0,
                                     insertionPenalty=1, uniqueInsertionPenalty=1,
                                     mismatchPenalty=0, matchReward=0)
        if not toKeep:
            toKeep = removeDynamicPenalty(alignment)
    elif mode == "uinsertions":
        toKeep = removeMaxUniqueInserters(alignment)
        if not toKeep:
            toKeep = removeDynamicPenalty(alignment)
    elif mode == "uinsertionsgaps":
        toKeep = removeMaxUniqueInsertsPlusGaps(alignment)
        if not toKeep:
            toKeep = removeDynamicPenalty(alignment)
    elif mode == "custom":
        toKeep = removeCustomPenalty(alignment, gapPenalty=gap_penalty,
                                     uniqueGapPenalty=unique_gap_penalty,
                                     insertionPenalty=insertion_penalty,
                                     uniqueInsertionPenalty=unique_insertion_penalty,
                                     mismatchPenalty=mismatch_penalty,
                                     matchReward=match_reward)
        if not toKeep:
            toKeep = removeDynamicPenalty(alignment)
    else:
        raise Exception("Sorry, sth went wrong at getSeqToKeep\n")
    return(toKeep)


def schoenify(fasta=None, max_iter=None, finaldir=None, tmpdir=None,
              msa_tool=None, mode=None, logging=None,
              gap_penalty=None, unique_gap_penalty=None,
              insertion_penalty=None, unique_insertion_penalty=None,
              mismatch_penalty=None, match_reward=None):
    """Iteratively drop the worst sequence, realign, and keep per-iteration stats.

    Writes <fasta>.csv (stats table), <fasta>.res (ranking/best set),
    <fasta>.fun.png (plots), optionally <fasta>.info (log), and copies the
    best iteration's fasta into `finaldir`.
    """
    if not fasta:
        raise Exception("Schoenify: Need alignment in fasta format.")
    else:
        # first row is a placeholder, deleted after the table is filled
        arr = numpy.empty([1, 8], dtype='int32')
        iteration = 0
        fastabase = os.path.basename(fasta)
        statsout = finaldir + os.sep + fastabase + ".info"
        tabout = finaldir + os.sep + fastabase + ".csv"
        resout = finaldir + os.sep + fastabase + ".res"
        if logging:
            info = open(statsout, "w")
        iterTab = []
        headerTab = ["matches", "matchesWithGaps", "mismatches", " nogap",
                     "gaps", "length", "iteration", "numSeq"]
        alignmentstats = []
        newAlignment = Alignment(fasta=fasta)
        # sanity check
        if len(newAlignment.members) < 3:
            raise TooFewSequencesException("Need more than 2 sequences in alignment: {}\n".format(newAlignment.fasta))
        # can drop at most n-2 sequences and still have an alignment
        if not max_iter or (max_iter > len(newAlignment.members) - 2):
            max_iter = len(newAlignment.members) - 2
        print("#max iterations:{}".format(str(max_iter)))
        while (iteration < max_iter):
            toKeep = getSeqToKeep(alignment=newAlignment, mode=mode,
                                  gap_penalty=gap_penalty,
                                  unique_gap_penalty=unique_gap_penalty,
                                  insertion_penalty=insertion_penalty,
                                  unique_insertion_penalty=unique_insertion_penalty,
                                  mismatch_penalty=mismatch_penalty,
                                  match_reward=match_reward)
            print("# iteration: {}/{} \n".format(iteration, max_iter))
            if len(toKeep) < 2:
                break
            # write the survivors, ungapped, for realignment
            res = ""
            for k in toKeep:
                seq = "".join([s for s in k.sequence if s != GAP])
                res += (">{}\n{}\n".format(k.id, seq))
            iterfile = tmpdir + os.sep + ".".join(fastabase.split(".")[0:-1]) + "." + str(iteration)
            with open(iterfile + ".tmp", 'w') as out:
                out.write(res)
            # log
            if logging:
                for m in newAlignment.members:
                    info.write(m.summary() + "\n")
            # /log
            alignmentstats.append(newAlignment.getStats().split(","))
            iterTab.append((",".join(x for y in alignmentstats for x in y))
                           + "," + str(iteration) + "," + str(len(newAlignment.members)))
            alignmentstats = []
            iteration += 1
            if msa_tool == "mafft":
                proc = subprocess.Popen(["mafft", "--auto", iterfile + ".tmp"],
                                        stdout=open(iterfile + ".out", 'w'),
                                        bufsize=1)
                proc.communicate()
                newAlignment = Alignment(id=iterfile, fasta=iterfile + ".out")
            # TODO extend
        if logging:
            info.close()
        with open(tabout, 'w') as taboutf:
            taboutf.write(",".join(headerTab))
            taboutf.write("\n")
            taboutf.write("\n".join(iterTab))
        for i in iterTab:
            row = [int(j) for j in i.split(",")]
            arr = numpy.vstack((arr, numpy.array(row)))
        # delete row filled with zeros
        arr = numpy.delete(arr, 0, 0)
        ###########
        # pyplot state is global, so plotting is serialized across threads
        LOCK.acquire()
        plt.figure(1)
        plt.suptitle(fastabase, fontsize=12)
        ax = plt.subplot(3, 1, 1)
        for i, l in zip([0, 1, 2, 3, 4, 5, 6, 7],
                        ['match', 'matchWithGap', 'mismatch', 'nogap',
                         'gap', 'length', 'iteration', 'numSeq']):
            if not i in [6, 7]:
                plt.plot(arr[:, 6], arr[:, i], label=l)
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        ax = plt.subplot(3, 1, 2)
        plt.plot(arr[:, 6], arr[:, 7])
        ax.set_ylabel('count')
        ax.legend(["numSeq"], bbox_to_anchor=(1.05, 0.3), loc=2, borderaxespad=0.)
        ax = plt.subplot(3, 1, 3)
        # score each iteration: (alignment length - gap columns) * #sequences
        scoring = (arr[:, 5] - arr[:, 4]) * arr[:, 7]
        try:
            maxIndex = scoring.argmax()
            with open(resout, 'w') as resouth:
                resouth.write("# Ranking: {}\n".format(scoring[:].argsort()[::-1]))
                resouth.write("# Best set: {}".format(str(maxIndex)))
            plt.plot(arr[:, 6], scoring)
            ax.legend(["(length-gaps)*numSeq"], bbox_to_anchor=(1.05, 0.3), loc=2, borderaxespad=0.)
            ax.set_xlabel('iteration')
            plt.savefig(finaldir + os.sep + fastabase + '.fun.png', bbox_inches='tight')
            plt.clf()
            finalfa = tmpdir + os.sep + ".".join(fastabase.split(".")[0:-1]) + "." + str(maxIndex) + ".tmp"
            finalfabase = os.path.basename(finalfa)
            shutil.copy(finalfa, finaldir + os.sep + finalfabase)
        except ValueError as e:
            sys.stderr.write(str(e))
        finally:
            LOCK.release()


def removeMaxUniqueGappers(alignment):
    """Keep everything except the sequence(s) causing the most unique gaps."""
    if not isinstance(alignment, Alignment):
        raise Exception("Must be of class Alignment")
    mxUniqueGaps = max([len(k.uniqueGapsCaused) for k in alignment.members])
    keepers = [k for k in alignment.members if len(k.uniqueGapsCaused) < mxUniqueGaps]
    return(keepers)


def removeMaxUniqueInserters(alignment):
    """Keep everything except the sequence(s) causing the most unique insertions."""
    if not isinstance(alignment, Alignment):
        raise Exception("Must be of class Alignment")
    mxUniqueIns = max([len(k.uniqueInsertionsCaused) for k in alignment.members])
    keepers = [k for k in alignment.members if len(k.uniqueInsertionsCaused) < mxUniqueIns]
    return(keepers)


def removeMaxPenalty(alignment):
    """Keep everything except the sequence(s) with the highest static penalty."""
    if not isinstance(alignment, Alignment):
        raise Exception("Must be of class Alignment")
    mx = max([k.penalty for k in alignment.members])
    keepers = [k for k in alignment.members if k.penalty < mx]
    return(keepers)


def removeCustomPenalty(alignment, gapPenalty=None, uniqueGapPenalty=None,
                        insertionPenalty=None, uniqueInsertionPenalty=None,
                        mismatchPenalty=None, matchReward=None):
    """Keep everything except the sequence(s) with the highest custom penalty."""
    if not isinstance(alignment, Alignment):
        raise Exception("Must be of class Alignment")
    mx = max([k.getCustomPenalty(gapPenalty=gapPenalty,
                                 uniqueGapPenalty=uniqueGapPenalty,
                                 insertionPenalty=insertionPenalty,
                                 uniqueInsertionPenalty=uniqueInsertionPenalty,
                                 mismatchPenalty=mismatchPenalty,
                                 matchReward=matchReward) for k in alignment.members])
    print("MAX", mx)
    print([k.getCustomPenalty(gapPenalty=gapPenalty,
                              uniqueGapPenalty=uniqueGapPenalty,
                              insertionPenalty=insertionPenalty,
                              uniqueInsertionPenalty=uniqueInsertionPenalty,
                              mismatchPenalty=mismatchPenalty,
                              matchReward=matchReward) for k in alignment.members])
    keepers = [k for k in alignment.members
               if k.getCustomPenalty(gapPenalty=gapPenalty,
                                     uniqueGapPenalty=uniqueGapPenalty,
                                     insertionPenalty=insertionPenalty,
                                     uniqueInsertionPenalty=uniqueInsertionPenalty,
                                     mismatchPenalty=mismatchPenalty,
                                     matchReward=matchReward) < mx]
    return(keepers)


def removeDynamicPenalty(alignment):
    """Keep everything except the sequence(s) with the highest site (dynamic) penalty."""
    if not isinstance(alignment, Alignment):
        raise Exception("Must be of class Alignment")
    mx = max([k._dynamicPenalty for k in alignment.members])
    keepers = [k for k in alignment.members if k._dynamicPenalty < mx]
    return(keepers)


def removeMaxUniqueInsertsPlusGaps(alignment):
    """Keep everything except the sequence(s) with most unique insertions + gaps."""
    if not isinstance(alignment, Alignment):
        raise Exception("Must be of class Alignment")
    mxUniqueIns = max([len(k.uniqueInsertionsCaused) + len(k.uniqueGapsCaused)
                       for k in alignment.members])
    keepers = [k for k in alignment.members
               if (len(k.uniqueInsertionsCaused) + len(k.uniqueGapsCaused)) < mxUniqueIns]
    return(keepers)


class SchoenifyThread(threading.Thread):
    """Run schoenify() for one fasta; SEMAPHORE bounds concurrent workers."""

    def __init__(self, fasta, max_iter, finaldir, tmpdir, msa_tool, mode,
                 logging, gap_penalty, unique_gap_penalty, insertion_penalty,
                 unique_insertion_penalty, mismatch_penalty, match_reward):
        super(SchoenifyThread, self).__init__()
        self.fasta = fasta
        self.max_iter = max_iter
        self.finaldir = finaldir
        self.tmpdir = tmpdir
        self.msa_tool = msa_tool
        self.mode = mode
        self.logging = logging
        # custom
        self.gap_penalty = gap_penalty
        self.unique_gap_penalty = unique_gap_penalty
        self.insertion_penalty = insertion_penalty
        self.unique_insertion_penalty = unique_insertion_penalty
        self.mismatch_penalty = mismatch_penalty
        self.match_reward = match_reward

    def run(self):
        SEMAPHORE.acquire()
        try:
            schoenify(fasta=self.fasta, max_iter=self.max_iter,
                      finaldir=self.finaldir, tmpdir=self.tmpdir,
                      msa_tool=self.msa_tool, mode=self.mode,
                      logging=self.logging,
                      gap_penalty=self.gap_penalty,
                      unique_gap_penalty=self.unique_gap_penalty,
                      insertion_penalty=self.insertion_penalty,
                      unique_insertion_penalty=self.unique_insertion_penalty,
                      mismatch_penalty=self.mismatch_penalty,
                      match_reward=self.match_reward)
        except TooFewSequencesException as e:
            sys.stderr.write(str(e))
        finally:
            # BUG FIX: release in `finally` so an unexpected exception from
            # schoenify() cannot permanently leak a semaphore slot.
            SEMAPHORE.release()


def getFastaList(dir=None, suffix=None):
    """Yield paths of files in `dir` ending with `suffix`."""
    for f in os.listdir(dir):
        if f.endswith(suffix):
            yield(os.sep.join([dir, f]))


def main():
    fastalist = []
    fastadir = None
    suffix = None
    max_iter = None
    finaldir = None
    tmpdir = None
    msa_tool = "mafft"
    num_threads = 1
    mode = "sites"
    logging = False
    # custom penalty:
    gap_penalty = 1.0
    unique_gap_penalty = 10.0
    insertion_penalty = 1.0
    unique_insertion_penalty = 1.0
    mismatch_penalty = 1.0
    match_reward = -10.0
    try:
        # BUG FIX: "gap_penalty" and "unique_gap_penalty" were missing the
        # trailing '=' so getopt rejected their arguments.
        opts, args = getopt.gnu_getopt(sys.argv[1:], "f:F:s:i:a:n:m:g:G:j:J:M:r:lh",
                                       ["fasta=", "fasta_dir=", "suffix=",
                                        "max_iteration=", "msa_tool=",
                                        "num_threads=", "mode=",
                                        "gap_penalty=", "unique_gap_penalty=",
                                        "insertion_penalty=",
                                        "unique_insertion_penalty=",
                                        "mismatch_penalty=", "match_reward=",
                                        "log", "help"])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
    for o, a in opts:
        if o in ("-f", "--fasta"):
            fastalist = a.split(",")
            # BUG FIX: plain string concatenation produced e.g. "dirps_out";
            # os.path.join also yields "ps_out" when dirname is empty.
            finaldir = os.path.join(os.path.dirname(fastalist[0]), "ps_out")
            tmpdir = os.path.join(os.path.dirname(fastalist[0]), "ps_tmp")
        elif o in ("-h", "--help"):
            usage()
        elif o in ("-n", "--num_threads"):
            num_threads = int(a)
        elif o in ("-F", "--fasta_dir"):
            fastadir = a
            finaldir = fastadir + os.sep + "ps_out"
            tmpdir = fastadir + os.sep + "ps_tmp"
        elif o in ("-s", "--suffix"):
            suffix = a
        elif o in ("-i", "--max_iteration"):
            max_iter = int(a)
        elif o in ("-a", "--msa_tool"):
            msa_tool = a.lower()
        elif o in ("-m", "--mode"):
            mode = a.lower()
        elif o in ("-l", "--log"):
            logging = True
        # only for mode "custom":
        elif o in ("-g", "--gap_penalty"):
            gap_penalty = float(a)
        elif o in ("-G", "--unique_gap_penalty"):
            unique_gap_penalty = float(a)
        elif o in ("-j", "--insertion_penalty"):
            insertion_penalty = float(a)
        elif o in ("-J", "--unique_insertion_penalty"):
            unique_insertion_penalty = float(a)
        elif o in ("-M", "--mismatch_penalty"):
            mismatch_penalty = float(a)
        elif o in ("-r", "--match_reward"):
            match_reward = float(a)
        else:
            assert False, "unhandled option"
    if not fastalist and not (fastadir and suffix):
        usage()
    else:
        checkPath(progname=msa_tool)
        checkMode(mode=mode)
        finaldir = adjustDir(finaldir, mode)
        tmpdir = adjustDir(tmpdir, mode)
        global SEMAPHORE
        SEMAPHORE = threading.BoundedSemaphore(num_threads)
        if not os.path.exists(finaldir):
            os.mkdir(finaldir)
        if not os.path.exists(tmpdir):
            os.mkdir(tmpdir)
        if fastadir:
            print(suffix)
            for f in getFastaList(fastadir, suffix):
                print(f)
                fastalist.append(f)
        for fasta in fastalist:
            SchoenifyThread(fasta, max_iter, finaldir, tmpdir, msa_tool, mode,
                            logging, gap_penalty, unique_gap_penalty,
                            insertion_penalty, unique_insertion_penalty,
                            mismatch_penalty, match_reward).start()

# module-level lock for pyplot; SEMAPHORE is rebound in main() with -n
LOCK = threading.Lock()
SEMAPHORE = threading.BoundedSemaphore()

if __name__ == "__main__":
    main()
from emburse.resource import (
    EmburseObject,
    Account,
    Allowance,
    Card,
    Category,
    Company,
    Department,
    Label,
    Location,
    Member,
    SharedLink,
    Statement,
    Transaction
)


class Client(EmburseObject):
    """
    Emburse API Client

    API enables for the creation of expense cards at scale for custom
    business solutions as well as for third-party app integrations.
    Cards can be created with set spending limits and assigned with just
    an email. Some use cases include vendor payments, employee expense
    control, and fleet card management.

    API Version: v1
    API Docs: https://www.emburse.com/api/v1/docs#getting-started

    Authors:
        Marc Ford <marc.ford@gmail.com>
    """

    def _resource(self, resource_cls):
        """Build a resource of type *resource_cls* bound to this client's auth token."""
        return resource_cls(auth_token=self.auth_token)

    @property
    def Account(self):
        """emburse.resource.Account configured with this client's auth token."""
        return self._resource(Account)

    @property
    def Allowance(self):
        """emburse.resource.Allowance configured with this client's auth token."""
        return self._resource(Allowance)

    @property
    def Card(self):
        """emburse.resource.Card configured with this client's auth token."""
        return self._resource(Card)

    @property
    def Category(self):
        """emburse.resource.Category configured with this client's auth token."""
        return self._resource(Category)

    @property
    def Company(self):
        """emburse.resource.Company configured with this client's auth token."""
        return self._resource(Company)

    @property
    def Department(self):
        """emburse.resource.Department configured with this client's auth token."""
        return self._resource(Department)

    @property
    def Label(self):
        """emburse.resource.Label configured with this client's auth token."""
        return self._resource(Label)

    @property
    def Location(self):
        """emburse.resource.Location configured with this client's auth token."""
        return self._resource(Location)

    @property
    def Member(self):
        """emburse.resource.Member configured with this client's auth token."""
        return self._resource(Member)

    @property
    def SharedLink(self):
        """emburse.resource.SharedLink configured with this client's auth token."""
        return self._resource(SharedLink)

    @property
    def Statement(self):
        """emburse.resource.Statement configured with this client's auth token."""
        return self._resource(Statement)

    @property
    def Transaction(self):
        """emburse.resource.Transaction configured with this client's auth token."""
        return self._resource(Transaction)
from django.conf.urls import url

from . import views

# URL routes for the poll app.  Every pattern captures the poll's slug
# ([a-zA-Z0-9_\-]+) as the first positional argument to the view; the
# comment/vote edit and delete routes additionally capture a numeric id.
urlpatterns = [
    # poll detail page
    url(r'^([a-zA-Z0-9_\-]+)/$', views.poll, name='poll'),
    # same view with export flag set: CSV download of the poll
    url(r'^([a-zA-Z0-9_\-]+).csv$', views.poll, {'export': True}, name='poll_export_csv'),
    # create / edit / delete comments on a poll
    url(r'^([a-zA-Z0-9_\-]+)/comment/$', views.comment, name='poll_comment'),
    url(r'^([a-zA-Z0-9_\-]+)/comment/(\d+)/edit/$', views.comment, name='poll_comment_edit'),
    url(r'^([a-zA-Z0-9_\-]+)/comment/(\d+)/delete/$', views.delete_comment, name='poll_deleteComment'),
    url(r'^([a-zA-Z0-9_\-]+)/watch/$', views.watch, name='poll_watch'),
    url(r'^([a-zA-Z0-9_\-]+)/settings/$', views.settings, name='poll_settings'),
    # choice editing: plain, date, datetime (split over date / time /
    # combination steps) and universal choices
    url(r'^([a-zA-Z0-9_\-]+)/edit/choices/$', views.edit_choice, name='poll_editChoice'),
    url(r'^([a-zA-Z0-9_\-]+)/edit/choices/date/$', views.edit_date_choice, name='poll_editDateChoice'),
    url(r'^([a-zA-Z0-9_\-]+)/edit/choices/dateTime/date/$', views.edit_dt_choice_date, name='poll_editDTChoiceDate'),
    url(r'^([a-zA-Z0-9_\-]+)/edit/choices/dateTime/time/$', views.edit_dt_choice_time, name='poll_editDTChoiceTime'),
    url(r'^([a-zA-Z0-9_\-]+)/edit/choices/dateTime/combinations/$', views.edit_dt_choice_combinations,
        name='poll_editDTChoiceCombinations'),
    url(r'^([a-zA-Z0-9_\-]+)/edit/choices/universal/$', views.edit_universal_choice, name='poll_editUniversalChoice'),
    # NOTE(review): these two patterns are unanchored at the end (no '$'),
    # so they also match longer paths — presumably intentional; confirm.
    url(r'^([a-zA-Z0-9_\-]+)/edit/choicevalues/', views.edit_choicevalues, name='poll_editchoicevalues'),
    url(r'^([a-zA-Z0-9_\-]+)/edit/choicevalues_create', views.edit_choicevalues_create,
        name='poll_editchoicevalues_create'),
    url(r'^([a-zA-Z0-9_\-]+)/delete/$', views.delete, name='poll_delete'),
    # voting: create, assign, edit, delete
    url(r'^([a-zA-Z0-9_\-]+)/vote/$', views.vote, name='poll_vote'),
    url(r'^([a-zA-Z0-9_\-]+)/vote/(\d+)/assign/$', views.vote_assign, name='poll_voteAssign'),
    url(r'^([a-zA-Z0-9_\-]+)/vote/(\d+)/edit/$', views.vote, name='poll_voteEdit'),
    url(r'^([a-zA-Z0-9_\-]+)/vote/(\d+)/delete/$', views.vote_delete, name='poll_voteDelete'),
    url(r'^([a-zA-Z0-9_\-]+)/copy/$', views.copy, name='poll_copy'),
]
import unittest

import base
import todo


class AppendTest(base.BaseTest):
    """Exercise the `append` command against a freshly seeded todo list."""

    def test_append(self):
        """Appending "testing append" to every item must touch all lines."""
        seed = self._test_lines_no_pri(self.num)
        todo.cli.addm_todo("\n".join(seed))
        for item_no in range(1, self.num + 1):
            todo.cli.append_todo([str(item_no), "testing", "append"])
        self.assertNumLines(self.num, "Test\s\d+\stesting\sappend")


if __name__ == "__main__":
    unittest.main()
import json

from django.http import HttpResponse
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404

from apps.exercises.models import Attempts
from apps.maps.models import Graphs
from apps.ki.utils import performInference
from apps.research.utils import getParticipantByUID, studyFilter


def knowledge_inference(request, gid=""):
    """Return knowledge inferences for graph `gid` as a JSON list.

    GET only (405 otherwise); 403 for anonymous users; 401 when the graph
    belongs to an active study and the user is not a participant.
    Inferences are only computed once more than one submitted attempt exists.
    """
    if request.method != "GET":
        return HttpResponse(status=405)
    graph = get_object_or_404(Graphs, pk=gid)
    if not request.user.is_authenticated():
        return HttpResponse(status=403)
    user, _created = User.objects.get_or_create(pk=request.user.pk)
    participant = getParticipantByUID(request.user.pk, gid)
    if graph.study_active and participant is None:
        return HttpResponse(status=401)
    attempts = Attempts.objects.filter(graph=graph).filter(submitted=True)
    attempts = studyFilter(graph, participant, user, attempts)
    inferences = []
    if attempts.count() > 1:
        correctness = [a.get_correctness() for a in attempts]
        inferences = performInference(graph.concept_dict, correctness)
    return HttpResponse(json.dumps(inferences), mimetype='application/json')
copying_str = \ ''' GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. 
Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. 
A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. 
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. 
This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. ''' warranty_str = \ ''' 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.
'''
""" KJV indexer and search modules.

BibleSearch: Can index and search the 'KJV' sword module using different
types of searches, including the following:
    Strongs number search - Searches for all verses containing either
        the phrase strongs phrase, any strongs number or a superset of
        the strongs numbers.
    Morphological tags search - Same as the strongs...
    Word or phrase search - Same as the strongs...
    Regular expression search - Searches the whole Bible using the
        provided regular expression.

"""

from sys import argv, exit
from cmd import Cmd
from difflib import get_close_matches
from functools import wraps
from time import strftime
from textwrap import fill
from collections import defaultdict
from itertools import product
import os
import sys
import json
import re

from .utils import *

try:
    # NOTE(review): 'bla' looks like a placeholder that deliberately forces
    # the ImportError fallback below (the handler sets Sword = None).  The
    # intended import was presumably the Sword bindings — confirm.
    import bla
    from .sword_verses import *
except ImportError:
    Sword = None
    from .verses import *

# Highlighting verbosity/colorization level used throughout this module
# (0 = refs only, 1 = +italics, 2 = +Strong's/Morph, 3 = +search terms).
COLOR_LEVEL = 3

# ANSI reverse-video highlight used for search terms.
highlight_color = '\033[7m'
highlight_text = '%s\\1\033[m' % highlight_color
# Matches a single (possibly hyphenated) word.
word_regx = re.compile(r'\b([\w-]+)\b')
# Matches any ANSI color escape sequence so previous coloring can be stripped.
strip_color_regx = re.compile('\033\[[\d;]*m')


def render_raw2(verse_text: str, strongs: bool = False,
                morph: bool = False) -> str:
    """ Render raw verse text.

    Parses the raw OSIS-like markup in verse_text with regular expressions
    and rebuilds it as lightweight tag markup (<i>, <n>, <p>, <...>, {...}).

    verse_text - Raw verse markup to render.
    strongs    - Include Strong's Numbers (as ' <G1234>') in the output.
    morph      - Include Morphological Tags (as ' {TAG}') in the output.
    """
    # Strong's numbers appear as strong:G#### / strong:H#### attributes.
    strong_regx = re.compile(r'strong:([GH]\d+)', re.I)
    # Morphology appears as Morph:TAG or robinson:TAG attributes.
    morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I)
    # One tagged element: leading text, <tag attrs>, inner text, </tag>,
    # trailing text.  (?P=tag) makes the close tag match the open tag.
    test_regx = re.compile(r'''
        ([^<]*)
        <(?P<tag>seg|q|w|transChange|note)([^>]*)>
        ([\w\W]*?)
        </(?P=tag)>
        ([^<]*)
        ''', re.I | re.X)
    # <divineName>lord</divineName> (optionally followed by 's).
    divname_regx = re.compile(r'''
        <(?:divineName)>
        ([^<]*?)
        ([\'s]*)
        </(?:divineName)>
        ''', re.I | re.X)
    # Uppercase the divine name but leave the possessive suffix alone.
    div_upper = lambda m: m.group(1).upper() + m.group(2)
    # Paragraph markers come through as marker="X" attributes.
    marker_regx = re.compile(r'.*marker="(.)".*', re.I)
    info_print(verse_text, tag=4)

    def recurse_tag(text):
        """ Recursively parse raw verse text using regular expressions, and
        returns the correctly formatted text.

        """
        v_text = ''
        for match in test_regx.finditer(text):
            opt, tag_name, tag_attr, tag_text, punct = match.groups()
            strongs_str = ''
            morph_str = ''
            # x-added segments render as italics; notes render as <n>…</n>.
            italic_str = '<i>%s</i>' if 'added' in tag_attr.lower() else '%s'
            if 'note' in tag_name.lower() or 'study' in tag_attr.lower():
                note_str = ' <n>%s</n>'
            else:
                note_str = '%s'
            if strongs and strong_regx.search(tag_attr):
                strongs_list = strong_regx.findall(tag_attr)
                strongs_str = ' <%s>' % '> <'.join(strongs_list)
            if morph and morph_regx.search(tag_attr):
                morph_list = morph_regx.findall(tag_attr)
                morph_str = ' {%s}' % '} {'.join(morph_list)
            if match.re.search(tag_text):
                # The tag body contains further tags: recurse into it.
                temp_text = recurse_tag(tag_text) + strongs_str + morph_str
                v_text += note_str % italic_str % (temp_text)
            else:
                info_print((opt, tag_name, tag_attr, tag_text, punct), tag=4)
                opt = marker_regx.sub('<p>\\1</p> ', opt)
                tag_text = divname_regx.sub(div_upper, tag_text)
                tag_text = note_str % italic_str % tag_text
                v_text += opt + tag_text + strongs_str + morph_str
            v_text += punct
        return v_text

    return recurse_tag(verse_text)


def render_raw(verse_text: str, strongs: bool = False,
               morph: bool = False) -> str:
    """ Render raw verse text.

    Non-recursive variant of render_raw2: flattens one level of nesting by
    re-matching the tag body, then emits the same lightweight tag markup.

    verse_text - Raw verse markup to render.
    strongs    - Include Strong's Numbers in the output.
    morph      - Include Morphological Tags in the output.
    """
    strong_regx = re.compile(r'strong:([GH]\d+)', re.I)
    morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I)
    # Same element pattern as render_raw2 but without 'seg' as a top tag.
    test_regx = re.compile(r'''
        ([^<]*)
        <(?P<tag>q|w|transChange|note)([^>]*)>
        ([\w\W]*?)
        </(?P=tag)>
        ([^<]*)
        ''', re.I | re.X)
    divname_regx = re.compile(r'''
        (?:<seg>)?
        <(?:divineName)>+
        ([^<]*?)
        ([\'s]*)
        </(?:divineName)>
        (?:</seg>)?
        ''', re.I | re.X)
    # x-added <seg> elements become italics.
    xadded_regx = re.compile(r'<seg subType="x-added"[^>]*>([^<]*)</seg>', re.I)
    div_upper = lambda m: m.group(1).upper() + m.group(2)
    marker_regx = re.compile(r'.*marker="(.)".*', re.I)
    v_text = ''
    info_print(verse_text, tag=4)
    for match in test_regx.finditer(verse_text):
        opt, tag_name, tag_attr, tag_text, punct = match.groups()
        italic_str = '%s'
        if match.re.search(tag_text):
            # Nested tags: re-match one level down and process each piece.
            if 'added' in tag_attr.lower():
                italic_str = '<i>%s</i>' + punct
                punct = ''
            match_list = match.re.findall(tag_text + punct)
        else:
            match_list = [match.groups()]
        temp_text = ''
        for opt, tag_name, tag_attr, tag_text, punct in match_list:
            info_print((opt, tag_name, tag_attr, tag_text, punct), tag=4)
            tag_text = divname_regx.sub(div_upper, tag_text)
            tag_text = xadded_regx.sub('<i>\\1</i>', tag_text)
            if 'marker' in opt.lower():
                temp_text += '<p>%s</p> ' % marker_regx.sub('\\1', opt)
                opt = ''
            if 'note' in tag_name.lower() or 'study' in tag_attr.lower():
                temp_text += ' <n>%s</n>' % tag_text
                tag_text = ''
            temp_italic = '<i>%s</i>' if 'added' in tag_attr.lower() else '%s'
            temp_text += temp_italic % (opt + tag_text)
            if tag_name.strip().lower() in ['transchange', 'w', 'seg']:
                if strong_regx.search(tag_attr) and strongs:
                    temp_text += \
                        ' <%s>' % '> <'.join(strong_regx.findall(tag_attr))
                if morph_regx.search(tag_attr) and morph:
                    temp_text += \
                        ' {%s}' % '} {'.join(morph_regx.findall(tag_attr))
            temp_text += punct
        v_text += italic_str % temp_text
        continue
        # NOTE(review): everything below in this loop body is unreachable —
        # the 'continue' above always fires.  Kept verbatim; it appears to be
        # an earlier implementation of the same per-match rendering.
        opt, tag_name, tag_attr, tag_text, punct = match.groups()
        tag_text = divname_regx.sub(
            lambda m: m.group(1).upper() + m.group(2), tag_text)
        if 'marker' in opt.lower():
            v_text += '<p>%s</p> ' % marker_regx.sub('\\1', opt)
        if 'added' in tag_attr.lower():
            v_text += '<i>'
        elif 'note' in tag_name.lower() or 'study' in tag_attr.lower():
            v_text += ' <n>%s</n>' % tag_text
        if match.re.search(tag_text):
            for i in match.re.finditer(tag_text):
                info_print(i.groups(), tag=4)
                o, t_n, t_a, t_t, p = i.groups()
                if t_n.strip().lower() in ['transchange', 'w']:
                    v_text += o + t_t
                    if strong_regx.search(t_a) and strongs:
                        v_text += \
                            ' <%s>' % '> <'.join(strong_regx.findall(t_a))
                    if morph_regx.search(t_a) and morph:
                        v_text += \
                            ' {%s}' % '} {'.join(morph_regx.findall(t_a))
                    v_text += p
        else:
            if tag_name.strip().lower() in ['transchange', 'w']:
                v_text += tag_text
                if strong_regx.search(tag_attr) and strongs:
                    v_text += \
                        ' <%s>' % '> <'.join(strong_regx.findall(tag_attr))
                if morph_regx.search(tag_attr) and morph:
                    v_text += \
                        ' {%s}' % '} {'.join(morph_regx.findall(tag_attr))
        if 'added' in tag_attr.lower():
            v_text += '</i>'
        v_text += punct
        info_print('%s: %s: %s: %s: %s' % (opt, tag_name, tag_attr,
                                           tag_text, punct), tag=4)
    return v_text


def render_verses_with_italics(ref_list, wrap=True, strongs=False,
                               morph=False, added=True, notes=False,
                               highlight_func=None, module='KJV', *args):
    """ Renders the verse text at verse_ref with italics highlighted.
    Yields a string "verse_ref: verse_text".

    ref_list       - List of references to render.
    wrap           - Whether to wrap the text.
    strongs        - Include Strong's Numbers in the output.
    morph          - Include Morphological Tags in the output.
    added          - Include added text (i.e. italics) in the output.
    notes          - Include study notes at the end of the text.
    highlight_func - A function to highlight anything else
                     (i.e. search terms.)
    module         - Sword module to render from.
    *args          - Any additional arguments to pass to highlight_func.

    highlight_func should take at least three arguments, verse_text,
    strongs, and morph.
    """
    # Set the colors of different items.
    end_color = '\033[m'
    # Build replacement strings that highlight Strong's Numbers and
    # Morphological Tags.
    if COLOR_LEVEL >= 2:
        # The Strong's and Morphology matching regular expressions.
        # Match strongs numbers (allowing embedded color escapes).
        strongs_regx = re.compile(r'''
            <((?:\033\[[\d;]*m)*?[GH]?\d+?(?:\033\[[\d;]*m)*?)>
            ''', re.I | re.X)
        # It needs to match with braces or it will catch all capitalized
        # word and words with '-'s in them.
        info_print("Rendering results, please wait...\n", tag=0)
        morph_regx = re.compile(r'''
            \{((?:\033\[[\d+;]*m)*?[\w-]*?(?:\033\[[\d+;]*m)*?)\}
            ''', re.X)
        strongs_color = '\033[36m'
        morph_color = '\033[35m'
        strongs_highlight = '<%s\\1%s>' % (strongs_color, end_color)
        morph_highlight = '{%s\\1%s}' % (morph_color, end_color)
    if COLOR_LEVEL >= 0:
        ref_color = '\033[32m'
        ref_highlight = '%s\\1%s' % (ref_color, end_color)
    if COLOR_LEVEL >= 1 and added:
        # NOTE(review): italic_color (the string) is shadowed by the
        # function def below.  It works only because italic_highlight is
        # built from the string before the def executes — confirm intent.
        italic_color = '\033[4m'
        italic_regx = re.compile(r'<i>\s?(.*?)\s?</i>', re.S)
        italic_highlight = '%s\\1%s' % (italic_color, end_color)
    # Get the local text encoding.
    encoding = get_encoding()

    # A substitution replacement function for highlighting italics.
    def italic_color(match):
        """ Color italic text, but first remove any previous color.

        """
        # Strip any previous colors.
        match_text = strip_color_regx.sub('', match.groups()[0])
        # Color the italics.
        return word_regx.sub(italic_highlight, match_text)

    # Get an iterator over all the requested verses.
    verse_iter = IndexedVerseTextIter(iter(ref_list), strongs, morph,
                                      italic_markers=(COLOR_LEVEL >= 1),
                                      added=added, paragraph=added,
                                      notes=notes, module=module)
    if VERBOSE_LEVEL == 20:
        verse_iter = VerseTextIter(iter(ref_list), strongs, morph,
                                   module=module, markup=1,  # Sword.FMT_PLAIN,
                                   render='render_raw')
    if VERBOSE_LEVEL >= 30:
        verse_iter = RawDict(iter(ref_list), module=module)
    for verse_ref, verse_text in verse_iter:
        if VERBOSE_LEVEL >= 30:
            # Debug mode: dump the raw attribute dict before rendering.
            len_longest_key = len(max(verse_text[1].keys(), key=len))
            for key, value in verse_text[1].items():
                print('\033[33m{0:{1}}\033[m: {2}'.format(key,
                                                          len_longest_key,
                                                          value))
            verse_text = verse_text[1]['_verse_text'][0]
        # Encode than decode the verse text to make it compatible with
        # the locale.
        verse_text = verse_text.strip().encode(encoding, 'replace')
        verse_text = verse_text.decode(encoding, 'replace')
        verse_text = '%s: %s' % (verse_ref, verse_text)
        # The text has to be word wrapped before adding any color, or else
        # the color will add to the line length and the line will wrap too
        # soon.
        if wrap:
            verse_text = fill(verse_text, screen_size()[1],
                              break_on_hyphens=False)
        if COLOR_LEVEL >= 0:
            # Color the verse reference.
            colored_ref = word_regx.sub(ref_highlight, verse_ref)
            verse_text = re.sub(verse_ref, colored_ref, verse_text)
        if COLOR_LEVEL >= 1 and added:
            # Highlight the italic text we previously pulled out.
            verse_text = italic_regx.sub(italic_color, verse_text)
        if COLOR_LEVEL >= 2:
            # Highlight Strong's and Morphology if they are visible.
            if strongs:
                verse_text = strongs_regx.sub(strongs_highlight, verse_text)
            if morph:
                verse_text = morph_regx.sub(morph_highlight, verse_text)
        if COLOR_LEVEL >= 3:
            # Highlight the different elements.
            if highlight_func:
                verse_text = highlight_func(verse_text, *args)
        # Finally produce the formatted text.
        yield verse_text


def highlight_search_terms(verse_text: str, regx_list, highlight_text,
                           color_tag='\033\[[\d+;]*m', *args) -> str:
    """ Highlight search terms in the verse text.

    verse_text     - The (possibly already colorized) verse text.
    regx_list      - Compiled patterns (see build_highlight_regx) whose
                     matches get highlighted.
    highlight_text - Replacement template containing the highlight escapes.
    color_tag      - Regex fragment matching ANSI color escapes so they do
                     not break word-boundary matching.
    """
    def highlight_group(match):
        """ Highlight each word/Strong's Number/Morphological Tag in the
        match.

        """
        match_text = match.group()
        # Each captured group is highlighted individually inside the match.
        for word in set(match.groups()):
            if word:  # and word != match_text:
                # if word.lower() == 'strong' and word == match_text:
                    # continue
                info_print(word, tag=20)
                try:
                    # Word boundaries are widened to also accept color
                    # escapes so already-colored text still matches.
                    match_text = re.sub('''
                        (
                        (?:{0}|\\b)+
                        {1}
                        (?:{0}|\\b)+
                        )
                        '''.format(color_tag, re.escape(word)),
                        highlight_text, match_text, flags=re.X)
                except Exception as err:
                    # Best-effort: a bad pattern for one word must not kill
                    # the whole highlighting pass.
                    info_print("Error with highlighting word %s: %s" % \
                               (word, err), tag=4)
                    #match_text = match_text.replace(word,
                    #                        '\033[7m%s\033[m' % word)
        # print(match_text)
        return match_text
        # Strip any previous colors.
        # match_text = strip_color_regx.sub('', match.group())
        # return word_regx.sub(highlight_text, match_text)

    verse_text = verse_text.strip()
    # Apply each highlighting regular expression to the text.
    for regx in regx_list:
        verse_text = regx.sub(highlight_group, verse_text)
    return verse_text


def build_highlight_regx(search_list, case_sensitive, sloppy: bool = False,
                         color_tag='\033\[[\\\\d+;]*m',
                         extra_tag='\033') -> list:
    """ Build a regular expression and highlight string to colorize the
    items in search_list as they appear in a verse.

    search_list    - Search terms; '*' marks partial words and a leading
                     '&' marks the term as a raw regular expression.
    case_sensitive - Whether the built patterns are case sensitive.
    sloppy         - Allow sloppy (loose) phrase matching; also triggered
                     per-item by '~'.
    color_tag      - Regex fragment for ANSI color escapes (note the
                     doubled escaping — this string is spliced into
                     another pattern before compiling).
    extra_tag      - Extra fragment treated like whitespace when matching.

    Returns a list of compiled regular expressions.
    """
    if not search_list:
        return []

    regx_list = []
    # Extra word boundary to catch ansi color escape sequences.
    escaped_word_bound = '(?:{0}|\\\\b)+'.format(color_tag)
    word_bound = '(?:{0}|\\b)+'.format(color_tag)
    # Extra space filler to pass over ansi color escape sequences.
    extra_space = '|{0}|{1}'.format(color_tag, extra_tag)
    # print(word_bound, extra_space, '(?:\033\[[\d+;]*m|\\b)+')
    for item in search_list:
        item = item.strip()
        is_regex = (('*' in item and ' ' not in item) or item.startswith('&'))
        if ('*' in item and ' ' not in item) and not item.startswith('&'):
            # Build a little regular expression to highlight partial words.
            item = item[1:] if item[0] in '!^+|' else item
            item = item.replace('*', '\w*')
            item = r'{0}({1}){0}'.format(word_bound, item)
        if item.startswith('&'):
            # Just use a regular expression.  ('&' marks the term as a
            # regular expression.)
            item = item[1:]

        regx_list.append(Search.search_terms_to_regex(
            item, case_sensitive,
            word_bound=escaped_word_bound,
            extra_space=extra_space,
            sloppy=(sloppy or '~' in item),
            is_regex=is_regex))

    return regx_list


def mod_lookup(mod, items):
    """ Looks up items in a module and returns the formatted text.

    """
    item_lookup = Lookup(mod)

    # Separate all elements by a comma.
item_list = ','.join(items.split()).split(',') text_list = [] for item in item_list: item_text = item_lookup.get_formatted_text(item) text_list.append('\033[1m%s\033[m:\n%s' % (item, item_text)) return '\n\n'.join(text_list) class StdoutRedirect(object): """ Redirect stdout to a specified output function. """ def __init__(self, output_func, *args): """ Set the output function and get the extra arguments to pass to it. """ self._output_func = output_func self._args = args self._old_stdout = sys.stdout def write(self, data): """ Write data to the output function. """ if data.strip(): self._output_func(data, *self._args) def __enter__(self): """ Change sys.stdout to this class. """ try: sys.stdout = self return self except Exception as err: print("Error in __enter__: %s" % err, file=sys.stderr) return None def __exit__(self, exc_type, exc_value, traceback): """ Change sys.stdout back to its old value. """ try: sys.stdout = self._old_stdout if exc_type: return False return True except Exception as err: print("Error in __exit__: %s" % err, file=sys.stderr) return False class IndexedVerseTextIter(object): """ An iterable object for accessing verses in the Bible. Maybe it will be easier maybe not. """ def __init__(self, reference_iter, strongs=False, morph=False, module='KJV', italic_markers=False, added=True, paragraph=True, notes=False, path=''): """ Initialize. 
""" reg_list = [] if not strongs: reg_list.append(r'\s*<([GH]\d+)>') if not morph: reg_list.append(r'\s*\{([\w-]+)\}') if not added: reg_list.append(r'\s?<i>\s?(.*?)\s?</i>') if not italic_markers: reg_list.append(r'(<i>\s?|\s?</i>)') if not paragraph: reg_list.append(r'\s?<p>\s?(.*?)\s?</p>') else: reg_list.append(r'(<p>\s?|\s?</p>)') reg_str = r'(?:%s)' % r'|'.join(reg_list) self._clean_regex = re.compile(reg_str, re.S) self._notes_regex = re.compile(r'\s?<n>\s?(.*?)\s?</n>', re.S) self._notes_str = ' (Notes: \\1)' if notes else '' self._index_dict = IndexDict('%s' % module, path) self._ref_iter = reference_iter def next(self): """ Returns the next verse reference and text. """ return self.__next__() def __next__(self): """ Returns a tuple of the next verse reference and text. """ # Retrieve the next reference. verse_ref = next(self._ref_iter) # Set the verse and render the text. verse_text = self._get_text(verse_ref) return (verse_ref, verse_text.strip()) def __iter__(self): """ Returns an iterator of self. """ return self def _get_text(self, verse_ref): """ Returns the verse text. Override this to produce formatted verse text. """ verse_text = self._index_dict[verse_ref] verse_text = self._clean_regex.sub('', verse_text) verse_text = self._notes_regex.sub(self._notes_str, verse_text) return verse_text class CombinedParse(object): """ A parser for simple combined search parsing. ((in OR tree) AND the) AND (house OR bush) => ['in the house', 'in the bush', 'tree the house', 'tree the bush'] Also it has a NOT word list. created NOT (and OR but) => ['created'] ['and', 'but'] """ def __init__(self, arg_str): """ Initialize the parser and parse the arg string. """ self._arg_str = arg_str self._arg_list = arg_str.split() parsed_list = self.parse_string(list(arg_str)) self._word_list, self._not_list = self.parse_list(parsed_list) # Make the results accesable via read-only properties. 
word_list = property(lambda self: self._word_list) not_list = property(lambda self: self._not_list) def parse_list(self, arg_list): """ Parse a list such as ['created', 'NOT', ['and', 'OR', 'but']] into search_args = ['created'] not_list = ['and', 'but'] """ # The list we're working on building. working_list = [] # The list of words not to include. not_list = [] for i in arg_list: # Skip 'OR's if i == 'OR': continue if isinstance(i, list): # A list was found so parse it and get the results. temp_list, temp_not_list = self.parse_list(i) # Add the returned not list to the current not list. not_list.extend(temp_not_list) if working_list: if working_list[-1] == 'AND': # Pop the 'AND' off the end of the list. working_list.pop() # Combine each element of the working listh with each # element of the returned list replace the working # list with those combinations. # (i.e. working_list = ['this', 'that'] # temp_list = ['tree', 'house'] # result = ['this tree', 'this house', # 'that tree', 'that house'] working_list = ['%s %s' % j \ for j in product(working_list, temp_list)] elif working_list[-1] == 'NOT': # Take the 'NOT' off to show we've processed it. working_list.pop() # Add the returned list to the NOT list. not_list.extend(temp_list) else: # Just extend the working list with the retuned list. working_list.extend(temp_list) else: # Just extend the working list with the retuned list. working_list.extend(temp_list) else: if i == 'AND': # Put the 'AND' on the list for later processing. working_list.append(i) elif working_list: if working_list[-1] == 'AND': # Take the 'AND' off the list. working_list.pop() # Combine all the elements of working_list with i, and # replace working list with the resulting list. # (i.e. working_list = ['he', 'it'] i = 'said' # result = ['he said', 'it said'] working_list = ['%s %s' % (j, i) for j in working_list] elif working_list[-1] == 'NOT': # Remove the 'NOT'. working_list.pop() # Add the word to the not list. 
not_list.append(i) else: # Add the word to the working list. working_list.append(i) else: # Add the word to the working list. working_list.append(i) # Split and then combine all the strings in working_list. # Basically removes runs of whitespace. working_list = [' '.join(i.split()) for i in working_list] # Return the final list and not list. return working_list, not_list def parse_parenthesis(self, arg_list): """ Recursively processes strings in parenthesis converting them to nested lists of strings. """ # The return list. return_list = [] # Temorary string. temp_str = '' while arg_list: # Get the next character. c = arg_list.pop(0) if c == '(': # An opening parenthesis was found so split the current string # at the spaces putting them in the return list, and clean # the string. if temp_str: return_list.extend(temp_str.split()) temp_str = '' # Process from here to the closing parenthesis. return_list.append(self.parse_parenthesis(arg_list)) elif c == ')': # The parenthesis is closed so return back to the calling # function. break else: # Append the current not parenthesis character to the string. temp_str += c if temp_str: # Split and add the string to the return list. return_list.extend(temp_str.split()) # Return what we found. return return_list def parse_string(self, arg_list): """ Parse a combined search arg string. Convert a string such as: 'created NOT (and OR but)' => ['created', 'NOT', ['and', 'OR', 'but']] """ # This does the same thing only using json. # # Regular expression to group all words. #word_regx = re.compile(r'\b(\w*)\b') # Put quotes around all words and opening replace paranthesis with # brackets, put all of that in brackets. #temp_str = '[%s]' % word_regx.sub('"\\1"', arg_str).replace('(', '[') # Replace closing parenthesis with brackets and replace a '" ' with # '", '. #temp_str = temp_str.replace(')', ']').replace('" ', '",') # finally replace '] ' with '], '. The end result should be a valid # json string that can be converted to a list. 
#temp_str = temp_str.replace('] ', '],') # Convert the string to a list. #return_list = json.loads(temp_str) #return return_list # The return list. return_list = [] # Temporary string. temp_str = '' while arg_list: # Pop the next character. c = arg_list.pop(0) if c == '(': # A parenthesis was found store and reset the string. # And parse the what is in the parenthesis. if temp_str: return_list.extend(temp_str.split()) temp_str = '' return_list.append(self.parse_parenthesis(arg_list)) else: # Append the non parenthesis character to the string. temp_str += c if temp_str: # Store the final string in the list. return_list.extend(temp_str.split()) #info_print(return_list) # Return the list. return return_list class Search(object): """ Provides a simple way of searching an IndexDict for verses. """ # To check for spaces. _whitespace_regx = re.compile(r'\s') # Cleanup regular expressions. _non_alnum_regx = re.compile(r'[^\w\*<>\{\}\(\)-]') _fix_regx = re.compile(r'\s+') # Match strongs numbers. _strongs_regx = re.compile(r'[<]?(\b[GH]\d+\b)[>]?', re.I) # It needs to match with braces or it will catch all capitalized # word and words with '-'s in them. _morph_regx = re.compile(r'[\(\{](\b[\w-]+\b)[\}\)]', re.I) _word_regx = re.compile(r'\b([\w\\-]+)\b') _space_regx = re.compile(r'\s+') _non_word_regx = re.compile(r'[<>\(\)]') _fix_strongs = classmethod(lambda c, m: '<%s>' % m.groups()[0].upper()) _fix_morph = classmethod(lambda c, m: '{%s}' % m.groups()[0].upper()) # Escape the morphological tags. _escape_morph = classmethod(lambda c, m: \ '\{%s\}' % re.escape(m.groups()[0]).upper()) def __init__(self, module='KJV', path='', multiword=False): """ Initialize the search. """ # The index dictionary. 
self._index_dict = IndexDict(module, path) self._module_name = module self._multi = multiword @classmethod def search_terms_to_regex(cls, search_terms, case_sensitive, word_bound='\\\\b', extra_space='', sloppy=False, is_regex=False): """ Build a regular expression from the search_terms to match a verse in the Bible. """ # Set the flags for the regular expression. flags = re.I if not case_sensitive else 0 if is_regex: reg_str = search_terms info_print('\nUsing regular expression: %s\n' % reg_str, tag=2) try: return re.compile(reg_str, flags) except Exception as err: print("An error occured while compiling the highlight " "regular expression %s: %s." % (reg_str, err), " There will be no highlighting.\n", file=sys.stderr) return re.compile(r'') # This will skip words. not_words_str = r'\b\w+\b' # This will skip Strong's Numbers. not_strongs_str = r'<[^>]*>' # This wil skip Morphological Tags. not_morph_str = r'\{[^\}]*\}' # This will skip all punctuation. Skipping ()'s is a problem for # searching Morphological Tags, but it is necessary for the # parenthesized words. May break highlighting. not_punct_str = r'[\s,\?\!\.;:\\/_\(\)\[\]"\'-]' # This will skip ansi color. not_color_str = r'\033\[[\d;]*m' # Match all *'s star_regx = re.compile(r'\*') # Hold the string that fills space between search terms. space_str = '' # Get stars past so we can replace them with '\w*' later. temp_str, word_count = star_regx.subn(r'_star_', search_terms) # Hack to get rid of unwanted characters. temp_str = cls._non_alnum_regx.sub(' ', temp_str).split() temp_str = ' '.join(temp_str) # Phrases will have spaces in them phrase = bool(cls._whitespace_regx.search(temp_str)) # Escape the morphological tags, and also find how many there are. temp_str, morph_count = cls._morph_regx.subn(cls._escape_morph, temp_str) # Make all Strong's Numbers uppercase, also find how many there are. temp_str, strongs_count = cls._strongs_regx.subn(cls._fix_strongs, temp_str) # Select all words. 
#repl = '(\\\\b\\1\\\\b)' # This works: # temp_str, word_count = \ # cls._word_regx.subn('{0}(\\1){0}'.format(word_bound), temp_str) repl = '(?:{0}(\\1){0})'.format(word_bound) temp_str, word_count = cls._word_regx.subn(repl, temp_str) # Replace what used to be *'s with '\w*'. temp_str = temp_str.replace('_star_', '\w*') # All the Strong's and Morphology were changed in the previous # substitution, so if that number is greater than the number of # Strong's plus Morphology then there were words in the search terms. # I do this because I don't know how to only find words. words_found = (strongs_count + morph_count) < word_count if phrase: # Build the string that is inserted between the items in the # search string. space_str = r'(?:%s%s' % (not_punct_str, extra_space) if not bool(strongs_count) or sloppy: # Skip over all Strong's Numbers. space_str = r'%s|%s' % (space_str, not_strongs_str) if not bool(morph_count) or sloppy: # Skip all Morphological Tags. space_str = r'%s|%s' % (space_str, not_morph_str) if not words_found or bool(morph_count) or bool(strongs_count) or \ sloppy: # Skip words. If word attributes are in the search we can # skip over words and still keep it a phrase. space_str = r'%s|%s' % (space_str, not_words_str) # Finally make it not greedy. space_str = r'%s)*?' % space_str else: space_str = '' # Re-combine the search terms with the regular expression string # between each element. reg_str = space_str.join(temp_str.split()) info_print('\nUsing regular expression: %s\n' % reg_str, tag=2) try: return re.compile(reg_str, flags) except Exception as err: print("An error occured while compiling the highlight " "regular expression %s: %s." % (reg_str, err), " There will be no highlighting.\n", file=sys.stderr) return re.compile(r'') def _sorted_iter(self, verse_ref_set): """ Returns an iterator over a sorted version of verse_ref_set. """ # Speed up the iteration by first sorting the range. 
return iter(sorted(verse_ref_set, key=sort_key)) def _clean_text(self, text): """ Return a clean (only alphanumeric) text of the provided string. """ # Do we have to use two regular expressions to do this. # Replace all non-alphanumeric characters with a space. temp_text = self._non_alnum_regx.sub(' ', text) # Replace one or more spaces with one space. clean_text = self._fix_regx.sub(' ', temp_text) return clean_text.strip() def _fix_strongs_morph(self, search_terms): """ Make any Strong's or Morphology uppercase, put parenthesis around the Morphological Tags, and put <>'s around the Strong's Numbers. """ # Capitalize all strongs numbers and remove the <> from them. temp_str = self._strongs_regx.sub(self._fix_strongs, search_terms) # Capitalize all morphological tags and make sure they are in # parenthesis. temp_str = self._morph_regx.sub(self._fix_morph, temp_str) return temp_str def _process_search(func): """ Returns a wrapper function that processes the search terms, calls the wrapped function, and, if applicable, confines the resulting verse set to a range. """ @wraps(func) def wrapper(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ Process the search terms according to the wrapped functions requirements, then apply the range, if given, to the returned set of verses. """ if func.__name__ in ['sword_search']: if not Sword: print("Sword library not found.") return if not isinstance(search_terms, str): # Combine the terms for use by the different methods. search_terms = ' '.join(search_terms) # Get a valid set of verse references that conform to the passed # range. range_set = parse_verse_range(range_str) if func.__name__ not in ['regex_search', 'partial_word_search']: # Try to catch and fix any Strong's Numbers or Morphological # Tags. search_terms = self._fix_strongs_morph(search_terms) # Regular expression and combined searches get the search terms as # they were passed. 
if func.__name__ in ['multiword_search', 'anyword_search', 'phrase_search', 'mixed_phrase_search']: # Get rid of any non-alphanumeric or '-' characters from # the search string. search_str = self._clean_text(search_terms).strip() if strongs or morph: # Strong's numbers and Morphological tags are all # uppercase. This is only required if the Morphological # Tags were not surrounded by parenthesis. search_str = search_str.upper().strip() else: search_str = search_terms # Get the set of found verses. found_set = func(self, search_str, strongs, morph, added, case_sensitive, range_set) # The phrase, regular expression, and combined searches apply the # range before searching, so only multi-word and any-word searches # have it applied here. if func.__name__ in ['multiword_search', 'anyword_search', 'partial_word_search']: if range_set: found_set.intersection_update(range_set) return found_set # Return wrapper function. return wrapper @_process_search def combined_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ combined_search(self, search_terms, strongs=False, morph=False, case_sensitive=False, range_str=''): -> Perform a combined search. Search terms could be 'created NOT (and OR but)' and it would find all verses with the word 'created' in them and remove any verse that had either 'and' or 'but.' search_terms - Terms to search for. strongs - Search for Strong's Number phrases. morph - Search for Morphological Tag phrases. added - Search in the added text (i.e. italics). case_sensitive - Perform a case sensitive search. range_str - A verse range to limit the search to. """ info_print("Searching for '%s'..." % search_terms, tag=1) # Process the search_terms. arg_parser = CombinedParse(search_terms) # Get the list of words and/or phrases to include. word_list = arg_parser.word_list # Get the list of words and/or phrases to NOT include. 
not_list = arg_parser.not_list phrase_search = self.phrase_search multiword_search = self.multiword_search def combine_proc(str_list): """ Performs combined search on the strings in str_list, and returns a set of references that match. """ and_it = False temp_set = set() for word in str_list: # A '+' before or after a word means it should have a phrase # search done on it and the words with it. if '+' in word: # Do a phrase search on the word string. result_set = phrase_search(word.replace('+', ' '), strongs, morph, case_sensitive, range_str) elif word == '&': # Combine the next search results with this one. and_it = True continue else: # Do a multi-word search on the word string. result_set = multiword_search(word, strongs, morph, case_sensitive, range_str) if and_it: # The previous word said to find verses that match both. temp_set.intersection_update(result_set) and_it = False else: # Only keep the verses that have either one group or the # other but not both. temp_set.symmetric_difference_update(result_set) return temp_set # Remove any verses that have the NOT words in them. found_set = combine_proc(word_list).difference(combine_proc(not_list)) return found_set @_process_search def combined_phrase_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ combined_phrase_search(self, search_terms, strongs=False, morph=False, case_sensitive=False, range_str=''): -> Perform a combined phrase search. Search terms could be 'created NOT (and AND but)' and it would find all verses with the word 'created' in them and remove any verse that had the phrase 'and but.' search_terms - Terms to search for. strongs - Search for Strong's Number phrases. morph - Search for Morphological Tag phrases. added - Search in the added text (i.e. italics). case_sensitive - Perform a case sensitive search. range_str - A verse range to limit the search to. """ info_print("Searching for '%s'..." 
% search_terms, tag=1) # Process the search_terms. arg_parser = CombinedParse(search_terms) # Get the list of words and/or phrases to include. word_list = arg_parser.word_list # Get the list of words and/or phrases to NOT include. not_list = arg_parser.not_list phrase_search = self.phrase_search def combine_proc(str_list): """ Performs combined phrase search on the strings in str_list, and returns a set of references that match. """ temp_set = set() for word in str_list: # Do a phrase search on the word string. result_set = phrase_search(word.replace('+', ' '), strongs, morph, case_sensitive, range_str) # Include all the verses that have any of the word groups. temp_set.update(result_set) return temp_set # Remove any verses that have the NOT words in them. found_set = combine_proc(word_list).difference(combine_proc(not_list)) return found_set @_process_search def multiword_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ multiword_search(self, search_terms, strongs=False, morph=False, case_sensitive=False, range_str='') -> Perform a multiword search using the search_terms. search_terms - Terms to search for. strongs - Search for Strong's Number phrases. morph - Search for Morphological Tag phrases. added - Search in the added text (i.e. italics). case_sensitive - Perform a case sensitive search. range_str - A verse range to limit the search to. """ info_print("Searching for verses with all these words " "'%s'..." % ', '.join(search_terms.split()), tag=1) # All that needs to be done is find all references with all the # searched words in them. 
found_set = self._index_dict.value_intersect(search_terms.split(), case_sensitive) return found_set @_process_search def eitheror_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ eitheror_search(self, search_terms, strongs=False, morph=False, case_sensitive=False, range_str='') -> Perform a search returning any verse with one and only one of the terms searched for. search_terms - Terms to search for. strongs - Search for Strong's Number phrases. morph - Search for Morphological Tag phrases. added - Search in the added text (i.e. italics). case_sensitive - Perform a case sensitive search. range_str - A verse range to limit the search to. """ info_print("Searching for verses with one and not all of these words " "'%s'..." % ', '.join(search_terms.split()), tag=1) # Any verse with one and only one of the searched words. found_set = self._index_dict.value_sym_diff(search_terms.split(), case_sensitive) return found_set @_process_search def anyword_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ anyword_search(self, search_terms, strongs=False, morph=False, case_sensitive=False, range_str='') -> Perform a search returning any verse with one or more of the search terms. search_terms - Terms to search for. strongs - Search for Strong's Number phrases. morph - Search for Morphological Tag phrases. added - Search in the added text (i.e. italics). case_sensitive - Perform a case sensitive search. range_str - A verse range to limit the search to. """ info_print("Searching for verses with any of these words " "'%s'..." % ', '.join(search_terms.split()), tag=1) # Any verse with one or more of the searched words. 
found_set = self._index_dict.value_union(search_terms.split(), case_sensitive) return found_set @_process_search def partial_word_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ partial_word_search(self, search_terms, strongs=False, morph=False, case_sensitive=False, range_str='') -> Perform a search returning any verse with one or more words matching the partial words given in the search terms. Partial words are markes tih *'s (e.g. '*guil*' will match any word with 'guil' in it such as 'guilt' or 'beguile.' search_terms - Terms to search for. strongs - Search for Strong's Number phrases. morph - Search for Morphological Tag phrases. added - Search in the added text (i.e. italics). case_sensitive - Perform a case sensitive search. range_str - A verse range to limit the search to. """ info_print("Searching for verses with any of these partial words " "'%s'..." % ', '.join(search_terms.split()), tag=1) #found_set = self._index_dict.value_union( #self._words_from_partial(search_terms, case_sensitive), #case_sensitive) search_list = search_terms.split() found_set = self._index_dict.from_partial(search_list, case_sensitive) return found_set def _words_from_partial(self, partial_word_list, case_sensitive=False): """ Search through a list of partial words and yield words that match. """ flags = re.I if not case_sensitive else 0 # Split the search terms and search through each word key in the index # for any word that contains the partial word. word_list = partial_word_list.split() for word in self._index_dict['_words_']: for partial_word in word_list: # A Regular expression that matches any number of word # characters for every '*' in the term. 
reg_str = '\\b%s\\b' % partial_word.replace('*', '\w*') try: word_regx = re.compile(reg_str, flags) except Exception as err: print('There is a problem with the regular expression ' '%s: %s' % (reg_str, err), file=sys.stderr) exit() if word_regx.match(word): yield word def _process_phrase(func): """ Returns a wrapper function for wrapping phrase like searches. """ @wraps(func) def wrapper(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ Gets a regular expression from the wrapped function, then builds a set of verse references to search, finally it calls the searching function with the regular expression and the verse reference iterator, and returns the resulting set of references. """ search_regx = func(self, search_terms, strongs, morph, added, case_sensitive, range_str) # First make sure we are only searching verses that have all the # search terms in them. search_list = search_terms.split() if '*' in search_terms: ref_set = self._index_dict.from_partial(search_list, case_sensitive, common_limit=5000) else: ref_set = self._index_dict.value_intersect(search_list, case_sensitive) if range_str: # Only search through the supplied range. ref_set.intersection_update(range_str) # No need to search for a single word phrase. if len(search_terms.split()) == 1: return ref_set # Sort the list so it may be a little faster. Only needed if we're # using the sword module to look them up. ref_iter = self._sorted_iter(ref_set) # Disable Strong's and Morphological if only words are used. 
strongs = bool(self._strongs_regx.search(search_terms)) morph = bool(self._morph_regx.search(search_terms)) return self.find_from_regex(ref_iter, search_regx, strongs, morph) return wrapper @_process_search @_process_phrase def ordered_multiword_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ ordered_multiword_search(self, search_terms, strongs=False, morph=False, case_sensitive=False, range_str='') -> Perform an ordered multiword search. Like a multiword search, but all the words have to be in order. search_terms - Terms to search for. strongs - Search for Strong's Number phrases. morph - Search for Morphological Tag phrases. added - Search in the added text (i.e. italics). case_sensitive - Perform a case sensitive search. range_str - A verse range to limit the search to. """ info_print("Searching for verses with these words in order " "'%s'..." % search_terms, tag=1) return self.search_terms_to_regex(search_terms, case_sensitive, sloppy=True) @_process_search @_process_phrase def phrase_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ phrase_search(self, search_terms, strongs=False, morph=False, case_sensitive=False, range_str='') -> Perform a phrase search. search_terms - Terms to search for. strongs - Search for Strong's Number phrases. morph - Search for Morphological Tag phrases. added - Search in the added text (i.e. italics). case_sensitive - Perform a case sensitive search. range_str - A verse range to limit the search to. """ info_print("Searching for verses with this phrase " "'%s'..." % search_terms, tag=1) # Make all the terms the same case if case doesn't matter. flags = re.I if not case_sensitive else 0 if strongs: # Match strongs phrases. search_reg_str = search_terms.replace(' ', r'[^<]*') elif morph: # Match morphological phrases. 
search_reg_str = search_terms.replace(' ', r'[^\{]*') else: # Match word phrases search_reg_str = '\\b%s\\b' % search_terms.replace(' ', r'\b(<[^>]*>|\{[^\}]*\}|\W)*\b') # Make a regular expression from the search terms. return re.compile(search_reg_str, flags) @_process_search @_process_phrase def mixed_phrase_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ mixed_phrase_search(self, search_terms, strongs=False, morph=False, case_sensitive=False, range_str='') -> Perform a phrase search. search_terms - Terms to search for. strongs - Search for Strong's Number phrases. morph - Search for Morphological Tag phrases. added - Search in the added text (i.e. italics). case_sensitive - Perform a case sensitive search. range_str - A verse range to limit the search to. """ info_print("Searching for verses with this phrase " "'%s'..." % search_terms, tag=1) # Make a regular expression from the search terms. return self.search_terms_to_regex(search_terms, case_sensitive) @_process_search def regex_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ regex_search(self, search_terms, strongs=False, morph=False, case_sensitive=False, range_str='') -> Perform a regular expression search. search_terms - Terms to search for. strongs - Search for Strong's Number phrases. morph - Search for Morphological Tag phrases. added - Search in the added text (i.e. italics). case_sensitive - Perform a case sensitive search. range_str - A verse range to limit the search to. """ info_print("Searching for regular expression '%s'..." % search_terms, tag=1) # re.I is case insensitive. flags = re.I if not case_sensitive else 0 try: # Make a regular expression from the search_terms. 
search_regx = re.compile(r'%s' % search_terms, flags) except Exception as err: print('There is a problem with the regular expression "%s": %s' % \ (search_terms, err), file=sys.stderr) exit() if range_str: # Only search through the supplied range. ref_iter = self._sorted_iter(range_str) else: # Search the entire Bible. ref_iter = VerseIter('Genesis 1:1') return self.find_from_regex(ref_iter, search_regx, strongs, morph, tag=1, try_clean=True) def find_from_regex(self, ref_iter, search_regex, strongs=False, morph=False, added=True, tag=3, try_clean=False): """ Iterates through all the verses in the ref iter iterator and returns a list of verses whose text matches search_regx. """ # Get an iterator that will return tuples # (verse_reference, verse_text). verse_iter = IndexedVerseTextIter(ref_iter, strongs=strongs, morph=morph, added=added, module=self._module_name) found_set = set() for verse_ref, verse_text in verse_iter: info_print('\033[%dD\033[KSearching...%s' % \ (len(verse_ref) + 20, verse_ref), end='', tag=tag) # Search for matches in the verse text. if search_regex.search(verse_text): found_set.add(verse_ref) elif try_clean and not strongs and not morph: # Should we do this or should we trust the user knows what # puctuation are in the verses? clean_verse_text = self._clean_text(verse_text) if search_regex.search(clean_verse_text): found_set.add(verse_ref) info_print("...Done.", tag=tag) return found_set def mixed_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ mixed_search(self, search_terms, strongs=False, morph=False, case_sensitive=False, range_str='') -> Perform a mixed search. search_terms - Terms to search for. strongs - Search for Strong's Number phrases. morph - Search for Morphological Tag phrases. added - Search in the added text (i.e. italics). case_sensitive - Perform a case sensitive search. range_str - A verse range to limit the search to. 
""" found_set = set() not_set = set() and_set = set() or_set = set() xor_set = set() combine_dict = { '!': not_set.update, '+': and_set.intersection_update, '|': or_set.update, '^': xor_set.symmetric_difference_update, } for term in search_terms: if term[0] in '!+^|': # Set the correct combining function, and cleanup the item. if term[0] == '+' and not and_set: # All of these verses go in the output. combine_func = and_set.update else: combine_func = combine_dict[term[0]] term = term[1:] else: if self._multi and found_set: # If multiword is default and found_set is not empty # make all search terms appear in the output. combine_func = found_set.intersection_update else: # Any of these verses could be in the output combine_func = found_set.update if term.startswith('&'): # Allow regular expression searching. term = term[1:] search_func = self.regex_search elif ' ' in term: # Search term is a quoted string, so treat it like a phrase. if term.startswith('~'): # ~'s trigger ordered multiword or sloppy phrase search. term = term[1:] search_func = self.ordered_multiword_search else: search_func = self.mixed_phrase_search elif '*' in term: # Search for partial words. search_func = self.partial_word_search else: # A single word should be (multi/any)-word. search_func = self.multiword_search # Perform a strongs search. strongs = bool(self._strongs_regx.match(term.upper())) # Perform a morpholagical search. morph = bool(self._morph_regx.match(term.upper())) # Search for words or phrases. temp_set = search_func(term, strongs, morph, added, case_sensitive, range_str) # Add the results to the correct set. combine_func(temp_set) # Update the result set. found_set.update(or_set) found_set.update(xor_set) if and_set and found_set: # Make sure all the verses that are in the output have the words # or phrases that hade a '+' in front of them. found_set = and_set.union(found_set.intersection(and_set)) elif and_set: # Found set must be empty to fill it with and_set's contents. 
found_set.update(and_set) # Finally remove all the verses that are in the not_set. found_set.difference_update(not_set) return found_set def sword_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str='', search_type='lucene'): """ sword_search(self, search_terms, strongs=False, morph=False, case_sensitive=False, range_str='', search_type=-4) -> Use the sword module to search for the terms. search_terms - Terms to search for. strongs - Search for Strong's Number phrases. morph - Search for Morphological Tag phrases. case_sensitive - Perform a case sensitive search. range_str - A verse range to limit the search to. search_type - What search type to use. """ search_terms = ' '.join(search_terms) info_print("Searching using the Sword library for " "'%s'..." % search_terms, tag=1) found_set = set() search_type_dict = { 'regex': 0, 'phrase': -1, 'multiword': -2, 'entryattrib': -3, # (e.g. Word//Lemma//G1234) 'lucene': -4 } try: # Render the text as plain. markup = Sword.MarkupFilterMgr(Sword.FMT_PLAIN) # Don't own this or it will crash. markup.thisown = False mgr = Sword.SWMgr(markup) # Load the module. module = mgr.getModule(self._module_name) # Set the search type based on the search_type argument. search_type = search_type_dict.get(search_type.lower(), -4) # Make sure we can search like this. if not module.isSearchSupported(search_terms, search_type): print("Search not supported", file=sys.stderr) return found_set() # Get the range key. if not range_str: range_str = 'Genesis-Revelation' range_k = Sword.VerseKey().parseVerseList(range_str, 'Genesis 1:1', True) flags = re.I if not case_sensitive else 0 if strongs: # Search for strongs numbers. # I don't know how to search for morphological tags using # Swords search function. prefix = 'lemma:' for term in ','.join(search_terms.split()).split(','): if not term.startswith('lemma:'): # Make the term start with lemma: so sword will find # it. 
term = '%s%s' % (prefix, term) # Perform the search. resource = module.doSearch(term, search_type, flags, range_k) # Get the list of references from the range text. found_set.update(resource.getRangeText().split('; ')) else: # Perform the search. resource = module.doSearch(search_terms, search_type, flags, range_k) # Get the list of references from the range text. found_set.update(resource.getRangeText().strip().split('; ')) except Exception as err: print("There was a problem while searching: %s" % err, file=sys.stderr) found_set.discard('') return found_set @_process_search def test_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ A Test. """ ref_set = self._index_dict.value_union(search_terms.split(), case_sensitive) if range_str: # Only search through the supplied range. ref_set.intersection_update(range_str) ref_list = sorted(ref_set, key=sort_key) term_dict = defaultdict(list) raw_dict = RawDict(iter(ref_list), self._module_name) words_len = 0 for verse_ref, (verse_text, verse_dict) in raw_dict: for term in search_terms.split(): if self._strongs_regx.match(term): num = self._strongs_regx.sub('\\1', term) words = set(verse_dict[num.upper()]) if words: term_dict[num.upper()].append({verse_ref: words}) elif self._morph_regx.match(term): tag = self._morph_regx.sub('\\1', term) words = set(verse_dict[tag.upper()]) if words: term_dict[tag.upper()].append({verse_ref: words}) else: for key, value in verse_dict['_words'][0].items(): if ' %s ' % term.lower() in ' %s ' % key.lower(): attr_dict = value[0] if strongs and 'strongs' in attr_dict: attr_list = attr_dict['strongs'] attr_list.append(key) term_dict[term].append({verse_ref: attr_list}) if morph and 'morph' in attr_dict: attr_list = attr_dict['morph'] attr_list.append(key) words_len = max(len(attr_list), words_len) term_dict[term].append({verse_ref: attr_list}) len_longest_ref = len(max(ref_set, key=len)) for key, value in term_dict.items(): words_len = 
max([len(i) for d in value for i, v in d.items()]) print('%s:' % key) for dic in value: ref, words = tuple(dic.items())[0] if isinstance(words, list): w_str = '"%s"' % '", "'.join(words[:-1]) l_str = '"%s"' % words[-1] words_str = '{0:{2}}: {1}'.format(w_str, l_str, words_len) else: words_str = '"%s"' % '", "'.join(words) print('\t{0:{1}}: {2}'.format(ref, len_longest_ref, words_str)) #print('\t{0:{1}}: "{2}"'.format(ref, len_longest_ref, # '", "'.join(words))) exit() @_process_search def test2_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ A Test. """ ref_set = self._index_dict.value_union(search_terms.split(), case_sensitive) if range_str: # Only search through the supplied range. ref_set.intersection_update(range_str) ref_iter = iter(sorted(ref_set, key=sort_key)) # Get an iterator that will return tuples # (verse_reference, verse_text). verse_iter = IndexedVerseTextIter(ref_iter, strongs=True, morph=morph, added=added, module=self._module_name) # This will skip words. not_words_str = r'\b\w+\b' # This will skip Strong's Numbers. not_strongs_str = r'<[^>]*>' # This wil skip Morphological Tags. not_morph_str = r'\{[^\}]*\}' # This will skip all punctuation. Skipping ()'s is a problem for # searching Morphological Tags, but it is necessary for the # parenthesized words. May break highlighting. not_punct_str = r'[\s,\?\!\.;:\\/_\(\)\[\]"\'-]' max_ref_len = len(max(ref_set, key=len)) found_set = set() term_dict = defaultdict(list) for verse_ref, verse_text in verse_iter: for term in search_terms.split(): if self._strongs_regx.match(term): test_regx = re.compile(r''' \s ((?:\b\w+\b|[\s,\?\!\.;:\\/_\(\)\[\]"\'-])+) \s ((?:%s)+) ''' % term, re.I | re.X) elif self._morph_regx.match(term): test_regx = re.compile(r''' \s((?:\b\w+\b|[\s,\?\!\.;:\\/_\(\)\[\]"\'-])+) (?:<[^>]*>|\s)+ ((?:%s)+) ''' % term, re.I | re.X) else: test_regx = re.compile(r''' ((?:\b\w+\b|[\s,\?\!\.;:\\/_\(\)\[\]"\'-])*? 
%s (?:\b\w+\b|[\s,\?\!\.;:\\/_\(\)\[\]"\'-])+)+ ((?:<[^>]*>|\{[^\}]*\}|\s)+) ''' % term, re.I | re.X) for match in test_regx.finditer(verse_text): phrase, num = match.groups() phrase = phrase.strip(',').strip('.').strip() phrase = phrase.strip(';').strip('?').strip(':').strip() num = num.replace('<', '').replace('>', '') num = num.replace('{', '').replace('}', '') if not phrase or not num.strip(): if not strongs: break print(verse_ref, verse_text) print(match.group(), match.groups()) exit() num = '"%s"' % '", "'.join(num.split()) term_dict[term].append( '\t{0:{1}}: {2:{4}}: "{3}"'.format(verse_ref, max_ref_len, num, phrase, 18) ) for term, lst in term_dict.items(): term = term.replace('<', '').replace('>', '') term = term.replace('{', '').replace('}', '') print('%s:\n%s' % (term, '\n'.join(lst))) exit() @_process_search def test3_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, range_str=''): """ A Test. """ ref_set = self._index_dict.value_union(search_terms.split(), case_sensitive) if range_str: # Only search through the supplied range. ref_set.intersection_update(range_str) if not ref_set: exit() ref_iter = iter(sorted(ref_set, key=sort_key)) # Get an iterator that will return tuples # (verse_reference, verse_text). verse_iter = VerseTextIter(ref_iter, strongs=strongs, morph=morph, render='raw', module=self._module_name) found_set = set() strong_regx = re.compile(r'strong:([GH]\d+)', re.I) morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I) tag_regx = re.compile(r''' ([^<]*) # Before tag. <(?P<tag>q|w|transChange|note) # Tag name. ([^>]*)> # Tag attributes. ([\w\W]*?)</(?P=tag)> # Tag text and end. ([^<]*) # Between tags. ''', re.I | re.X) divname_regx = re.compile(r''' (?:<seg>)? <(?:divineName)>+ ([^<]*?) ([\'s]*) </(?:divineName)> (?:</seg>)? 
''', re.I | re.X) xadded_regx = re.compile(r'<seg subType="x-added"[^>]*>([^<]*)</seg>', re.I) div_upper = lambda m: m.group(1).upper() + m.group(2) marker_regx = re.compile(r'.*marker="(.)".*', re.I) term_dict = defaultdict(list) len_attrs = 0 for verse_ref, verse_text in verse_iter: #print(render_raw(verse_text, strongs, morph)) #print(render_raw2(verse_text, strongs, morph)) #continue for term in search_terms.split(): term = term.replace('<', '').replace('>', '') term = term.replace('{', '').replace('}', '') v_text = '' info_print('%s\n' % verse_text, tag=4) term_regx = re.compile('\\b%s\\b' % term, re.I) for match in tag_regx.finditer(verse_text): opt, tag_name, tag_attr, tag_text, punct = match.groups() tag_text = xadded_regx.sub('\\1', tag_text) if match.re.search(tag_text): match_list = match.re.findall(tag_text + punct) else: match_list = [match.groups()] for tag_tup in match_list: opt, tag_name, tag_attr, tag_text, punct = tag_tup info_print(tag_tup, tag=4) value_list = [] attr_list = [] strongs_list = [] morph_list = [] tag_text = divname_regx.sub(div_upper, tag_text) v_text += marker_regx.sub('\\1 ', opt) + tag_text + \ punct if term.upper() in tag_attr: attr_list = [term.upper()] elif term_regx.search(tag_text): if strongs or not morph: strongs_list = strong_regx.findall(tag_attr) if morph: morph_list = morph_regx.findall(tag_attr) for lst in (strongs_list, morph_list, attr_list): if lst: attr_str = '%s"' % '", "'.join(lst) value_list = [attr_str, tag_text.strip()] term_dict[term].append({verse_ref: value_list}) len_attrs = max(len(attr_str), len_attrs) info_print(v_text, tag=4) max_len_ref = len(max(ref_set, key=len)) for term, lst in term_dict.items(): print('%s:' % term) for dic in lst: ref, (attrs, s) = list(dic.items())[0] s_l = '{1:{0}}: "{2}'.format(len_attrs, attrs, s) print('\t{0:{1}}: "{2}"'.format(ref, max_len_ref, s_l)) exit() @_process_search def test4_search(self, search_terms, strongs=False, morph=False, added=True, case_sensitive=False, 
range_str=''): """ A Test. """ ref_set = self._index_dict.value_union(search_terms.split(), case_sensitive) if range_str: # Only search through the supplied range. ref_set.intersection_update(range_str) if not ref_set: exit() ref_iter = iter(sorted(ref_set, key=sort_key)) # Get an iterator that will return tuples # (verse_reference, verse_text). verse_iter = VerseTextIter(ref_iter, strongs=strongs, morph=morph, render='raw', module=self._module_name) found_set = set() strong_regx = re.compile(r'strong:([GH]\d+)', re.I) morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I) tag_regx = re.compile(r''' ([^<>]*) # Before tag. <(?P<tag>seg|q|w|transChange|note|title)# Tag name. ([^>]*)> # Tag attributes. ([\w\W]*?)</(?P=tag)> # Tag text and end. ([^<]*) # Between tags. ''', re.I | re.X) divname_regx = re.compile(r''' <(?:divineName)> ([^<]*?) ([\'s]*) </(?:divineName)> ''', re.I | re.X) div_upper = lambda m: m.group(1).upper() + m.group(2) marker_regx = re.compile(r'.*marker="(.)".*', re.I) term_dict = defaultdict(list) len_attrs = 0 def recurse_tag(text, term, verse_ref, ctag_attr=''): """ Recursively parses raw verse text using regular expressions, and a list of dictionaries of the search term and any attributes with its text. 
""" term_list = [] for match in tag_regx.finditer(text): value_list = [] attr_list = [] strongs_list = [] morph_list = [] opt, tag_name, tag_attr, tag_text, punct = match.groups() if match.re.search(tag_text): term_list.extend(recurse_tag(tag_text, term, verse_ref, tag_attr)) else: info_print((opt, tag_name, tag_attr, tag_text, punct), tag=4) if marker_regx.match(opt): opt = '' tag_text = opt + divname_regx.sub(div_upper, tag_text) + punct if term.upper() in tag_attr or term.upper() in ctag_attr: attr_list = [term.upper()] elif term_regx.search(tag_text): if strongs or not morph: strongs_list.extend(strong_regx.findall(tag_attr)) strongs_list.extend(strong_regx.findall(ctag_attr)) if morph: morph_list.extend(morph_regx.findall(tag_attr)) morph_list.extend(morph_regx.findall(ctag_attr)) for lst in (strongs_list, morph_list, attr_list): if lst: a_str = '%s"' % '", "'.join(lst) value_list = [a_str, tag_text.strip()] term_list.append({verse_ref: value_list}) return term_list for verse_ref, verse_text in verse_iter: #print(render_raw(verse_text, strongs, morph)) #print(render_raw2(verse_text, strongs, morph)) #continue for term in search_terms.split(): term = term.replace('<', '').replace('>', '') term = term.replace('{', '').replace('}', '') v_text = '' info_print('%s\n' % verse_text, tag=4) term_regx = re.compile('\\b%s\\b' % term, re.I) value_list = recurse_tag(verse_text, term, verse_ref) if value_list: for i in value_list: len_attrs = max(len(i[verse_ref][0]), len_attrs) term_dict[term].extend(value_list) max_len_ref = len(max(ref_set, key=len)) for term, lst in term_dict.items(): print('%s:' % term) for dic in lst: ref, (attrs, s) = list(dic.items())[0] s_l = '{1:{0}}: "{2}'.format(len_attrs, attrs, s) print('\t{0:{1}}: "{2}"'.format(ref, max_len_ref, s_l)) return set() concordance_search = test4_search class SearchCmd(Cmd): """ A Command line interface for searching the Bible. """ def __init__(self, module='KJV'): """ Initialize the settings. 
""" self.prompt = '\001\002search\001\002> ' self.intro = ''' %s Copyright (C) 2011 Josiah Gordon <josiahg@gmail.com> This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. This is a Bible search program that searches the KJV sword module. If you need help type 'help' to display a list of valid commands. For help on a specific command type 'help <command>.' Examples: mixed 'jordan h03383' (Finds all verses with Strong's number 'H03383' translated 'Jordan') concordance live (Lists the references of all the verses with the word 'live' in them, the Strong's number that was used, and what the phrase is that that Strong's number is translated as.) concordance h02418 (Lists the references of all the verses with the Strong's number 'H02418' and how it was translated. It only occures six times and all of them are in Daniel.) strongs h02418 (Looks up and gives the definition of the Strong's number 'H02418.') set range gen-mal (Sets the range to the Old Testament.) Just about everything has tab-completion, so you can hit tab a couple of times to see all the completions to what you are typing. If you want to see this intro again type: 'intro' To find out more type 'help' (example: 'help search' will list the help for the search command.) To exit type 'quit' or hit 'CTRL+D' ''' % os.path.basename(argv[0]) super(SearchCmd, self).__init__() self._quoted_regex = re.compile(''' ((?P<quote>'|") .*? (?P=quote)|[^'"]*) ''', re.X) # Perform the specified search. 
self._search = Search(module=module) self._results = set() self._search_list = [] self._highlight_list = [] self._words = self._search._index_dict['_words_'] self._strongs = self._search._index_dict['_strongs_'] self._morph = self._search._index_dict['_morph_'] self._book_list = list(book_gen()) self._setting_dict = { 'search_type': 'mixed', 'search_strongs': False, 'search_morph': False, 'case_sensitive': False, 'context': 0, 'one_line': False, 'show_notes': False, 'show_strongs': False, 'show_morph': False, 'added': True, 'range': '', 'extras': (), 'module': module, } self._search_types = ['mixed', 'mixed_phrase', 'multiword', 'anyword', 'combined', 'partial_word', 'ordered_multiword', 'regex', 'eitheror', 'sword_lucene', 'sword_phrase', 'sword_multiword', 'sword_entryattrib'] def _complete(self, text, line, begidx, endidx, complete_list): """ Return a list of matching text. """ retlist = [i for i in complete_list if i.startswith(text)] if not retlist: # If nothing was found try words that contain the text. retlist = [i for i in complete_list if text in i] if not retlist: # Finally try matching misspelled words. retlist = get_close_matches(text, complete_list, cutoff=0.7) return retlist def _get_list(self, args): """ Split the args into quoted strings and seperate words. """ arg_list = [] # Split the arg string into quoted phrases and single words. for i, c in self._quoted_regex.findall(args): if c in ['"', "'"]: arg_list.append(i.strip(c)) else: arg_list.extend(i.split()) return arg_list def do_test(self, args): """ A Test. """ quoted_regex = re.compile('''((?P<quote>'|").*?(?P=quote)|[^'"]*)''') print(quoted_regex.findall(args)) print(self._get_list(args)) def _print(self, text_iter): """ Print all the text breaking it and screens so the user can read it all. 
""" count = 0 for verse in text_iter: count += len(verse.splitlines()) if '\n' in verse else 1 print(verse) if count >= screen_size()[0] - 4: count = 0 try: input('[Press enter to see more, or CTRL+D to end.]') print('', end='') except: print('', end='') break def precmd(self, line): """ Set the correct settings before running the line. """ if not line: return line cmd = line.split()[0] if cmd in self._search_types: search_type = cmd if search_type.startswith('sword_'): self._setting_dict['extras'] = (search_type[6:],) search_type = search_type[:5] else: self._setting_dict['extras'] = () self._setting_dict['search_type'] = search_type return line def postcmd(self, stop, line): """ If lookup was called then show the results. """ if not line: return stop cmd = line.split()[0] if cmd == 'lookup': self.onecmd('show_results') return stop def completedefault(self, text, line, begidx, endidx): """ By default complete words in the Bible. """ words_list = self._words return self._complete(text, line, begidx, endidx, words_list) def do_shell(self, args): """ Execute shell commands. """ os.system(args) def do_concordance(self, args): """ Perform a concordance like search. """ if not args: return arg_list = self._get_list(args) # Search. strongs_search = self._setting_dict['search_strongs'] morph_search = self._setting_dict['search_morph'] search_range = self._setting_dict['range'] case_sensitive = self._setting_dict['case_sensitive'] search_added = self._setting_dict['added'] self._search.test4_search(arg_list, strongs_search, morph_search, search_added, case_sensitive, search_range) def do_show(self, args): """ Show relevent parts of the GPL. """ if args.lower() in ['c', 'copying']: # Show the conditions. print(copying_str) elif args.lower() in ['w', 'warranty']: # Show the warranty. print(warranty_str) else: # Show the entire license. print('%s%s' % (copying_str, warranty_str)) def do_EOF(self, args): """ Exit when eof is recieved. 
""" return True def do_quit(self, args): """ Exit. """ return True def do_help(self, args): """ Print the help. """ if args: try: self._print(getattr(self, 'do_%s' % args).__doc__.splitlines()) return except: pass super(SearchCmd, self).do_help(args) def do_intro(self, args): """ Re-print the intro screen. """ self._print(self.intro.splitlines()) def complete_show_results(self, text, line, begidx, endidx): """ Tab completion for the show_results command. """ cmd_list = ['strongs', 'morph', 'notes', 'one_line'] return self._complete(text, line, begidx, endidx, cmd_list) def do_show_results(self, args): """ Output the results. Print out all the verses that were either found by searching or by lookup. Extra arguments: +/-strongs - Enable/disable strongs in the output. +/-morph - Enable/disable morphology in the output +/-notes - Enable/disable foot notes in the output. +/-added - Enable/disable added text in the output. +/-one_line - Enable/disable one line output. anything else - If the output is from looking up verses with the lookup command, then any other words or quoted phrases given as arguments will be highlighted in the output. """ search_type = self._setting_dict['search_type'] strongs_search = self._setting_dict['search_strongs'] morph_search = self._setting_dict['search_morph'] search_range = self._setting_dict['range'] case_sensitive = self._setting_dict['case_sensitive'] search_added = self._setting_dict['added'] module_name = self._setting_dict['module'] highlight_list = self._highlight_list kwargs = self._setting_dict results = self._results # Get the output arguments. 
show_strongs = self._setting_dict['show_strongs'] or strongs_search show_morph = self._setting_dict['show_morph'] or morph_search show_notes = self._setting_dict['show_notes'] one_line = self._setting_dict['one_line'] arg_list = self._get_list(args) if '+strongs' in arg_list: show_strongs = True arg_list.remove('+strongs') if '+morph' in args: show_morph = True arg_list.remove('+morph') if '-strongs' in args: show_strongs = False arg_list.remove('-strongs') if '-morph' in args: show_strongs = False arg_list.remove('-morph') if '+notes' in args: show_notes = True arg_list.remove('+notes') if '-notes' in args: show_notes = False arg_list.remove('-notes') if '+one_line' in args: one_line = True arg_list.remove('+one_line') if '-one_line' in args: one_line = False arg_list.remove('-one_line') if '+added' in args: search_added = True arg_list.remove('+added') if '-added' in args: search_added = False arg_list.remove('-added') if search_range: results.intersection_update(parse_verse_range(search_range)) if not highlight_list: # Highlight anything else the user typed in. highlight_list = arg_list # Don't modify regular expression searches. if search_type != 'regex': regx_list = build_highlight_regx(highlight_list, case_sensitive, (search_type == 'ordered_multiword')) if kwargs['context']: regx_list.extend(build_highlight_regx(results, case_sensitive)) else: arg_str = ' '.join(arg_list) regx_list = [re.compile(arg_str, re.I if case_sensitive else 0)] # Flags for the highlight string. flags = re.I if not case_sensitive else 0 # Add the specified number of verses before and after to provide # context. context_results = sorted(add_context(results, kwargs['context']), key=sort_key) # Get a formated verse string generator. verse_gen = render_verses_with_italics(context_results, not one_line, show_strongs, show_morph, search_added, show_notes, highlight_search_terms, module_name, regx_list, highlight_text, flags) if one_line: # Print it all on one line. 
print(' '.join(verse_gen)) else: # Print the verses on seperate lines. self._print(verse_gen) #print('\n'.join(verse_gen)) def complete_lookup(self, text, line, begidx, endidx): """ Try to complete Verse references. """ name_list = self._book_list text = text.capitalize() return self._complete(text, line, begidx, endidx, name_list) def do_lookup(self, args): """ Lookup the verses by references. Example: lookup gen1:3-5;mal3 (Look up Genesis chapter 1 verses 3-5 and Malachi chapter 3.) """ self._results = parse_verse_range(args) self._highlight_list = [] def complete_strongs(self, text, line, begidx, endidx): """ Tabe complete Strong's numbers. """ text = text.capitalize() return self._complete(text, line, begidx, endidx, self._strongs) def do_strongs(self, numbers): """ Lookup one or more Strong's Numbers. strongs number,number,number.... """ # Lookup all the Strong's Numbers in the argument list. # Make all the numbers seperated by a comma. strongs_list = ','.join(numbers.upper().split()).split(',') #TODO: Find what Strong's Modules are available and use the best, # or let the user decide. greek_strongs_lookup = Lookup('StrongsRealGreek') hebrew_strongs_lookup = Lookup('StrongsRealHebrew') for strongs_num in strongs_list: # Greek Strong's Numbers start with a 'G' and Hebrew ones start # with an 'H.' if strongs_num.upper().startswith('G'): mod_name = 'StrongsRealGreek' else: mod_name = 'StrongsRealHebrew' print('%s\n' % mod_lookup(mod_name, strongs_num[1:])) def complete_morph(self, text, line, begidx, endidx): """ Tabe complete Morphological Tags. """ text = text.capitalize() return self._complete(text, line, begidx, endidx, self._morph) def do_morph(self, tags): """ Lookup one or more Morphological Tags. morph tag,tag,tag.... """ # Lookup all the Morphological Tags in the argument list. # I don't know how to lookup Hebrew morphological tags, so I # only lookup Greek ones in 'Robinson.' 
print('%s\n' % mod_lookup('Robinson', tags.upper())) def do_websters(self, words): """ Lookup one or more words in Websters Dictionary. websters word,word,word... """ # Lookup words in the dictionary. print('%s\n' % mod_lookup('WebstersDict', words)) def do_kjvd(self, words): """ Lookup one or more words in the KJV Dictionary. kjvd word,word,word... """ # Lookup words in the KJV dictionary. print('%s\n' % mod_lookup('KJVD', words)) def do_daily(self, daily): """ Display a daily devotional from 'Bagsters Daily light.' daily date/today Dates are given in the format Month.Day. The word 'today' is an alias to today's date. The default is to lookup today's devotional. """ daily = 'today' if not daily else daily # Lookup the specified daily devotional. if daily.lower() == 'today': # Today is an alias for today's date. daily = strftime('%m.%d') daily_lookup = Lookup('Daily') # Try to make the output nicer. print(daily_lookup.get_formatted_text(daily)) def complete_set(self, text, line, begidx, endidx): """ Complete setting options. """ setting_list = self._setting_dict.keys() return self._complete(text, line, begidx, endidx, setting_list) def do_set(self, args): """ Set settings. Run without arguments to see the current settings. set show_strongs = True/False - Enable strongs numbers in the output. set show_morph = True/False - Enable morphology in the output. set context = <number> - Show <number> verses of context. set case_sensitive = True/False - Set the search to case sensitive. set range = <range> - Confine search/output to <range>. set one_line = True/False - Don't break output at verses. set added = True/False - Show/search added text. set show_notes = True/False - Show foot-notes in output. set search_type = <type> - Use <type> for searching. set search_strongs = True/False - Search Strong's numbers (deprecated). set search_morph = True/False - Search Morphological Tags (deprecated). 
""" if not args: print("Current settings:\n") max_len = len(max(self._setting_dict.keys(), key=len)) for setting, value in self._setting_dict.items(): if setting.lower() == 'range': if not Sword: value = VerseRange.parse_range(value) value = '; '.join(str(i) for i in value) else: key = Sword.VerseKey() range_list = key.parseVerseList(value, 'Genesis 1:1', True, False) value = range_list.getRangeText() print('{1:{0}} = {2}'.format(max_len, setting, value)) print() else: for setting in args.split(';'): if '=' in setting: k, v = setting.split('=') elif ' ' in setting: k, v = setting.split() else: print(self._setting_dict.get(setting, '')) continue k = k.strip() v = v.strip() if isinstance(v, str): if v.lower() == 'false': v = False elif v.lower() == 'true': v = True elif v.isdigit(): v = int(v) self._setting_dict[k] = v def complete_search(self, text, line, begidx, endidx): """ Bible word completion to make searching easier. """ words_list = self._words return self._complete(text, line, begidx, endidx, words_list) complete_mixed = complete_search complete_mixed_phrase = complete_search complete_multiword = complete_search complete_anyword = complete_search complete_combined = complete_search complete_partial_word = complete_search complete_ordered_multiword = complete_search complete_regex = complete_search complete_eitheror = complete_search complete_sword_lucene = complete_search complete_sword_phrase = complete_search complete_sword_multiword = complete_search complete_sword_entryattrib = complete_search def do_search(self, args): """ Search the Bible. Search types are: mixed - A search made up of a mix of most of the other search types. Put an '!' in front of words/phrases that you don't want in any of the results. mixed_phrase - A phrase search that can include words, Strong's, and Morphology. Can be used in the mixed search by including words in quotes. multiword - Search for verses containing each word at least once. 
Use in the mixed search by putting a '+' in front of any word/phrase you want to be in all the results. anyword - Search for verses containing one or more of any of the words. Use in the mixed search by putting a '|' in front of any word/phrase you want in any but not necessarily all the results. eitheror - Search for verses containing one and only one of the words. In the mixed search put a '^' in front of two or more words/phrases to make the results contain one and only one of the marked search terms. combined - Search using a phrase like ('in' AND ('the' OR 'it')) finding verses that have both 'in' and 'the' or both 'in' and 'it'. To do the same thing with the mixed search use a phrase like this: (mixed '+in' '^the' '^it'). partial_word - Search for partial words (e.g. a search for 'begin*' would find all the words starting with 'begin'.) Use in the mixed search to make partial words in a phrase. ordered_multiword - Search for words in order, but not necessarily in a phrase. In the mixed search put a '~' in front of any quoted group of words you want to be in that order, but you don't mind if they have other words between them. regex - A regular expression search (slow). Examples: mixed - (mixed '+~in the beg*' '!was') finds any verse that has the words 'in', 'the', and any word starting with 'beg', in order, but not the word 'was.' mixed_phrase - (mixed_phrase 'h011121 of gomer') finds any verse with that phrase. mixed search flags first column prefix (these should come first): ---------------------------------------------------------------- ! = not (not in any of the results) + = all (in all the results) | = or (in at least one result) ^ = exclusive or (only one in any of the results) not example: (mixed 'in the beginning' !was) results will have the phrase 'in the beginning' but will not have the word 'was.' all example: (mixed 'in the beginning' +was) results may have the phrase 'in the beginning' but all of them will have the word 'was.' (note. 
this will find all verses with the word 'was' in them if you want it to have the phrase 'in the beginning' also you have to prefix it with a '+' aswell) or example: (mixed 'in the beginning' |was) results will be all the verses with the phrase 'in the beginning' and all the verses with the word 'was.' This is the default way the mixed search operates, so the '|' can be excluded in this case. exclusive or example: (mixed '^in the beginning' '^was') results will either have the phrase 'in the beginning' or the word 'was', but not both. To be effective you must have at least two search terms prefixed with '^.' mixed search flags second column prefix (these come after the first column flags): ------------------------------------------------------------------- ~ = sloppy phrase or ordered multiword & = regular expression search. sloppy phrase example: (mixed '~in the beginning') results will have all the words 'in', 'the', and 'beginning,' but they may have other words between them. regular expression example: (mixed '&\\b[iI]n\\b\s+\\b[tT[Hh][eE]\\b\s+\\b[bB]eginning\\b') results will be all the verses with the phrase 'in the beginning.' """ if not args: return arg_list = self._get_list(args) arg_str = ' '.join(arg_list) self._search_list = arg_list extras = self._setting_dict['extras'] search_type = self._setting_dict['search_type'] try: # Get the search function asked for. search_func = getattr(self._search, '%s_search' % search_type) except AttributeError as err: # An invalid search type was specified. print("Invalid search type: %s" % search_type, file=sys.stderr) exit() # Search. 
strongs_search = self._setting_dict['search_strongs'] morph_search = self._setting_dict['search_morph'] search_range = self._setting_dict['range'] case_sensitive = self._setting_dict['case_sensitive'] search_added = self._setting_dict['added'] self._results = search_func(arg_list, strongs_search, morph_search, search_added, case_sensitive, search_range, *extras) count = len(self._results) info_print("\nFound %s verse%s.\n" % \ (count, 's' if count != 1 else ''), tag=-10) print("To view the verses type 'show_results.'") if search_type in ['combined', 'combined_phrase']: # Combined searches are complicated. # Parse the search argument and build a highlight string from the # result. arg_parser = CombinedParse(arg_str) parsed_args = arg_parser.word_list not_l = arg_parser.not_list # Remove any stray '+'s. #highlight_str = highlight_str.replace('|+', ' ') if search_type == 'combined_phrase': # A phrase search needs to highlight phrases. highlight_list = parsed_args else: highlight_list = ' '.join(parsed_args).split() # Build the highlight string for the other searches. elif search_type in ['anyword', 'multiword', 'eitheror', 'partial_word']: # Highlight each word separately. highlight_list = arg_str.split() elif search_type == 'mixed': # In mixed search phrases are in quotes so the arg_list should be # what we want, but don't include any !'ed words. highlight_list = [i for i in arg_list if not i.startswith('!')] elif search_type in ['phrase', 'mixed_phrase', 'ordered_multiword']: # Phrases should highlight phrases. 
highlight_list = [arg_str] elif search_type == 'sword': highlight_list = arg_list self._highlight_list = highlight_list do_mixed = do_search do_mixed_phrase = do_search do_multiword = do_search do_anyword = do_search do_combined = do_search do_partial_word = do_search do_ordered_multiword = do_search do_regex = do_search do_eitheror = do_search do_sword_lucene = do_search do_sword_phrase = do_search do_sword_multiword = do_search do_sword_entryattrib = do_search
"""Development Django settings.

Extends the shared base configuration with debug-friendly defaults and a
local SQLite database.  Not for production: DEBUG is on and the fallback
SECRET_KEY below is checked into source control.
"""
import os

from .common_settings import *

# Full tracebacks and template debug info while developing.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# Prefer a key from the environment; fall back to the historical dev key so
# existing local setups keep working unchanged.
# NOTE(review): the fallback value is public -- never use it in production.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    'dz(#w(lfve24ck!!yrt3l7$jfdoj+fgf+ru@w)!^gn9aq$s+&y')

# Local file-based database; BASE_DIR is provided by common_settings.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
"""Module for generating CTS test descriptions and test plans.""" import glob import os import re import shutil import subprocess import sys import xml.dom.minidom as dom from cts import tools from multiprocessing import Pool def GetSubDirectories(root): """Return all directories under the given root directory.""" return [x for x in os.listdir(root) if os.path.isdir(os.path.join(root, x))] def GetMakeFileVars(makefile_path): """Extracts variable definitions from the given make file. Args: makefile_path: Path to the make file. Returns: A dictionary mapping variable names to their assigned value. """ result = {} pattern = re.compile(r'^\s*([^:#=\s]+)\s*:=\s*(.*?[^\\])$', re.MULTILINE + re.DOTALL) stream = open(makefile_path, 'r') content = stream.read() for match in pattern.finditer(content): result[match.group(1)] = match.group(2) stream.close() return result class CtsBuilder(object): """Main class for generating test descriptions and test plans.""" def __init__(self, argv): """Initialize the CtsBuilder from command line arguments.""" if len(argv) != 6: print 'Usage: %s <testRoot> <ctsOutputDir> <tempDir> <androidRootDir> <docletPath>' % argv[0] print '' print 'testRoot: Directory under which to search for CTS tests.' print 'ctsOutputDir: Directory in which the CTS repository should be created.' print 'tempDir: Directory to use for storing temporary files.' print 'androidRootDir: Root directory of the Android source tree.' print 'docletPath: Class path where the DescriptionGenerator doclet can be found.' 
sys.exit(1) self.test_root = sys.argv[1] self.out_dir = sys.argv[2] self.temp_dir = sys.argv[3] self.android_root = sys.argv[4] self.doclet_path = sys.argv[5] self.test_repository = os.path.join(self.out_dir, 'repository/testcases') self.plan_repository = os.path.join(self.out_dir, 'repository/plans') self.definedplans_repository = os.path.join(self.android_root, 'cts/tests/plans') def GenerateTestDescriptions(self): """Generate test descriptions for all packages.""" pool = Pool(processes=2) # generate test descriptions for android tests results = [] pool.close() pool.join() return sum(map(lambda result: result.get(), results)) def __WritePlan(self, plan, plan_name): print 'Generating test plan %s' % plan_name plan.Write(os.path.join(self.plan_repository, plan_name + '.xml')) def GenerateTestPlans(self): """Generate default test plans.""" # TODO: Instead of hard-coding the plans here, use a configuration file, # such as test_defs.xml packages = [] descriptions = sorted(glob.glob(os.path.join(self.test_repository, '*.xml'))) for description in descriptions: doc = tools.XmlFile(description) packages.append(doc.GetAttr('TestPackage', 'appPackageName')) # sort the list to give the same sequence based on name packages.sort() plan = tools.TestPlan(packages) plan.Exclude('android\.performance.*') self.__WritePlan(plan, 'CTS') self.__WritePlan(plan, 'CTS-TF') plan = tools.TestPlan(packages) plan.Exclude('android\.performance.*') plan.Exclude('android\.media\.cts\.StreamingMediaPlayerTest.*') # Test plan to not include media streaming tests self.__WritePlan(plan, 'CTS-No-Media-Stream') plan = tools.TestPlan(packages) plan.Exclude('android\.performance.*') self.__WritePlan(plan, 'SDK') plan.Exclude(r'android\.signature') plan.Exclude(r'android\.core.*') self.__WritePlan(plan, 'Android') plan = tools.TestPlan(packages) plan.Include(r'android\.core\.tests.*') plan.Exclude(r'android\.core\.tests\.libcore.\package.\harmony*') self.__WritePlan(plan, 'Java') # TODO: remove this 
once the tests are fixed and merged into Java plan above. plan = tools.TestPlan(packages) plan.Include(r'android\.core\.tests\.libcore.\package.\harmony*') self.__WritePlan(plan, 'Harmony') plan = tools.TestPlan(packages) plan.Include(r'android\.core\.vm-tests-tf') self.__WritePlan(plan, 'VM-TF') plan = tools.TestPlan(packages) plan.Include(r'android\.tests\.appsecurity') self.__WritePlan(plan, 'AppSecurity') # hard-coded white list for PDK plan plan.Exclude('.*') plan.Include('android\.aadb') plan.Include('android\.bluetooth') plan.Include('android\.graphics.*') plan.Include('android\.hardware') plan.Include('android\.media') plan.Exclude('android\.mediastress') plan.Include('android\.net') plan.Include('android\.opengl.*') plan.Include('android\.renderscript') plan.Include('android\.telephony') plan.Include('android\.nativemedia.*') plan.Include('com\.android\.cts\..*')#TODO(stuartscott): Should PDK have all these? self.__WritePlan(plan, 'PDK') flaky_tests = BuildCtsFlakyTestList() # CTS Stable plan plan = tools.TestPlan(packages) plan.Exclude(r'com\.android\.cts\.browserbench') for package, test_list in flaky_tests.iteritems(): plan.ExcludeTests(package, test_list) self.__WritePlan(plan, 'CTS-stable') # CTS Flaky plan - list of tests known to be flaky in lab environment plan = tools.TestPlan(packages) plan.Exclude('.*') plan.Include(r'com\.android\.cts\.browserbench') for package, test_list in flaky_tests.iteritems(): plan.Include(package+'$') plan.IncludeTests(package, test_list) self.__WritePlan(plan, 'CTS-flaky') small_tests = BuildAospSmallSizeTestList() medium_tests = BuildAospMediumSizeTestList() new_test_packages = BuildCtsVettedNewPackagesList() # CTS - sub plan for public, small size tests plan = tools.TestPlan(packages) plan.Exclude('.*') for package, test_list in small_tests.iteritems(): plan.Include(package+'$') plan.Exclude(r'com\.android\.cts\.browserbench') for package, test_list in flaky_tests.iteritems(): plan.ExcludeTests(package, test_list) 
self.__WritePlan(plan, 'CTS-kitkat-small') # CTS - sub plan for public, medium size tests plan = tools.TestPlan(packages) plan.Exclude('.*') for package, test_list in medium_tests.iteritems(): plan.Include(package+'$') plan.Exclude(r'com\.android\.cts\.browserbench') for package, test_list in flaky_tests.iteritems(): plan.ExcludeTests(package, test_list) self.__WritePlan(plan, 'CTS-kitkat-medium') # CTS - sub plan for hardware tests which is public, large plan = tools.TestPlan(packages) plan.Exclude('.*') plan.Include(r'android\.hardware$') plan.Exclude(r'com\.android\.cts\.browserbench') for package, test_list in flaky_tests.iteritems(): plan.ExcludeTests(package, test_list) self.__WritePlan(plan, 'CTS-hardware') # CTS - sub plan for media tests which is public, large plan = tools.TestPlan(packages) plan.Exclude('.*') plan.Include(r'android\.media$') plan.Include(r'android\.view$') plan.Exclude(r'com\.android\.cts\.browserbench') for package, test_list in flaky_tests.iteritems(): plan.ExcludeTests(package, test_list) self.__WritePlan(plan, 'CTS-media') # CTS - sub plan for mediastress tests which is public, large plan = tools.TestPlan(packages) plan.Exclude('.*') plan.Include(r'android\.mediastress$') plan.Exclude(r'com\.android\.cts\.browserbench') for package, test_list in flaky_tests.iteritems(): plan.ExcludeTests(package, test_list) self.__WritePlan(plan, 'CTS-mediastress') # CTS - sub plan for new tests that is vetted for L launch plan = tools.TestPlan(packages) plan.Exclude('.*') for package, test_list in new_test_packages.iteritems(): plan.Include(package+'$') plan.Exclude(r'com\.android\.cts\.browserbench') for package, test_list in flaky_tests.iteritems(): plan.ExcludeTests(package, test_list) self.__WritePlan(plan, 'CTS-l-tests') #CTS - sub plan for new test packages added for staging plan = tools.TestPlan(packages) for package, test_list in small_tests.iteritems(): plan.Exclude(package+'$') for package, test_list in medium_tests.iteritems(): 
plan.Exclude(package+'$') for package, tests_list in new_test_packages.iteritems(): plan.Exclude(package+'$') plan.Exclude(r'android\.hardware$') plan.Exclude(r'android\.media$') plan.Exclude(r'android\.view$') plan.Exclude(r'android\.mediastress$') plan.Exclude(r'com\.android\.cts\.browserbench') for package, test_list in flaky_tests.iteritems(): plan.ExcludeTests(package, test_list) self.__WritePlan(plan, 'CTS-staging') plan = tools.TestPlan(packages) plan.Exclude('.*') plan.Include(r'com\.drawelements\.') self.__WritePlan(plan, 'CTS-DEQP') plan = tools.TestPlan(packages) plan.Exclude('.*') plan.Include(r'android\.webgl') self.__WritePlan(plan, 'CTS-webview') def BuildAospMediumSizeTestList(): """ Construct a defaultdic that lists package names of medium tests already published to aosp. """ return { 'android.app' : [], 'android.core.tests.libcore.package.libcore' : [], 'android.core.tests.libcore.package.org' : [], 'android.core.vm-tests-tf' : [], 'android.dpi' : [], 'android.host.security' : [], 'android.net' : [], 'android.os' : [], 'android.permission2' : [], 'android.security' : [], 'android.telephony' : [], 'android.webkit' : [], 'android.widget' : [], 'com.android.cts.browserbench' : []} def BuildAospSmallSizeTestList(): """ Construct a defaultdict that lists packages names of small tests already published to aosp. 
""" return { 'android.aadb' : [], 'android.acceleration' : [], 'android.accessibility' : [], 'android.accessibilityservice' : [], 'android.accounts' : [], 'android.admin' : [], 'android.animation' : [], 'android.bionic' : [], 'android.bluetooth' : [], 'android.calendarcommon' : [], 'android.content' : [], 'android.core.tests.libcore.package.com' : [], 'android.core.tests.libcore.package.conscrypt' : [], 'android.core.tests.libcore.package.dalvik' : [], 'android.core.tests.libcore.package.sun' : [], 'android.core.tests.libcore.package.tests' : [], 'android.database' : [], 'android.dreams' : [], 'android.drm' : [], 'android.effect' : [], 'android.gesture' : [], 'android.graphics' : [], 'android.graphics2' : [], 'android.jni' : [], 'android.keystore' : [], 'android.location' : [], 'android.nativemedia.sl' : [], 'android.nativemedia.xa' : [], 'android.nativeopengl' : [], 'android.ndef' : [], 'android.opengl' : [], 'android.openglperf' : [], 'android.permission' : [], 'android.preference' : [], 'android.preference2' : [], 'android.provider' : [], 'android.renderscript' : [], 'android.rscpp' : [], 'android.rsg' : [], 'android.sax' : [], 'android.signature' : [], 'android.speech' : [], 'android.tests.appsecurity' : [], 'android.text' : [], 'android.textureview' : [], 'android.theme' : [], 'android.usb' : [], 'android.util' : [], 'com.android.cts.dram' : [], 'com.android.cts.filesystemperf' : [], 'com.android.cts.jank' : [], 'com.android.cts.opengl' : [], 'com.android.cts.simplecpu' : [], 'com.android.cts.ui' : [], 'com.android.cts.uihost' : [], 'com.android.cts.videoperf' : [], 'zzz.android.monkey' : []} def BuildCtsVettedNewPackagesList(): """ Construct a defaultdict that maps package names that is vetted for L. 
""" return { 'android.JobScheduler' : [], 'android.core.tests.libcore.package.harmony_annotation' : [], 'android.core.tests.libcore.package.harmony_beans' : [], 'android.core.tests.libcore.package.harmony_java_io' : [], 'android.core.tests.libcore.package.harmony_java_lang' : [], 'android.core.tests.libcore.package.harmony_java_math' : [], 'android.core.tests.libcore.package.harmony_java_net' : [], 'android.core.tests.libcore.package.harmony_java_nio' : [], 'android.core.tests.libcore.package.harmony_java_util' : [], 'android.core.tests.libcore.package.harmony_java_text' : [], 'android.core.tests.libcore.package.harmony_javax_security' : [], 'android.core.tests.libcore.package.harmony_logging' : [], 'android.core.tests.libcore.package.harmony_prefs' : [], 'android.core.tests.libcore.package.harmony_sql' : [], 'android.core.tests.libcore.package.jsr166' : [], 'android.core.tests.libcore.package.okhttp' : [], 'android.display' : [], 'android.host.theme' : [], 'android.jdwp' : [], 'android.location2' : [], 'android.print' : [], 'android.renderscriptlegacy' : [], 'android.signature' : [], 'android.tv' : [], 'android.uiautomation' : [], 'android.uirendering' : [], 'android.webgl' : [], 'com.drawelements.deqp.gles3' : [], 'com.drawelements.deqp.gles31' : []} def BuildCtsFlakyTestList(): """ Construct a defaultdict that maps package name to a list of tests that are known to be flaky in the lab or not passing on userdebug builds. 
""" return { 'android.app' : [ 'cts.ActivityManagerTest#testIsRunningInTestHarness',], 'android.dpi' : [ 'cts.DefaultManifestAttributesSdkTest#testPackageHasExpectedSdkVersion',], 'android.hardware' : [ 'cts.CameraTest#testVideoSnapshot', 'cts.CameraGLTest#testCameraToSurfaceTextureMetadata', 'cts.CameraGLTest#testSetPreviewTextureBothCallbacks', 'cts.CameraGLTest#testSetPreviewTexturePreviewCallback',], 'android.media' : [ 'cts.DecoderTest#testCodecResetsH264WithSurface', 'cts.StreamingMediaPlayerTest#testHLS',], 'android.net' : [ 'cts.ConnectivityManagerTest#testStartUsingNetworkFeature_enableHipri', 'cts.DnsTest#testDnsWorks', 'cts.SSLCertificateSocketFactoryTest#testCreateSocket', 'cts.SSLCertificateSocketFactoryTest#test_createSocket_bind', 'cts.SSLCertificateSocketFactoryTest#test_createSocket_simple', 'cts.SSLCertificateSocketFactoryTest#test_createSocket_wrapping', 'cts.TrafficStatsTest#testTrafficStatsForLocalhost', 'wifi.cts.NsdManagerTest#testAndroidTestCaseSetupProperly',], 'android.os' : [ 'cts.BuildVersionTest#testReleaseVersion', 'cts.BuildTest#testIsSecureUserBuild',], 'android.security' : [ 'cts.BannedFilesTest#testNoSu', 'cts.BannedFilesTest#testNoSuInPath', 'cts.ListeningPortsTest#testNoRemotelyAccessibleListeningUdp6Ports', 'cts.ListeningPortsTest#testNoRemotelyAccessibleListeningUdpPorts', 'cts.PackageSignatureTest#testPackageSignatures', 'cts.SELinuxDomainTest#testSuDomain', 'cts.SELinuxHostTest#testAllEnforcing',], 'android.webkit' : [ 'cts.WebViewClientTest#testOnUnhandledKeyEvent',], 'com.android.cts.filesystemperf' : [ 'RandomRWTest#testRandomRead', 'RandomRWTest#testRandomUpdate',], '' : []} def LogGenerateDescription(name): print 'Generating test description for package %s' % name if __name__ == '__main__': builder = CtsBuilder(sys.argv) result = builder.GenerateTestDescriptions() if result != 0: sys.exit(result) builder.GenerateTestPlans()
'''
author: Tomasz Spustek
e-mail: tomasz@spustek.pl
University of Warsaw, July 06, 2015
'''
import numpy as np
import scipy.stats as scp

from scipy.io import loadmat


def generateTestSignal(gaborParams, sinusParams, asymetricWaveformsAParams,
                       rectangularWaveformsAParams, numberOfSamples,
                       samplingFrequency, noiseRatio, silenceFlag=1):
    '''Build a synthetic test signal as a sum of component waveforms.

    gaborParams - numpy array (rows as for gaborFunction) or None
    sinusParams - numpy array of amplitude-frequency-phase trios or None
    asymetricWaveformsAParams - numpy array of
        amplitude-frequency-position-sigma-asymetry rows or None
    rectangularWaveformsAParams - numpy array of
        amplitude-frequency-position-sigma-r rows or None
    noiseRatio - float (0 - 1), relative level of additive white noise
    silenceFlag - 0 to print per-component progress messages

    Returns (signal, time), both numpy arrays of length numberOfSamples.
    '''
    time = np.arange(0, numberOfSamples)
    signal = np.squeeze(np.zeros((numberOfSamples, 1)))

    ind1 = 0
    if gaborParams is not None:
        for gabor in gaborParams:
            (tmp, time) = gaborFunction(gabor)
            signal += tmp
            ind1 += 1
        if silenceFlag == 0:
            print('{} gabors generated'.format(ind1))

    ind1 = 0
    if sinusParams is not None:
        for param in sinusParams:
            # convert Hz to radians per sample (relative to Nyquist)
            freq = (param[1] / (0.5 * samplingFrequency)) * np.pi
            signal += np.array(param[0] * np.sin(freq * time + param[2]))
            ind1 += 1
        if silenceFlag == 0:
            print('{} sinusoids generated'.format(ind1))

    ind1 = 0
    if asymetricWaveformsAParams is not None:
        for asym in asymetricWaveformsAParams:
            amplitude = asym[0]
            freq = (asym[1] / (0.5 * samplingFrequency)) * np.pi
            # BUG FIX: cast to int -- rows come from a float numpy array and
            # modern numpy rejects float slice indices / linspace counts.
            pos = int(asym[2])
            sigma = int(asym[3])
            asymetry = asym[4]
            # log-normal envelope sampled over its central probability mass
            x = np.linspace(scp.lognorm.ppf(0.0001, asymetry),
                            scp.lognorm.ppf(0.9999, asymetry), sigma)
            envelope = scp.lognorm.pdf(x, asymetry)
            tmp = np.squeeze(np.zeros((numberOfSamples, 1)))
            tmp[pos:pos + sigma] = amplitude * envelope
            tmp = tmp * np.cos(freq * time)
            signal += tmp
            ind1 += 1
        if silenceFlag == 0:
            print('{} asymmetrical waveforms generated'.format(ind1))

    ind1 = 0
    if rectangularWaveformsAParams is not None:
        # imported lazily so the module stays importable without src.dictionary
        from src.dictionary import tukey
        for rect in rectangularWaveformsAParams:
            amplitude = rect[0]
            freq = (rect[1] / (0.5 * samplingFrequency)) * np.pi
            pos = int(rect[2])
            sigma = int(rect[3])
            r = rect[4]
            envelope = tukey(sigma, r)
            tmp = np.squeeze(np.zeros((numberOfSamples, 1)))
            tmp[pos:pos + sigma] = amplitude * envelope
            tmp = tmp * np.cos(freq * time)
            signal += tmp
            ind1 += 1
        if silenceFlag == 0:
            print('{} rectangular waveforms generated'.format(ind1))

    if noiseRatio != 0:
        # BUG FIX: noiseRatio was documented and accepted but never applied.
        # Additive white Gaussian noise with std = noiseRatio * std(signal).
        # NOTE(review): the intended noise model was unspecified upstream --
        # confirm this definition of noiseRatio.
        scale = np.std(signal)
        if scale == 0:
            scale = 1.0
        signal = signal + noiseRatio * scale * np.random.randn(numberOfSamples)

    return (signal, time)


def gaborFunction(params):
    '''Generate a single Gabor atom (Gaussian-windowed cosine).

    params: numpy array containing:
        numberOfSamples in [samples]
        samplingFreq    in [Hz]
        amplitude       in [au]
        position        in [s]
        width           in [s]
        atomFreq        in [Hz]
        phase           in [rad]

    Returns (signal, time), both of length numberOfSamples.
    '''
    numberOfSamples = params[0]
    samplingFreq = params[1]
    amplitude = params[2]
    position = params[3] * samplingFreq   # seconds -> samples
    width = params[4] * samplingFreq      # seconds -> samples
    frequency = (params[5] / (0.5 * samplingFreq)) * np.pi
    phase = params[6]
    time = np.arange(0, numberOfSamples)
    signal = np.array(amplitude
                      * np.exp(-np.pi * ((time - position) / width) ** 2)
                      * np.cos(frequency * (time - position) + phase))
    return (signal, time)


def simpleValues():
    '''Preset: two Gabor atoms plus one sinusoid, no noise.'''
    numberOfSamples = 1000
    samplingFreq = 250.0
    amplitude = 12.0
    position1 = 3.0
    position2 = 1.0
    width = 0.5
    frequency1 = 12.0
    frequency2 = 15.0
    phase = 0.0
    gaborParams = np.array(
        [[numberOfSamples, samplingFreq, amplitude, position1, width, frequency1, phase],
         [numberOfSamples, samplingFreq, amplitude, position2, width, frequency2, phase]])
    sinusParams = np.array([[5.0, 5.0, 0.0]])
    noiseRatio = 0.0
    return (gaborParams, sinusParams, None, None,
            noiseRatio, samplingFreq, numberOfSamples)


def advancedValues():
    '''Preset: two asymmetric waveforms plus one sinusoid, no noise.'''
    numberOfSamples = 1000
    samplingFreq = 250.0
    amplitude1 = 12
    amplitude2 = 20
    freq1 = 10.0
    freq2 = 20.0
    pos1 = 250
    pos2 = 500
    sigma = 500
    asymetry = 0.45
    asymetricParams = np.array([[amplitude1, freq1, pos1, sigma, asymetry],
                                [amplitude2, freq2, pos2, sigma, asymetry]])
    sinusParams = np.array([[2.0, 5.0, 0.0]])
    noiseRatio = 0.0
    return (None, sinusParams, asymetricParams, None,
            noiseRatio, samplingFreq, numberOfSamples)


def masterValues():
    '''Preset: one Gabor, one asymmetric, one rectangular waveform and a
    sinusoid, no noise.'''
    numberOfSamples = 2000
    samplingFreq = 250.0
    amplitude1 = 15
    amplitude2 = 20
    amplitude3 = 10
    freq1 = 5.0
    freq2 = 10.0
    freq3 = 15.0
    pos1 = 2.0
    pos2 = 1000
    pos3 = 1500
    sigma1 = 0.5
    sigma2 = 500
    sigma3 = 300
    asymetry = 0.45
    rectangularity = 0.25
    gaborParams = np.array(
        [[numberOfSamples, samplingFreq, amplitude1, pos1, sigma1, freq1, 0]])
    asymetricParams = np.array([[amplitude2, freq2, pos2, sigma2, asymetry]])
    rectParams = np.array([[amplitude3, freq3, pos3, sigma3, rectangularity]])
    sinusParams = np.array([[2.0, 5.0, 0.0]])
    noiseRatio = 0.0
    return (gaborParams, sinusParams, asymetricParams, rectParams,
            noiseRatio, samplingFreq, numberOfSamples)


def loadSyntheticSigmalFromEEGLABFile(nameOfFile):
    '''Load a synthetic signal stored in an EEGLAB .mat file.

    Returns (data, time, info) where data is (trials, channels, samples)
    and info holds sampling metadata read from the EEG struct.
    '''
    structure = loadmat(nameOfFile)
    data = structure['EEG']['data'][0][0]
    data = data.transpose([2, 0, 1])
    info = {}
    info['samplingFreq'] = structure['EEG']['srate'][0][0][0][0]
    info['numberOfChannels'] = structure['EEG']['nbchan'][0][0][0][0]
    info['numberOfSamples'] = structure['EEG']['pnts'][0][0][0][0]
    info['numberOfSeconds'] = (structure['EEG']['pnts'][0][0][0][0]
                               / info['samplingFreq'])
    info['numberOfTrials'] = structure['EEG']['trials'][0][0][0][0]
    time = np.arange(0, info['numberOfSeconds'], 1. / info['samplingFreq'])
    return (data, time, info)
"""Read GSettings defaults from installed schema XML and override files,
plus a small typed wrapper around ConfigParser."""

import ast
import glob
import logging
import ConfigParser

from lxml import etree

log = logging.getLogger('CommonSetting')


class RawConfigSetting(object):
    '''Typed access to an INI-style settings file; just pass the file path.'''

    def __init__(self, path, type=type):
        # NOTE: the default for ``type`` is the builtin ``type`` itself,
        # meaning "no explicit type given"; the odd signature is kept for
        # backward compatibility with existing callers.
        self._type = type
        self._path = path
        self.init_configparser()

    def _type_convert_set(self, value):
        """Serialize a Python value into the on-disk string representation."""
        if type(value) == bool:
            if value == True:
                value = 'true'
            elif value == False:
                value = 'false'

        # This is a hard code str type, so store "'xxx'" instead of 'xxx'
        if self._type == str:
            value = "'%s'" % value

        return value

    def _type_convert_get(self, value):
        """Parse the on-disk string representation back into a Python value."""
        if value == 'false':
            value = False
        elif value == 'true':
            value = True

        # Quoted strings are stored as "'xxx'"; unwrap them.  literal_eval
        # instead of eval: config files must not be able to execute code.
        if self._type == str or type(value) == str:
            if (value.startswith('"') and value.endswith('"')) or \
                    (value.startswith("'") and value.endswith("'")):
                value = ast.literal_eval(value)

        return value

    def init_configparser(self):
        self._configparser = ConfigParser.ConfigParser()
        self._configparser.read(self._path)

    def sections(self):
        return self._configparser.sections()

    def options(self, section):
        return self._configparser.options(section)

    def set_value(self, section, option, value):
        """Write one option and persist the whole file, then re-read it."""
        value = self._type_convert_set(value)

        if not self._configparser.has_section(section):
            self._configparser.add_section(section)

        self._configparser.set(section, option, value)
        with open(self._path, 'wb') as configfile:
            self._configparser.write(configfile)

        self.init_configparser()

    def get_value(self, section, option):
        """Read one option, converted according to the configured type."""
        if self._type:
            if self._type == int:
                getfunc = getattr(self._configparser, 'getint')
            elif self._type == float:
                getfunc = getattr(self._configparser, 'getfloat')
            elif self._type == bool:
                getfunc = getattr(self._configparser, 'getboolean')
            else:
                getfunc = getattr(self._configparser, 'get')

            value = getfunc(section, option)
        else:
            log.debug("No type message, so use the generic get")
            value = self._configparser.get(section, option)

        value = self._type_convert_get(value)
        return value


class Schema(object):
    """Lazy reader for installed GSettings schema defaults.

    Values are resolved in priority order: *.gschema.override files first,
    then the schema XML defaults.  All lookups are cached at class level.
    """

    cached_schema = {}
    cached_schema_tree = {}
    cached_override = {}

    @classmethod
    def load_override(cls):
        """Populate cached_override from every installed override file."""
        log.debug("\tLoading override")
        for override in glob.glob('/usr/share/glib-2.0/schemas/*.gschema.override'):
            try:
                cs = RawConfigSetting(override)
                for section in cs.sections():
                    cls.cached_override[section] = {}
                    for option in cs.options(section):
                        cls.cached_override[section][option] = \
                                cs.get_value(section, option)
            except Exception as e:
                # Keep going: one broken override must not kill the rest.
                # (Also a fix: the caught exception was never logged before.)
                log.error('Error while parsing override file %s: %s'
                          % (override, e))

    @classmethod
    def load_schema(cls, schema_id, key):
        """Return the default for key in schema_id, or None if not found."""
        log.debug("Loading schema value for: %s/%s" % (schema_id, key))
        if not cls.cached_override:
            cls.load_override()

        if schema_id in cls.cached_override and \
                key in cls.cached_override[schema_id]:
            return cls.cached_override[schema_id][key]

        if schema_id in cls.cached_schema and \
                key in cls.cached_schema[schema_id]:
            return cls.cached_schema[schema_id][key]

        schema_defaults = {}

        for schema_path in glob.glob('/usr/share/glib-2.0/schemas/*'):
            if not schema_path.endswith('.gschema.xml') and \
                    not schema_path.endswith('.enums.xml'):
                # TODO deal with enums
                continue

            if schema_path in cls.cached_schema_tree:
                tree = cls.cached_schema_tree[schema_path]
            else:
                # Parse from the path directly (no leaked file handle) and --
                # BUG FIX -- actually populate the tree cache, which was read
                # but never written before, so every lookup re-parsed the XML.
                tree = etree.parse(schema_path)
                cls.cached_schema_tree[schema_path] = tree

            for schema_node in tree.findall('schema'):
                if schema_node.attrib.get('id') == schema_id:
                    for key_node in schema_node.findall('key'):
                        if key_node.findall('default'):
                            schema_defaults[key_node.attrib['name']] = \
                                    cls.parse_value(key_node)
                else:
                    continue

        cls.cached_schema[schema_id] = schema_defaults

        if key in schema_defaults:
            return schema_defaults[key]
        else:
            return None

    @classmethod
    def parse_value(cls, key_node):
        """Convert a <key> node's <default> text by its declared type."""
        log.debug("Try to get type for value: %s" % key_node.items())
        value = key_node.find('default').text

        # TODO enum type
        if key_node.attrib.get('type'):
            type = key_node.attrib['type']

            if type == 'b':
                if value == 'true':
                    return True
                else:
                    return False
            elif type == 'i':
                return int(value)
            elif type == 'd':
                return float(value)
            elif type == 'as':
                # literal_eval instead of eval: schema files are system data,
                # but they should never be able to execute code.
                return ast.literal_eval(value)

        return ast.literal_eval(value)
""" autoxml is a metaclass for automatic XML translation, using a miniature type system. (w00t!) This is based on an excellent high-level XML processing prototype that Gurer prepared. Method names are mixedCase for compatibility with minidom, an old library. """ import locale import codecs import types import formatter import sys from StringIO import StringIO import gettext __trans = gettext.translation('pisi', fallback=True) _ = __trans.ugettext import pisi from pisi.exml.xmlext import * from pisi.exml.xmlfile import XmlFile import pisi.context as ctx import pisi.util as util import pisi.oo as oo class Error(pisi.Error): pass mandatory, optional = range(2) # poor man's enum String = types.StringType Text = types.UnicodeType Integer = types.IntType Long = types.LongType Float = types.FloatType class LocalText(dict): """Handles XML tags with localized text""" def __init__(self, tag = "", req = optional): self.tag = tag self.req = req dict.__init__(self) def decode(self, node, errs, where = ""): # flags, tag name, instance attribute assert self.tag != '' nodes = getAllNodes(node, self.tag) if not nodes: if self.req == mandatory: errs.append(where + ': ' + _("At least one '%s' tag should have local text") % self.tag ) else: for node in nodes: lang = getNodeAttribute(node, 'xml:lang') c = getNodeText(node) if not c: errs.append(where + ': ' + _("'%s' language of tag '%s' is empty") % (lang, self.tag)) # FIXME: check for dups and 'en' if not lang: lang = 'en' self[lang] = c def encode(self, node, errs): assert self.tag != '' for key in self.iterkeys(): newnode = addNode(node, self.tag) setNodeAttribute(newnode, 'xml:lang', key) addText(newnode, '', self[key].encode('utf8')) #FIXME: maybe more appropriate for pisi.util @staticmethod def get_lang(): try: (lang, encoding) = locale.getlocale() if not lang: (lang, encoding) = locale.getdefaultlocale() if lang==None: # stupid python means it is C locale return 'en' else: return lang[0:2] except: raise Error(_('LocalText: 
unable to get either current or default locale')) def errors(self, where = unicode()): errs = [] langs = [ LocalText.get_lang(), 'en', 'tr', ] if not util.any(lambda x : self.has_key(x), langs): errs.append( where + ': ' + _("Tag should have at least the current locale, or failing that an English or Turkish version")) #FIXME: check if all entries are unicode return errs def format(self, f, errs): L = LocalText.get_lang() if self.has_key(L): f.add_flowing_data(self[L]) elif self.has_key('en'): # fallback to English, blah f.add_flowing_data(self['en']) elif self.has_key('tr'): # fallback to Turkish f.add_flowing_data(self['tr']) else: errs.append(_("Tag should have at least the current locale, or failing that an English or Turkish version")) #FIXME: factor out these common routines def print_text(self, file = sys.stdout): w = Writer(file) # plain text f = formatter.AbstractFormatter(w) errs = [] self.format(f, errs) if errs: for x in errs: ctx.ui.warning(x) def __str__(self): L = LocalText.get_lang() if self.has_key(L): return self[L] elif self.has_key('en'): # fallback to English, blah return self['en'] elif self.has_key('tr'): # fallback to Turkish return self['tr'] else: return "" class Writer(formatter.DumbWriter): """adds unicode support""" def __init__(self, file=None, maxcol=78): formatter.DumbWriter.__init__(self, file, maxcol) def send_literal_data(self, data): self.file.write(data.encode("utf-8")) i = data.rfind('\n') if i >= 0: self.col = 0 data = data[i+1:] data = data.expandtabs() self.col = self.col + len(data) self.atbreak = 0 class autoxml(oo.autosuper, oo.autoprop): """High-level automatic XML transformation interface for xmlfile. The idea is to declare a class for each XML tag. Inside the class the tags and attributes nested in the tag are further elaborated. 
A simple example follows: class Employee: __metaclass__ = autoxml t_Name = [xmlfile.Text, xmlfile.mandatory] a_Type = [xmlfile.Integer, xmlfile.optional] This class defines a tag and an attribute nested in Employee class. Name is a string and type is an integer, called basic types. While the tag is mandatory, the attribute may be left out. Other basic types supported are: xmlfile.Float, xmlfile.Double and (not implemented yet): xmlfile.Binary By default, the class name is taken as the corresponding tag, which may be overridden by defining a tag attribute. Thus, the same tag may also be written as: class EmployeeXML: ... tag = 'Employee' ... In addition to basic types, we allow for two kinds of complex types: class types and list types. A declared class can be nested in another class as follows class Position: __metaclass__ = autoxml t_Name = [xmlfile.Text, xmlfile.mandatory] t_Description = [xmlfile.Text, xmlfile.optional] which we can add to our Employee class. class Employee: __metaclass__ = autoxml t_Name = [xmlfile.Text, xmlfile.mandatory] a_Type = [xmlfile.Integer, xmlfile.optional] t_Position = [Position, xmlfile.mandatory] Note some unfortunate redundancy here with Position; this is justified by the implementation (kidding). Still, you might want to assign a different name than the class name that goes in there, which may be fully qualified. There is more! Suppose we want to define a company, with of course many employees. class Company: __metaclass__ = autoxml t_Employees = [ [Employee], xmlfile.mandatory, 'Employees/Employee'] Logically, inside the Company/Employees tag, we will have several Employee tags, which are inserted to the Employees instance variable of Company in order of appearance. We can define lists of any other valid type. Here we used a list of an autoxml class defined above. The mandatory flag here asserts that at least one such record is to be found. You see, it works like magic, when it works of course. 
All of it done without a single brain exploding. """ def __init__(cls, name, bases, dict): """entry point for metaclass code""" #print 'generating class', name # standard initialization super(autoxml, cls).__init__(name, bases, dict) xmlfile_support = XmlFile in bases cls.autoxml_bases = filter(lambda base: isinstance(base, autoxml), bases) #TODO: initialize class attribute __xml_tags #setattr(cls, 'xml_variables', []) # default class tag is class name if not dict.has_key('tag'): cls.tag = name # generate helper routines, for each XML component names = [] inits = [] decoders = [] encoders = [] errorss = [] formatters = [] # read declaration order from source # code contributed by bahadir kandemir from inspect import getsourcelines from itertools import ifilter import re fn = re.compile('\s*([tas]_[a-zA-Z]+).*').findall lines = filter(fn, getsourcelines(cls)[0]) decl_order = map(lambda x:x.split()[0], lines) # there should be at most one str member, and it should be # the first to process order = filter(lambda x: not x.startswith('s_'), decl_order) # find string member str_members = filter(lambda x:x.startswith('s_'), decl_order) if len(str_members)>1: raise Error('Only one str member can be defined') elif len(str_members)==1: order.insert(0, str_members[0]) for var in order: if var.startswith('t_') or var.startswith('a_') or var.startswith('s_'): name = var[2:] if var.startswith('a_'): x = autoxml.gen_attr_member(cls, name) elif var.startswith('t_'): x = autoxml.gen_tag_member(cls, name) elif var.startswith('s_'): x = autoxml.gen_str_member(cls, name) (name, init, decoder, encoder, errors, format_x) = x names.append(name) inits.append(init) decoders.append(decoder) encoders.append(encoder) errorss.append(errors) formatters.append(format_x) # generate top-level helper functions cls.initializers = inits def initialize(self, uri = None, keepDoc = False, tmpDir = '/tmp', **args): if xmlfile_support: if args.has_key('tag'): XmlFile.__init__(self, tag = args['tag']) 
else: XmlFile.__init__(self, tag = cls.tag) for base in cls.autoxml_bases: base.__init__(self) #super(cls, self).__init__(tag = tag) cooperative shit disabled for now for init in inits:#self.__class__.initializers: init(self) for x in args.iterkeys(): setattr(self, x, args[x]) # init hook if hasattr(self, 'init'): self.init(tag) if xmlfile_support and uri: self.read(uri, keepDoc, tmpDir) cls.__init__ = initialize cls.decoders = decoders def decode(self, node, errs, where = unicode(cls.tag)): for base in cls.autoxml_bases: base.decode(self, node, errs, where) for decode_member in decoders:#self.__class__.decoders: decode_member(self, node, errs, where) if hasattr(self, 'decode_hook'): self.decode_hook(node, errs, where) cls.decode = decode cls.encoders = encoders def encode(self, node, errs): for base in cls.autoxml_bases: base.encode(self, node, errs) for encode_member in encoders:#self.__class__.encoders: encode_member(self, node, errs) if hasattr(self, 'encode_hook'): self.encode_hook(node, errs) cls.encode = encode cls.errorss = errorss def errors(self, where = unicode(name)): errs = [] for base in cls.autoxml_bases: errs.extend(base.errors(self, where)) for errors in errorss:#self.__class__.errorss: errs.extend(errors(self, where)) if hasattr(self, 'errors_hook'): errs.extend(self.errors_hook(where)) return errs cls.errors = errors def check(self): errs = self.errors() if errs: errs.append(_("autoxml.check: '%s' errors") % len(errs)) raise Error(*errs) cls.check = check cls.formatters = formatters def format(self, f, errs): for base in cls.autoxml_bases: base.format(self, f, errs) for formatter in formatters:#self.__class__.formatters: formatter(self, f, errs) cls.format = format def print_text(self, file = sys.stdout): w = Writer(file) # plain text f = formatter.AbstractFormatter(w) errs = [] self.format(f, errs) if errs: for x in errs: ctx.ui.warning(x) cls.print_text = print_text if not dict.has_key('__str__'): def str(self): strfile = StringIO(u'') 
self.print_text(strfile) print 'strfile=',unicode(strfile) s = strfile.getvalue() strfile.close() print 's=',s,type(s) return s cls.__str__ = str if not dict.has_key('__eq__'): def equal(self, other): # handle None if other ==None: return False # well, must be False at this point :) for name in names: try: if getattr(self, name) != getattr(other, name): return False except: return False return True def notequal(self, other): return not self.__eq__(other) cls.__eq__ = equal cls.__ne__ = notequal if xmlfile_support: def read(self, uri, keepDoc = False, tmpDir = '/tmp', sha1sum = False, compress = None, sign = None, copylocal = False): "read XML file and decode it into a python object" self.readxml(uri, tmpDir, sha1sum=sha1sum, compress=compress, sign=sign, copylocal=copylocal) errs = [] self.decode(self.rootNode(), errs) if errs: errs.append(_("autoxml.read: File '%s' has errors") % uri) raise Error(*errs) if hasattr(self, 'read_hook'): self.read_hook(errs) if not keepDoc: self.unlink() # get rid of the tree errs = self.errors() if errs: errs.append(_("autoxml.read: File '%s' has errors") % uri) raise Error(*errs) def write(self, uri, keepDoc = False, tmpDir = '/tmp', sha1sum = False, compress = None, sign = None): "encode the contents of the python object into an XML file" errs = self.errors() if errs: errs.append(_("autoxml.write: object validation has failed")) raise Error(*errs) errs = [] self.newDocument() self.encode(self.rootNode(), errs) if hasattr(self, 'write_hook'): self.write_hook(errs) if errs: errs.append(_("autoxml.write: File encoding '%s' has errors") % uri) raise Error(*errs) self.writexml(uri, tmpDir, sha1sum=sha1sum, compress=compress, sign=sign) if not keepDoc: self.unlink() # get rid of the tree cls.read = read cls.write = write def gen_attr_member(cls, attr): """generate readers and writers for an attribute member""" #print 'attr:', attr spec = getattr(cls, 'a_' + attr) tag_type = spec[0] assert type(tag_type) == type(type) def readtext(node, 
attr): return getNodeAttribute(node, attr) def writetext(node, attr, text): #print 'write attr', attr, text setNodeAttribute(node, attr, text) anonfuns = cls.gen_anon_basic(attr, spec, readtext, writetext) return cls.gen_named_comp(attr, spec, anonfuns) def gen_tag_member(cls, tag): """generate helper funs for tag member of class""" #print 'tag:', tag spec = getattr(cls, 't_' + tag) anonfuns = cls.gen_tag(tag, spec) return cls.gen_named_comp(tag, spec, anonfuns) def gen_tag(cls, tag, spec): """generate readers and writers for the tag""" tag_type = spec[0] if type(tag_type) is types.TypeType and \ autoxml.basic_cons_map.has_key(tag_type): def readtext(node, tagpath): #print 'read tag', node, tagpath return getNodeText(node, tagpath) def writetext(node, tagpath, text): #print 'write tag', node, tagpath, text addText(node, tagpath, text.encode('utf8')) return cls.gen_anon_basic(tag, spec, readtext, writetext) elif type(tag_type) is types.ListType: return cls.gen_list_tag(tag, spec) elif tag_type is LocalText: return cls.gen_insetclass_tag(tag, spec) elif type(tag_type) is autoxml or type(tag_type) is types.TypeType: return cls.gen_class_tag(tag, spec) else: raise Error(_('gen_tag: unrecognized tag type %s in spec') % str(tag_type)) def gen_str_member(cls, token): """generate readers and writers for a string member""" spec = getattr(cls, 's_' + token) tag_type = spec[0] assert type(tag_type) == type(type) def readtext(node, blah): #node.normalize() # piksemel doesn't have this return getNodeText(node) def writetext(node, blah, text): #print 'writing', text, type(text) addText(node, "", text.encode('utf-8')) anonfuns = cls.gen_anon_basic(token, spec, readtext, writetext) return cls.gen_named_comp(token, spec, anonfuns) def gen_named_comp(cls, token, spec, anonfuns): """generate a named component tag/attr. 
a decoration of anonymous functions that do not bind to variable names""" name = cls.mixed_case(token) token_type = spec[0] req = spec[1] (init_a, decode_a, encode_a, errors_a, format_a) = anonfuns def init(self): """initialize component""" setattr(self, name, init_a()) def decode(self, node, errs, where): """decode component from DOM node""" #print '*', name setattr(self, name, decode_a(node, errs, where + '.' + unicode(name))) def encode(self, node, errs): """encode self inside, possibly new, DOM node using xml""" if hasattr(self, name): value = getattr(self, name) else: value = None encode_a(node, value, errs) def errors(self, where): """return errors in the object""" errs = [] if hasattr(self, name) and getattr(self, name) != None: value = getattr(self,name) errs.extend(errors_a(value, where + '.' + name)) else: if req == mandatory: errs.append(where + ': ' + _('Mandatory variable %s not available') % name) return errs def format(self, f, errs): if hasattr(self, name): value = getattr(self,name) f.add_literal_data(token + ': ') format_a(value, f, errs) f.add_line_break() else: if req == mandatory: errs.append(_('Mandatory variable %s not available') % name) return (name, init, decode, encode, errors, format) def mixed_case(cls, identifier): """helper function to turn token name into mixed case""" if identifier is "": return "" else: if identifier[0]=='I': lowly = 'i' # because of pythonic idiots we can't choose locale in lower else: lowly = identifier[0].lower() return lowly + identifier[1:] def tagpath_head_last(cls, tagpath): "returns split of the tag path into last tag and the rest" try: lastsep = tagpath.rindex('/') except ValueError, e: return ('', tagpath) return (tagpath[:lastsep], tagpath[lastsep+1:]) def parse_spec(cls, token, spec): """decompose member specification""" name = cls.mixed_case(token) token_type = spec[0] req = spec[1] if len(spec)>=3: path = spec[2] # an alternative path specified elif type(token_type) is type([]): if type(token_type[0]) 
is autoxml: # if list of class, by default nested like in most PSPEC path = token + '/' + token_type[0].tag else: # if list of ordinary type, just take the name for path = token elif type(token_type) is autoxml: # if a class, by default its tag path = token_type.tag else: path = token # otherwise it's the same name as # the token return name, token_type, req, path def gen_anon_basic(cls, token, spec, readtext, writetext): """Generate a tag or attribute with one of the basic types like integer. This has got to be pretty generic so that we can invoke it from the complex types such as Class and List. The readtext and writetext arguments achieve the DOM text access for this datatype.""" name, token_type, req, tagpath = cls.parse_spec(token, spec) def initialize(): """default value for all basic types is None""" return None def decode(node, errs, where): """decode from DOM node, the value, watching the spec""" #text = unicode(readtext(node, token), 'utf8') # CRUFT FIXME text = readtext(node, token) #print 'decoding', token_type, text, type(text), '.' if text: try: #print token_type, autoxml.basic_cons_map[token_type] value = autoxml.basic_cons_map[token_type](text) except Exception, e: print 'exception', e value = None errs.append(where + ': ' + _('Type mismatch: read text cannot be decoded')) return value else: if req == mandatory: errs.append(where + ': ' + _('Mandatory token %s not available') % token) return None def encode(node, value, errs): """encode given value inside DOM node""" if value: writetext(node, token, unicode(value)) else: if req == mandatory: errs.append(_('Mandatory token %s not available') % token) def errors(value, where): errs = [] if value and not isinstance(value, token_type): errs.append(where + ': ' + _('Type mismatch. 
Expected %s, got %s') % (token_type, type(value)) ) return errs def format(value, f, errs): """format value for pretty printing""" f.add_literal_data(unicode(value)) return initialize, decode, encode, errors, format def gen_class_tag(cls, tag, spec): """generate a class datatype""" name, tag_type, req, path = cls.parse_spec(tag, spec) def make_object(): obj = tag_type.__new__(tag_type) obj.__init__(tag=tag, req=req) return obj def init(): return make_object() def decode(node, errs, where): node = getNode(node, tag) if node: try: obj = make_object() obj.decode(node, errs, where) return obj except Error: errs.append(where + ': '+ _('Type mismatch: DOM cannot be decoded')) else: if req == mandatory: errs.append(where + ': ' + _('Mandatory argument not available')) return None def encode(node, obj, errs): if node and obj: try: #FIXME: this doesn't look pretty classnode = newNode(node, tag) obj.encode(classnode, errs) addNode(node, '', classnode) except Error: if req == mandatory: # note: we can receive an error if obj has no content errs.append(_('Object cannot be encoded')) else: if req == mandatory: errs.append(_('Mandatory argument not available')) def errors(obj, where): return obj.errors(where) def format(obj, f, errs): try: obj.format(f, errs) except Error: if req == mandatory: errs.append(_('Mandatory argument not available')) return (init, decode, encode, errors, format) def gen_list_tag(cls, tag, spec): """generate a list datatype. 
stores comps in tag/comp_tag""" name, tag_type, req, path = cls.parse_spec(tag, spec) pathcomps = path.split('/') comp_tag = pathcomps.pop() list_tagpath = util.makepath(pathcomps, sep='/', relative=True) if len(tag_type) != 1: raise Error(_('List type must contain only one element')) x = cls.gen_tag(comp_tag, [tag_type[0], mandatory]) (init_item, decode_item, encode_item, errors_item, format_item) = x def init(): return [] def decode(node, errs, where): l = [] nodes = getAllNodes(node, path) #print node, tag + '/' + comp_tag, nodes if len(nodes)==0 and req==mandatory: errs.append(where + ': ' + _('Mandatory list empty')) ix = 1 for node in nodes: dummy = newNode(node, "Dummy") addNode(dummy, '', node) l.append(decode_item(dummy, errs, where + unicode("[%s]" % ix, 'utf8'))) #l.append(decode_item(node, errs, where + unicode("[%s]" % ix))) ix += 1 return l def encode(node, l, errs): if l and len(l) > 0: for item in l: if list_tagpath: listnode = addNode(node, list_tagpath, branch = False) else: listnode = node encode_item(listnode, item, errs) #encode_item(node, item, errs) else: if req is mandatory: errs.append(_('Mandatory list empty')) def errors(l, where): errs = [] ix = 1 for node in l: errs.extend(errors_item(node, where + '[%s]' % ix)) ix += 1 return errs def format(l, f, errs): # TODO: indent here ix = 1 length = len(l) for node in l: f.add_flowing_data(str(ix) + ': ') format_item(node, f, errs) if ix != length: f.add_flowing_data(', ') ix += 1 return (init, decode, encode, errors, format) def gen_insetclass_tag(cls, tag, spec): """generate a class datatype that is highly integrated don't worry if that means nothing to you. this is a silly hack to implement local text quickly. it's not the most elegant thing in the world. 
it's basically a copy of class tag""" name, tag_type, req, path = cls.parse_spec(tag, spec) def make_object(): obj = tag_type.__new__(tag_type) obj.__init__(tag=tag, req=req) return obj def init(): return make_object() def decode(node, errs, where): if node: try: obj = make_object() obj.decode(node, errs, where) return obj except Error: errs.append(where + ': ' + _('Type mismatch: DOM cannot be decoded')) else: if req == mandatory: errs.append(where + ': ' + _('Mandatory argument not available')) return None def encode(node, obj, errs): if node and obj: try: #FIXME: this doesn't look pretty obj.encode(node, errs) except Error: if req == mandatory: # note: we can receive an error if obj has no content errs.append(_('Object cannot be encoded')) else: if req == mandatory: errs.append(_('Mandatory argument not available')) def errors(obj, where): return obj.errors(where) def format(obj, f, errs): try: obj.format(f, errs) except Error: if req == mandatory: errs.append(_('Mandatory argument not available')) return (init, decode, encode, errors, format) basic_cons_map = { types.StringType : str, #TODO: python 3.x: same behavior? #python 2.x: basic_cons_map[unicode](a) where a is unicode str yields #TypeError: decoding Unicode is not supported #types.UnicodeType : lambda x: unicode(x,'utf8'), lambda x:x? types.UnicodeType : lambda x:x, #: unicode types.IntType : int, types.FloatType : float, types.LongType : long }
from __future__ import absolute_import
from base64 import b64encode
from socket import error as SocketError
from hashlib import md5, sha1
from binascii import hexlify, unhexlify
import sys

from core.backports.collections import namedtuple

try:
    from select import poll, POLLIN
except ImportError:  # `poll` doesn't exist on OSX and other platforms
    poll = False
    try:
        from select import select
    except ImportError:  # `select` doesn't exist on AppEngine.
        select = False

try:  # Test for SSL features
    SSLContext = None
    HAS_SNI = False

    import ssl
    from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
    from ssl import SSLContext  # Modern SSL?
    from ssl import HAS_SNI  # Has SNI?
except ImportError:
    pass

from .packages import six
from .exceptions import LocationParseError, SSLError


class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path',
                             'query', 'fragment'])):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`.
    """
    # BUG FIX: previously spelled ``slots = ()``, which merely created a
    # useless class attribute. ``__slots__ = ()`` actually suppresses the
    # per-instance ``__dict__`` on this namedtuple subclass.
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'

        if self.query is not None:
            uri += '?' + self.query

        return uri


def split_first(s, delims):
    """
    Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example: ::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of
    delims.
    """
    min_idx = None
    min_delim = None
    # Find the delimiter with the smallest index in `s`.
    for d in delims:
        idx = s.find(d)
        if idx < 0:
            continue

        if min_idx is None or idx < min_idx:
            min_idx = idx
            min_delim = d

    if min_idx is None or min_idx < 0:
        return s, '', None

    # The matched delimiter itself is consumed (not returned in either part).
    return s[:min_idx], s[min_idx + 1:], min_delim


def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Example: ::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)

    :raises LocationParseError: if the port is present but not numeric
        (this includes an empty port, e.g. ``'http://host:/'``).
    """

    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
    # on CPython.

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path (split_first consumed the delimiter).
        path = delim + path_

    # Auth
    if '@' in url:
        auth, url = url.split('@', 1)

    # IPv6: strip the brackets; any ':port' suffix is left in `url`.
    if url and url[0] == '[':
        host, url = url[1:].split(']', 1)

    # Port
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            host = _host

        if not port.isdigit():
            raise LocationParseError("Failed to parse: %s" % url)

        port = int(port)

    elif not host and url:
        host = url

    if not path:
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)


def get_host(url):
    """
    Deprecated. Use :func:`.parse_url` instead.
    """
    p = parse_url(url)
    return p.scheme or 'http', p.hostname, p.port


def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
                 basic_auth=None):
    """
    Shortcuts for generating request headers.

    :param keep_alive:
        If ``True``, adds 'connection: keep-alive' header.

    :param accept_encoding:
        Can be a boolean, list, or string.
        ``True`` translates to 'gzip,deflate'.
        List will get joined by comma.
        String will be used as provided.

    :param user_agent:
        String representing the user-agent you want, such as
        "python-urllib3/0.6"

    :param basic_auth:
        Colon-separated username:password string for 'authorization: basic ...'
        auth header.

    Example: ::

        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
        >>> make_headers(accept_encoding=True)
        {'accept-encoding': 'gzip,deflate'}
    """
    headers = {}
    if accept_encoding:
        if isinstance(accept_encoding, str):
            pass  # use the caller-provided string verbatim
        elif isinstance(accept_encoding, list):
            accept_encoding = ','.join(accept_encoding)
        else:
            accept_encoding = 'gzip,deflate'
        headers['accept-encoding'] = accept_encoding

    if user_agent:
        headers['user-agent'] = user_agent

    if keep_alive:
        headers['connection'] = 'keep-alive'

    if basic_auth:
        headers['authorization'] = 'Basic ' + \
            b64encode(six.b(basic_auth)).decode('utf-8')

    return headers


def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    sock = getattr(conn, 'sock', False)
    if not sock:  # Platform-specific: AppEngine
        return False

    if not poll:
        if not select:  # Platform-specific: AppEngine
            return False

        try:
            return select([sock], [], [], 0.0)[0]
        except SocketError:
            return True

    # This version is better on platforms that support it.
    p = poll()
    p.register(sock, POLLIN)
    for (fno, ev) in p.poll(0.0):
        if fno == sock.fileno():
            # Either data is buffered (bad), or the connection is dropped.
            return True

    # BUG FIX: previously fell off the end returning implicit ``None``;
    # callers truth-test the result, but the documented contract is boolean.
    return False


def resolve_cert_reqs(candidate):
    """
    Resolves the argument to a numeric constant, which can be passed to
    the wrap_socket function/method from the ssl module.
    Defaults to :data:`ssl.CERT_NONE`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbrevation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
    """
    if candidate is None:
        return CERT_NONE

    if isinstance(candidate, str):
        res = getattr(ssl, candidate, None)
        if res is None:
            # Try the abbreviated form, e.g. 'REQUIRED' -> ssl.CERT_REQUIRED.
            res = getattr(ssl, 'CERT_' + candidate)
        return res

    return candidate


def resolve_ssl_version(candidate):
    """
    like resolve_cert_reqs
    """
    if candidate is None:
        return PROTOCOL_SSLv23

    if isinstance(candidate, str):
        res = getattr(ssl, candidate, None)
        if res is None:
            res = getattr(ssl, 'PROTOCOL_' + candidate)
        return res

    return candidate


def assert_fingerprint(cert, fingerprint):
    """
    Checks if given fingerprint matches the supplied certificate.

    :param cert:
        Certificate as bytes object.
    :param fingerprint:
        Fingerprint as string of hexdigits, can be interspersed by colons.

    :raises SSLError: on length mismatch or digest mismatch.
    """

    # Maps the length of a digest to a possible hash function producing
    # this digest.
    hashfunc_map = {
        16: md5,
        20: sha1
    }

    fingerprint = fingerprint.replace(':', '').lower()

    digest_length, rest = divmod(len(fingerprint), 2)

    if rest or digest_length not in hashfunc_map:
        raise SSLError('Fingerprint is of invalid length.')

    # We need encode() here for py32; works on py2 and p33.
    fingerprint_bytes = unhexlify(fingerprint.encode())

    hashfunc = hashfunc_map[digest_length]

    cert_digest = hashfunc(cert).digest()

    if not cert_digest == fingerprint_bytes:
        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
                       .format(hexlify(fingerprint_bytes),
                               hexlify(cert_digest)))


if SSLContext is not None:  # Python 3.2+
    def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                        ca_certs=None, server_hostname=None,
                        ssl_version=None):
        """
        All arguments except `server_hostname` have the same meaning as for
        :func:`ssl.wrap_socket`

        :param server_hostname:
            Hostname of the expected certificate
        """
        context = SSLContext(ssl_version)
        context.verify_mode = cert_reqs
        if ca_certs:
            try:
                context.load_verify_locations(ca_certs)
            # Py32 raises IOError
            # Py33 raises FileNotFoundError
            except Exception:  # Reraise as SSLError
                e = sys.exc_info()[1]
                raise SSLError(e)
        if certfile:
            # FIXME: This block needs a test.
            context.load_cert_chain(certfile, keyfile)
        if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
            return context.wrap_socket(sock, server_hostname=server_hostname)
        return context.wrap_socket(sock)

else:  # Python 3.1 and earlier
    def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                        ca_certs=None, server_hostname=None,
                        ssl_version=None):
        return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
                           ca_certs=ca_certs, cert_reqs=cert_reqs,
                           ssl_version=ssl_version)
import boto3

# Demo: look up an existing SQS queue by name and print two of its
# properties (the queue URL and its configured delivery delay).
sqs_resource = boto3.resource('sqs')
demo_queue = sqs_resource.get_queue_by_name(QueueName='test')

print(demo_queue.url)
print(demo_queue.attributes.get('DelaySeconds'))
import cryspy.numbers
import cryspy.geo
# BUG FIX: ``__str__`` uses ``cryspy.blockprint.block`` for composed
# goniometers, but this submodule was never imported here — it only worked
# if the ``cryspy`` package ``__init__`` happened to pull it in. Import it
# explicitly so the composed pretty-printer cannot fail with AttributeError.
import cryspy.blockprint


class Goniometer:
    """One motion axis (translation or rotation) of a goniometer.

    A Goniometer maps a named scalar parameter to a 4x4 affine operator
    (``cryspy.geo.Operator``). Two goniometers can be composed with ``*``;
    the product carries the union of both parameter names and multiplies
    the underlying operator matrices.
    """

    def __init__(self, motiontype, axis, direction, parametername):
        """Create a single-axis goniometer.

        :param motiontype: ``"translation"`` or ``"rotation"``.
        :param axis: ``"x"``, ``"y"`` or ``"z"``.
        :param direction: ``"positive"``/``"negative"`` for translations,
            ``"clockwise"``/``"counterclockwise"`` for rotations.
        :param parametername: arbitrary string naming the motion parameter.
        """
        assert motiontype in ["translation", "rotation"], \
            "First parameter for creating a Goniometer " \
            "must be one of the strings " \
            "'translation' or 'rotation'."
        assert axis in ["x", "y", "z"], \
            "Second parameter for creating a Goniometer " \
            "must be one of the strings 'x', 'y' or 'z'"
        if motiontype == "translation":
            assert direction in ["positive", "negative"], \
                "Third parameter for creating a Goniometer " \
                "for translation must be one of the strings " \
                "'positive' or 'negative'"
        elif motiontype == "rotation":
            assert direction in ["clockwise", "counterclockwise"], \
                "Third parameter for creating a Goniometer for " \
                "rotation must be one of the strings "\
                "'clockwise' or 'counterclockwise'"
        assert isinstance(parametername, str), \
            "Fourth parameter for creating a Goniometer must be " \
            "of type str. You can use any string."
        # ``composed`` goniometers (built via ``__mul__``) delegate to
        # ``lower_gonio`` / ``upper_gonio`` instead of these fields.
        self.composed = False
        self.motiontype = motiontype
        self.axis = axis
        self.direction = direction
        self.parameternames = [parametername]

    def operator(self, parameters):
        """Return the affine operator for the given parameter values.

        :param parameters: dict mapping parameter names to values; must
            contain this goniometer's parameter (checked only for simple,
            non-composed goniometers).
        :returns: a ``cryspy.geo.Operator`` (homogeneous 4x4 matrix).
        """
        assert isinstance(parameters, dict), \
            "Parameter of cryspy.lab.Goniometer.operator() must be a " \
            "dictionary"
        if not self.composed:
            assert self.parameternames[0] in parameters.keys(), \
                "You must specify the parameter called '%s'."\
                %(self.parameternames[0])
            parameter = parameters[self.parameternames[0]]
            if self.motiontype == "translation":
                # A "negative" axis just flips the sign of the shift.
                if self.direction == "negative":
                    parameter = -parameter
                if self.axis == "x":
                    return cryspy.geo.Operator(
                        cryspy.numbers.Matrix(
                            [[1, 0, 0, parameter],
                             [0, 1, 0, 0],
                             [0, 0, 1, 0],
                             [0, 0, 0, 1]
                            ]
                        )
                    )
                if self.axis == "y":
                    return cryspy.geo.Operator(
                        cryspy.numbers.Matrix(
                            [[1, 0, 0, 0],
                             [0, 1, 0, parameter],
                             [0, 0, 1, 0],
                             [0, 0, 0, 1]
                            ]
                        )
                    )
                if self.axis == "z":
                    return cryspy.geo.Operator(
                        cryspy.numbers.Matrix(
                            [[1, 0, 0, 0],
                             [0, 1, 0, 0],
                             [0, 0, 1, parameter],
                             [0, 0, 0, 1]
                            ]
                        )
                    )
            elif self.motiontype == "rotation":
                # "clockwise" flips the rotation angle; dcos/dsin take degrees
                # (naming suggests degree-based trig — confirm in
                # cryspy.numbers).
                if self.direction == "clockwise":
                    parameter = -parameter
                cos = cryspy.numbers.dcos(parameter)
                sin = cryspy.numbers.dsin(parameter)
                if self.axis == "x":
                    return cryspy.geo.Operator(
                        cryspy.numbers.Matrix(
                            [[1, 0, 0, 0],
                             [0, cos, -sin, 0],
                             [0, sin, cos, 0],
                             [0, 0, 0, 1]
                            ]
                        )
                    )
                if self.axis == "y":
                    return cryspy.geo.Operator(
                        cryspy.numbers.Matrix(
                            [[ cos, 0, sin, 0],
                             [ 0, 1, 0, 0],
                             [-sin, 0, cos, 0],
                             [ 0, 0, 0, 1]
                            ]
                        )
                    )
                if self.axis == "z":
                    return cryspy.geo.Operator(
                        cryspy.numbers.Matrix(
                            [[cos, -sin, 0, 0],
                             [sin, cos, 0, 0],
                             [ 0, 0, 1, 0],
                             [ 0, 0, 0, 1]
                            ]
                        )
                    )
        else:
            # Composed goniometer: multiply the two sub-operators' matrices.
            return cryspy.geo.Operator(
                self.lower_gonio.operator(parameters).value
                * self.upper_gonio.operator(parameters).value)

    def __str__(self):
        """Return an ASCII-art description of the motion (or composition)."""
        if not self.composed:
            if self.motiontype == "translation":
                return " / translate by \\ \n" \
                       "| %16s |\n" \
                       "| along |\n" \
                       "| %s-axis |\n" \
                       " \\ %8s / "\
                       %(self.parameternames[0], self.axis, self.direction)
            elif self.motiontype == "rotation":
                return " / rotate by \\ \n" \
                       "| %16s |\n" \
                       "| around |\n" \
                       "| %s-axis |\n" \
                       " \\ %16s / "\
                       %(self.parameternames[0], self.axis, self.direction)
        else:
            return cryspy.blockprint.block([[str(self.lower_gonio),
                                             " \n \n*\n \n",
                                             str(self.upper_gonio)]])

    def __mul__(self, right):
        """Compose two goniometers into a single composed Goniometer.

        Parameter names of the two operands must be disjoint; the result's
        ``parameternames`` is their concatenation.
        """
        if isinstance(right, Goniometer):
            for parametername in right.parameternames:
                assert parametername not in self.parameternames, \
                    "Cannot multiply two Goniometers which have " \
                    "both the parameter '%s'."%(parametername)
            # Build a throwaway valid instance, then turn it into a
            # composed node that only delegates to its two children.
            result = Goniometer("translation", "x", "positive", "dummy")
            result.composed = True
            result.motiontype = None
            result.axis = None
            result.direction = None
            result.parameternames = self.parameternames + right.parameternames
            result.lower_gonio = self
            result.upper_gonio = right
            return result
        else:
            return NotImplemented
import sys # NOQA import profile import ConfigParser import pygame from pygame import * from static_functions import * import camera as camera import planet as planet from orbitable import GCD_Singleton, SoundSystem_Singleton from helldebris_collection import HellDebrisCollection from team import Team from simplestats import SimpleStats wwidth = 1024 wheight = 768 p1_name = "Player1" p2_name = "Player2" config = ConfigParser.RawConfigParser() config.read('profile.cfg') wwidth = config.getint("Screen", "width") wheight = config.getint("Screen", "height") p1_name = config.get("Player", "P1_name") p2_name = config.get("Player", "P2_name") display = (wwidth, wheight) clock = pygame.time.Clock() class Profile(): def __init__(self, is_player2_present=False, is_player1_ai=False, is_player2_ai=False, player1_team="Green", player2_team="Red", greenteamsize=8, redteamsize=8, debris_min=6, debris_max=20, draw_planet=False, name=""): self.p2 = is_player2_present self.p1_ai = is_player1_ai self.p2_ai = is_player2_ai self.p1_team = player1_team self.p2_team = player2_team mingreen = int(self.p1_team == "Green") + int(self.p2_team == "Green" and self.p2) minred = int(self.p1_team == "Red") + int(self.p2_team == "Red" and self.p2) self.green = max(mingreen, greenteamsize) self.red = max(minred, redteamsize) self.hellmin = debris_min self.hellmax = debris_max self.draw_planet = draw_planet self.name = name self.ERAD = 1000 self.MAXRAD = 1700 self.ORBHEIGHT = 350 def game_init(self): pygame.init() self.PROFILESTEP = False self.UPDAE_GAME = pygame.USEREVENT + 1 pygame.time.set_timer(self.UPDAE_GAME, GAME_SPEED) self.screen = pygame.display.set_mode(display) if self.p2: self.bg1 = Surface((wwidth, wheight/2)) self.bg2 = Surface((wwidth, wheight/2)) self.cam2 = camera.Camera(self.bg2, first_in_order=False) self.bgs = (self.bg1, self.bg2) else: self.bg1 = Surface((wwidth, wheight)) self.bgs = (self.bg1,) self.cam1 = camera.Camera(self.bg1) if self.name == "": 
pygame.display.set_caption("Orbotor") else: pygame.display.set_caption("Orbotor - %s" % self.name) self.pl = planet.Planet(self.bgs, self.ERAD, self.MAXRAD, "planet.png" if self.draw_planet else None) GCD_Singleton.set_planet(self.pl) self.soundsys = SoundSystem_Singleton self.spawn = (self.ERAD+self.ORBHEIGHT, 0) self.team1 = Team("Green", "#009900", self.green, self.spawn, self.pl) self.team2 = Team("Red", "#880000", self.red, self.spawn, self.pl) self.team1.set_opponent_team(self.team2) self.team2.set_opponent_team(self.team1) self.hell = HellDebrisCollection(self.spawn, self.pl, self.hellmin, self.hellmax) if self.p1_team == "Green": self.player1 = self.team1.guys[0] if self.p2: if self.p2_team == "Green": self.player2 = self.team1.guys[1] elif self.p2_team == "Red": self.player2 = self.team2.guys[0] else: raise Exception("unknown team for p2: %s" % self.p2_team) elif self.p1_team == "Red": self.player1 = team2.guys[0] if self.p2: if self.p2_team == "Green": self.player2 = self.team1.guys[0] elif self.p2_team == "Red": self.player2 = self.team2.guys[1] else: raise Exception("unknown team for p2: %s" % self.p2_team) else: raise Exception("unknown team for p1: %s" % self.p1_team) self.player1.is_ai = self.p1_ai if self.p1_ai: self.player1.set_name("[bot] %s" % p1_name) else: self.player1.set_name("%s" % p1_name) if self.p2: self.player2.is_ai = self.p2_ai if self.p2_ai: self.player2.set_name("[bot] %s" % p2_name) else: self.player2.set_name("%s" % p2_name) self.stats1 = SimpleStats(self.team1, self.team2, self.player1) if self.p2: self.stats2 = SimpleStats(self.team1, self.team2, self.player2) def game_key_listen(self, event): if event.type == KEYDOWN and event.key == K_F1: self.PROFILESTEP = True self.game_step() elif event.type == KEYDOWN and event.key == K_F2: print len(GCD_Singleton.orbitables) elif event.type == KEYDOWN and event.key == K_F5: self.soundsys.switch() if not self.p1_ai: self.player1.catch_kb_event(event) if self.p2 and not self.p2_ai: 
self.player2.catch_kb_event_hotseat(event) self.cam1.keys_listen(event) if self.p2: self.cam2.keys_listen_hotseat(event) def game_step(self): if self.PROFILESTEP: profile.runctx("self._step()", globals(), {"self": self}) else: self._step() def _step(self): self.team2.step() # todo faster better stronger self.team1.step() self.hell.step() self.player1.focus(self.cam1) self.cam1.step() if self.p2: self.player2.focus(self.cam2) self.cam2.step() GCD_Singleton.step() def game_draw(self): if self.PROFILESTEP: profile.runctx("self._draw()", globals(), {"self": self}) self.PROFILESTEP = False else: self._draw() def _draw(self): clock.tick(60) tup = [self.pl, ] + self.team1.objectslist() + self.team2.objectslist()\ + self.hell.objectslist() + self.pl.cities tup = tuple(tup) self.cam1.translate_coords(*tup) if self.p2: self.cam2.translate_coords(*tup) self.stats1.draw(self.bg1) self.screen.blit(self.bg1, (0, 0)) if self.p2: self.stats2.draw(self.bg2) self.screen.blit(self.bg2, (0, wheight/2)) pygame.display.update() def DefaultProfile(draw_planet, hell): return Profile(draw_planet=draw_planet, debris_min=hell[0], debris_max=hell[1]) def HotseatProfile(draw_planet, hell): return Profile(is_player2_present=True, draw_planet=draw_planet, debris_min=hell[0], debris_max=hell[1]) def RivalProfile(draw_planet, hell): return Profile(is_player2_present=True, is_player2_ai=True, draw_planet=draw_planet, debris_min=hell[0], debris_max=hell[1]) def CoopProfile(draw_planet, hell): return Profile(is_player2_present=True, player2_team="Green", draw_planet=draw_planet, debris_min=hell[0], debris_max=hell[1]) def SpectateProfile(draw_planet, hell): return Profile(is_player1_ai=True, draw_planet=draw_planet, debris_min=hell[0], debris_max=hell[1]) def SurvivalProfile(draw_planet): return Profile(draw_planet=draw_planet, debris_min=35, debris_max=70, greenteamsize=1, redteamsize=0) def CoopSurvivalProfile(draw_planet): return Profile(is_player2_present=True, player2_team="Green", 
draw_planet=draw_planet, debris_min=35, debris_max=70, greenteamsize=2, redteamsize=0)
"""Interpreter and cheat-browser GUI (PyQt5) for Quill-format adventure
games loaded from a ZX Spectrum .sna snapshot (Slovene game 'Kontrabant').
"""
import pickle
import time
# NOTE(review): QtWidgets is imported twice here — harmless duplicate.
from PyQt5 import QtCore, QtWidgets, QtWidgets
from random import randint


class Quill:
    """Game database + engine + Qt UI, all in one class.

    Holds locations, objects, messages, vocabulary and two event tables
    (responses: player-command driven; process: run every turn), plus the
    cheat views that expose the database in tree widgets.
    """

    class Event:
        """One entry of a Quill condition/action table.

        An event matches a (word1, word2) command pair, checks its
        conditions (evaluated via eval() against the cond_ops strings,
        where `data` is the Quill instance) and then runs its actions by
        dispatching to data.do_<name>() methods.
        """
        # Sentinel action-parameter types; values are arbitrary, just unique.
        NIL, LOC, MSG, OBJ, SWAP, PLC = tuple(range(100, 106))
        # (name, python-expression) pairs indexed by condition opcode.
        cond_ops = [("AT", "data.location_no == param1"),
                    ("NOT AT", "data.location_no != param1"),
                    ("AT GT", "data.location_no > param1"),
                    ("AT LT", "data.location_no < param1"),
                    ("PRESENT",
                     "data.objects[param1].location == data.location_no"),
                    ("ABSENT",
                     "data.objects[param1].location != data.location_no"),
                    ("WORN",
                     "data.objects[param1].location == data.Object.WORN"),
                    ("NOT WORN",
                     "data.objects[param1].location != data.Object.WORN"),
                    ("CARRIED",
                     "data.objects[param1].location == data.Object.CARRIED"),
                    ("NOT CARR",
                     "data.objects[param1].location != data.Object.CARRIED"),
                    ("CHANCE", "param1 < randint(1, 100)"),
                    ("ZERO", "not data.flags[param1]"),
                    ("NOT ZERO", "data.flags[param1]"),
                    ("EQ", "data.flags[param1]==param2"),
                    ("GT", "data.flags[param1]>param2"),
                    ("LT", "data.flags[param1]<param2")]
        # Per database version (0/5/7): (action names, per-action parameter
        # counts, per-action parameter types).
        ptas = {
            0: (["INVEN", "DESC", "QUIT", "END", "DONE", "OK", "ANYKEY",
                 "SAVE", "LOAD", "TURNS", "SCORE", "PAUSE", "GOTO",
                 "MESSAGE", "REMOVE", "GET", "DROP", "WEAR", "DESTROY",
                 "CREATE", "SWAP", "SET", "CLEAR", "PLUS", "MINUS", "LET",
                 "BEEP"],
                [0] * 11 + [1] * 9 + [2, 1, 1] + [2]*16,
                [NIL] * 12 + [LOC, MSG] + [OBJ] * 6 + [SWAP] + [NIL] * 18),
            5: (["INVEN", "DESC", "QUIT", "END", "DONE", "OK", "ANYKEY",
                 "SAVE", "LOAD", "TURNS", "SCORE", "CLS", "DROPALL",
                 "PAUSE", "PAPER", "INK", "BORDER", "GOTO", "MESSAGE",
                 "REMOVE", "GET", "DROP", "WEAR", "DESTROY", "CREATE",
                 "SWAP", "PLACE", "SET", "CLEAR", "PLUS", "MINUS", "LET",
                 "BEEP"],
                [0] * 13 + [1] * 12 + [2, 2, 1, 1] + [2] * 10,
                [NIL] * 17 + [LOC, MSG] + [OBJ] * 6 + [SWAP, PLC]
                + [NIL]*12),
            7: (["INVEN", "DESC", "QUIT", "END", "DONE", "OK", "ANYKEY",
                 "SAVE", "LOAD", "TURNS", "SCORE", "CLS", "DROPALL",
                 "AUTOG", "AUTOD", "AUTOW", "AUTOR", "PAUSE", "PAPER",
                 "INK", "BORDER", "GOTO", "MESSAGE", "REMOVE", "GET",
                 "DROP", "WEAR", "DESTROY", "CREATE", "SWAP", "PLACE",
                 "SET", "CLEAR", "PLUS", "MINUS", "LET", "BEEP"],
                [0] * 17 + [1] * 12 + [2, 2, 1] + [2] * 7,
                [NIL] * 21 + [LOC, MSG] + [OBJ] * 6 + [SWAP, PLC]
                + [NIL] * 8)}

        def __init__(self, sna, ptr, dbver=0):
            """Decode one event at `ptr` in the snapshot byte array.

            Header: word1, word2, then a 16-bit little-endian pointer to
            the 0xff-terminated condition list followed by the
            0xff-terminated action list.
            """
            self.act_ops, self.nparams, self.types = self.ptas[dbver]
            self.word1 = sna[ptr]
            self.word2 = sna[ptr + 1]
            p = sna[ptr + 2] + 256 * sna[ptr + 3]
            self.conditions = []
            while sna[p] != 0xff:
                opcode = sna[p]
                param1 = sna[p + 1]
                # Opcodes above 12 (EQ/GT/LT) carry a second parameter.
                if opcode > 12:
                    param2 = sna[p + 2]
                    p += 3
                else:
                    param2 = None
                    p += 2
                self.conditions.append((opcode, param1, param2))
            p += 1
            self.actions = []
            while sna[p] != 0xff:
                opcode = sna[p]
                nparams = self.nparams[opcode]
                params = tuple(sna[p + 1:p + 1 + nparams])
                self.actions.append((opcode, params))
                p += 1 + nparams

        # returns: -1 for error,
        #           0 for not matching,
        #           1 for matching and done (no further processing),
        #           2 for matching, but process further
        # NOTE(review): when the words do not match, the method falls
        # through and implicitly returns None (falsy, treated like 0).
        def __call__(self, data, system, word1, word2):
            def match(w, sw):
                # 255 is the wildcard word in Quill databases.
                return w == sw or (not w and sw == 255)
            if system or match(word1, self.word1) \
                    and match(word2, self.word2):
                for op, param1, param2 in self.conditions:
                    # Conditions are python expressions over `data`,
                    # param1, param2 (see cond_ops).
                    if not eval(self.cond_ops[op][1]):
                        return 0
                for action in self.actions:
                    meth = getattr(data,
                                   "do_" + self.act_ops[action[0]].lower())
                    res = meth(*action[1])
                    if res:
                        return res
                return 2

    class Location:
        # connections: direction word-index -> destination location number
        def __init__(self, description, conn=None):
            self.description = description
            self.connections = conn or {}

    class Object:
        # Special location codes for objects (real locations are < 0xfc).
        INVALID, CARRIED, WORN, NOT_CREATED = 0xff, 0xfe, 0xfd, 0xfc

        def __init__(self, description, initial=NOT_CREATED):
            self.description = description
            self.initial = self.location = initial

    #######################################
    # Actions
    #
    # Each do_* implements one Quill action opcode; a -1 return aborts
    # the event (error), flags[1] tracks the number of carried objects.
    # The printed strings are the game's Slovene messages.

    def do_get(self, param1):
        loc = self.objects[param1].location
        if loc == self.Object.WORN or loc == self.Object.CARRIED:
            self.printout("To vendar že nosim!")
            return -1
        elif loc != self.location_no:
            self.printout("Saj ni tukaj.")
            return -1
        elif self.flags[1] == self.nobjects_carry:
            # Carrying capacity reached.
            return -1
        else:
            self.objects[param1].location = self.Object.CARRIED
            self.flags[1] += 1

    def do_wear(self, param1):
        loc = self.objects[param1].location
        if loc == self.Object.WORN:
            self.printout("To vendar že nosim!")
            return -1
        elif loc != self.Object.CARRIED:
            self.printout("Tega sploh nimam!")
            return -1
        else:
            self.objects[param1].location = self.Object.WORN

    def do_drop(self, param1):
        loc = self.objects[param1].location
        if (loc == self.Object.WORN) or (loc == self.Object.CARRIED):
            self.objects[param1].location = self.location_no
        else:
            self.printout("Tega sploh nimam.")
            return -1

    def do_remove(self, param1):
        loc = self.objects[param1].location
        if loc != self.Object.WORN:
            self.printout("Tega sploh ne nosim!")
            return -1
        else:
            self.objects[param1].location = self.Object.CARRIED

    def do_dropall(self):
        for obj in self.objects:
            if obj.location == self.Object.WORN or \
                    obj.location == self.Object.CARRIED:
                obj.location = self.location_no
        self.flags[1] = 0

    def do_goto(self, locno):
        # flags[2] mirrors the current location number (Quill convention).
        self.location = self.locations[locno]
        self.location_no = locno
        self.flags[2] = locno

    def do_create(self, objno):
        loc = self.objects[objno].location
        if loc == self.Object.WORN or loc == self.Object.CARRIED:
            self.flags[1] -= 1
        self.objects[objno].location = self.location_no

    def do_destroy(self, objno):
        loc = self.objects[objno].location
        if loc == self.Object.WORN or loc == self.Object.CARRIED:
            self.flags[1] -= 1
        self.objects[objno].location = self.Object.NOT_CREATED

    def do_place(self, objno, locno):
        loc = self.objects[objno].location
        if loc == self.Object.WORN or loc == self.Object.CARRIED:
            self.flags[1] -= 1
        self.objects[objno].location = locno

    def do_print(self, flagno):
        # Flags above 47 are 16-bit values split over two bytes.
        if flagno > 47:
            self.printout(self.flags[flagno] + 256 * self.flags[flagno+1])
        else:
            self.printout(self.flags[flagno])

    def do_plus(self, flagno, no):
        self.flags[flagno] += no
        if self.flags[flagno] > 255:
            if flagno > 47:
                # 16-bit flag: carry into the high byte.
                self.flags[flagno] -= 256
                self.flags[flagno + 1] = (self.flags[flagno + 1] + 1) % 256
            else:
                # 8-bit flag: saturate at 255.
                self.flags[flagno] = 255

    def do_minus(self, flagno, no):
        self.flags[flagno] -= no
        if self.flags[flagno] < 0:
            if flagno > 47:
                # 16-bit flag: borrow from the high byte.
                self.flags[flagno] += 256
                self.flags[flagno + 1] -= 1
                # NOTE(review): this tests the low byte, which can no
                # longer be -1 after += 256 — presumably the high byte
                # (flagno + 1) was meant; confirm against Quill semantics.
                if self.flags[flagno] == -1:
                    self.flags[flagno] = 0
            else:
                # 8-bit flag: saturate at 0.
                self.flags[flagno] = 0

    def do_inven(self):
        # Build an HTML inventory list.
        inv = ""
        for obj in self.objects:
            if obj.location == Quill.Object.CARRIED:
                inv += "<LI>%s</LI>" % obj.description
            elif obj.location == Quill.Object.WORN:
                inv += "<LI>%s (nosim)</LI>" % obj.description
        if inv:
            # NOTE(review): closing tag is "</UL" (missing '>') in both
            # branches; Qt's HTML parser tolerates it.
            inv = "Prenašam pa tole:<UL>"+inv+"</UL"
        else:
            inv = "Prenašam pa tole:<UL>pravzaprav nič</UL"
        self.printout(inv)

    def do_message(self, msgno):
        self.printout(self.messages[msgno])
    do_mes = do_message

    def do_set(self, flagno):
        self.flags[flagno] = 255

    def do_clear(self, flagno):
        self.flags[flagno] = 0

    def do_let(self, flagno, no):
        self.flags[flagno] = no

    def do_add(self, flg1, flg2):
        return self.do_plus(flg1, self.flags[flg2])

    def do_sum(self, flg1, flg2):
        # NOTE(review): despite the name, this subtracts (via do_minus).
        return self.do_minus(flg1, self.flags[flg2])

    def do_swap(self, obj1, obj2):
        self.objects[obj1].location, self.objects[obj2].location = \
            self.objects[obj2].location, self.objects[obj1].location

    def do_desc(self):
        self.update_location()

    def do_quit(self):
        self.reset()
        self.update_location()

    def do_end(self):
        self.anykey()
        self.reset()
        self.update_location()

    def do_ok(self):
        self.printout("OK")
        return 1

    @staticmethod
    def do_done():
        return 1

    def do_anykey(self):
        self.anykey()

    def do_save(self):
        # Not implemented; message says "I can't save yet".
        self.printout("Shranjevati pa še ne znam ...")

    def do_load(self):
        self.printout("Nalagati pa znam ...")

    def do_star(self, _):
        self.printout("'STAR' ni implementiran")

    def do_jsr(self, *_):
        self.printout("'JSR' ni implementiran")

    def do_sound(self, lsb, msg):
        pass

    def do_beep(self, lsb, msg):
        pass

    def do_turns(self):
        self.printout("Ukazov dal si %4i zares<br>" % self.turns)

    def do_score(self):
        # Flag 30 holds the score percentage (Quill convention).
        self.printout("Nabral si %i odstotkov<br>" % self.flags[30])

    @staticmethod
    def do_pause(s50):
        # Parameter is in 1/50 s (Spectrum frames).
        time.sleep(s50/50)

    def do_cls(self):
        pass

    #######################################
    # Initialization from an .sna file

    def __init__(self, name="kontra.sna", dbver=0):
        """Load the Quill database from a .sna snapshot and build the UI.

        name: snapshot file; dbver: database layout version (0, 5 or 7)
        selecting the opcode tables and header layout.
        """
        def single_string(ptr):  # TODO: Simplify
            # Strings are stored complemented (255 - byte), 0xe0-terminated.
            s = ""
            while sna[ptr] != 0xe0:
                s += chr(255 - sna[ptr])
                ptr += 1
            return s

        def word(ptr):
            # 16-bit little-endian read.
            return sna[ptr] + 256 * sna[ptr + 1]

        def get_sign_ptr():
            # Scan for the Quill signature 0x10 .. 0x11 0x12 0x13 0x14 0x15
            # (every second byte).
            sign_ptr = -1
            while True:
                sign_ptr = sna.find(b"\x10", sign_ptr + 1)
                if sign_ptr == -1:
                    raise ValueError("Quill signature not found")
                if sna[sign_ptr+2:sign_ptr+12:2] == b"\x11\x12\x13\x14\x15":
                    return sign_ptr

        def read_vocabulary():
            # 5-byte records: 4 complemented chars + word index; index 0
            # terminates. Synonyms share an index; the first spelling wins
            # in index_to_word.
            vocabulary = {}
            index_to_word = []
            pv = self.pvocabulary
            while sna[pv]:
                index = sna[pv + 4]
                w = "".join(chr(255 - x) for x in sna[pv:pv + 4]).strip()
                vocabulary[w] = index
                if index >= len(index_to_word):
                    index_to_word += [None] * (index - len(index_to_word) + 1)
                if not index_to_word[index]:
                    index_to_word[index] = w
                pv += 5
            return vocabulary, index_to_word

        def get_cond_table(ptr):
            # Events are 4-byte headers, table terminated by a zero byte.
            events = []
            while sna[ptr]:
                events.append(self.Event(sna, ptr))
                ptr += 4
            return events

        # ZX Spectrum palette for INK codes.
        colors = ["#000000", "#0000ff", "#ff0000", "#ff00ff",
                  "#00ff00", "#00ffff", "#ffff00", "#ffffff"]
        # HTML escapes plus Slovene characters in the game's charset.
        replacs = {"&": "&amp", "<": "&lt;", ">": "&gt;",
                   "\x60": "&pound;", "\x7f": "&copy;",
                   "\x95": "č", "\x94": "š", "\xa0": "ž",
                   "\x92": "Č", "\xa2": "Š", "\x90": "Ž"}
        # How would these codes be reset?
        # codes = {"\x12": "<big>", "\x13": "<b>",
        #          "\x14": "<i>", "\x15": "<u>"}

        def get_items(ptr, n):
            # Decode n consecutive complemented, control-coded strings
            # into HTML; xpos tracks the 32-column screen wrap.
            items = []
            for i in range(n):
                s = ""
                xpos = 0
                while 1:
                    c = chr(255 - sna[ptr])
                    ptr += 1
                    if c in replacs:
                        s += replacs[c]
                        xpos += 1
                    elif c >= ' ':
                        s += c
                        xpos += 1
                    elif c == "\x1f":
                        # End of string.
                        break
                    elif c == "\x06":
                        # 6,6 is a paragraph break; a single 6 is a space.
                        if 255 - sna[ptr] == 6:
                            s += "<P>"
                            xpos = 0
                            ptr += 1
                        else:
                            s += " "
                            xpos = 0
                    elif c == "\x10":  # INK
                        cl = 255 - sna[ptr]
                        ptr += 1
                        if cl < 8:
                            s += "<FONT COLOR=%s>" % colors[cl]
                    elif c == "\x11":  # PAPER
                        ptr += 1
                    # elif c in codes:
                    #     if sna[ptr] != 255:
                    #         s += "<%s>" % codes[c]
                    #     else:
                    #         s += "</%s>" % codes[c]
                    #         ptr += 1
                    if xpos == 32:
                        if sna[ptr] != ' ':
                            s += " "
                        xpos = 0
                items.append(s)
            return items

        def read_connections():
            # Per location: (direction, destination) byte pairs,
            # 0xff-terminated.
            ptr = word(self.pconnections)
            for location in self.locations:
                while sna[ptr] != 0xff:
                    location.connections[sna[ptr]] = sna[ptr + 1]
                    ptr += 2
                ptr += 1

        def read_object_positions():
            for i in range(len(self.objects)):
                self.objects[i].initial = sna[ptr + i]

        # Pad so that .sna offsets line up with Spectrum memory addresses.
        sna = b"\x00" * (16384 - 27) + open(name, "rb").read()
        ptr = get_sign_ptr() + 13
        self.nobjects_carry = sna[ptr]
        self.nobjects = sna[ptr+1]
        self.nlocations = sna[ptr+2]
        self.nmessages = sna[ptr+3]
        if dbver:
            # Newer layouts insert a system-message count.
            ptr += 1
            self.nsystem_messages = sna[ptr+3]
        self.pdictionary = ptr + 29
        self.presponse = word(ptr+4)
        self.pprocess = word(ptr+6)
        self.pobjects = word(ptr+8)
        self.plocations = word(ptr+10)
        self.pmessages = word(ptr+12)
        off = 2 if dbver else 0
        self.pconnections = word(ptr + 14 + off)
        self.pvocabulary = word(ptr+16 + off)
        self.pobject_locations = word(ptr+18 + off)
        if dbver:
            psystem_messages = word(ptr+14)
            self.system_messages = \
                get_items(word(psystem_messages), self.nsystem_messages)
            self.pobject_map = word(ptr+22)
        else:
            # Version 0 has no system-message table; these addresses are
            # hard-coded for this particular snapshot.
            self.system_messages = [single_string(ptr) for ptr in [
                27132, 27152, 27175, 27209, 27238, 27260, 27317, 27349,
                27368, 27390, 27397, 27451, 27492, 27525, 27551, 27568,
                27573, 27584, 27590, 27613, 27645, 27666, 27681, 27707,
                27726]]
            self.pobject_map = None
        self.vocabulary, self.index_to_word = read_vocabulary()
        # Direction words (Slovene compass points, in/out, up/down).
        self.dir_codes = [self.vocabulary[i] for i in
                          ["SZ", "S", "SV", "Z", "V", "JZ", "J", "JV",
                           "NOTE", "VEN", "GOR", "DOL"]]
        self.responses = get_cond_table(self.presponse)
        self.process = get_cond_table(self.pprocess)
        self.objects = [Quill.Object(x)
                        for x in get_items(word(self.pobjects),
                                           self.nobjects)]
        read_object_positions()
        self.locations = [Quill.Location(x)
                          for x in get_items(word(self.plocations),
                                             self.nlocations)]
        read_connections()
        self.messages = get_items(word(self.pmessages), self.nmessages)
        self.location = self.locations[1]
        self.location_no = 1
        self.flags = [0]*64
        self.flags[1] = 255
        self.flags[2] = self.location_no
        self.cheat_locations = {}
        self.turns = 0
        self.izpisano = ""
        self.dlg = self.izpis = self.ukazna = None
        self.setup_ui()
        self.goljufija_const()
        self.reset()

    #######################################
    # Processing

    def reset(self):
        """Restart the game: location 0, fresh object positions."""
        self.flags[2] = self.location_no = 0
        self.location = self.locations[self.location_no]
        self.turns = 0
        for obj in self.objects:
            obj.location = obj.initial
        self.update_location()
        self.process_events(self.process, 1)
        self.goljufija()

    def update_location(self):
        """Clear the transcript and print the current room description."""
        self.izpisano = ""
        if self.flags[0]:
            # Flag 0 set means darkness.
            self.set_location_description(
                "Temno je kot v rogu. Nič ne vidim.", (0,) * 12)
            return
        desc = self.location.description
        inv = [obj.description for obj in self.objects
               if obj.location == self.location_no]
        if len(inv) == 1:
            desc += "<br>Vidim tudi " + inv[0] + "<br>"
        elif inv:
            desc += "<br>Vidim tudi: " + "".join("<br>- %s" % i
                                                 for i in inv)
        self.set_location_description(
            desc,
            [direct in self.location.connections
             for direct in self.dir_codes])

    #######################################
    # GUI

    def setup_ui(self):
        """Build the main window: transcript + command line + cheat tabs."""
        goljufam = True
        dlg = self.dlg = QtWidgets.QWidget()
        dlg.setWindowTitle("Kontrabant")
        dlg.setEnabled(True)
        dlg.resize(1024 if goljufam else 544, 380)
        dlg.setLayout(QtWidgets.QHBoxLayout())
        vbox1 = QtWidgets.QWidget()
        vbox1.setFixedWidth(350)
        vbox1.setLayout(QtWidgets.QVBoxLayout())
        dlg.layout().addWidget(vbox1)
        self.izpis = QtWidgets.QTextEdit()
        self.izpis.setReadOnly(True)
        self.izpis.setMinimumHeight(290)
        self.izpis.setFocusPolicy(QtCore.Qt.NoFocus)
        self.izpis.setStyleSheet(
            "font-family: Arial; font-size: 14; color: white; "
            "background: blue")
        self.izpisano = ""
        self.ukazna = QtWidgets.QLineEdit()
        self.ukazna.setFocus()
        self.ukazna.returnPressed.connect(self.user_command)
        vbox1.layout().addWidget(self.izpis)
        vbox1.layout().addWidget(self.ukazna)
        dlg.show()
        tabs = QtWidgets.QTabWidget()
        tabs.setMinimumSize(350, 290)
        dlg.layout().addWidget(tabs)
        self.g_lokacija = QtWidgets.QTreeWidget()
        tabs.addTab(self.g_lokacija, "Lokacija")
        self.g_lokacija.setHeaderHidden(True)
        self.g_predmeti = QtWidgets.QTreeWidget()
        tabs.addTab(self.g_predmeti, "Predmeti")
        self.g_predmeti.setColumnCount(3)
        # GPredmeti->setColumnAlignment(1, AlignHCenter);
        # GPredmeti->setColumnAlignment(2, AlignHCenter);
        self.g_predmeti.setColumnWidth(0, 340)
        # self.g_predmeti.setColumnWidthMode(0, QListView::Manual);
        self.g_predmeti.setSortingEnabled(True)
        self.g_dogodki = QtWidgets.QTreeWidget()
        tabs.addTab(self.g_dogodki, "Dogodki")
        self.g_dogodki.setColumnCount(1)
        self.g_dogodki.setHeaderHidden(True)
        self.g_lokacije = QtWidgets.QTreeWidget()
        tabs.addTab(self.g_lokacije, "Lokacije")
        # NOTE(review): repeats g_dogodki — presumably g_lokacije was
        # meant here; confirm before changing.
        self.g_dogodki.setHeaderHidden(True)
        self.g_zastavice = QtWidgets.QTreeWidget()
        tabs.addTab(self.g_zastavice, "Zastavice")
        self.g_zastavice.setColumnCount(1)
        self.g_zastavice.setHeaderHidden(True)
        self.g_sporocila = QtWidgets.QTreeWidget()
        tabs.addTab(self.g_sporocila, "Ukazi")
        self.g_sporocila.setColumnCount(1)
        self.g_predmeti.setColumnWidth(0, 100)
        self.g_sporocila.setHeaderHidden(True)

    #######################################
    # Controller

    def process_events(self, table, system, word1=None, word2=None):
        """Run an event table; -1/1 stop processing, 2 keeps matching."""
        match = 0
        for event in table:
            res = event(self, system, word1, word2)
            if res in [-1, 1]:
                return res
            elif res:
                match = 1
        return match

    def user_command(self):
        """Handle one line from the command box."""
        command = self.ukazna.text().upper()
        if not command:
            return
        self.ukazna.setText("")
        self.printout('<font color="yellow">&gt;&nbsp; %s</font>' % command)
        self.turns += 1
        commsplit = command.split()
        # NOTE(review): self.save()/self.load() are not defined on this
        # class (only save_position/load_position exist) — these two
        # branches would raise AttributeError; confirm intended targets.
        if commsplit and (commsplit[0] in ["SHRA", "SAVE"]):
            self.save()
            return
        if commsplit and (commsplit[0] in ["NALO", "LOAD"]):
            self.load()
            self.goljufija()
            return
        # Translate words via the 4-letter vocabulary prefixes.
        trans = []
        for w in commsplit:
            t = self.vocabulary.get(w[:4], None)
            if t:
                trans.append(t)
        if not len(trans):
            self.printout("Tega sploh ne razumem. "
                          "Poskusi povedati kako drugače.")
        elif len(trans) == 1 and trans[0] in self.location.connections:
            # Bare direction word: move through the map connection.
            self.flags[2] = self.location_no = \
                self.location.connections[trans[0]]
            self.location = self.locations[self.location_no]
            self.update_location()
        else:
            if len(trans) == 1:
                m = self.process_events(self.responses, 0, trans[0])
            else:
                m = self.process_events(self.responses, 0,
                                        trans[0], trans[1])
            if m == 0:
                # Word indices below 16 are directions.
                if len(trans) == 1 and trans[0] < 16:
                    self.printout("Mar ne vidiš, da v to smer ni poti?")
                else:
                    self.printout("Tega pa ne morem.")
            self.process_events(self.process, 1)
            self.goljufija()

    def save_position(self, fname):
        """Pickle flags and object locations to fname."""
        f = open(fname, "wb")
        pickle.dump(self.flags, f, 1)
        pickle.dump([o.location for o in self.objects], f, 1)

    def load_position(self, fname):
        """Restore a position saved by save_position."""
        f = open(fname, "rb")
        self.flags = pickle.load(f)
        object_locations = pickle.load(f)
        self.location_no = self.flags[2]
        self.location = self.locations[self.location_no]
        for r in range(len(object_locations)):
            self.objects[r].location = object_locations[r]
        self.update_location()

    def printout(self, msg):
        # Append to the HTML transcript and scroll to the bottom.
        self.izpisano += msg + "<br>"
        self.izpis.setHtml(self.izpisano)
        self.izpis.scrollContentsBy(0, 30000)

    def anykey(self):
        return QtWidgets.QMessageBox.information(
            None, "Čakam...", "Pritisni OK, pa bova nadaljevala")

    def set_location_description(self, msg, dirs):
        # dirs (available exits) is currently unused here.
        self.printout(msg)

    #######################################
    # Cheating
    #
    # Helpers that render the database into the cheat tree views.

    def ldesc(self, n):
        # First 40 chars of location n's description.
        return self.locations[n].description[:40]

    def ldesci(self, n):
        return self.ldesc(n), n

    def lidesc(self, n):
        return n, self.ldesc(n)

    def repr_action(self, event, system, skipat=0, adddict=""):
        """Render an event as (title, detail-lines, is_empty-ish flag).

        skipat suppresses AT conditions (useful when the location is
        implied by context); adddict is appended to the command words.
        """
        ldesci = self.ldesci
        lidesc = self.lidesc
        if not system:
            if event.word2 != 255:
                tc = " ".join((self.index_to_word[event.word1],
                               self.index_to_word[event.word2], adddict))
            elif event.word1 != 255:
                tc = " ".join((self.index_to_word[event.word1], adddict))
            else:
                tc = adddict
        else:
            tc = adddict
        ta = []
        for op, param1, param2 in event.conditions:
            if self.Event.cond_ops[op][0] == "AT":
                if skipat:
                    continue
                else:
                    if tc:
                        tc += " [AT %s (%i)]" % ldesci(param1)
                    else:
                        tc = "AT %s (%i)" % ldesci(param1)
            else:
                s = "--> %s " % self.Event.cond_ops[op][0]
                if param1:
                    if op < 4:
                        # Location-based condition.
                        s += "%i (%s...) " % lidesc(param1)
                    elif op < 10:
                        # Object-based condition.
                        s += "%i (%s) " % (param1,
                                           self.objects[param1].description)
                    elif op < 13:
                        s += "%i " % param1
                    else:
                        s += "%i %i " % (param1, param2)
                ta.append(s)
        for action in event.actions:
            tt = event.act_ops[action[0]]
            atype = event.types[action[0]]
            param1, param2 = (action[1] + (None, None))[:2]
            if atype == self.Event.LOC:
                tt += " %i (%s...)" % lidesc(param1)
            elif atype == self.Event.MSG:
                tt += " '%s'" % self.messages[param1]
            elif atype == self.Event.OBJ:
                tt += " '%s' (%i)" % (
                    self.objects[param1].description, param1)
            elif atype == self.Event.SWAP:
                tt += " '%s' (%i) '%s' (%i)" % (
                    self.objects[param1].description, param1,
                    self.objects[param2].description, param2)
            elif event.nparams[action[0]] == 1:
                tt += " %i" % param1
            elif event.nparams[action[0]] == 2:
                tt += " %i %i" % (param1, param2)
            ta.append(tt)
        return tc, ta, not tc

    @staticmethod
    def parse_tree(tree_widget, tree):
        """Fill a QTreeWidget from [(state, [event, ...]), ...] data."""
        tree_widget.clear()
        for state, events in tree:
            it = QtWidgets.QTreeWidgetItem(state)
            tree_widget.addTopLevelItem(it)
            for event in events:
                text, subnodes, is_open = (event + (None, None))[:3]
                if isinstance(text, str):
                    it2 = QtWidgets.QTreeWidgetItem([text])
                    it.addChild(it2)
                    if subnodes:
                        it2.addChildren([QtWidgets.QTreeWidgetItem([i])
                                         for i in subnodes])
                        it2.setExpanded(True)
                else:
                    it.addChildren(QtWidgets.QTreeWidgetItem([i])
                                   for i in text)

    def goljufija_const(self):
        """Build the static cheat views (locations, messages, events).

        'goljufija' = 'cheating'; this fills the per-location event cache
        (self.cheat_locations) and the Lokacije/Ukazi/Dogodki tabs once.
        """
        repr_act = self.repr_action
        ldesci = self.ldesci

        def getlocations():
            def process_events(loc, table, system):
                # Classify events touching `loc` into plain actions,
                # special exits (GOTO away) and special approaches
                # (GOTO into loc).
                acts, spec_exits, spec_approaches = [], [], []
                for event in table:
                    for op, param1, param2 in event.conditions:
                        if op <= 1 and param1 == loc:
                            for action in event.actions:
                                if event.act_ops[action[0]] == "GOTO":
                                    if action[1][0] != loc:
                                        spec_exits.append(
                                            repr_act(event, system, 1,
                                                     "-> %s (%i)"
                                                     % ldesci(action[1][0])))
                                    else:
                                        spec_approaches.append(
                                            repr_act(event, system, 1,
                                                     "<- %s (%i)"
                                                     % ldesci(param1)))
                                    break
                            else:
                                # It is not an exit
                                acts.append(repr_act(event, system, 0))
                            break
                    else:
                        # There is no 'AT location';
                        # check whether this can be a special approach
                        for action in event.actions:
                            if event.act_ops[action[0]] == "GOTO" and \
                                    action[1][0] == loc:
                                spec_approaches.append(
                                    repr_act(event, system))
                                break
                # There is an 'AT location';
                # check whether this is an exit event
                return acts, spec_exits, spec_approaches

            def process_exits(loc):
                # Ordinary map connections out of loc.
                return ["%s -> %s (%i)"
                        % ((self.index_to_word[d],) + ldesci(n))
                        for d, n in self.locations[loc].connections.items()]

            def process_approaches(loc):
                # Ordinary map connections into loc.
                app = []
                for src, location in enumerate(self.locations):
                    if loc in list(location.connections.values()):
                        for d, n in location.connections.items():
                            if n == loc:
                                app.append("%s (%i) -> %s"
                                           % (ldesci(src)
                                              + (self.index_to_word[d], )))
                return app

            self.cheat_locations = {}
            for i in range(len(self.locations)):
                exits = process_exits(i)
                approaches = process_approaches(i)
                responses, se, sa = process_events(i, self.responses, 0)
                exits += se
                approaches += sa
                processes, se, sa = process_events(i, self.process, 1)
                exits += se
                approaches += sa
                self.cheat_locations[i] = (responses, processes)
                it = QtWidgets.QTreeWidgetItem(
                    ["%s (%i)" % (self.locations[i].description, i)])
                self.g_lokacije.addTopLevelItem(it)
                for name, content in (
                        ("Vhodi", approaches), ("Izhodi", exits),
                        ("Ukazi", responses), ("Dogodki", processes)):
                    if not content:
                        continue
                    it2 = QtWidgets.QTreeWidgetItem([name])
                    it.addChild(it2)
                    for con in content:
                        if isinstance(con, str):
                            it3 = QtWidgets.QTreeWidgetItem([con])
                        else:
                            it3 = QtWidgets.QTreeWidgetItem([con[0]])
                            # NOTE(review): the comprehension variable `i`
                            # shadows the outer location index.
                            it3.addChildren(
                                [QtWidgets.QTreeWidgetItem([i])
                                 for i in con[1]])
                            it3.setExpanded(True)
                        it2.addChild(it3)
                    it2.setExpanded(True)

        def getmessages():
            def process_events(msg_no, table, system):
                # Events whose actions print message msg_no
                # (MESSAGE/MES prefix match).
                acts = []
                for event in table:
                    for action in event.actions:
                        if event.act_ops[action[0]][:3] == "MES" and \
                                action[1][0] == msg_no:
                            break
                    else:
                        continue
                    acts.append(repr_act(event, system))
                return acts

            return [("%s (%i)" % (self.messages[i], i),
                     process_events(i, self.responses, 0) +
                     process_events(i, self.process, 1))
                    for i in range(len(self.messages))]

        def add_event_to_tree(tree, event, skip_at=0):
            # NOTE(review): skip_at is passed into repr_action's `system`
            # parameter slot, not `skipat` — confirm this is intended.
            tc, ta, isopen = repr_act(event, skip_at)
            it = QtWidgets.QTreeWidgetItem([tc])
            tree.addTopLevelItem(it)
            it.addChildren([QtWidgets.QTreeWidgetItem([i]) for i in ta])

        def get_responses():
            acts = []
            # Trivial GET/DROP/WEAR/REMOVE + OK/DONE pairs are omitted.
            trivial = {self.vocabulary["DAJ"]: "DROP",
                       self.vocabulary["VZEM"]: "GET",
                       self.vocabulary["OBLE"]: "WEAR",
                       self.vocabulary["SLEC"]: "REMOVE"}
            for event in self.responses:
                if (not event.conditions and len(event.actions) == 2 and
                        event.act_ops[event.actions[1][0]] in
                        ["OK", "DONE"] and
                        trivial.get(event.word1, None) ==
                        event.act_ops[event.actions[0][0]]):
                    continue
                if event.word1 < 16:
                    # Direction word: only interesting when it has a
                    # non-AT condition.
                    for op, param1, param2 in event.conditions:
                        if not op:
                            break
                    else:
                        self.g_sporocila.addTopLevelItem(
                            QtWidgets.QTreeWidgetItem(
                                [repr_act(event, 0)]))
                        continue
                add_event_to_tree(self.g_sporocila, event)

        def get_process():
            for event in self.process:
                add_event_to_tree(self.g_dogodki, event, 1)

        return (getlocations(), getmessages(), get_responses(),
                get_process(), None)

    def goljufija(self):
        """Refresh the dynamic cheat views after each turn."""
        repr_act = self.repr_action

        def getlocation():
            # Current-location tab: exits plus cached events.
            self.g_lokacija.clear()
            conn = list(self.location.connections.items())
            if conn:
                it = QtWidgets.QTreeWidgetItem(["Izhodi"])
                self.g_lokacija.addTopLevelItem(it)
                it.addChildren([QtWidgets.QTreeWidgetItem(
                    ["%s: %s (%i)" % (
                        self.index_to_word[dire],
                        self.locations[loc].description[:40], loc)])
                    for dire, loc in conn])
                it.setExpanded(True)
            responses, processes = self.cheat_locations[self.location_no]
            if responses:
                it = QtWidgets.QTreeWidgetItem(["Ukazi"])
                self.g_lokacija.addTopLevelItem(it)
                for content in responses:
                    it2 = QtWidgets.QTreeWidgetItem([content[0]])
                    it.addChild(it2)
                    it2.addChildren([QtWidgets.QTreeWidgetItem([i])
                                     for i in content[1]])
                    it2.setExpanded(True)
                it.setExpanded(True)
            if processes:
                it = QtWidgets.QTreeWidgetItem(["Dogodki"])
                self.g_lokacija.addTopLevelItem(it)
                for content in processes:
                    it2 = QtWidgets.QTreeWidgetItem([content[0]])
                    it.addChild(it2)
                    it2.addChildren([QtWidgets.QTreeWidgetItem([i])
                                     for i in content[1]])
                    it2.setExpanded(True)
                it.setExpanded(True)

        objlocs = {self.Object.CARRIED: "imam",
                   self.Object.WORN: "nosim",
                   self.Object.NOT_CREATED: "ne obstaja",
                   self.Object.INVALID: "ne obstaja"}

        def getobjects():
            def process_events(object_no, table, system):
                # Events that mention object_no in conditions or actions,
                # skipping the trivial GET/DROP/WEAR/REMOVE responses.
                acts = []
                trivial = {self.vocabulary["DAJ"]: "DROP",
                           self.vocabulary["VZEM"]: "GET",
                           self.vocabulary["OBLE"]: "WEAR",
                           self.vocabulary["SLEC"]: "REMOVE"}
                for event in table:
                    if not system and not event.conditions and \
                            len(event.actions) == 2 and \
                            event.act_ops[event.actions[1][0]] in \
                            ["OK", "DONE"] \
                            and trivial.get(event.word1, None) == \
                            event.act_ops[event.actions[0][0]]:
                        continue
                    for op, param1, param2 in event.conditions:
                        if 4 <= op <= 9 and param1 == object_no:
                            break
                    else:
                        for action in event.actions:
                            atype = event.types[action[0]]
                            if (atype in [event.OBJ, event.SWAP] and
                                    action[1][0] == object_no or
                                    atype == self.Event.SWAP and
                                    action[1][1] == object_no):
                                break
                        else:
                            continue  # not interesting, does not mention
                            # object_no neither in conditions nor
                            # in actions
                    acts.append(repr_act(event, system))
                return acts

            def objloc(objno):
                loc = self.objects[objno].location
                if loc < 0xfc:
                    return str(loc)
                else:
                    return objlocs[loc]

            # The event columns are computed once and cached; only the
            # current-location column is refreshed afterwards.
            if not hasattr(self, "cheatobjects"):
                self.cheatobjects = [([self.objects[i].description, str(i),
                                       objloc(i)],
                                      process_events(i, self.responses, 0) +
                                      process_events(i, self.process, 1))
                                     for i in range(len(self.objects))]
            else:
                for i in range(len(self.objects)):
                    self.cheatobjects[i][0][2] = objloc(i)
            return self.cheatobjects

        def getflags():
            # Action opcodes that modify flags (looked up in the version-0
            # opcode name table).
            flops = [Quill.Event.ptas[0][0].index(x)
                     for x in ["PLUS", "MINUS", "SET", "CLEAR", "LET"]]

            def process_events(flag_no, table, system):
                acts = []
                for event in table:
                    for op, param1, param2 in event.conditions:
                        if op >= 11 and param1 == flag_no:
                            break
                    else:
                        for action in event.actions:
                            if action[0] in flops and \
                                    flag_no == action[1][0]:
                                break
                        else:
                            continue  # not interesting, does not mention the
                            # flag neither in conditions nor in action
                    acts.append(repr_act(event, system))
                return acts

            if not hasattr(self, "cheatflags"):
                self.cheatflags = [(["%i = %i" % (i, self.flags[i])],
                                    process_events(i, self.responses, 0) +
                                    process_events(i, self.process, 1))
                                   for i in range(len(self.flags))]
            else:
                # Keep cached event lists; refresh only the values.
                self.cheatflags = [(["%i = %i" % (i, self.flags[i])],
                                    self.cheatflags[i][1])
                                   for i in range(len(self.flags))]
            # Always show flags 0-2; others only when referenced.
            return self.cheatflags[:3] + [x for x in self.cheatflags[3:]
                                          if x[1]]

        getlocation()
        self.parse_tree(self.g_zastavice, getflags())
        self.parse_tree(self.g_predmeti, getobjects())


app = QtWidgets.QApplication([])
q = Quill()
app.exec()