text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from __future__ import print_function
import argparse
import functools
import os
import pkg_resources
import socket
import ssl
import sys
from SocketServer import ThreadingMixIn
from trac import __version__ as VERSION
from trac.util import autoreload, daemon
from trac.util.text import printerr
from trac.web.auth import BasicAuthentication, DigestAuthentication
from trac.web.main import dispatch_request
from trac.web.wsgi import WSGIServer, WSGIRequestHandler
class AuthenticationMiddleware(object):
    """WSGI middleware that authenticates requests to Trac 'login' URLs.

    `auths` maps environment names to authenticator objects exposing a
    `do_auth(environ, start_response)` method; the key '*' acts as a
    wildcard fallback for any environment.
    """

    def __init__(self, application, auths, single_env_name=None):
        self.application = application
        self.auths = auths
        self.single_env_name = single_env_name
        # In single-environment mode the URL carries no environment
        # prefix, so 'login' is the first path component, not the second.
        if single_env_name:
            self.part = 0
        else:
            self.part = 1

    def __call__(self, environ, start_response):
        path_info = environ.get('PATH_INFO', '')
        # Materialize as a list: `filter` returns a lazy iterator on
        # Python 3, which supports neither len() nor indexing.  On
        # Python 2 this is behaviorally identical.
        path_parts = [part for part in path_info.split('/') if part]
        if len(path_parts) > self.part and path_parts[self.part] == 'login':
            env_name = self.single_env_name or path_parts[0]
            if env_name:
                auth = self.auths.get(env_name, self.auths.get('*'))
                if auth:
                    remote_user = auth.do_auth(environ, start_response)
                    if not remote_user:
                        # Authentication failed; the authenticator has
                        # already emitted the challenge response.
                        return []
                    environ['REMOTE_USER'] = remote_user
        return self.application(environ, start_response)
class BasePathMiddleware(object):
    """WSGI middleware that shifts a fixed URL prefix out of PATH_INFO
    and into SCRIPT_NAME."""

    def __init__(self, application, base_path):
        self.base_path = '/' + base_path.strip('/')
        self.application = application

    def __call__(self, environ, start_response):
        # Recombine the full request path, then re-split it around the
        # configured base path.
        full_path = environ['SCRIPT_NAME'] + environ.get('PATH_INFO', '')
        environ['PATH_INFO'] = full_path[len(self.base_path):]
        environ['SCRIPT_NAME'] = self.base_path
        return self.application(environ, start_response)
class TracEnvironMiddleware(object):
    """WSGI middleware that seeds the environ with the Trac environment
    location keys (trac.env_path / trac.env_parent_dir / trac.env_paths)
    without overriding values already present."""

    def __init__(self, application, env_parent_dir, env_paths, single_env):
        self.application = application
        self.environ = {'trac.env_path': None}
        if env_parent_dir:
            self.environ['trac.env_parent_dir'] = env_parent_dir
        elif single_env:
            self.environ['trac.env_path'] = env_paths[0]
        else:
            self.environ['trac.env_paths'] = env_paths

    def __call__(self, environ, start_response):
        # items() instead of the Python 2-only iteritems(): identical
        # behavior here, and keeps the code Python 3 compatible.
        for key, value in self.environ.items():
            environ.setdefault(key, value)
        return self.application(environ, start_response)
class TracHTTPServer(ThreadingMixIn, WSGIServer):
    """Threaded HTTP server for tracd; worker threads die with the process."""

    daemon_threads = True

    def __init__(self, server_address, application, env_parent_dir, env_paths,
                 use_http_11=False):
        # Select the HTTP/1.1-capable handler when requested.
        if use_http_11:
            handler = TracHTTP11RequestHandler
        else:
            handler = TracHTTPRequestHandler
        WSGIServer.__init__(self, server_address, application,
                            request_handler=handler)
class TracHTTPRequestHandler(WSGIRequestHandler):
    """Request handler advertising the tracd server version."""

    server_version = 'tracd/' + VERSION

    def address_string(self):
        # Return the raw client IP, skipping the reverse DNS lookup the
        # base implementation would otherwise perform.
        host, _port = self.client_address[:2]
        return host
class TracHTTP11RequestHandler(TracHTTPRequestHandler):
    """Variant of the tracd request handler that speaks HTTP/1.1
    (persistent connections)."""

    protocol_version = 'HTTP/1.1'
def parse_args(args=None):
    """Parse the tracd command line and return the populated namespace.

    `args` defaults to sys.argv[1:].  Invalid option combinations are
    reported via parser.error() (which exits), and a missing --port is
    defaulted from the selected protocol.
    """
    parser = argparse.ArgumentParser()

    class _AuthAction(argparse.Action):
        # Parses "env,authfile,realm" triples and accumulates the
        # resulting authenticators into the shared 'auths' dict.
        def __init__(self, option_strings, dest, nargs=None, **kwargs):
            self.cls = kwargs.pop('cls')  # authentication class to build
            super(_AuthAction, self).__init__(option_strings, dest, nargs,
                                              **kwargs)

        def __call__(self, parser, namespace, values, option_string=None):
            info = values.split(',')
            if len(info) != 3:
                raise argparse.ArgumentError(self,
                                             "Incorrect number of parameters")
            env_name, filename, realm = info
            filepath = os.path.abspath(filename)
            if not os.path.exists(filepath):
                raise argparse.ArgumentError(self,
                                             "Path does not exist: '%s'"
                                             % filename)
            auths = getattr(namespace, self.dest)
            if env_name in auths:
                # Keep the first definition; later duplicates are ignored.
                printerr("Ignoring duplicate authentication option for "
                         "project: %s" % env_name)
            else:
                auths.update({env_name: self.cls(filepath, realm)})
            setattr(namespace, self.dest, auths)

    class _PathAction(argparse.Action):
        # Normalizes path arguments to absolute paths, optionally
        # requiring them to exist.
        def __init__(self, option_strings, dest, nargs=None, **kwargs):
            self.must_exist = kwargs.pop('must_exist', False)
            super(_PathAction, self).__init__(option_strings, dest, nargs,
                                              **kwargs)

        def __call__(self, parser, namespace, values, option_string=None):
            def to_abspath(path):
                abspath = os.path.abspath(path)
                if self.must_exist and not os.path.exists(abspath):
                    raise argparse.ArgumentError(self,
                                                 "Path does not exist: '%s'"
                                                 % path)
                return abspath
            if isinstance(values, list):
                # Fix: the original comprehension shadowed its own result
                # variable ([to_abspath(paths) for paths in values]).
                paths = [to_abspath(path) for path in values]
            else:
                paths = to_abspath(values)
            setattr(namespace, self.dest, paths)

    parser.add_argument('--version', action='version',
                        version='%%(prog)s %s' % VERSION)
    parser.add_argument('envs', action=_PathAction, must_exist=True,
                        nargs='*', help="path of the project environment(s)")
    parser_group = parser.add_mutually_exclusive_group()
    parser_group.add_argument('-e', '--env-parent-dir', action=_PathAction,
                              must_exist=True, metavar='PARENTDIR',
                              help="parent directory of the project "
                                   "environments")
    parser_group.add_argument('-s', '--single-env', action='store_true',
                              help="only serve a single project without the "
                                   "project list")
    parser_group = parser.add_mutually_exclusive_group()
    parser_group.add_argument('-a', '--auth', default={},
                              metavar='DIGESTAUTH', dest='auths',
                              action=_AuthAction, cls=DigestAuthentication,
                              help="[projectdir],[htdigest_file],[realm]")
    parser_group.add_argument('--basic-auth', default={},
                              metavar='BASICAUTH', dest='auths',
                              action=_AuthAction, cls=BasicAuthentication,
                              help="[projectdir],[htpasswd_file],[realm]")
    parser.add_argument('-p', '--port', type=int,
                        help="the port number to bind to")
    parser.add_argument('-b', '--hostname', default='',
                        help="the host name or IP address to bind to")
    parser.add_argument('--protocol', default='http',
                        choices=('http', 'https', 'scgi', 'ajp', 'fcgi'),
                        help="the server protocol (default: http)")
    parser.add_argument('--certfile', help="PEM certificate file for HTTPS")
    parser.add_argument('--keyfile', help="PEM key file for HTTPS")
    parser.add_argument('-q', '--unquote', action='store_true',
                        help="unquote PATH_INFO (may be needed when using "
                             "the ajp protocol)")
    parser.add_argument('--base-path', default='',  # XXX call this url_base_path?
                        help="the initial portion of the request URL's "
                             "\"path\"")
    parser_group = parser.add_mutually_exclusive_group()
    parser_group.add_argument('--http10', action='store_false', dest='http11',
                              help="use HTTP/1.0 protocol instead of "
                                   "HTTP/1.1")
    parser_group.add_argument('--http11', action='store_true', default=True,
                              help="use HTTP/1.1 protocol (default)")

    if os.name == 'posix':
        # POSIX-only options: daemonization and privilege dropping.
        class _GroupAction(argparse.Action):
            def __call__(self, parser, namespace, values, option_string=None):
                import grp
                try:
                    value = int(values)
                except ValueError:
                    try:
                        value = grp.getgrnam(values)[2]  # gr_gid field
                    except KeyError:
                        raise argparse.ArgumentError(self, "group not found: "
                                                           "%r" % values)
                setattr(namespace, self.dest, value)

        class _UserAction(argparse.Action):
            def __call__(self, parser, namespace, values, option_string=None):
                import pwd
                try:
                    value = int(values)
                except ValueError:
                    try:
                        value = pwd.getpwnam(values)[2]  # pw_uid field
                    except KeyError:
                        raise argparse.ArgumentError(self, "user not found: "
                                                           "%r" % values)
                setattr(namespace, self.dest, value)

        class _OctalValueAction(argparse.Action):
            octal = functools.partial(int, base=8)

            def __call__(self, parser, namespace, values, option_string=None):
                try:
                    value = self.octal(values)
                except ValueError:
                    raise argparse.ArgumentError(self, "Invalid octal umask "
                                                       "value: %r" % values)
                setattr(namespace, self.dest, value)

        parser_group = parser.add_mutually_exclusive_group()
        parser_group.add_argument('-r', '--auto-reload', action='store_true',
                                  help="restart automatically when sources "
                                       "are modified")
        parser_group.add_argument('-d', '--daemonize', action='store_true',
                                  help="run in the background as a daemon")
        parser.add_argument('--pidfile', action=_PathAction,
                            help="file to write pid when daemonizing")
        parser.add_argument('--umask', action=_OctalValueAction,
                            default=0o022, metavar='MASK',
                            help="when daemonizing, file mode creation mask "
                                 "to use, in octal notation (default: 022)")
        parser.add_argument('--group', action=_GroupAction,
                            help="the group to run as")
        parser.add_argument('--user', action=_UserAction,
                            help="the user to run as")
    else:
        parser.add_argument('-r', '--auto-reload', action='store_true',
                            help="restart automatically when sources are "
                                 "modified")
        parser.set_defaults(daemonize=False, user=None, group=None)

    args = parser.parse_args(args)

    if not args.env_parent_dir and not args.envs:
        parser.error("either the --env-parent-dir (-e) option or at least "
                     "one environment must be specified")
    if args.single_env and len(args.envs) > 1:
        parser.error("the --single-env (-s) option cannot be used with more "
                     "than one environment")
    if args.protocol == 'https' and not args.certfile:
        parser.error("the --certfile option is required when using the https "
                     "protocol")
    if args.port is None:
        # Default to the well-known port for each protocol.
        args.port = {
            'http': 80,
            'https': 443,
            'scgi': 4000,
            'ajp': 8009,
            'fcgi': 8000,
        }[args.protocol]
    return args
def main():
    """Entry point: build the WSGI stack from the parsed options and
    serve it until interrupted."""
    args = parse_args()

    # Compose the middleware pipeline around Trac's request dispatcher:
    # environment resolution -> optional auth -> optional base path.
    app = TracEnvironMiddleware(dispatch_request, args.env_parent_dir,
                                args.envs, args.single_env)
    if args.auths:
        if args.single_env:
            project_name = os.path.basename(args.envs[0])
            app = AuthenticationMiddleware(app, args.auths, project_name)
        else:
            app = AuthenticationMiddleware(app, args.auths)
    base_path = args.base_path.strip('/')
    if base_path:
        app = BasePathMiddleware(app, base_path)

    server_address = (args.hostname, args.port)

    if args.protocol in ('http', 'https'):
        def serve():
            addr, port = server_address
            if not addr or addr == '0.0.0.0':
                loc = '0.0.0.0:%s view at %s://127.0.0.1:%s/%s' \
                      % (port, args.protocol, port, base_path)
            else:
                loc = '%s://%s:%s/%s' % (args.protocol, addr, port, base_path)
            try:
                httpd = TracHTTPServer(server_address, app,
                                       args.env_parent_dir, args.envs,
                                       use_http_11=args.http11)
            except socket.error as e:
                print("Error starting Trac server on %s" % loc)
                print("[Errno %s] %s" % e.args)
                sys.exit(1)
            print("Server starting in PID %s." % os.getpid())
            print("Serving on %s" % loc)
            if args.http11:
                print("Using HTTP/1.1 protocol version")
            if args.protocol == 'https':
                # Wrap the already-bound listening socket for TLS.
                httpd.socket = ssl.wrap_socket(httpd.socket, server_side=True,
                                               certfile=args.certfile,
                                               keyfile=args.keyfile)
            httpd.serve_forever()
    elif args.protocol in ('scgi', 'ajp', 'fcgi'):
        def serve():
            try:
                server_cls = __import__('flup.server.%s' % args.protocol,
                                        None, None, ['']).WSGIServer
            except ImportError:
                printerr("Install the flup package to use the '%s' "
                         "protocol" % args.protocol)
                sys.exit(1)
            flup_app = app
            if args.unquote:
                from trac.web.fcgi_frontend import FlupMiddleware
                flup_app = FlupMiddleware(flup_app)
            ret = server_cls(flup_app, bindAddress=server_address).run()
            sys.exit(42 if ret else 0)  # if SIGHUP exit with status 42

    try:
        if args.daemonize:
            daemon.daemonize(pidfile=args.pidfile, progname='tracd',
                             umask=args.umask)
        if args.group is not None:
            os.setgid(args.group)
        if args.user is not None:
            os.setuid(args.user)
        if args.auto_reload:
            def modification_callback(file):
                printerr("Detected modification of %s, restarting." % file)
            autoreload.main(serve, modification_callback)
        else:
            serve()
    except OSError as e:
        printerr("%s: %s" % (e.__class__.__name__, e))
        sys.exit(1)
    except KeyboardInterrupt:
        pass
if __name__ == '__main__':
    # Refuse to run against a mismatched installed Trac version.
    pkg_resources.require('Trac==%s' % VERSION)
    main()
|
{
"content_hash": "f58b191a9a60e2931e65a2cd2014b0de",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 82,
"avg_line_length": 41.951219512195124,
"alnum_prop": 0.524031007751938,
"repo_name": "rbaumg/trac",
"id": "4bf71abc08b4f7c2be931e3ff9fae84853e00765",
"size": "16319",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "trac/web/standalone.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1085"
},
{
"name": "C#",
"bytes": "114293"
},
{
"name": "CSS",
"bytes": "40666"
},
{
"name": "Groff",
"bytes": "1497"
},
{
"name": "JavaScript",
"bytes": "16747"
},
{
"name": "Python",
"bytes": "1287818"
},
{
"name": "Shell",
"bytes": "481"
},
{
"name": "Smalltalk",
"bytes": "11753"
}
],
"symlink_target": ""
}
|
"""
test_keepass_dbcheck
----------------------------------
Tests for `keepass_dbcheck` module.
"""
import unittest
from keepass_dbcheck import keepass_dbcheck
class TestKeepass_dbcheck(unittest.TestCase):
    """Placeholder test case for the keepass_dbcheck module."""

    def setUp(self):
        # No fixtures required yet.
        pass

    def test_something(self):
        # TODO: add real assertions for keepass_dbcheck.
        pass

    def tearDown(self):
        # No cleanup required yet.
        pass
if __name__ == '__main__':
    # Allow running this file directly as a test script.
    unittest.main()
|
{
"content_hash": "7a11c03d74f82c587986b5012ed6be1f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 45,
"avg_line_length": 15.04,
"alnum_prop": 0.5797872340425532,
"repo_name": "justif/keepass_dbcheck",
"id": "f8c075ad5f0b1b1d522c49fc50371884c98587aa",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_keepass_dbcheck.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11981"
}
],
"symlink_target": ""
}
|
'''
Run things from the shell
'''
import os
import readline
from pprint import pprint
from flask import *
from app import *

# PYTHONINSPECT makes the interpreter drop into interactive mode after this
# script finishes, so the names pulled in by the star imports above
# (presumably the Flask app's objects -- verify against app.py) are available
# at a REPL prompt.
os.environ['PYTHONINSPECT'] = 'True'
|
{
"content_hash": "34b107078a54ac827464c20e2bd0b587",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 36,
"avg_line_length": 13.666666666666666,
"alnum_prop": 0.725609756097561,
"repo_name": "rfairburn/mwostatter",
"id": "bac4608955ff8b8fb21e9c3fa572f368b13be32c",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/shell.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11426"
}
],
"symlink_target": ""
}
|
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nssimpleacl6_stats(base_resource):
    """Statistics for simple ACL6 resource.

    Read-only counters exposed by the NetScaler NITRO 'nssimpleacl6'
    stat API.  Notes on this revision: the generated original used 'ur'
    string prefixes (a syntax error on Python 3 and unnecessary here),
    and wrapped plain attribute reads in no-op ``try/except ... raise e``
    blocks, which have been removed without changing behavior.
    """

    def __init__(self):
        self._clearstats = ""
        self._sacl6tothits = 0
        self._sacl6hitsrate = 0
        self._sacl6totmisses = 0
        self._sacl6missesrate = 0
        self._sacl6scount = 0
        self._sacl6totpktsallowed = 0
        self._sacl6pktsallowedrate = 0
        self._sacl6totpktsbridged = 0
        self._sacl6pktsbridgedrate = 0
        self._sacl6totpktsdenied = 0
        self._sacl6pktsdeniedrate = 0

    @property
    def clearstats(self):
        """Clear the statistics / counters.<br/>Possible values = basic, full."""
        return self._clearstats

    @clearstats.setter
    def clearstats(self, clearstats):
        """Clear the statistics / counters."""
        self._clearstats = clearstats

    @property
    def sacl6scount(self):
        """Number of SimpleACL6s configured."""
        return self._sacl6scount

    @property
    def sacl6pktsbridgedrate(self):
        """Rate (/s) counter for sacl6totpktsbridged."""
        return self._sacl6pktsbridgedrate

    @property
    def sacl6totpktsallowed(self):
        """Total packets that matched a SimpleACL6 with action ALLOW and got consumed by NetScaler."""
        return self._sacl6totpktsallowed

    @property
    def sacl6totmisses(self):
        """Packets not matching any SimpleACL6."""
        return self._sacl6totmisses

    @property
    def sacl6missesrate(self):
        """Rate (/s) counter for sacl6totmisses."""
        return self._sacl6missesrate

    @property
    def sacl6hitsrate(self):
        """Rate (/s) counter for sacl6tothits."""
        return self._sacl6hitsrate

    @property
    def sacl6tothits(self):
        """Packets matching a SimpleACL6."""
        return self._sacl6tothits

    @property
    def sacl6totpktsdenied(self):
        """Packets dropped because they match SimpleACL6 with processing mode set to DENY."""
        return self._sacl6totpktsdenied

    @property
    def sacl6pktsallowedrate(self):
        """Rate (/s) counter for sacl6totpktsallowed."""
        return self._sacl6pktsallowedrate

    @property
    def sacl6totpktsbridged(self):
        """Total packets that matched a SimpleACL6 with action BRIDGE and got bridged by NetScaler."""
        return self._sacl6totpktsbridged

    @property
    def sacl6pktsdeniedrate(self):
        """Rate (/s) counter for sacl6totpktsdenied."""
        return self._sacl6pktsdeniedrate

    def _get_nitro_response(self, service, response):
        """Convert a nitro response into resource objects; return the
        object array for GET requests.  Raises nitro_exception on any
        non-zero error code."""
        result = service.payload_formatter.string_to_resource(
            nssimpleacl6_response, response,
            self.__class__.__name__.replace('_stats', ''))
        if result.errorcode != 0:
            if result.errorcode == 444:
                # Session expired on the appliance; drop it client-side.
                service.clear_session(self)
            if result.severity:
                if result.severity == "ERROR":
                    raise nitro_exception(result.errorcode,
                                          str(result.message),
                                          str(result.severity))
            else:
                raise nitro_exception(result.errorcode,
                                      str(result.message),
                                      str(result.severity))
        return result.nssimpleacl6

    def _get_object_name(self):
        """Return the value of the object identifier argument (this
        resource has none, so always 0)."""
        return 0

    @classmethod
    def get(cls, service, name="", option_=""):
        """Fetch the statistics of all nssimpleacl6_stats resources
        configured on the netscaler.

        NOTE(review): like the generated original, a non-empty 'name'
        falls through and returns None -- only the unnamed bulk fetch is
        implemented here.
        """
        obj = nssimpleacl6_stats()
        if not name:
            return obj.stat_resources(service, option_)

    class Clearstats:
        # Allowed values for the 'clearstats' attribute.
        basic = "basic"
        full = "full"
class nssimpleacl6_response(base_response):
    """Response wrapper holding a list of nssimpleacl6_stats resources
    plus the standard nitro status fields."""

    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # One stats placeholder per expected record.  (The original also
        # assigned an empty list first -- a dead store immediately
        # overwritten here -- which has been removed.)
        self.nssimpleacl6 = [nssimpleacl6_stats() for _ in range(length)]
|
{
"content_hash": "26c184943817bec3f54faf3e57871c50",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 135,
"avg_line_length": 24.76683937823834,
"alnum_prop": 0.7035564853556485,
"repo_name": "benfinke/ns_python",
"id": "0224ed613be4aa6490686783bb86f179b988d8f9",
"size": "5394",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/lib/nssrc/com/citrix/netscaler/nitro/resource/stat/ns/nssimpleacl6_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21836782"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
}
|
import click
import glob
import sys
import sh
import os.path
from datetime import datetime
from collections import ChainMap
from . import borg, util
from .report import ActionSuccess, ActionFailure, report_success, report_failure, send_report
def main_inner(config, source, remote, create):
    """Run 'borg init' or 'borg create' for the selected sources/remotes.

    config -- layered configuration mapping
    source -- iterable of source names to restrict to (empty = all)
    remote -- iterable of remote names to restrict to (empty = all)
    create -- True to create archives, False to initialize repositories
    """
    # Layer the command-specific section over the global config.
    if create:
        command_config = ChainMap(util.lookup(config, ['backup', 'create'], {}), config)
    else:
        command_config = ChainMap(util.lookup(config, ['backup', 'init'], {}), config)
    source_list = config['backup']['sources'].keys()
    for source_name in source_list:
        if len(source) > 0 and source_name not in source:
            continue
        source_config = command_config.new_child(config['backup']['sources'][source_name])
        repo_name = source_config.get('repo_name', source_name)
        if 'remote_list' in source_config:
            remote_list = source_config['remote_list']
        else:
            remote_list = config.get('remotes', {}).keys()
        if len(remote) > 0:
            remote_list = [r for r in remote_list if r in remote]
        if len(remote_list) == 0:
            continue
        if create and 'pre_create_hook' in source_config:
            hook_config = source_config.new_child(source_config['pre_create_hook'])
            print("\n")
            print("Running pre-create-hook for source", source_name)
            try:
                borg.hook(hook_config)
                report_success(ActionSuccess('pre-create-hook', (source_name,)))
            except Exception as e:
                # A failed pre-create hook skips the whole source: report
                # every planned create plus the post hook as skipped.
                report_failure(ActionFailure('pre-create-hook', (source_name,), e))
                for remote_name in remote_list:
                    report_failure(ActionFailure('create', (source_name, remote_name), 'skipped because of failed pre-create-hook'))
                report_failure(ActionFailure('post-create-hook', (source_name,), 'skipped because of failed pre-create-hook'))
                continue
        for remote_name in remote_list:
            remote_config = source_config.new_child(config['remotes'][remote_name])
            if create:
                date = str(datetime.now().isoformat())
                archive = remote_config.get('archive_name', 'auto_{datetime}').format(datetime=date)
                print("\n")
                print("Backing up the source", source_name, "to the remote", remote_name)
                try:
                    borg.create(remote_config, remote_name, repo_name, archive)
                    report_success(ActionSuccess('create', (source_name, remote_name)))
                except Exception as e:
                    report_failure(ActionFailure('create', (source_name, remote_name), e))
            else:
                print("\n")
                print("Initializing the repo", repo_name, "on the remote", remote_name)
                try:
                    borg.init(remote_config, remote_name, repo_name)
                    report_success(ActionSuccess('init', (source_name, remote_name)))
                except Exception as e:
                    report_failure(ActionFailure('init', (source_name, remote_name), e))
        if create and 'post_create_hook' in source_config:
            hook_config = source_config.new_child(source_config['post_create_hook'])
            print("\n")
            print("Running post-create-hook for source", source_name)
            try:
                borg.hook(hook_config)
                # Fix: report the same hyphenated action name as the
                # failure paths; the original inconsistently reported
                # success as 'post_create_hook'.
                report_success(ActionSuccess('post-create-hook', (source_name,)))
            except Exception as e:
                report_failure(ActionFailure('post-create-hook', (source_name,), e))
@click.command()
@click.option('-s', '--source', multiple=True)
@click.option('-r', '--remote', multiple=True)
@click.option('--create/--init', default=True)
@click.pass_obj
def main(config, source, remote, create):
    """CLI entry point; delegates to main_inner with the parsed options."""
    main_inner(config, source, remote, create)
|
{
"content_hash": "79adb2df2ad4d0111fa6a95f6d80f2e2",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 132,
"avg_line_length": 42.1505376344086,
"alnum_prop": 0.5903061224489796,
"repo_name": "grensjo/borg-summon",
"id": "9f2cd90839e729189aa33c025bed536ff6d7ee6c",
"size": "3920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/borg_summon/backup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42913"
}
],
"symlink_target": ""
}
|
"""plistlib.py -- a tool to generate and parse MacOSX .plist files.
The PropertyList (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.
To write out a plist file, use the writePlist(rootObject, pathOrFile)
function. 'rootObject' is the top level object, 'pathOrFile' is a
filename or a (writable) file object.
To parse a plist from a file, use the readPlist(pathOrFile) function,
with a file name or a (readable) file object as the only argument. It
returns the top level object (again, usually a dictionary).
To work with plist data in strings, you can use readPlistFromString()
and writePlistToString().
Values can be strings, integers, floats, booleans, tuples, lists,
dictionaries, Data or datetime.datetime objects. String values (including
dictionary keys) may be unicode strings -- they will be written out as
UTF-8.
The <data> plist type is supported through the Data class. This is a
thin wrapper around a Python string.
Generate Plist example:
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict=dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData = Data("<binary gunk>"),
someMoreData = Data("<lots of binary gunk>" * 10),
aDate = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
)
# unicode keys are possible, but a little awkward to use:
pl[u'\xc5benraa'] = "That was a unicode key."
writePlist(pl, fileName)
Parse Plist example:
pl = readPlist(pathOrFile)
print pl["aKey"]
"""
__all__ = [
"readPlist", "writePlist", "readPlistFromString", "writePlistToString",
"readPlistFromResource", "writePlistToResource",
"Plist", "Data", "Dict"
]
# Note: the Plist and Dict classes have been deprecated.
import binascii
import datetime
from cStringIO import StringIO
import re
def readPlist(pathOrFile):
    """Read a .plist file. 'pathOrFile' may either be a file name or a
    (readable) file object. Return the unpacked root object (which
    usually is a dictionary).
    """
    didOpen = 0
    if isinstance(pathOrFile, (str, unicode)):
        pathOrFile = open(pathOrFile)
        didOpen = 1
    try:
        p = PlistParser()
        rootObject = p.parse(pathOrFile)
    finally:
        # Fix: close the file even when parsing raises, but only if we
        # opened it ourselves.
        if didOpen:
            pathOrFile.close()
    return rootObject
def writePlist(rootObject, pathOrFile):
    """Write 'rootObject' to a .plist file. 'pathOrFile' may either be a
    file name or a (writable) file object.
    """
    didOpen = 0
    if isinstance(pathOrFile, (str, unicode)):
        pathOrFile = open(pathOrFile, "w")
        didOpen = 1
    try:
        writer = PlistWriter(pathOrFile)
        writer.writeln("<plist version=\"1.0\">")
        writer.writeValue(rootObject)
        writer.writeln("</plist>")
    finally:
        # Fix: close the file even when serialization raises, but only
        # if we opened it ourselves.
        if didOpen:
            pathOrFile.close()
def readPlistFromString(data):
    """Read a plist data from a string. Return the root object.
    """
    # Wrap the string in a file-like object and reuse the file reader.
    return readPlist(StringIO(data))
def writePlistToString(rootObject):
    """Return 'rootObject' as a plist-formatted string.
    """
    # Serialize into an in-memory buffer and hand back its contents.
    buf = StringIO()
    writePlist(rootObject, buf)
    return buf.getvalue()
def readPlistFromResource(path, restype='plst', resid=0):
    """Read plst resource from the resource fork of path.
    """
    # Carbon is only available on classic Mac OS X; import lazily so the
    # rest of the module stays usable elsewhere.
    from Carbon.File import FSRef, FSGetResourceForkName
    from Carbon.Files import fsRdPerm
    from Carbon import Res
    ref = FSRef(path)
    resource_file = Res.FSOpenResourceFile(ref, FSGetResourceForkName(),
                                           fsRdPerm)
    Res.UseResFile(resource_file)
    data = Res.Get1Resource(restype, resid).data
    Res.CloseResFile(resource_file)
    return readPlistFromString(data)
def writePlistToResource(rootObject, path, restype='plst', resid=0):
    """Write 'rootObject' as a plst resource to the resource fork of path.
    """
    # Carbon is only available on classic Mac OS X; import lazily.
    from Carbon.File import FSRef, FSGetResourceForkName
    from Carbon.Files import fsRdWrPerm
    from Carbon import Res
    payload = writePlistToString(rootObject)
    ref = FSRef(path)
    resource_file = Res.FSOpenResourceFile(ref, FSGetResourceForkName(),
                                           fsRdWrPerm)
    Res.UseResFile(resource_file)
    # Replace any existing resource of the same type/id.
    try:
        Res.Get1Resource(restype, resid).RemoveResource()
    except Res.Error:
        pass
    res = Res.Resource(payload)
    res.AddResource(restype, resid, '')
    res.WriteResource()
    Res.CloseResFile(resource_file)
class DumbXMLWriter:
    """Minimal XML writer: tracks an element stack and emits indented
    lines to a file-like object."""

    def __init__(self, file, indentLevel=0, indent="\t"):
        self.file = file
        self.stack = []
        self.indentLevel = indentLevel
        self.indent = indent

    def beginElement(self, element):
        # Remember the element so endElement() can verify proper nesting.
        self.stack.append(element)
        self.writeln("<%s>" % element)
        self.indentLevel += 1

    def endElement(self, element):
        assert self.indentLevel > 0
        assert self.stack.pop() == element
        self.indentLevel -= 1
        self.writeln("</%s>" % element)

    def simpleElement(self, element, value=None):
        # Self-closing form when there is no value to embed.
        if value is None:
            self.writeln("<%s/>" % element)
        else:
            escaped = _escapeAndEncode(value)
            self.writeln("<%s>%s</%s>" % (element, escaped, element))

    def writeln(self, line):
        if not line:
            self.file.write("\n")
        else:
            self.file.write(self.indentLevel * self.indent + line + "\n")
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units may be omitted with
# a loss of precision)
_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z")
def _dateFromString(s):
order = ('year', 'month', 'day', 'hour', 'minute', 'second')
gd = _dateParser.match(s).groupdict()
lst = []
for key in order:
val = gd[key]
if val is None:
break
lst.append(int(val))
return datetime.datetime(*lst)
def _dateToString(d):
return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (
d.year, d.month, d.day,
d.hour, d.minute, d.second
)
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _escapeAndEncode(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use plistlib.Data instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text.encode("utf-8") # encode as UTF-8
class PlistWriter(DumbXMLWriter):
    """XML writer that knows how to serialize the plist value types."""

    # Standard header for XML/XSL documents.
    XMLHEADER = '<?xml version="1.0" encoding="UTF-8"?>\n'
    PLISTHEADER = XMLHEADER + """\
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
"""

    def __init__(self, file, indentLevel=0, indent="\t", writeHeader=1):
        if writeHeader:
            file.write(self.PLISTHEADER)
        DumbXMLWriter.__init__(self, file, indentLevel, indent)

    def writeValue(self, value):
        # bool must be tested before int because bool subclasses int.
        if isinstance(value, (str, unicode)):
            self.simpleElement("string", value)
        elif isinstance(value, bool):
            self.simpleElement("true" if value else "false")
        elif isinstance(value, int):
            self.simpleElement("integer", str(value))
        elif isinstance(value, float):
            self.simpleElement("real", repr(value))
        elif isinstance(value, dict):
            self.writeDict(value)
        elif isinstance(value, Data):
            self.writeData(value)
        elif isinstance(value, datetime.datetime):
            self.simpleElement("date", _dateToString(value))
        elif isinstance(value, (tuple, list)):
            self.writeArray(value)
        else:
            raise TypeError("unsuported type: %s" % type(value))

    def writeData(self, data):
        self.beginElement("data")
        # Emit the base64 payload one level left of the element content,
        # wrapped so lines stay within 76 columns.
        self.indentLevel -= 1
        maxlinelength = 76 - len(self.indent.replace("\t", " " * 8) *
                                 self.indentLevel)
        for line in data.asBase64(maxlinelength).split("\n"):
            if line:
                self.writeln(line)
        self.indentLevel += 1
        self.endElement("data")

    def writeDict(self, d):
        self.beginElement("dict")
        # Sort keys for a stable, canonical output order.
        items = d.items()
        items.sort()
        for key, value in items:
            if not isinstance(key, (str, unicode)):
                raise TypeError("keys must be strings")
            self.simpleElement("key", key)
            self.writeValue(value)
        self.endElement("dict")

    def writeArray(self, array):
        self.beginElement("array")
        for value in array:
            self.writeValue(value)
        self.endElement("array")
class _InternalDict(dict):
# This class is needed while Dict is scheduled for deprecation:
# we only need to warn when a *user* instantiates Dict or when
# the "attribute notation for dict keys" is used.
def __getattr__(self, attr):
try:
value = self[attr]
except KeyError:
raise AttributeError, attr
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning)
return value
def __setattr__(self, attr, value):
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning)
self[attr] = value
def __delattr__(self, attr):
try:
del self[attr]
except KeyError:
raise AttributeError, attr
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning)
class Dict(_InternalDict):
    """Deprecated dict subclass; use the builtin dict instead."""

    def __init__(self, **kwargs):
        from warnings import warn
        warn("The plistlib.Dict class is deprecated, use builtin dict instead",
             PendingDeprecationWarning)
        super(Dict, self).__init__(**kwargs)
class Plist(_InternalDict):
    """This class has been deprecated. Use readPlist() and writePlist()
    functions instead, together with regular dict objects.
    """

    def __init__(self, **kwargs):
        from warnings import warn
        warn("The Plist class is deprecated, use the readPlist() and "
             "writePlist() functions instead", PendingDeprecationWarning)
        super(Plist, self).__init__(**kwargs)

    @classmethod
    def fromFile(cls, pathOrFile):
        """Deprecated. Use the readPlist() function instead."""
        plist = cls()
        plist.update(readPlist(pathOrFile))
        return plist

    def write(self, pathOrFile):
        """Deprecated. Use the writePlist() function instead."""
        writePlist(self, pathOrFile)
def _encodeBase64(s, maxlinelength=76):
    """Base64-encode *s* with a line break every *maxlinelength* characters.

    Copied from base64.encodestring(), with an added maxlinelength argument.
    Generalized: the joiner is derived from the input (``s[:0]``) instead of
    a hard-coded ``""``, so the function now also works on bytes input
    (where binascii.b2a_base64() returns bytes) while remaining
    byte-identical in behavior for py2 str input.
    """
    if maxlinelength < 4:
        # Fewer than 4 output chars cannot encode one 3-byte group; the
        # original raised ValueError here too (zero step in range()).
        raise ValueError("maxlinelength must be >= 4")
    # Each output line of N base64 characters encodes (N//4)*3 input bytes.
    maxbinsize = (maxlinelength // 4) * 3
    pieces = [binascii.b2a_base64(s[i:i + maxbinsize])
              for i in range(0, len(s), maxbinsize)]
    # s[:0] is '' for str input and b'' for bytes input, matching the
    # type of the pieces produced by b2a_base64().
    return s[:0].join(pieces)
class Data:
    """Wrapper that marks a value as binary data for plist serialization."""
    def __init__(self, data):
        self.data = data
    @classmethod
    def fromBase64(cls, data):
        # base64.decodestring just calls binascii.a2b_base64;
        # it seems overkill to use both base64 and binascii.
        return cls(binascii.a2b_base64(data))
    def asBase64(self, maxlinelength=76):
        return _encodeBase64(self.data, maxlinelength)
    def __cmp__(self, other):
        # Py2 rich-comparison fallback: compare payloads when possible,
        # otherwise fall back to identity ordering.
        if isinstance(other, self.__class__):
            return cmp(self.data, other.data)
        if isinstance(other, str):
            return cmp(self.data, other)
        return cmp(id(self), id(other))
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self.data))
class PlistParser:
    """Expat-driven parser that turns a plist XML stream into Python objects."""
    def __init__(self):
        self.stack = []          # open containers (dicts/arrays) being filled
        self.currentKey = None   # pending <key> text awaiting its value
        self.root = None         # top-level object, set once parsing starts
    def parse(self, fileobj):
        from xml.parsers.expat import ParserCreate
        expat_parser = ParserCreate()
        expat_parser.StartElementHandler = self.handleBeginElement
        expat_parser.EndElementHandler = self.handleEndElement
        expat_parser.CharacterDataHandler = self.handleData
        expat_parser.ParseFile(fileobj)
        return self.root
    def handleBeginElement(self, element, attrs):
        # Reset the character-data buffer, then dispatch to begin_<tag>.
        self.data = []
        begin_handler = getattr(self, "begin_" + element, None)
        if begin_handler is not None:
            begin_handler(attrs)
    def handleEndElement(self, element):
        end_handler = getattr(self, "end_" + element, None)
        if end_handler is not None:
            end_handler()
    def handleData(self, data):
        self.data.append(data)
    def addObject(self, value):
        if self.currentKey is not None:
            # Value for the pending dict key.
            self.stack[-1][self.currentKey] = value
            self.currentKey = None
        elif not self.stack:
            # No open container: this is the root object.
            self.root = value
        else:
            self.stack[-1].append(value)
    def getData(self):
        joined = "".join(self.data)
        try:
            joined = joined.encode("ascii")
        except UnicodeError:
            pass
        self.data = []
        return joined
    # element handlers
    def begin_dict(self, attrs):
        new_dict = _InternalDict()
        self.addObject(new_dict)
        self.stack.append(new_dict)
    def end_dict(self):
        self.stack.pop()
    def end_key(self):
        self.currentKey = self.getData()
    def begin_array(self, attrs):
        new_array = []
        self.addObject(new_array)
        self.stack.append(new_array)
    def end_array(self):
        self.stack.pop()
    def end_true(self):
        self.addObject(True)
    def end_false(self):
        self.addObject(False)
    def end_integer(self):
        self.addObject(int(self.getData()))
    def end_real(self):
        self.addObject(float(self.getData()))
    def end_string(self):
        self.addObject(self.getData())
    def end_data(self):
        self.addObject(Data.fromBase64(self.getData()))
    def end_date(self):
        self.addObject(_dateFromString(self.getData()))
|
{
"content_hash": "056b2f24ad934e7e9ca9a448652f1ff5",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 152,
"avg_line_length": 32.22863247863248,
"alnum_prop": 0.6139362195849632,
"repo_name": "petrvanblokland/Xierpa3",
"id": "0755458c0c937f472f7566f55d60917c8f2ffff6",
"size": "15107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xierpa3/toolbox/storage/plistlib.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41394"
},
{
"name": "JavaScript",
"bytes": "1507"
},
{
"name": "Python",
"bytes": "1349828"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages

# Package metadata and entry-point wiring for the 'textinator' CLI tool.
setup(
    name='textinator',
    description='A command line image viewer',
    version='0.1.0',
    author='Daan Rijks',
    # Bug fix: this keyword was misspelled 'autor_email', so setuptools
    # ignored it and the published metadata had no contact email.
    author_email='daanrijks@gmail.com',
    license='MIT',
    packages=find_packages(),
    install_requires=[
        'click>=3.3, <4',
        'Pillow>=2.6.1, <3',
        'ansi>=0.1.2, <1'
    ],
    entry_points='''
        [console_scripts]
        textinator=textinator:textinator
    '''
)
|
{
"content_hash": "247baf41105ab5b7eabe66a9c80274d7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 46,
"avg_line_length": 20.347826086956523,
"alnum_prop": 0.5726495726495726,
"repo_name": "ijks/textinator",
"id": "34ddb2e65c7961d4c80fa3f297641eeebe3e377a",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8194"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Factory for the droid struct-maintenance-module Tangible template.

    NOTE(review): looks like a generated template script -- the
    BEGIN/END MODIFICATIONS markers are kept for tooling; confirm before
    editing outside them.
    """
    obj = Tangible()
    obj.template = "object/tangible/component/droid/shared_struct_maint_module_1.iff"
    obj.attribute_template_id = -1
    obj.stfName("craft_droid_ingredients_n","struct_maint_module_1")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return obj
|
{
"content_hash": "e0bb5dc6001e64e0bc4589ac395272c0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 26.384615384615383,
"alnum_prop": 0.7113702623906706,
"repo_name": "obi-two/Rebelion",
"id": "74d6161e0f8d5d228264c98bc6fc603c827216f2",
"size": "488",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/component/droid/shared_struct_maint_module_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
# Project URLconf: maps URL regexes to views.  Uses the old
# `patterns(...)` helper and `django.conf.urls.defaults`, i.e. a
# legacy (pre-1.4-era) Django API -- presumably intentional for this
# sketch; confirm the target Django version before modernizing.
urlpatterns = patterns('',
    # Example:
    # (r'^mysite/', include('mysite.foo.urls')),
    # Uncomment this for admin:
    (r'^admin/', include('django.contrib.admin.urls')),
)
|
{
"content_hash": "a209fa8dc8293cc1d892176e04fe7b41",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 56,
"avg_line_length": 24.88888888888889,
"alnum_prop": 0.6294642857142857,
"repo_name": "bbc/kamaelia",
"id": "cf6a7a1840d5519353121f6893e7974efd48f73a",
"size": "1030",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/JMB/mysite/urls.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
}
|
"""
Code to manage the creation and SQL rendering of 'where' constraints.
"""
import collections
import datetime
from itertools import repeat
import warnings
from django.conf import settings
from django.db.models.fields import DateTimeField, Field
from django.db.models.sql.datastructures import EmptyResultSet, Empty
from django.db.models.sql.aggregates import Aggregate
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.six.moves import xrange
from django.utils import timezone
from django.utils import tree
# Connection types: the SQL boolean connectors used to join the child
# nodes of a WhereNode tree.
AND = 'AND'
OR = 'OR'
class EmptyShortCircuit(Exception):
    """
    Raised internally to signal that a "matches nothing" node must be
    inserted into the where-clause being built.
    """
class WhereNode(tree.Node):
    """
    Used to represent the SQL where-clause.
    The class is tied to the Query class that created it (in order to create
    the correct SQL).
    A child is usually a tuple of:
        (Constraint(alias, targetcol, field), lookup_type, value)
    where value can be either raw Python value, or Query, ExpressionNode or
    something else knowing how to turn itself into SQL.
    However, a child could also be any class with as_sql() and either
    relabeled_clone() method or relabel_aliases() and clone() methods. The
    second alternative should be used if the alias is not the only mutable
    variable.
    """
    default = AND  # connector used when children are added without one
    def _prepare_data(self, data):
        """
        Prepare data for addition to the tree. If the data is a list or tuple,
        it is expected to be of the form (obj, lookup_type, value), where obj
        is a Constraint object, and is then slightly munged before being
        stored (to avoid storing any reference to field objects). Otherwise,
        the 'data' is stored unchanged and can be any class with an 'as_sql()'
        method.
        Returns either the original data or a 4-tuple
        (obj, lookup_type, value_annotation, value).
        """
        if not isinstance(data, (list, tuple)):
            return data
        obj, lookup_type, value = data
        if isinstance(value, collections.Iterator):
            # Consume any generators immediately, so that we can determine
            # emptiness and transform any non-empty values correctly.
            value = list(value)
        # The "value_annotation" parameter is used to pass auxilliary information
        # about the value(s) to the query construction. Specifically, datetime
        # and empty values need special handling. Other types could be used
        # here in the future (using Python types is suggested for consistency).
        if (isinstance(value, datetime.datetime)
                or (isinstance(obj.field, DateTimeField) and lookup_type != 'isnull')):
            value_annotation = datetime.datetime
        elif hasattr(value, 'value_annotation'):
            value_annotation = value.value_annotation
        else:
            value_annotation = bool(value)
        if hasattr(obj, "prepare"):
            value = obj.prepare(lookup_type, value)
        return (obj, lookup_type, value_annotation, value)
    def as_sql(self, qn, connection):
        """
        Returns the SQL version of the where clause and the value to be
        substituted in. Returns '', [] if this node matches everything,
        None, [] if this node is empty, and raises EmptyResultSet if this
        node can't match anything.
        """
        # Note that the logic here is made slightly more complex than
        # necessary because there are two kind of empty nodes: Nodes
        # containing 0 children, and nodes that are known to match everything.
        # A match-everything node is different than empty node (which also
        # technically matches everything) for backwards compatibility reasons.
        # Refs #5261.
        result = []
        result_params = []
        everything_childs, nothing_childs = 0, 0
        non_empty_childs = len(self.children)
        for child in self.children:
            try:
                if hasattr(child, 'as_sql'):
                    sql, params = qn.compile(child)
                else:
                    # A leaf node in the tree.
                    sql, params = self.make_atom(child, qn, connection)
            except EmptyResultSet:
                nothing_childs += 1
            else:
                if sql:
                    result.append(sql)
                    result_params.extend(params)
                else:
                    if sql is None:
                        # Skip empty childs totally.
                        non_empty_childs -= 1
                        continue
                    everything_childs += 1
            # Check if this node matches nothing or everything.
            # First check the amount of full nodes and empty nodes
            # to make this node empty/full.
            if self.connector == AND:
                full_needed, empty_needed = non_empty_childs, 1
            else:
                full_needed, empty_needed = 1, non_empty_childs
            # Now, check if this node is full/empty using the
            # counts.
            if empty_needed - nothing_childs <= 0:
                if self.negated:
                    return '', []
                else:
                    raise EmptyResultSet
            if full_needed - everything_childs <= 0:
                if self.negated:
                    raise EmptyResultSet
                else:
                    return '', []
        if non_empty_childs == 0:
            # All the child nodes were empty, so this one is empty, too.
            return None, []
        conn = ' %s ' % self.connector
        sql_string = conn.join(result)
        if sql_string:
            if self.negated:
                # Some backends (Oracle at least) need parentheses
                # around the inner SQL in the negated case, even if the
                # inner SQL contains just a single expression.
                sql_string = 'NOT (%s)' % sql_string
            elif len(result) > 1:
                sql_string = '(%s)' % sql_string
        return sql_string, result_params
    def get_group_by_cols(self):
        # Collect the (alias, column) pairs this clause contributes to
        # GROUP BY, recursing into children that know how to do it
        # themselves.
        cols = []
        for child in self.children:
            if hasattr(child, 'get_group_by_cols'):
                cols.extend(child.get_group_by_cols())
            else:
                if isinstance(child[0], Constraint):
                    cols.append((child[0].alias, child[0].col))
                if hasattr(child[3], 'get_group_by_cols'):
                    cols.extend(child[3].get_group_by_cols())
        return cols
    def make_atom(self, child, qn, connection):
        """
        Turn a tuple (Constraint(table_alias, column_name, db_type),
        lookup_type, value_annotation, params) into valid SQL.
        The first item of the tuple may also be an Aggregate.
        Returns the string for the SQL fragment and the parameters to use for
        it.
        """
        warnings.warn(
            "The make_atom() method will be removed in Django 1.9. Use Lookup class instead.",
            RemovedInDjango19Warning)
        lvalue, lookup_type, value_annotation, params_or_value = child
        field_internal_type = lvalue.field.get_internal_type() if lvalue.field else None
        if isinstance(lvalue, Constraint):
            try:
                lvalue, params = lvalue.process(lookup_type, params_or_value, connection)
            except EmptyShortCircuit:
                raise EmptyResultSet
        elif isinstance(lvalue, Aggregate):
            params = lvalue.field.get_db_prep_lookup(lookup_type, params_or_value, connection)
        else:
            raise TypeError("'make_atom' expects a Constraint or an Aggregate "
                            "as the first item of its 'child' argument.")
        if isinstance(lvalue, tuple):
            # A direct database column lookup.
            field_sql, field_params = self.sql_for_columns(lvalue, qn, connection, field_internal_type), []
        else:
            # A smart object with an as_sql() method.
            field_sql, field_params = qn.compile(lvalue)
        is_datetime_field = value_annotation is datetime.datetime
        cast_sql = connection.ops.datetime_cast_sql() if is_datetime_field else '%s'
        if hasattr(params, 'as_sql'):
            extra, params = qn.compile(params)
            cast_sql = ''
        else:
            extra = ''
        params = field_params + params
        if (len(params) == 1 and params[0] == '' and lookup_type == 'exact'
                and connection.features.interprets_empty_strings_as_nulls):
            lookup_type = 'isnull'
            value_annotation = True
        # Simple operators (exact, gt, lt, ...) render straight from the
        # connection's operator map.
        if lookup_type in connection.operators:
            format = "%s %%s %%s" % (connection.ops.lookup_cast(lookup_type),)
            return (format % (field_sql,
                              connection.operators[lookup_type] % cast_sql,
                              extra), params)
        if lookup_type == 'in':
            if not value_annotation:
                raise EmptyResultSet
            if extra:
                return ('%s IN %s' % (field_sql, extra), params)
            max_in_list_size = connection.ops.max_in_list_size()
            if max_in_list_size and len(params) > max_in_list_size:
                # Break up the params list into an OR of manageable chunks.
                in_clause_elements = ['(']
                for offset in xrange(0, len(params), max_in_list_size):
                    if offset > 0:
                        in_clause_elements.append(' OR ')
                    in_clause_elements.append('%s IN (' % field_sql)
                    group_size = min(len(params) - offset, max_in_list_size)
                    param_group = ', '.join(repeat('%s', group_size))
                    in_clause_elements.append(param_group)
                    in_clause_elements.append(')')
                in_clause_elements.append(')')
                return ''.join(in_clause_elements), params
            else:
                return ('%s IN (%s)' % (field_sql,
                                        ', '.join(repeat('%s', len(params)))),
                        params)
        elif lookup_type in ('range', 'year'):
            return ('%s BETWEEN %%s and %%s' % field_sql, params)
        elif is_datetime_field and lookup_type in ('month', 'day', 'week_day',
                                                   'hour', 'minute', 'second'):
            tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None
            sql, tz_params = connection.ops.datetime_extract_sql(lookup_type, field_sql, tzname)
            return ('%s = %%s' % sql, tz_params + params)
        elif lookup_type in ('month', 'day', 'week_day'):
            return ('%s = %%s'
                    % connection.ops.date_extract_sql(lookup_type, field_sql), params)
        elif lookup_type == 'isnull':
            assert value_annotation in (True, False), "Invalid value_annotation for isnull"
            return ('%s IS %sNULL' % (field_sql, ('' if value_annotation else 'NOT ')), ())
        elif lookup_type == 'search':
            return (connection.ops.fulltext_search_sql(field_sql), params)
        elif lookup_type in ('regex', 'iregex'):
            return connection.ops.regex_lookup(lookup_type) % (field_sql, cast_sql), params
        raise TypeError('Invalid lookup_type: %r' % lookup_type)
    def sql_for_columns(self, data, qn, connection, internal_type=None):
        """
        Returns the SQL fragment used for the left-hand side of a column
        constraint (for example, the "T1.foo" portion in the clause
        "WHERE ... T1.foo = 6") and a list of parameters.
        """
        table_alias, name, db_type = data
        if table_alias:
            lhs = '%s.%s' % (qn(table_alias), qn(name))
        else:
            lhs = qn(name)
        return connection.ops.field_cast_sql(db_type, internal_type) % lhs
    def relabel_aliases(self, change_map):
        """
        Relabels the alias values of any children. 'change_map' is a dictionary
        mapping old (current) alias values to the new values.
        """
        for pos, child in enumerate(self.children):
            if hasattr(child, 'relabel_aliases'):
                # For example another WhereNode
                child.relabel_aliases(change_map)
            elif hasattr(child, 'relabeled_clone'):
                self.children[pos] = child.relabeled_clone(change_map)
            elif isinstance(child, (list, tuple)):
                # tuple starting with Constraint
                child = (child[0].relabeled_clone(change_map),) + child[1:]
                if hasattr(child[3], 'relabeled_clone'):
                    child = (child[0], child[1], child[2]) + (
                        child[3].relabeled_clone(change_map),)
                self.children[pos] = child
    def clone(self):
        """
        Creates a clone of the tree. Must only be called on root nodes (nodes
        with empty subtree_parents). Childs must be either (Constraint, lookup,
        value) tuples, or objects supporting .clone().
        """
        clone = self.__class__._new_instance(
            children=[], connector=self.connector, negated=self.negated)
        for child in self.children:
            if hasattr(child, 'clone'):
                clone.children.append(child.clone())
            else:
                clone.children.append(child)
        return clone
class EmptyWhere(WhereNode):
    """Where-clause that can never match: additions are ignored and
    rendering always raises EmptyResultSet."""
    def add(self, data, connector):
        # Deliberately drop everything; this node stays empty forever.
        return
    def as_sql(self, qn=None, connection=None):
        raise EmptyResultSet
class EverythingNode(object):
    """Where-clause node that matches every row; renders as no SQL at all."""
    def as_sql(self, qn=None, connection=None):
        # Empty fragment + no parameters == "matches everything".
        return '', []
class NothingNode(object):
    """Where-clause node that can never match any row."""
    def as_sql(self, qn=None, connection=None):
        # Rendering this node means the whole query returns no rows.
        raise EmptyResultSet
class ExtraWhere(object):
    """Holds raw SQL fragments that are parenthesized and ANDed together."""
    def __init__(self, sqls, params):
        self.sqls = sqls
        self.params = params
    def as_sql(self, qn=None, connection=None):
        wrapped = ["(%s)" % fragment for fragment in self.sqls]
        return " AND ".join(wrapped), list(self.params or ())
class Constraint(object):
    """
    An object that can be passed to WhereNode.add() and knows how to
    pre-process itself prior to including in the WhereNode.
    """
    def __init__(self, alias, col, field):
        warnings.warn(
            "The Constraint class will be removed in Django 1.9. Use Lookup class instead.",
            RemovedInDjango19Warning)
        self.alias, self.col, self.field = alias, col, field
    def prepare(self, lookup_type, value):
        # Let the field coerce raw Python values; objects that can render
        # themselves to SQL (have as_sql) are passed through untouched.
        if self.field and not hasattr(value, 'as_sql'):
            return self.field.get_prep_lookup(lookup_type, value)
        return value
    def process(self, lookup_type, value, connection):
        """
        Returns a tuple of data suitable for inclusion in a WhereNode
        instance: ((alias, col, db_type), params).
        Raises EmptyShortCircuit when the related object does not exist,
        so the caller can emit a "matches nothing" node instead.
        """
        # Because of circular imports, we need to import this here.
        from django.db.models.base import ObjectDoesNotExist
        try:
            if self.field:
                params = self.field.get_db_prep_lookup(lookup_type, value,
                    connection=connection, prepared=True)
                db_type = self.field.db_type(connection=connection)
            else:
                # This branch is used at times when we add a comparison to NULL
                # (we don't really want to waste time looking up the associated
                # field object at the calling location).
                params = Field().get_db_prep_lookup(lookup_type, value,
                    connection=connection, prepared=True)
                db_type = None
        except ObjectDoesNotExist:
            raise EmptyShortCircuit
        return (self.alias, self.col, db_type), params
    def relabeled_clone(self, change_map):
        # Return self unchanged when our alias is unaffected; otherwise
        # build a shallow copy (via Empty) with the remapped alias.
        if self.alias not in change_map:
            return self
        else:
            new = Empty()
            new.__class__ = self.__class__
            new.alias, new.col, new.field = change_map[self.alias], self.col, self.field
            return new
class SubqueryConstraint(object):
    """Constrains *columns* on table *alias* against the rows of a subquery."""
    def __init__(self, alias, columns, targets, query_object):
        self.alias = alias
        self.columns = columns
        self.targets = targets
        self.query_object = query_object
    def as_sql(self, qn, connection):
        query = self.query_object
        # QuerySet was sent
        if hasattr(query, 'values'):
            if query._db and connection.alias != query._db:
                raise ValueError("Can't do subqueries with queries on different DBs.")
            # Do not override already existing values.
            if not hasattr(query, 'field_names'):
                query = query.values(*self.targets)
            else:
                query = query._clone()
            query = query.query
        query.clear_ordering(True)
        compiler = query.get_compiler(connection=connection)
        return compiler.as_subquery_condition(self.alias, self.columns, qn)
    def relabel_aliases(self, change_map):
        self.alias = change_map.get(self.alias, self.alias)
    def clone(self):
        return self.__class__(
            self.alias, self.columns, self.targets, self.query_object)
|
{
"content_hash": "f31006f39fd63ceb54092c833d523321",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 107,
"avg_line_length": 40.210648148148145,
"alnum_prop": 0.5762477692706235,
"repo_name": "errx/django",
"id": "2b315be7c800b4544097404a6ee1dbdf2dc82034",
"size": "17371",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/db/models/sql/where.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52957"
},
{
"name": "JavaScript",
"bytes": "102668"
},
{
"name": "Python",
"bytes": "9469402"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
}
|
import sys
import argparse
import logbook
from logbook import handlers
from rq import Queue, Worker
from redis.exceptions import ConnectionError
from rq.scripts import add_standard_arguments
from rq.scripts import setup_redis
from rq.scripts import read_config_file
from rq.scripts import setup_default_arguments
from controller import bots, getNotiData, self_match_pattern
from failure_handler import do_job_failure_handler_have_a_rest
from ai import plugin_modules
def format_colors(record, handler):
    """Render a logbook record as 'HH:MM:SS: message', coloring the
    message dark red for ERROR and above, dark yellow for WARNING."""
    from rq.utils import make_colorizer
    timestamp = record.time.strftime('%H:%M:%S')
    # logbook levels are ordered ints (WARNING < ERROR), so checking the
    # ERROR band first is equivalent to the original WARNING-first chain.
    if record.level >= logbook.ERROR:
        paint = make_colorizer('darkred')
    elif record.level == logbook.WARNING:
        paint = make_colorizer('darkyellow')
    else:
        paint = lambda text: text
    return '%s: %s' % (timestamp, paint(record.msg))
def setup_loghandlers(args):
    """Push logbook handlers: a swallowing NullHandler, a stdout stream
    handler (DEBUG when --verbose, else INFO with colors), and a stderr
    handler for WARNING and above."""
    if args.verbose:
        level = logbook.DEBUG
        fmt = None
    else:
        level = logbook.INFO
        fmt = format_colors
    # Swallow anything the handlers below do not bubble.
    handlers.NullHandler(bubble=False).push_application()
    out_handler = handlers.StreamHandler(sys.stdout, level=level, bubble=False)
    if fmt:
        out_handler.formatter = fmt
    out_handler.push_application()
    err_handler = handlers.StderrHandler(level=logbook.WARNING, bubble=False)
    if fmt:
        err_handler.formatter = fmt
    err_handler.push_application()
def parse_args():
    """Build the worker command-line interface and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(description='Starts an RQ worker.')
    add_standard_arguments(arg_parser)
    # Worker-specific options, declared data-first (order preserved so the
    # generated --help output is unchanged).
    extra_options = [
        (('--burst', '-b'), dict(action='store_true', default=False, help='Run in burst mode (quit after all work is done)')),
        (('--name', '-n'), dict(default=None, help='Specify a different name')),
        (('--path', '-P'), dict(default='.', help='Specify the import path.')),
        (('--verbose', '-v'), dict(action='store_true', default=False, help='Show more output')),
        (('--sentry-dsn',), dict(action='store', default=None, metavar='URL', help='Report exceptions to this Sentry DSN')),
        (('queues',), dict(nargs='*', default=['default'], help="The queues to listen on (default: 'default')")),
    ]
    for flags, options in extra_options:
        arg_parser.add_argument(*flags, **options)
    return arg_parser.parse_args()
def main():
    """Entry point: parse CLI args, configure logging and Redis, then run
    an RQ worker on the requested queues.

    Exits with status 1 if the Redis connection cannot be established.
    """
    args = parse_args()
    if args.path:
        # Prepend user-supplied (colon-separated) directories so job
        # modules become importable.
        sys.path = args.path.split(':') + sys.path
    settings = {}
    if args.config:
        # args.config is added by rq's add_standard_arguments() in
        # parse_args() -- presumably a path to a config module; confirm.
        settings = read_config_file(args.config)
    setup_default_arguments(args, settings)
    # Other default arguments
    if args.sentry_dsn is None:
        args.sentry_dsn = settings.get('SENTRY_DSN', None)
    setup_loghandlers(args)
    setup_redis(args)
    try:
        queues = map(Queue, args.queues)
        w = Worker(queues, name=args.name)
        # Custom failure handler imported from failure_handler at module top.
        w.push_exc_handler(do_job_failure_handler_have_a_rest)
        # Should we configure Sentry?
        if args.sentry_dsn:
            from raven import Client
            from rq.contrib.sentry import register_sentry
            client = Client(args.sentry_dsn)
            register_sentry(client, w)
        w.work(burst=args.burst)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
if __name__ == '__main__':
    # Python 2-only hack: reload(sys) restores the setdefaultencoding()
    # attribute that site.py deletes, so the process-wide default encoding
    # can be forced to the filesystem encoding before the worker starts.
    reload(sys)
    sys.setdefaultencoding(sys.getfilesystemencoding())
    main()
|
{
"content_hash": "9138a8b93738e8272fbc052f4499ed13",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 132,
"avg_line_length": 33.295918367346935,
"alnum_prop": 0.6739197057922157,
"repo_name": "wangjun/xiaohuangji",
"id": "ac8b54b2e5dd88c5f91b2b978276757babcbce91",
"size": "3285",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "rqworker.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106785"
},
{
"name": "Shell",
"bytes": "1729"
}
],
"symlink_target": ""
}
|
import os
#import shutil
import argparse
import xml.dom.minidom
import re
import pprint
import sys
import datetime
# TODO: get namespace prefixes from root element
xs_prefix = 'xs:'
aae_prefix = 'aae:'
# ASSUMPTION:
# 1) any elements from XMLSchema are using 'xs' as namespace prefix
# 2) any defined types are referenced using 'aae' prefix
# 3) the complex types defining the 4 node categories are:
# Maps a schema base-type name to the node category it belongs to.
master_types = {
    'MachineAttributes': 'Machine',
    'ResourceAttributes': 'Resource',
    'SoftwareAttributes': 'Software',
    'ApplicationAttributes': 'Application'
}
# ----------------------------------------------------------------------- #
# functions
# ----------------------------------------------------------------------- #
def extract_from_xml(cfg, data_hash):
    """Parse cfg.schema_file (an XML Schema) and fill data_hash with the
    parsed element/type entries, grouped by node category.

    data_hash is populated in place with one sub-dict per category
    ('Machine', 'Software', 'Resource', 'Application'), each holding
    'elements' (and, except for 'Machine', 'types') keyed by schema name.
    Returns the schema's version attribute, or None when no root
    xs:schema element was found.
    """
    dom = xml.dom.minidom.parse(cfg.schema_file)
    root = dom.getElementsByTagName(xs_prefix+"schema")
    version = None
    #pp = pprint.PrettyPrinter(indent=4)
    if root:
        version = root[0].getAttribute("version")
        # temporary data structures:
        el_data = {}
        ct_data = {}
        for child in root[0].childNodes:
            if child.nodeType == child.ELEMENT_NODE \
               and child.tagName == xs_prefix+'element':
                el_name = child.getAttribute("name")
                parsed_data = parse_element_data(child)
                if parsed_data:
                    #print "FOUND EL: [%s]" % el_name
                    el_data[el_name] = parsed_data
                    #pp.pprint(parsed_data)
            if child.nodeType == child.ELEMENT_NODE \
               and child.tagName == xs_prefix+'complexType':
                ct_name = child.getAttribute("name")
                parsed_data = parse_complex_type(child)
                if parsed_data and parsed_data.has_key("TYPE"):
                    #print "FOUND CT: [%s]" % ct_name
                    ct_data[ct_name] = parsed_data
                    #pp.pprint(parsed_data)
        # post process phase II: elements without a TYPE inherit the
        # public attributes of the complex type they extend.
        for el_name in el_data.keys():
            if not el_data[el_name].has_key("TYPE"):
                if ct_data.has_key(el_data[el_name]["_base"]):
                    data = ct_data[el_data[el_name]["_base"]]
                    for attr in data.keys():
                        if not attr.startswith('_'):
                            el_data[el_name][attr] = data[attr]
                    #print "POSTPROCESSED: [%s]" % el_name
                    #pp.pprint(el_data[el_name])
        # prepare main structure of data hash
        # (note: 'Machine' intentionally has no 'types' bucket here --
        # presumably machine complex types never occur; confirm.)
        data_hash['Machine'] = { 'elements' : {} }
        data_hash['Software'] = { 'types' : {} ,
                                  'elements' : {} }
        data_hash['Resource'] = { 'types' : {} ,
                                  'elements' : {} }
        data_hash['Application'] = { 'types' : {} ,
                                     'elements' : {} }
        for el_name in el_data.keys():
            #print "DEBUG", el_data[el_name]
            data_hash[el_data[el_name]["TYPE"]]['elements'][el_name] = \
                el_data[el_name]
        for ct_name in ct_data.keys():
            data_hash[ct_data[ct_name]["TYPE"]]['types'][ct_name] = \
                ct_data[ct_name]
    else:
        # TODO: throw exception
        print "NO ROOT ELEMENT FOUND"
    return version
def _parse_annotated_node(node, tag_name):
    """Shared worker for parse_element_data()/parse_complex_type().

    The two schema constructs carry an identical structure -- an
    xs:extension with fixed xs:attribute children plus optional
    xs:documentation -- so they were previously parsed by two
    copy-pasted functions differing only in the accepted tag name.

    Returns a dict with '_base', '_fixed', '_doc' and, when the base is
    one of the 4 master types, 'TYPE' plus a '<fixed-type>: <value>'
    entry; returns None when *node* is not the expected element or has
    no xs:extension.
    """
    parsed_data = {}
    if node.nodeType != node.ELEMENT_NODE \
       or node.tagName != xs_prefix + tag_name:
        return None
    # search for base type
    ext_els = node.getElementsByTagName(xs_prefix + "extension")
    if ext_els is None or ext_els.length == 0:
        return None
    ext_el = ext_els[0]
    parsed_data["_base"] = strip_aae_prefix(ext_el.getAttribute("base"))
    # and dig for fixed attributes
    for child in ext_el.childNodes:
        if child.nodeType == child.ELEMENT_NODE \
           and child.tagName == xs_prefix + 'attribute':
            fixed = child.getAttribute("fixed")
            # NOTE(review): minidom's getAttribute() returns "" (never
            # None) for a missing attribute, so this guard looks dead;
            # kept as-is from the original to preserve behavior.
            if fixed is None:
                return None
            parsed_data["_fixed"] = {
                "name": child.getAttribute("name"),
                "type": strip_aae_prefix(child.getAttribute("type")),
                "value": fixed,
            }
    # dig for documentation (English or untagged entries only)
    parsed_data["_doc"] = []
    for doc_el in node.getElementsByTagName(xs_prefix + "documentation"):
        lang = doc_el.getAttribute("xml:lang")
        if lang is None or lang == "" or lang == "en":
            parsed_data["_doc"].append(getXMLText(doc_el))
    # post process phase I: label the entry with its master category and
    # promote the fixed attribute to a direct "<type>: <value>" entry.
    # (As in the original, a master-typed node without a fixed attribute
    # raises KeyError here.)
    if parsed_data["_base"] in master_types:
        parsed_data["TYPE"] = master_types[parsed_data["_base"]]
        parsed_data[parsed_data["_fixed"]["type"]] = parsed_data["_fixed"]["value"]
    return parsed_data
def parse_element_data(node):
    """Parse a top-level xs:element definition (see _parse_annotated_node)."""
    return _parse_annotated_node(node, 'element')
def parse_complex_type(node):
    """Parse a top-level xs:complexType definition (see _parse_annotated_node)."""
    return _parse_annotated_node(node, 'complexType')
def strip_aae_prefix(inStr):
    """Return *inStr* with a single leading 'aae:' namespace prefix removed."""
    # Equivalent to the anchored re.sub(r'^aae:', '', inStr): at most one
    # removal, and only at the very start of the string.
    if inStr.startswith(aae_prefix):
        return inStr[len(aae_prefix):]
    return inStr
# ----------------------------------------------------------------------- #
# helper functions for XHTML output
# ----------------------------------------------------------------------- #
def printHTMLHeader():
    """Print the opening XHTML document tags to stdout."""
    print "<html><body>"
def printHTMLFooter():
    """Print the closing XHTML document tags to stdout."""
    print "</body></html>"
def printHTMLTitle(version):
    """Print the page heading, embedding the schema *version* string."""
    print "<h1>Node classifications from MARS Schema %s</h1>" % version
def printHTMLTable3Col(data_hash, title, classkey, subclasskey):
    """Print an XHTML table of (class, subclass, description) rows.

    data_hash maps a name to a parsed entry dict carrying *classkey*,
    *subclasskey* and '_doc' keys; rows are sorted by class, then
    subclass.  Entries whose subclass has no matching data_hash key are
    reported to stderr instead of being rendered.
    """
    print "<h2>%s</h2>" % title
    print "<table><thead><tr><th align=\"left\">%s</th><th align=\"left\">%s</th><th align=\"left\">Description</th></tr></thead>" % (classkey, subclasskey)
    print "<tbody>"
    # here we do the data ordering
    a_classes = {}
    a_sub_classes = {}
    for el in data_hash.keys():
        a_class = data_hash[el][classkey]
        a_sub_class = data_hash[el][subclasskey]
        a_classes[a_class] = 0 # dummy value
        if not a_sub_classes.has_key(a_class):
            a_sub_classes[a_class] = []
        a_sub_classes[a_class].append(a_sub_class)
    # sort arrays...
    for cl in sorted(a_classes.keys()):
        for sub_cl in sorted(a_sub_classes[cl]):
            # here we use the fact that 'key' in data_hash equals sub_cl
            #print "DEBUG: title=[%s] cl=[%s] scl=[%s]" % (title, cl, sub_cl)
            if data_hash.has_key(sub_cl):
                print "<tr><td>%s</td><td>%s</td><td>%s</td></tr>" % (cl, sub_cl, "<br />".join(data_hash[sub_cl]['_doc']))
            else:
                sys.stderr.write("Error in Schema for "+subclasskey+ " \""+sub_cl+"\"\n")
    print "</tbody></table>"
def printHTMLTable2Col(data_hash, title, classkey):
    """Print an XHTML table of (class, description) rows.

    Like printHTMLTable3Col() but without the subclass column; rows are
    sorted by class, and entries whose class has no matching data_hash
    key are reported to stderr instead of being rendered.
    """
    print "<h2>%s</h2>" % title
    print "<table><thead><tr><th align=\"left\">%s</th><th align=\"left\">Description</th></tr></thead>" % (classkey)
    print "<tbody>"
    # here we do the data ordering
    a_classes = {}
    for el in data_hash.keys():
        a_class = data_hash[el][classkey]
        a_classes[a_class] = 0 # dummy value
    # sort arrays...
    for cl in sorted(a_classes.keys()):
        # here we use the fact that 'key' in data_hash equals sub_cl
        #print "DEBUG: title=[%s] cl=[%s] scl=[%s]" % (title, cl, sub_cl)
        if data_hash.has_key(cl):
            print "<tr><td>%s</td><td>%s</td></tr>" % (cl, "<br />".join(data_hash[cl]['_doc']))
        else:
            sys.stderr.write("Error in Schema for "+classkey+ " \""+cl+"\"\n")
    print "</tbody></table>"
# ----------------------------------------------------------------------- #
# helper functions for Asciidoc output
# ----------------------------------------------------------------------- #
def printAsciiDocHeader(date):
    """Print the AsciiDoc preamble: TOC directive, revdate, and intro note."""
    print ":toc:"
    print ":revdate: %s" % date
    print "\n<<<\n"
    print "[NOTE]\n====\nThe MARS Schema uses a different versioning cycle from HIRO Product. It is expected to see the two versions deviate from each other.\n\nThis list was last updated on: *{revdate}*\n====\n\n"
def printAsciiDocFooter():
    """Print the AsciiDoc footer: a single blank line (print() for py2/3)."""
    print("")
def printAsciiDocTitle(version):
    """Print the level-0 AsciiDoc document title with the schema version."""
    print("= Node classifications from MARS Schema %s" % version)
def printAsciiDocTable3Col(data_hash, title, classkey, subclasskey):
    """Print a 3-column AsciiDoc table (class, subclass, description).

    data_hash maps subclass names to entries holding *classkey*,
    *subclasskey* and a '_doc' list of description lines.  Missing
    subclass entries are reported on stderr.

    Rewritten for Python 2/3 compatibility: ``print()`` calls and the
    ``in`` operator replace the print statement and ``dict.has_key()``
    (removed in Python 3); output is unchanged.
    """
    print("== %s\n" % title)
    print("[cols=\"1,1,3\", options=\"header\"]")
    print("|===")
    print("|%s|%s|Description" % (classkey, subclasskey))
    # Group subclass names under their class so rows come out sorted by
    # class first, then subclass.
    a_classes = {}
    a_sub_classes = {}
    for el in data_hash.keys():
        a_class = data_hash[el][classkey]
        a_sub_class = data_hash[el][subclasskey]
        a_classes[a_class] = 0  # dummy value
        if a_class not in a_sub_classes:
            a_sub_classes[a_class] = []
        a_sub_classes[a_class].append(a_sub_class)
    for cl in sorted(a_classes.keys()):
        for sub_cl in sorted(a_sub_classes[cl]):
            # here we use the fact that 'key' in data_hash equals sub_cl
            if sub_cl in data_hash:
                # NOTE(review): "<br />" is HTML, not AsciiDoc — kept as-is
                # to preserve the existing output; confirm it is intended.
                print("|%s|%s|%s" % (cl, sub_cl, "<br />".join(data_hash[sub_cl]['_doc'])))
            else:
                sys.stderr.write("Error in Schema for "+subclasskey+ " \""+sub_cl+"\"\n")
    print("|===\n")
def printAsciiDocTable2Col(data_hash, title, classkey):
    """Print a 2-column AsciiDoc table (class, description).

    data_hash maps class names to entries holding *classkey* and a '_doc'
    list of description lines; missing entries are reported on stderr.

    Rewritten for Python 2/3 compatibility (print() calls, ``in`` instead
    of the removed ``dict.has_key()``); output is unchanged.
    """
    print("== %s\n" % title)
    print("[cols=\"1,5\", options=\"header\"]")
    print("|===")
    print("|%s|Description" % (classkey))
    # Collect the distinct class names (dict used as a set).
    a_classes = {}
    for el in data_hash.keys():
        a_class = data_hash[el][classkey]
        a_classes[a_class] = 0  # dummy value
    for cl in sorted(a_classes.keys()):
        # here we use the fact that keys of data_hash equal the class names
        if cl in data_hash:
            print("|%s|%s" % (cl, "<br />".join(data_hash[cl]['_doc'])))
        else:
            sys.stderr.write("Error in Schema for "+classkey+ " \""+cl+"\"\n")
    print("|===\n")
# ----------------------------------------------------------------------- #
# helper function:
# retrieve text from a XML node
# ----------------------------------------------------------------------- #
def getXMLText(node):
    """Return the concatenated text of all direct TEXT_NODE children of *node*."""
    return ''.join(child.data
                   for child in node.childNodes
                   if child.nodeType == child.TEXT_NODE)
# ----------------------------------------------------------------------- #
# main
# ----------------------------------------------------------------------- #
# Script entry point: parse CLI arguments, extract node classifications
# from the MARS schema XML, then render them as HTML or AsciiDoc tables.
date = datetime.datetime.now().strftime("%d-%b-%Y")
parser = argparse.ArgumentParser(description='parse specified MARS Schema file and generates a table of MARS node classifications')
parser.add_argument("-s", "--schema", dest="schema_file", help="path to MARS schema file", required=True)
parser.add_argument("-F", "--format", dest="output_format", help="format of result", default='html')
args = parser.parse_args()
# main data structure
data = {}
# will contain:
# data[<section>]['elements'] — entries keyed by class/subclass name, each
# carrying the *Class/*SubClass fields and a '_doc' description list
# (see the print*Table* helpers above); filled in by extract_from_xml.
schema_version = extract_from_xml(args, data)
if args.output_format == 'html':
    printHTMLHeader()
    printHTMLTitle(schema_version)
    printHTMLTable3Col(data['Application']['elements'],
                       "Application Node Classifications",
                       "ApplicationClass",
                       "ApplicationSubClass" )
    printHTMLTable2Col(data['Resource']['elements'],
                       "Resource Node Classifications",
                       "ResourceClass" )
    printHTMLTable3Col(data['Software']['elements'],
                       "Software Node Classifications",
                       "SoftwareClass",
                       "SoftwareSubClass" )
    printHTMLTable2Col(data['Machine']['elements'],
                       "Machine Node Classifications",
                       "MachineClass" )
    printHTMLFooter()
elif args.output_format == 'asciidoc':
    printAsciiDocTitle(schema_version)
    printAsciiDocHeader(date)
    printAsciiDocTable3Col(data['Application']['elements'],
                           "Application Node Classifications",
                           "ApplicationClass",
                           "ApplicationSubClass" )
    printAsciiDocTable2Col(data['Resource']['elements'],
                           "Resource Node Classifications",
                           "ResourceClass" )
    printAsciiDocTable3Col(data['Software']['elements'],
                           "Software Node Classifications",
                           "SoftwareClass",
                           "SoftwareSubClass" )
    printAsciiDocTable2Col(data['Machine']['elements'],
                           "Machine Node Classifications",
                           "MachineClass" )
    printAsciiDocFooter()
else:
    # Unknown -F value: complain and exit non-zero.
    print "Unsupported output format: %s" % args.output_format
    exit(1)
# DEBUGGING OUTPUT
#pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(data['Application']['types'])
#pp.pprint(data['Software']['elements'])
# DEBUGGING OUTPUT
#pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(data['Application']['types'])
#pp.pprint(data['Software']['elements'])
|
{
"content_hash": "549cedfe4e50bbc0f237da8c95963663",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 214,
"avg_line_length": 39.905817174515235,
"alnum_prop": 0.5396362626683326,
"repo_name": "arago/MARS-Schema",
"id": "bbbf7fe1ce31d7c185021c4fd36014c94d51454b",
"size": "14504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/extract_classes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "2171"
},
{
"name": "Python",
"bytes": "14504"
},
{
"name": "XSLT",
"bytes": "25122"
}
],
"symlink_target": ""
}
|
r"""
This paver file is intended to help with the release process as much as
possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built numpy, not an installed one).
Building a fancy dmg from scratch
=================================
Clone the numpy-macosx-installer git repo from on github into the source tree
(numpy-macosx-installer should be in the same directory as setup.py). Then, do
as follows::
git clone git://github.com/cournape/macosx-numpy-installer
# remove build dir, and everything generated by previous paver calls
# (included generated installers). Use with care !
paver nuke
paver bootstrap && source bootstrap/bin/activate
# Installing numpy is necessary to build the correct documentation (because
# of autodoc)
python setup.py install
paver dmg
Building a simple (no-superpack) windows installer from wine
============================================================
It assumes that blas/lapack are in c:\local\lib inside drive_c.
paver bdist_wininst_simple
You will have to configure your wine python locations (WINE_PYS).
The superpack requires all the atlas libraries for every arch to be installed
(see SITECFG), and can then be built as follows::
paver bdist_superpack
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release
    paver write_log
This automatically puts the checksums into the README, and writes the
Changelog, which can be uploaded to sourceforge.
TODO
====
- the script is messy, lots of global variables
- make it more easily customizable (through command line args)
- missing targets: install & test, sdist test, debian packaging
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
from __future__ import division, print_function
# What need to be installed to build everything on mac os x:
# - wine: python 2.6 and 2.5 + makensis + cpuid plugin + mingw, all in the PATH
# - paver + virtualenv
# - full texlive
import os
import sys
import shutil
import subprocess
import re
import hashlib
import paver
from paver.easy import \
options, Bunch, task, call_task, sh, needs, cmdopts, dry
sys.path.insert(0, os.path.dirname(__file__))
# Derive FULLVERSION (e.g. "1.15.0" or "1.15.0.dev0+abc1234") by importing
# the project's setup.py; sys.path was prepended with this directory above.
try:
    setup_py = __import__("setup")
    FULLVERSION = setup_py.VERSION
    # This is duplicated from setup.py
    if os.path.exists('.git'):
        GIT_REVISION = setup_py.git_version()
    elif os.path.exists('numpy/version.py'):
        # must be a source distribution, use existing version file
        from numpy.version import git_revision as GIT_REVISION
    else:
        GIT_REVISION = "Unknown"
    if not setup_py.ISRELEASED:
        # Development builds get a ".dev0+<short sha>" suffix.
        FULLVERSION += '.dev0+' + GIT_REVISION[:7]
finally:
    # Always restore sys.path, even if the import above failed.
    sys.path.pop(0)
#-----------------------------------
# Things to be changed for a release
#-----------------------------------

# Source of the release notes
RELEASE_NOTES = 'doc/release/1.15.0-notes.rst'

# Start/end of the log (from git)
LOG_START = 'maintenance/1.14.x'
LOG_END = 'master'

#-------------------------------------------------------
# Hardcoded build/install dirs, virtualenv options, etc.
#-------------------------------------------------------
DEFAULT_PYTHON = "2.7"

# Where to put the final installers, as put on sourceforge
SUPERPACK_BUILD = 'build-superpack'
SUPERPACK_BINDIR = os.path.join(SUPERPACK_BUILD, 'binaries')

# Paver global options: one Bunch per task/section, read by the tasks below.
options(bootstrap=Bunch(bootstrap_dir="bootstrap"),
        virtualenv=Bunch(packages_to_install=["sphinx==1.1.3", "numpydoc"],
                         no_site_packages=False),
        sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
        superpack=Bunch(builddir="build-superpack"),
        installers=Bunch(releasedir="release",
                         installersdir=os.path.join("release", "installers")),
        doc=Bunch(doc_root="doc",
                  sdir=os.path.join("doc", "source"),
                  bdir=os.path.join("doc", "build"),
                  bdir_latex=os.path.join("doc", "build", "latex"),
                  destdir_pdf=os.path.join("build_doc", "pdf")
                  ),
        html=Bunch(builddir=os.path.join("build", "html")),
        dmg=Bunch(python_version=DEFAULT_PYTHON),
        bdist_wininst_simple=Bunch(python_version=DEFAULT_PYTHON),
        )

# python.org framework Python interpreters used for bdist_mpkg on OS X.
MPKG_PYTHON = {
    "2.6": ["/Library/Frameworks/Python.framework/Versions/2.6/bin/python"],
    "2.7": ["/Library/Frameworks/Python.framework/Versions/2.7/bin/python"],
    "3.2": ["/Library/Frameworks/Python.framework/Versions/3.2/bin/python3"],
    "3.3": ["/Library/Frameworks/Python.framework/Versions/3.3/bin/python3"],
    "3.4": ["/Library/Frameworks/Python.framework/Versions/3.4/bin/python3"],
}

# Per-arch ATLAS/BLAS locations (inside the Wine drive) for wininst builds.
SSE3_CFG = {'ATLAS': r'C:\local\lib\atlas\sse3'}
SSE2_CFG = {'ATLAS': r'C:\local\lib\atlas\sse2'}
NOSSE_CFG = {'BLAS': r'C:\local\lib\atlas\nosse', 'LAPACK': r'C:\local\lib\atlas\nosse'}
SITECFG = {"sse2" : SSE2_CFG, "sse3" : SSE3_CFG, "nosse" : NOSSE_CFG}

# Platform-specific command prefixes: on darwin and generic posix the
# Windows interpreters and makensis are run under Wine; on win32 natively.
if sys.platform =="darwin":
    WINDOWS_PYTHON = {
        "3.4": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python34/python.exe"],
        "2.7": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"],
    }
    WINDOWS_ENV = os.environ
    WINDOWS_ENV["DYLD_FALLBACK_LIBRARY_PATH"] = "/usr/X11/lib:/usr/lib"
    MAKENSIS = ["wine", "makensis"]
elif sys.platform == "win32":
    WINDOWS_PYTHON = {
        "3.4": [r"C:\Python34\python.exe"],
        "2.7": [r"C:\Python27\python.exe"],
    }
    # XXX: find out which env variable is necessary to avoid the pb with python
    # 2.6 and random module when importing tempfile
    WINDOWS_ENV = os.environ
    MAKENSIS = ["makensis"]
else:
    WINDOWS_PYTHON = {
        "3.4": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python34/python.exe"],
        "2.7": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"],
    }
    WINDOWS_ENV = os.environ
    MAKENSIS = ["wine", "makensis"]
#-------------------
# Windows installers
#-------------------
def superpack_name(pyver, numver):
    """Return the filename of the superpack installer."""
    return 'numpy-{0}-win32-superpack-python{1}.exe'.format(numver, pyver)
def internal_wininst_name(arch):
    """Return the name of the wininst as it will be inside the superpack
    (i.e. with the arch encoded)."""
    return "numpy-{0}-{1}{2}".format(FULLVERSION, arch, '.exe')
def wininst_name(pyver):
    """Return the name of the installer built by wininst command."""
    return "numpy-{0}.win32-py{1}{2}".format(FULLVERSION, pyver, '.exe')
def prepare_nsis_script(pyver, numver):
    """Instantiate the NSIS superinstaller script from its template.

    Reads tools/win32build/nsis_scripts/numpy-superinstaller.nsi.in,
    substitutes the installer name and the per-arch wininst binary names,
    and writes the result into SUPERPACK_BUILD.

    Fix: the original left both file handles open (and the write
    unflushed), which could leave a truncated script behind; both files
    are now managed with ``with``.
    """
    if not os.path.exists(SUPERPACK_BUILD):
        os.makedirs(SUPERPACK_BUILD)
    tpl = os.path.join('tools/win32build/nsis_scripts', 'numpy-superinstaller.nsi.in')
    installer_name = superpack_name(pyver, numver)
    with open(tpl, 'r') as source:
        cnt = source.read()
    cnt = cnt.replace('@NUMPY_INSTALLER_NAME@', installer_name)
    for arch in ['nosse', 'sse2', 'sse3']:
        cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
                          internal_wininst_name(arch))
    with open(os.path.join(SUPERPACK_BUILD, 'numpy-superinstaller.nsi'), 'w') as target:
        target.write(cnt)
def bdist_wininst_arch(pyver, arch):
    """Arch specific wininst build."""
    # Start from a clean build dir so stale objects from another arch's
    # ATLAS config cannot leak into this build.
    if os.path.exists("build"):
        shutil.rmtree("build")
    _bdist_wininst(pyver, SITECFG[arch])
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_superpack(options):
    """Build all arch specific wininst installers."""
    pyver = options.python_version
    def copy_bdist(arch):
        # Copy the wininst in dist into the release directory
        source = os.path.join('dist', wininst_name(pyver))
        target = os.path.join(SUPERPACK_BINDIR, internal_wininst_name(arch))
        if os.path.exists(target):
            os.remove(target)
        if not os.path.exists(os.path.dirname(target)):
            os.makedirs(os.path.dirname(target))
        try:
            os.rename(source, target)
        except OSError:
            # When git is installed on OS X but not under Wine, the name of the
            # .exe has "-Unknown" in it instead of the correct git revision.
            # Try to fix this here:
            revidx = source.index(".dev-") + 5
            gitrev = source[revidx:revidx+7]
            os.rename(source.replace(gitrev, "Unknown"), target)
    # Build each arch variant and stage it under SUPERPACK_BINDIR.
    bdist_wininst_arch(pyver, 'nosse')
    copy_bdist("nosse")
    bdist_wininst_arch(pyver, 'sse2')
    copy_bdist("sse2")
    bdist_wininst_arch(pyver, 'sse3')
    copy_bdist("sse3")
    idirs = options.installers.installersdir
    pyver = options.python_version
    # Generate the NSIS script and run makensis (under Wine off-Windows).
    prepare_nsis_script(pyver, FULLVERSION)
    subprocess.check_call(MAKENSIS + ['numpy-superinstaller.nsi'],
                          cwd=SUPERPACK_BUILD)
    # Copy the superpack into installers dir
    if not os.path.exists(idirs):
        os.makedirs(idirs)
    source = os.path.join(SUPERPACK_BUILD, superpack_name(pyver, FULLVERSION))
    target = os.path.join(idirs, superpack_name(pyver, FULLVERSION))
    shutil.copy(source, target)
# Per-arch convenience tasks: each just dispatches to bdist_wininst_arch
# with the matching SITECFG key.
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_nosse(options):
    """Build the nosse wininst installer."""
    bdist_wininst_arch(options.python_version, 'nosse')

@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_sse2(options):
    """Build the sse2 wininst installer."""
    bdist_wininst_arch(options.python_version, 'sse2')

@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_sse3(options):
    """Build the sse3 wininst installer."""
    bdist_wininst_arch(options.python_version, 'sse3')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_simple():
    """Simple wininst-based installer."""
    # NOTE(review): unlike the sibling tasks this one declares no `options`
    # parameter and reads the paver global `options` instead — presumably
    # intentional (paver supports both forms); confirm before changing.
    pyver = options.bdist_wininst_simple.python_version
    _bdist_wininst(pyver)
def _bdist_wininst(pyver, cfg_env=None):
    """Run ``setup.py build -c mingw32 bdist_wininst`` with the Windows
    Python selected by *pyver*.

    cfg_env: optional dict of build-config env vars (e.g. ATLAS paths).
    NOTE(review): the current environment is merged *over* cfg_env, so an
    inherited variable wins over the arch config, and the caller's dict is
    mutated in place — confirm both are intentional before changing.
    """
    cmd = WINDOWS_PYTHON[pyver] + ['setup.py', 'build', '-c', 'mingw32', 'bdist_wininst']
    if cfg_env:
        for k, v in WINDOWS_ENV.items():
            cfg_env[k] = v
    else:
        cfg_env = WINDOWS_ENV
    subprocess.check_call(cmd, env=cfg_env)
#----------------
# Bootstrap stuff
#----------------
@task
def bootstrap(options):
    """create virtualenv in ./bootstrap"""
    # virtualenv is only needed here, so probe for it lazily.
    try:
        import virtualenv
    except ImportError as e:
        raise RuntimeError("virtualenv is needed for bootstrap")
    bdir = options.bootstrap_dir
    if not os.path.exists(bdir):
        os.makedirs(bdir)
    # NOTE(review): "boostrap.py" looks like a typo for "bootstrap.py", but
    # the same name is used for both generation and execution below, so it
    # is self-consistent; renaming would change the generated file name.
    bscript = "boostrap.py"
    options.virtualenv.script_name = os.path.join(options.bootstrap_dir,
                                                  bscript)
    options.virtualenv.no_site_packages = False
    options.bootstrap.no_site_packages = False
    # Generate the bootstrap script, then run it to create the virtualenv.
    call_task('paver.virtual.bootstrap')
    sh('cd %s; %s %s' % (bdir, sys.executable, bscript))
@task
def clean():
    """Remove build, dist, egg-info garbage."""
    doomed = ['build', 'dist', 'numpy.egg-info',
              os.path.join('doc', options.sphinx.builddir)]
    for path in doomed:
        if os.path.exists(path):
            shutil.rmtree(path)
@task
def clean_bootstrap():
    """Remove the virtualenv bootstrap directory, if present."""
    target = os.path.join(options.bootstrap.bootstrap_dir)
    if os.path.exists(target):
        shutil.rmtree(target)
@task
@needs('clean', 'clean_bootstrap')
def nuke(options):
    """Remove everything: build dir, installers, bootstrap dirs, etc..."""
    # clean + clean_bootstrap ran first (via @needs); this removes what
    # they leave behind: the superpack build dir and the release dir.
    for d in [options.superpack.builddir, options.installers.releasedir]:
        if os.path.exists(d):
            shutil.rmtree(d)
#---------------------
# Documentation tasks
#---------------------
@task
def html(options):
    """Build numpy documentation and put it into build/docs"""
    # Don't use paver html target because of numpy bootstrapping problems
    bdir = os.path.join("doc", options.sphinx.builddir, "html")
    if os.path.exists(bdir):
        shutil.rmtree(bdir)
    # Delegate the actual build to the doc Makefile.
    subprocess.check_call(["make", "html"], cwd="doc")
    # Replace any previous html output wholesale with the fresh build.
    html_destdir = options.html.builddir
    if os.path.exists(html_destdir):
        shutil.rmtree(html_destdir)
    shutil.copytree(bdir, html_destdir)
@task
def latex():
    """Build numpy documentation in latex format."""
    # Delegates to the doc Makefile's latex target.
    subprocess.check_call(["make", "latex"], cwd="doc")
@task
@needs('latex')
def pdf():
    """Build the user guide and reference manual PDFs from the latex
    output and stage them under doc.destdir_pdf."""
    sdir = options.doc.sdir
    bdir = options.doc.bdir
    bdir_latex = options.doc.bdir_latex
    destdir_pdf = options.doc.destdir_pdf
    def build_pdf():
        # Runs pdflatex via the Sphinx-generated Makefile.
        subprocess.check_call(["make", "all-pdf"], cwd=str(bdir_latex))
    # dry() logs the action and skips it under paver's dry-run mode.
    dry("Build pdf doc", build_pdf)
    if os.path.exists(destdir_pdf):
        shutil.rmtree(destdir_pdf)
    os.makedirs(destdir_pdf)
    user = os.path.join(bdir_latex, "numpy-user.pdf")
    shutil.copy(user, os.path.join(destdir_pdf, "userguide.pdf"))
    ref = os.path.join(bdir_latex, "numpy-ref.pdf")
    shutil.copy(ref, os.path.join(destdir_pdf, "reference.pdf"))
#------------------
# Mac OS X targets
#------------------
def dmg_name(fullversion, pyver, osxver=None):
    """Return name for dmg installer.

    Notes
    -----
    Python 2.7 has two binaries, one for 10.3 (ppc, i386) and one for 10.6
    (i386, x86_64). All other Python versions at python.org at the moment
    have binaries for 10.3 only. The "macosx%s" part of the dmg name should
    correspond to the python.org naming scheme.
    """
    # For the py2.7/osx10.6 build the release script sets
    # MACOSX_DEPLOYMENT_TARGET; fall back to 10.3 otherwise.
    effective_osx = osxver or os.environ.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
    return "numpy-%s-py%s-python.org-macosx%s.dmg" % (fullversion, pyver,
                                                      effective_osx)
def macosx_version():
    """Return the OS X version as a (major, minor, patch) string tuple.

    Raises ValueError off darwin.  Returns None if ``sw_vers`` output does
    not contain a parsable ProductVersion line.

    Fix: ``universal_newlines=True`` makes the pipe yield text on
    Python 3; previously the str regex was matched against bytes lines,
    raising TypeError.
    """
    if not sys.platform == 'darwin':
        raise ValueError("Not darwin ??")
    st = subprocess.Popen(["sw_vers"], stdout=subprocess.PIPE,
                          universal_newlines=True)
    out = st.stdout.readlines()
    ver = re.compile(r"ProductVersion:\s+([0-9]+)\.([0-9]+)\.([0-9]+)")
    for i in out:
        m = ver.match(i)
        if m:
            return m.groups()
def mpkg_name(pyver):
    """Return the bdist_mpkg package name for the running OS X version."""
    major, minor = macosx_version()[:2]
    # Note that bdist_mpkg breaks this if building a dev version with a git
    # commit string attached. make_fullplatcomponents() in
    # bdist_mpkg/cmd_bdist_mpkg.py replaces '-' with '_', comment this out if
    # needed.
    return "numpy-%s-py%s-macosx%s.%s.mpkg" % (FULLVERSION, pyver, major, minor)
def _build_mpkg(pyver):
    """Run ``setup.py bdist_mpkg`` under the python.org framework Python
    selected by *pyver*, with arch-appropriate LDFLAGS."""
    # account for differences between Python 2.7.1 versions from python.org
    if os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) == "10.6":
        ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch x86_64 -Wl,-search_paths_first"
    else:
        ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch ppc -Wl,-search_paths_first"
    # Also search the local build dir for already-built libraries.
    ldflags += " -L%s" % os.path.join(os.path.dirname(__file__), "build")
    sh("LDFLAGS='%s' %s setup.py bdist_mpkg" % (ldflags, " ".join(MPKG_PYTHON[pyver])))
@task
def simple_dmg():
    """Build a simple (mpkg-only) dmg for the hard-coded Python 2.6."""
    pyver = "2.6"
    src_dir = "dmg-source"
    # Clean the source dir
    if os.path.exists(src_dir):
        shutil.rmtree(src_dir)
    os.makedirs(src_dir)
    # Build the mpkg
    clean()
    _build_mpkg(pyver)
    # Build the dmg
    shutil.copytree(os.path.join("dist", mpkg_name(pyver)),
                    os.path.join(src_dir, mpkg_name(pyver)))
    _create_dmg(pyver, src_dir, "NumPy Universal %s" % FULLVERSION)
@task
def bdist_mpkg(options):
    """Clean, then build the OS X mpkg installer."""
    call_task("clean")
    # Prefer the task-local option; fall back to the global one.
    try:
        pyver = options.bdist_mpkg.python_version
    except AttributeError:
        pyver = options.python_version
    _build_mpkg(pyver)
def _create_dmg(pyver, src_dir, volname=None):
    """Create a dmg image from *src_dir* via ``hdiutil``, optionally with
    the given volume name; overwrites any existing image."""
    # Build the dmg
    image_name = dmg_name(FULLVERSION, pyver)
    if os.path.exists(image_name):
        os.remove(image_name)
    cmd = ["hdiutil", "create", image_name, "-srcdir", src_dir]
    if volname:
        cmd.extend(["-volname", "'%s'" % volname])
    sh(" ".join(cmd))
@task
@cmdopts([("python-version=", "p", "python version")])
def dmg(options):
    """Build the full OS X dmg installer (mpkg + PDF docs)."""
    try:
        pyver = options.dmg.python_version
    except Exception:
        pyver = DEFAULT_PYTHON
    idirs = options.installers.installersdir

    # Check if docs exist. If not, say so and quit.
    ref = os.path.join(options.doc.destdir_pdf, "reference.pdf")
    user = os.path.join(options.doc.destdir_pdf, "userguide.pdf")
    if (not os.path.exists(ref)) or (not os.path.exists(user)):
        import warnings
        warnings.warn("Docs need to be built first! Can't find them.", stacklevel=2)

    # Build the mpkg package
    call_task("clean")
    _build_mpkg(pyver)

    macosx_installer_dir = "tools/numpy-macosx-installer"
    dmg = os.path.join(macosx_installer_dir, dmg_name(FULLVERSION, pyver))
    if os.path.exists(dmg):
        os.remove(dmg)

    # Clean the image source
    content = os.path.join(macosx_installer_dir, 'content')
    if os.path.exists(content):
        shutil.rmtree(content)
    os.makedirs(content)

    # Copy mpkg into image source
    mpkg_source = os.path.join("dist", mpkg_name(pyver))
    mpkg_target = os.path.join(content, "numpy-%s-py%s.mpkg" % (FULLVERSION, pyver))
    shutil.copytree(mpkg_source, mpkg_target)

    # Copy docs into image source
    pdf_docs = os.path.join(content, "Documentation")
    if os.path.exists(pdf_docs):
        shutil.rmtree(pdf_docs)
    os.makedirs(pdf_docs)
    shutil.copy(user, os.path.join(pdf_docs, "userguide.pdf"))
    shutil.copy(ref, os.path.join(pdf_docs, "reference.pdf"))

    # Build the dmg via the numpy-macosx-installer helper script.
    cmd = ["./new-create-dmg", "--pkgname", os.path.basename(mpkg_target),
           "--volname", "numpy", os.path.basename(dmg), "./content"]
    st = subprocess.check_call(cmd, cwd=macosx_installer_dir)

    # Stage the finished image into the installers dir.
    source = dmg
    target = os.path.join(idirs, os.path.basename(dmg))
    if not os.path.exists(os.path.dirname(target)):
        os.makedirs(os.path.dirname(target))
    shutil.copy(source, target)
#--------------------------
# Source distribution stuff
#--------------------------
def tarball_name(type='gztar'):
    """Map an archive *type* ('gztar' or 'zip') to the sdist filename."""
    root = 'numpy-%s' % FULLVERSION
    suffixes = {'gztar': '.tar.gz', 'zip': '.zip'}
    if type not in suffixes:
        raise ValueError("Unknown type %s" % type)
    return root + suffixes[type]
@task
def sdist(options):
    """Build source tarball + zip and stage them in the installers dir."""
    # First clean the repo and update submodules (for up-to-date doc html theme
    # and Sphinx extensions)
    sh('git clean -xdf')
    sh('git submodule init')
    sh('git submodule update')

    # To be sure to bypass paver when building sdist... paver + numpy.distutils
    # do not play well together.
    # Cython is run over all Cython files in setup.py, so generated C files
    # will be included.
    sh('python setup.py sdist --formats=gztar,zip')

    # Copy the superpack into installers dir
    idirs = options.installers.installersdir
    if not os.path.exists(idirs):
        os.makedirs(idirs)
    for t in ['gztar', 'zip']:
        source = os.path.join('dist', tarball_name(t))
        target = os.path.join(idirs, tarball_name(t))
        shutil.copy(source, target)
def _compute_hash(idirs, algo):
released = paver.path.path(idirs).listdir()
checksums = []
for f in sorted(released):
with open(f, 'r') as _file:
m = algo(_file.read())
checksums.append('%s %s' % (m.hexdigest(), os.path.basename(f)))
return checksums
def compute_md5(idirs):
    """MD5 checksum lines for every release artifact in *idirs*."""
    return _compute_hash(idirs, hashlib.md5)

def compute_sha256(idirs):
    # better checksum so gpg signed README.txt containing the sums can be used
    # to verify the binaries instead of signing all binaries
    return _compute_hash(idirs, hashlib.sha256)
def write_release_task(options, filename='README'):
    """Assemble the release README: notes + MD5/SHA256 checksums, then
    gpg-clearsign it into *filename* and keep a markdown copy.

    Reads RELEASE_NOTES and the artifacts in installers.installersdir;
    honors options.gpg_key if set.
    """
    idirs = options.installers.installersdir
    source = paver.path.path(RELEASE_NOTES)
    target = paver.path.path(filename)
    if target.exists():
        target.remove()

    # Work on a '.md' copy of the notes; the signed output goes to `target`.
    tmp_target = paver.path.path(filename + '.md')
    source.copy(tmp_target)

    with open(str(tmp_target), 'a') as ftarget:
        ftarget.writelines("""
Checksums
=========

MD5
---
""")
        ftarget.writelines(['    %s\n' % c for c in compute_md5(idirs)])
        ftarget.writelines("""
SHA256
------
""")
        ftarget.writelines(['    %s\n' % c for c in compute_sha256(idirs)])

    # Sign release
    cmd = ['gpg', '--clearsign', '--armor']
    if hasattr(options, 'gpg_key'):
        cmd += ['--default-key', options.gpg_key]
    cmd += ['--output', str(target), str(tmp_target)]
    subprocess.check_call(cmd)
    print("signed %s" % (target,))

    # Change PR links for github posting, don't sign this
    # as the signing isn't markdown compatible.
    with open(str(tmp_target), 'r') as ftarget:
        mdtext = ftarget.read()
        mdtext = re.sub(r'^\* `(\#[0-9]*).*?`__', r'* \1', mdtext, flags=re.M)
    with open(str(tmp_target), 'w') as ftarget:
        ftarget.write(mdtext)
def write_log_task(options, filename='Changelog'):
    """Write the git changelog (LOG_START..LOG_END, no merges) to *filename*.

    Fixes: ``subprocess`` pipes yield bytes on Python 3, and the old
    text-mode ``writelines(out)`` iterated the buffer and failed; the
    output is now written back verbatim in binary mode, and the file is
    closed via ``with`` even on error.
    """
    st = subprocess.Popen(
        ['git', 'log', '--no-merges', '--use-mailmap',
         '%s..%s' % (LOG_START, LOG_END)],
        stdout=subprocess.PIPE)
    out = st.communicate()[0]
    with open(filename, 'wb') as changelog:
        changelog.write(out)
# Thin task wrappers around write_release_task / write_log_task.
@task
def write_release(options):
    """Write the signed release README into the current directory."""
    write_release_task(options)

@task
def write_log(options):
    """Write the Changelog into the current directory."""
    write_log_task(options)

@task
def write_release_and_log(options):
    """Write both README and Changelog into the release directory."""
    rdir = options.installers.releasedir
    write_release_task(options, os.path.join(rdir, 'README'))
    write_log_task(options, os.path.join(rdir, 'Changelog'))
|
{
"content_hash": "3fea761b8c9f01f929ca6c2b1fe729dc",
"timestamp": "",
"source": "github",
"line_count": 648,
"max_line_length": 101,
"avg_line_length": 33.59567901234568,
"alnum_prop": 0.6291226458429031,
"repo_name": "tynn/numpy",
"id": "0065c142b411ead82c32ed55d6f5f0f80fb3455b",
"size": "21770",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pavement.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8111444"
},
{
"name": "C++",
"bytes": "165060"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "6639339"
}
],
"symlink_target": ""
}
|
"""This module is deprecated. Please use `airflow.providers.google.cloud.hooks.pubsub`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.google.cloud.hooks.pubsub import PubSubException, PubSubHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.hooks.pubsub`.",
DeprecationWarning, stacklevel=2
)
|
{
"content_hash": "3d065c014f81b9bf7bde1d9a211de512",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 91,
"avg_line_length": 34.54545454545455,
"alnum_prop": 0.7763157894736842,
"repo_name": "wooga/airflow",
"id": "e04f3cd6712e1c4aff807806cfc9eea52caf05c4",
"size": "1167",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/gcp_pubsub_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4084"
},
{
"name": "HTML",
"bytes": "128446"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5879650"
},
{
"name": "Shell",
"bytes": "41820"
}
],
"symlink_target": ""
}
|
from abc import abstractmethod
from aioworkers.core.base import AbstractNamedEntity
class AbstractBaseStorage(AbstractNamedEntity):
    """Common base for storages: maps a logical key to a raw backend key."""

    @abstractmethod
    async def raw_key(self, key):
        # Translate a logical key into the backend-specific key form.
        raise NotImplementedError()
class AbstractStorageReadOnly(AbstractBaseStorage):
    """Storage interface with read access only."""

    @abstractmethod
    async def get(self, key):
        # Return the value stored under *key*.
        raise NotImplementedError()
class AbstractFindStorage(AbstractBaseStorage):
    """Storage interface supporting search queries."""

    @abstractmethod
    async def find(self, *args, **kwargs):
        # Query semantics are backend-specific.
        raise NotImplementedError()
class AbstractStorageWriteOnly(AbstractBaseStorage):
    """Storage interface with write access only."""

    @abstractmethod
    async def set(self, key, value):
        # Store *value* under *key*.
        raise NotImplementedError()
class AbstractStorage(AbstractStorageReadOnly, AbstractStorageWriteOnly):
    """Read/write storage with cross-storage copy/move helpers."""

    async def copy(self, key_source, storage_dest, key_dest):
        """Copy the value under *key_source* into *storage_dest*.

        Returns True when non-None data was actually copied.
        Fix: the original returned ``data is None`` — inverted — which
        made move() delete the source exactly when nothing was copied.
        """
        data = await self.get(key_source)
        await storage_dest.set(key_dest, data)
        return data is not None

    async def move(self, key_source, storage_dest, key_dest):
        """Copy, then delete the source (set to None) on success."""
        result = await self.copy(key_source, storage_dest, key_dest)
        if result:
            await self.set(key_source, None)
        return result
class AbstractListedStorage(AbstractStorage):
    """Storage whose keys can be enumerated and counted."""

    @abstractmethod
    async def list(self):
        # Return the stored keys.
        raise NotImplementedError()

    @abstractmethod
    async def length(self):
        # Return the number of stored keys.
        raise NotImplementedError()
class AbstractExpiryStorage(AbstractStorage):
    """Storage supporting per-key expiration."""

    @abstractmethod
    async def expiry(self, key, expiry):
        # Set the expiration for *key*; units are backend-specific.
        raise NotImplementedError()
class FieldStorageMixin(AbstractStorage):
    """Mixin adding per-field access on top of a mapping-valued storage."""

    model = dict  # container factory for multi-field reads

    async def get(self, key, *, field=None, fields=None):
        """Return the whole stored mapping, one field, or a subset of fields."""
        stored = await super().get(key)
        if field:
            return stored[field]
        if not fields:
            return stored
        subset = self.model()
        for name in fields:
            subset[name] = stored[name]
        return subset

    async def set(self, key, value, *, field=None, fields=None):
        """Store the whole value, or merge one field / several fields into
        the existing stored mapping."""
        if not (field or fields):
            await super().set(key, value)
            return
        current = await super().get(key)
        if field:
            current[field] = value
        else:
            for name in fields:
                current[name] = value[name]
        await super().set(key, current)
|
{
"content_hash": "06d01426f99479f07ce94a35eb72aef1",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 73,
"avg_line_length": 26.626506024096386,
"alnum_prop": 0.6330316742081448,
"repo_name": "aioworkers/aioworkers",
"id": "69a164060dea57c1688517cb44384ddb6326f452",
"size": "2210",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aioworkers/storage/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "303"
},
{
"name": "Python",
"bytes": "220824"
}
],
"symlink_target": ""
}
|
from flask import Flask
from .config import configs
from .extensions import db, api, login_manager, mail, bootstrap, debug_toolbar
from . import auth, main, rest
def configure_appliaction(app, config_name):
    """Apply the named configuration to *app*, falling back to 'default'
    for unknown names.

    NOTE(review): the function name has a typo ("appliaction") but is part
    of the public interface; renaming would break external callers.
    Fix: membership test now uses the idiomatic ``not in`` on the dict
    itself (PEP 8 E713; ``.keys()`` was redundant) — behavior unchanged.
    """
    if config_name not in configs:
        print(' * Invalid config name "%s", using "default"' % config_name)
        config_name = 'default'
    config_object = configs[config_name]
    app.config.from_object(config_object)
    config_object.init_app(app)
def register_blueprints(app):
    """Register Flask blueprints."""
    # The auth and main packages each expose a blueprint in their views module.
    app.register_blueprint(auth.views.auth)
    app.register_blueprint(main.views.main)
def register_extensions(app):
    """Register extensions to application"""
    # Bind each shared extension instance (created in .extensions) to this app.
    db.init_app(app)
    api.init_app(app)
    login_manager.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    debug_toolbar.init_app(app)
def create_app(config_name='default'):
    """An application factory function: build a Flask app with the named
    configuration, blueprints, and extensions registered."""
    app = Flask(__name__)
    configure_appliaction(app, config_name)
    register_blueprints(app)
    register_extensions(app)
    return app
|
{
"content_hash": "b153c3963df6e226f4348ec6973d9e31",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 28.94871794871795,
"alnum_prop": 0.691762621789194,
"repo_name": "hoxm/myFamily",
"id": "b408681327f46ba5f398fefe7d03e6b1e2187430",
"size": "1154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myfamily/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "130"
},
{
"name": "HTML",
"bytes": "9040"
},
{
"name": "JavaScript",
"bytes": "704407"
},
{
"name": "Python",
"bytes": "30386"
}
],
"symlink_target": ""
}
|
''' pydevd - a debugging daemon
This is the daemon you launch for python remote debugging.
Protocol:
each command has a format:
id\tsequence-num\ttext
id: protocol command number
sequence-num: each request has a sequence number. Sequence numbers
originating at the debugger are odd, sequence numbers originating
at the daemon are even. Every response uses the same sequence number
as the request.
payload: it is protocol dependent. When response is a complex structure, it
is returned as XML. Each attribute value is urlencoded, and then the whole
payload is urlencoded again to prevent stray characters corrupting protocol/xml encodings
Commands:
NUMBER NAME FROM* ARGUMENTS RESPONSE NOTE
100 series: program execution
101 RUN JAVA - -
102 LIST_THREADS JAVA RETURN with XML listing of all threads
103 THREAD_CREATE PYDB - XML with thread information
104 THREAD_KILL JAVA id (or * to exit) kills the thread
                                 PYDB      id                              notifies JAVA that thread was killed
105 THREAD_SUSPEND JAVA XML of the stack, suspends the thread
reason for suspension
PYDB id notifies JAVA that thread was suspended
106 CMD_THREAD_RUN JAVA id resume the thread
PYDB id \t reason notifies JAVA that thread was resumed
107 STEP_INTO JAVA thread_id
108 STEP_OVER JAVA thread_id
109 STEP_RETURN JAVA thread_id
110 GET_VARIABLE JAVA thread_id \t frame_id \t GET_VARIABLE with XML of var content
FRAME|GLOBAL \t attributes*
111 SET_BREAK JAVA file/line of the breakpoint
112 REMOVE_BREAK JAVA file/line of the return
113 CMD_EVALUATE_EXPRESSION JAVA expression result of evaluating the expression
114 CMD_GET_FRAME JAVA request for frame contents
115 CMD_EXEC_EXPRESSION JAVA
116 CMD_WRITE_TO_CONSOLE PYDB
117 CMD_CHANGE_VARIABLE
118 CMD_RUN_TO_LINE
119 CMD_RELOAD_CODE
120 CMD_GET_COMPLETIONS JAVA
200 CMD_REDIRECT_OUTPUT JAVA streams to redirect as string -
'STDOUT' (redirect only STDOUT)
'STDERR' (redirect only STDERR)
'STDOUT STDERR' (redirect both streams)
500 series diagnostics/ok
501 VERSION either Version string (1.0) Currently just used at startup
502 RETURN either Depends on caller -
900 series: errors
901 ERROR either - This is reserved for unexpected errors.
* JAVA - remote debugger, the java end
* PYDB - pydevd, the python end
'''
import itertools
import os
from _pydev_bundle.pydev_imports import _queue
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import thread
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import socket
from socket import socket, AF_INET, SOCK_STREAM, SHUT_RD, SHUT_WR, SOL_SOCKET, SO_REUSEADDR, SHUT_RDWR, timeout
from _pydevd_bundle.pydevd_constants import DebugInfoHolder, get_thread_id, IS_JYTHON, IS_PY2, IS_PY3K, \
IS_PY36_OR_GREATER, STATE_RUN, dict_keys, ASYNC_EVAL_TIMEOUT_SEC, IS_IRONPYTHON, GlobalDebuggerHolder, \
get_global_debugger, GetGlobalDebugger, set_global_debugger # Keep for backward compatibility @UnusedImport
try:
from urllib import quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
if IS_IRONPYTHON:
# redefine `unquote` for IronPython, since we use it only for logging messages, but it leads to SOF with IronPython
def unquote(s):
return s
import pydevconsole
from _pydevd_bundle import pydevd_vars
import pydevd_tracing
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle import pydevd_vm_type
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER
import pydevd_file_utils
import sys
import traceback
from _pydevd_bundle.pydevd_utils import quote_smart as quote, compare_object_attrs_key, to_string
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_bundle import pydev_log
from _pydev_bundle import _pydev_completer
from pydevd_tracing import get_exception_traceback_str
from _pydevd_bundle import pydevd_console
from _pydev_bundle.pydev_monkey import disable_trace_thread_modules, enable_trace_thread_modules
try:
import cStringIO as StringIO #may not always be available @UnusedImport
except:
try:
import StringIO #@Reimport
except:
import io as StringIO
from _pydevd_bundle.pydevd_dont_trace_files import DONT_TRACE, PYDEV_FILE
get_file_type = DONT_TRACE.get
# Wire-protocol command ids (the "NUMBER" column in the module docstring).
# 100 series: program execution / requests between client and daemon.
CMD_RUN = 101
CMD_LIST_THREADS = 102
CMD_THREAD_CREATE = 103
CMD_THREAD_KILL = 104
CMD_THREAD_SUSPEND = 105
CMD_THREAD_RUN = 106
CMD_STEP_INTO = 107
CMD_STEP_OVER = 108
CMD_STEP_RETURN = 109
CMD_GET_VARIABLE = 110
CMD_SET_BREAK = 111
CMD_REMOVE_BREAK = 112
CMD_EVALUATE_EXPRESSION = 113
CMD_GET_FRAME = 114
CMD_EXEC_EXPRESSION = 115
CMD_WRITE_TO_CONSOLE = 116
CMD_CHANGE_VARIABLE = 117
CMD_RUN_TO_LINE = 118
CMD_RELOAD_CODE = 119
CMD_GET_COMPLETIONS = 120
# Note: renumbered (conflicted on merge)
CMD_CONSOLE_EXEC = 121
CMD_ADD_EXCEPTION_BREAK = 122
CMD_REMOVE_EXCEPTION_BREAK = 123
CMD_LOAD_SOURCE = 124
CMD_ADD_DJANGO_EXCEPTION_BREAK = 125
CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126
CMD_SET_NEXT_STATEMENT = 127
CMD_SMART_STEP_INTO = 128
CMD_EXIT = 129
CMD_SIGNATURE_CALL_TRACE = 130
CMD_SET_PY_EXCEPTION = 131
CMD_GET_FILE_CONTENTS = 132
CMD_SET_PROPERTY_TRACE = 133
# Pydev debug console commands
CMD_EVALUATE_CONSOLE_EXPRESSION = 134
CMD_RUN_CUSTOM_OPERATION = 135
CMD_GET_BREAKPOINT_EXCEPTION = 136
CMD_STEP_CAUGHT_EXCEPTION = 137
CMD_SEND_CURR_EXCEPTION_TRACE = 138
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139
CMD_IGNORE_THROWN_EXCEPTION_AT = 140
CMD_ENABLE_DONT_TRACE = 141
CMD_SHOW_CONSOLE = 142
CMD_GET_ARRAY = 143
CMD_STEP_INTO_MY_CODE = 144
CMD_GET_CONCURRENCY_EVENT = 145
CMD_SHOW_RETURN_VALUES = 146
CMD_INPUT_REQUESTED = 147
CMD_GET_DESCRIPTION = 148
CMD_PROCESS_CREATED = 149
CMD_SHOW_CYTHON_WARNING = 150
CMD_LOAD_FULL_VALUE = 151
CMD_GET_THREAD_STACK = 152
# This is mostly for unit-tests to diagnose errors on ci.
CMD_THREAD_DUMP_TO_STDERR = 153
# Sent from the client to signal that we should stop when we start executing user code.
CMD_STOP_ON_START = 154
# When the debugger is stopped in an exception, this command will provide the details of the current exception (in the current thread).
CMD_GET_EXCEPTION_DETAILS = 155
# 200 series: output redirection and miscellaneous requests.
CMD_REDIRECT_OUTPUT = 200
CMD_GET_NEXT_STATEMENT_TARGETS = 201
CMD_SET_PROJECT_ROOTS = 202
# 500 series: diagnostics / ok responses.
CMD_VERSION = 501
CMD_RETURN = 502
CMD_SET_PROTOCOL = 503
# 900 series: errors.
CMD_ERROR = 901
# Maps str(cmd_id) -> the CMD_* constant name, used for human-readable
# logging of sent/received commands. Must be kept in sync with the CMD_*
# constants above (note: '202'/CMD_SET_PROJECT_ROOTS was previously
# missing here, so logs showed '???' for that command).
ID_TO_MEANING = {
    '101': 'CMD_RUN',
    '102': 'CMD_LIST_THREADS',
    '103': 'CMD_THREAD_CREATE',
    '104': 'CMD_THREAD_KILL',
    '105': 'CMD_THREAD_SUSPEND',
    '106': 'CMD_THREAD_RUN',
    '107': 'CMD_STEP_INTO',
    '108': 'CMD_STEP_OVER',
    '109': 'CMD_STEP_RETURN',
    '110': 'CMD_GET_VARIABLE',
    '111': 'CMD_SET_BREAK',
    '112': 'CMD_REMOVE_BREAK',
    '113': 'CMD_EVALUATE_EXPRESSION',
    '114': 'CMD_GET_FRAME',
    '115': 'CMD_EXEC_EXPRESSION',
    '116': 'CMD_WRITE_TO_CONSOLE',
    '117': 'CMD_CHANGE_VARIABLE',
    '118': 'CMD_RUN_TO_LINE',
    '119': 'CMD_RELOAD_CODE',
    '120': 'CMD_GET_COMPLETIONS',
    '121': 'CMD_CONSOLE_EXEC',
    '122': 'CMD_ADD_EXCEPTION_BREAK',
    '123': 'CMD_REMOVE_EXCEPTION_BREAK',
    '124': 'CMD_LOAD_SOURCE',
    '125': 'CMD_ADD_DJANGO_EXCEPTION_BREAK',
    '126': 'CMD_REMOVE_DJANGO_EXCEPTION_BREAK',
    '127': 'CMD_SET_NEXT_STATEMENT',
    '128': 'CMD_SMART_STEP_INTO',
    '129': 'CMD_EXIT',
    '130': 'CMD_SIGNATURE_CALL_TRACE',
    '131': 'CMD_SET_PY_EXCEPTION',
    '132': 'CMD_GET_FILE_CONTENTS',
    '133': 'CMD_SET_PROPERTY_TRACE',
    '134': 'CMD_EVALUATE_CONSOLE_EXPRESSION',
    '135': 'CMD_RUN_CUSTOM_OPERATION',
    '136': 'CMD_GET_BREAKPOINT_EXCEPTION',
    '137': 'CMD_STEP_CAUGHT_EXCEPTION',
    '138': 'CMD_SEND_CURR_EXCEPTION_TRACE',
    '139': 'CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED',
    '140': 'CMD_IGNORE_THROWN_EXCEPTION_AT',
    '141': 'CMD_ENABLE_DONT_TRACE',
    '142': 'CMD_SHOW_CONSOLE',
    '143': 'CMD_GET_ARRAY',
    '144': 'CMD_STEP_INTO_MY_CODE',
    '145': 'CMD_GET_CONCURRENCY_EVENT',
    '146': 'CMD_SHOW_RETURN_VALUES',
    '147': 'CMD_INPUT_REQUESTED',
    '148': 'CMD_GET_DESCRIPTION',
    '149': 'CMD_PROCESS_CREATED',
    '150': 'CMD_SHOW_CYTHON_WARNING',
    '151': 'CMD_LOAD_FULL_VALUE',
    '152': 'CMD_GET_THREAD_STACK',
    '153': 'CMD_THREAD_DUMP_TO_STDERR',
    '154': 'CMD_STOP_ON_START',
    '155': 'CMD_GET_EXCEPTION_DETAILS',
    '200': 'CMD_REDIRECT_OUTPUT',
    '201': 'CMD_GET_NEXT_STATEMENT_TARGETS',
    '202': 'CMD_SET_PROJECT_ROOTS',
    '501': 'CMD_VERSION',
    '502': 'CMD_RETURN',
    '503': 'CMD_SET_PROTOCOL',
    '901': 'CMD_ERROR',
}
MAX_IO_MSG_SIZE = 1000 #if the io is too big, we'll not send all (could make the debugger too non-responsive)
#this number can be changed if there's need to do so
# Placeholder substituted with the real build number by the build process.
VERSION_STRING = "@@BUILD_NUMBER@@"
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
# Cached filesystem encoding; used when converting filenames for the client.
file_system_encoding = getfilesystemencoding()
filesystem_encoding_is_utf8 = file_system_encoding.lower() in ('utf-8', 'utf_8', 'utf8')
#--------------------------------------------------------------------------------------------------- UTILITIES
#=======================================================================================================================
# pydevd_log
#=======================================================================================================================
def pydevd_log(level, *args):
    """Write *args to stderr when `level` is within the configured trace level.

    Levels: 0 = most serious warnings/errors, 1 = warnings/significant
    events, 2 = informational trace.
    """
    if level > DebugInfoHolder.DEBUG_TRACE_LEVEL:
        return
    # Writing may fail if the program's console has already been closed
    # while we are still trying to print something - ignore those errors.
    try:
        sys.stderr.write('%s\n' % (args,))
    except:
        pass
#------------------------------------------------------------------- ACTUAL COMM
#=======================================================================================================================
# PyDBDaemonThread
#=======================================================================================================================
class PyDBDaemonThread(threading.Thread):
    """Base class for the debugger's own helper threads.

    Instances register themselves in `created_pydb_daemon_threads` while
    running and are marked with `is_pydev_daemon_thread` so they can be
    filtered out of user-visible thread listings.
    """

    # Live daemon-thread registry: maps instance -> 1 (used as a set).
    created_pydb_daemon_threads = {}

    def __init__(self):
        threading.Thread.__init__(self)
        self.setDaemon(True)  # must not keep the process alive on exit
        self.killReceived = False
        # When True, _stop_trace() disables tracing on this thread.
        self.pydev_do_not_trace = True
        self.is_pydev_daemon_thread = True

    def run(self):
        """Thread entry point: register, run `_on_run`, always unregister."""
        created_pydb_daemon = self.created_pydb_daemon_threads
        created_pydb_daemon[self] = 1
        try:
            try:
                if IS_JYTHON and not isinstance(threading.currentThread(), threading._MainThread):
                    # we shouldn't update sys.modules for the main thread, cause it leads to the second importing 'threading'
                    # module, and the new instance of main thread is created
                    import org.python.core as PyCore #@UnresolvedImport
                    ss = PyCore.PySystemState()
                    # Note: Py.setSystemState() affects only the current thread.
                    PyCore.Py.setSystemState(ss)
                self._on_run()
            except:
                # sys/traceback may be None during interpreter shutdown.
                if sys is not None and traceback is not None:
                    traceback.print_exc()
        finally:
            del created_pydb_daemon[self]

    def _on_run(self):
        # Subclasses implement the actual thread loop here.
        raise NotImplementedError('Should be reimplemented by: %s' % self.__class__)

    def do_kill_pydev_thread(self):
        #that was not working very well because jython gave some socket errors
        self.killReceived = True

    def _stop_trace(self):
        """Disable tracing for this thread (except on old Jython, see below)."""
        if self.pydev_do_not_trace:
            disable_tracing = True

            if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
                # don't run untraced threads if we're in jython 2.2.1 or lower
                # jython bug: if we start a thread and another thread changes the tracing facility
                # it affects other threads (it's not set only for the thread but globally)
                # Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
                disable_tracing = False

            if disable_tracing:
                pydevd_tracing.SetTrace(None, apply_to_pydevd_thread=True) # no debugging on this thread
#=======================================================================================================================
# ReaderThread
#=======================================================================================================================
class ReaderThread(PyDBDaemonThread):
    """ reader thread reads and dispatches commands in an infinite loop """

    def __init__(self, sock):
        PyDBDaemonThread.__init__(self)
        self.sock = sock
        self.setName("pydevd.Reader")
        from _pydevd_bundle.pydevd_process_net_command import process_net_command
        self.process_net_command = process_net_command
        self.global_debugger_holder = GlobalDebuggerHolder

    def do_kill_pydev_thread(self):
        #We must close the socket so that it doesn't stay halted there.
        self.killReceived = True
        try:
            self.sock.shutdown(SHUT_RD) #shutdown the socket for read
        except:
            #just ignore that
            pass

    def _on_run(self):
        """Read from the socket, split the stream into newline-terminated
        commands ('cmd_id\\tseq\\ttext') and dispatch each one."""
        self._stop_trace()
        read_buffer = ""
        try:
            while not self.killReceived:
                try:
                    r = self.sock.recv(1024)
                except:
                    if not self.killReceived:
                        traceback.print_exc()
                    self.handle_except()
                    return #Finished communication.

                #Note: the java backend is always expected to pass utf-8 encoded strings. We now work with unicode
                #internally and thus, we may need to convert to the actual encoding where needed (i.e.: filenames
                #on python 2 may need to be converted to the filesystem encoding).
                if hasattr(r, 'decode'):
                    r = r.decode('utf-8')

                read_buffer += r
                if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS:
                    sys.stderr.write(u'debugger: received >>%s<<\n' % (read_buffer,))
                    sys.stderr.flush()

                if len(read_buffer) == 0:
                    # recv returned nothing: the peer closed the connection.
                    self.handle_except()
                    break
                # Dispatch every complete (newline-terminated) command in the buffer;
                # a trailing partial command is kept for the next recv.
                while read_buffer.find(u'\n') != -1:
                    command, read_buffer = read_buffer.split(u'\n', 1)

                    args = command.split(u'\t', 2)
                    try:
                        cmd_id = int(args[0])
                        pydev_log.debug('Received command: %s %s\n' % (ID_TO_MEANING.get(str(cmd_id), '???'), command,))
                        self.process_command(cmd_id, int(args[1]), args[2])
                    except:
                        traceback.print_exc()
                        sys.stderr.write("Can't process net command: %s\n" % command)
                        sys.stderr.flush()
        except:
            traceback.print_exc()
            self.handle_except()

    def handle_except(self):
        # Communication broken/closed: finish the debugging session.
        self.global_debugger_holder.global_dbg.finish_debugging_session()

    def process_command(self, cmd_id, seq, text):
        self.process_net_command(self.global_debugger_holder.global_dbg, cmd_id, seq, text)
#----------------------------------------------------------------------------------- SOCKET UTILITIES - WRITER
#=======================================================================================================================
# WriterThread
#=======================================================================================================================
class WriterThread(PyDBDaemonThread):
    """ writer thread writes out the commands in an infinite loop """

    def __init__(self, sock):
        PyDBDaemonThread.__init__(self)
        self.sock = sock
        self.setName("pydevd.Writer")
        # Queue of NetCommand objects still to be sent to the client.
        self.cmdQueue = _queue.Queue()
        if pydevd_vm_type.get_vm_type() == 'python':
            self.timeout = 0
        else:
            self.timeout = 0.1

    def add_command(self, cmd):
        """ cmd is NetCommand """
        if not self.killReceived: #we don't take new data after everybody die
            self.cmdQueue.put(cmd)

    def _on_run(self):
        """ just loop and write responses """

        self._stop_trace()
        try:
            while True:
                try:
                    try:
                        # Block up to 0.1s waiting for the next command
                        # (get(block=1, timeout=0.1)).
                        cmd = self.cmdQueue.get(1, 0.1)
                    except _queue.Empty:
                        if self.killReceived:
                            try:
                                self.sock.shutdown(SHUT_WR)
                                self.sock.close()
                            except:
                                pass

                            return #break if queue is empty and killReceived
                        else:
                            continue
                except:
                    #pydevd_log(0, 'Finishing debug communication...(1)')
                    #when liberating the thread here, we could have errors because we were shutting down
                    #but the thread was still not liberated
                    return
                cmd.send(self.sock)

                if cmd.id == CMD_EXIT:
                    break
                if time is None:
                    break #interpreter shutdown
                time.sleep(self.timeout)
        except Exception:
            GlobalDebuggerHolder.global_dbg.finish_debugging_session()
            if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0:
                traceback.print_exc()

    def empty(self):
        # True when there are no queued commands left to send.
        return self.cmdQueue.empty()
#--------------------------------------------------- CREATING THE SOCKET THREADS
#=======================================================================================================================
# start_server
#=======================================================================================================================
def start_server(port):
    """Bind to `port` on all interfaces and wait for the IDE to connect.

    :param port: TCP port to listen on.
    :return: the accepted (connected) socket, or None if listening for /
        accepting the connection failed (the error is printed to stderr).
        A bind failure propagates to the caller.
    """
    s = socket(AF_INET, SOCK_STREAM)
    s.settimeout(None)

    try:
        from socket import SO_REUSEPORT
        s.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
    except ImportError:
        s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)

    s.bind(('', port))
    pydevd_log(1, "Bound to port ", str(port))

    try:
        s.listen(1)
        newSock, _addr = s.accept()
        pydevd_log(1, "Connection accepted")
        # The listening socket is no longer needed once a client connected.
        s.shutdown(SHUT_RDWR)
        s.close()
        return newSock
    except:
        # Fix: bind() already succeeded above, so a failure here is a
        # listen/accept failure (the old message wrongly reported it as a
        # bind error).
        sys.stderr.write("Could not accept connection on port: %s\n" % (port,))
        sys.stderr.flush()
        traceback.print_exc()
#=======================================================================================================================
# start_client
#=======================================================================================================================
def start_client(host, port, connect_timeout=10):
    """Connect to the IDE at host:port and return the connected socket.

    :param host: hostname or IP of the IDE.
    :param port: TCP port the IDE is listening on.
    :param connect_timeout: seconds to wait while establishing the
        connection (previously hard-coded to 10); the socket is switched
        back to blocking mode once connected.
    :raises: re-raises the socket error if the connection fails (after
        printing it to stderr).
    """
    pydevd_log(1, "Connecting to ", host, ":", str(port))

    s = socket(AF_INET, SOCK_STREAM)

    # Set TCP keepalive on an open socket.
    # It activates after 1 second (TCP_KEEPIDLE,) of idleness,
    # then sends a keepalive ping once every 3 seconds (TCP_KEEPINTVL),
    # and closes the connection after 5 failed ping (TCP_KEEPCNT), or 15 seconds
    try:
        from socket import IPPROTO_TCP, SO_KEEPALIVE, TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT
        s.setsockopt(SOL_SOCKET, SO_KEEPALIVE, 1)
        s.setsockopt(IPPROTO_TCP, TCP_KEEPIDLE, 1)
        s.setsockopt(IPPROTO_TCP, TCP_KEEPINTVL, 3)
        s.setsockopt(IPPROTO_TCP, TCP_KEEPCNT, 5)
    except ImportError:
        pass  # May not be available everywhere.

    try:
        s.settimeout(connect_timeout)
        s.connect((host, port))
        s.settimeout(None)  # no timeout after connected
        pydevd_log(1, "Connected.")
        return s
    except:
        sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
        sys.stderr.flush()
        traceback.print_exc()
        raise
#------------------------------------------------------------------------------------ MANY COMMUNICATION STUFF
#=======================================================================================================================
# NetCommand
#=======================================================================================================================
class NetCommand:
    """ Commands received/sent over the network.

    Command can represent command received from the debugger,
    or one to be sent by daemon.
    """
    # Last daemon-originated sequence number; daemon messages use even
    # numbers (incremented by 2), per the protocol in the module docstring.
    next_seq = 0 # sequence numbers

    # Protocol where each line is a new message (text is quoted to prevent new lines).
    QUOTED_LINE_PROTOCOL = 'quoted-line'

    # Uses http protocol to provide a new message.
    # i.e.: Content-Length:xxx\r\n\r\npayload
    HTTP_PROTOCOL = 'http'

    protocol = QUOTED_LINE_PROTOCOL

    _showing_debug_info = 0
    _show_debug_info_lock = threading.RLock()

    def __init__(self, cmd_id, seq, text):
        """
        If sequence is 0, new sequence will be generated (otherwise, this was the response
        to a command from the client).
        """
        self.id = cmd_id
        if seq == 0:
            NetCommand.next_seq += 2
            seq = NetCommand.next_seq

        self.seq = seq

        if IS_PY2:
            if isinstance(text, unicode):
                text = text.encode('utf-8')
            else:
                assert isinstance(text, str)
        else:
            assert isinstance(text, str)

        if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
            self._show_debug_info(cmd_id, seq, text)

        if self.protocol == self.HTTP_PROTOCOL:
            msg = '%s\t%s\t%s\n' % (cmd_id, seq, text)
        else:
            # Quote the payload so the message itself cannot contain the
            # newline/tab separators used by the quoted-line protocol.
            encoded = quote(to_string(text), '/<>_=" \t')
            msg = '%s\t%s\t%s\n' % (cmd_id, seq, encoded)

        if IS_PY2:
            assert isinstance(msg, str) # i.e.: bytes
            as_bytes = msg
        else:
            if isinstance(msg, str):
                msg = msg.encode('utf-8')

            assert isinstance(msg, bytes)
            as_bytes = msg
        # Fully encoded message, ready to be written to the socket.
        self._as_bytes = as_bytes

    def send(self, sock):
        """Write this command to `sock` (with a Content-Length header when
        the http protocol is in use)."""
        as_bytes = self._as_bytes
        if self.protocol == self.HTTP_PROTOCOL:
            sock.sendall(('Content-Length: %s\r\n\r\n' % len(as_bytes)).encode('ascii'))
        sock.sendall(as_bytes)

    @classmethod
    def _show_debug_info(cls, cmd_id, seq, text):
        # Debug-trace helper: prints the outgoing command to stderr.
        with cls._show_debug_info_lock:
            # Only one thread each time (rlock).
            if cls._showing_debug_info:
                # avoid recursing in the same thread (just printing could create
                # a new command when redirecting output).
                return

            cls._showing_debug_info += 1
            try:
                out_message = 'sending cmd --> '
                out_message += "%20s" % ID_TO_MEANING.get(str(cmd_id), 'UNKNOWN')
                out_message += ' '
                out_message += text.replace('\n', ' ')
                try:
                    sys.stderr.write('%s\n' % (out_message,))
                except:
                    pass
            finally:
                cls._showing_debug_info -= 1
#=======================================================================================================================
# NetCommandFactory
#=======================================================================================================================
class NetCommandFactory:
    """Builds the NetCommand instances the daemon sends to the client
    (thread events, variable payloads, exception traces, errors, ...).

    Convention: methods taking `seq` build a response to a client request;
    methods passing seq 0 generate a fresh daemon-originated sequence.
    Most builders fall back to make_error_message() on any failure.
    """

    def _thread_to_xml(self, thread):
        """ thread information as XML """
        name = pydevd_xml.make_valid_xml_value(thread.getName())
        cmdText = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread))
        return cmdText

    def make_error_message(self, seq, text):
        cmd = NetCommand(CMD_ERROR, seq, text)
        if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
            sys.stderr.write("Error: %s" % (text,))
        return cmd

    def make_protocol_set_message(self, seq):
        return NetCommand(CMD_SET_PROTOCOL, seq, '')

    def make_thread_created_message(self, thread):
        cmdText = "<xml>" + self._thread_to_xml(thread) + "</xml>"
        return NetCommand(CMD_THREAD_CREATE, 0, cmdText)

    def make_process_created_message(self):
        cmdText = '<process/>'
        return NetCommand(CMD_PROCESS_CREATED, 0, cmdText)

    def make_show_cython_warning_message(self):
        try:
            return NetCommand(CMD_SHOW_CYTHON_WARNING, 0, '')
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_custom_frame_created_message(self, frameId, frameDescription):
        frameDescription = pydevd_xml.make_valid_xml_value(frameDescription)
        cmdText = '<xml><thread name="%s" id="%s"/></xml>' % (frameDescription, frameId)
        return NetCommand(CMD_THREAD_CREATE, 0, cmdText)

    def make_list_threads_message(self, seq):
        """ returns thread listing as XML """
        try:
            threads = threading.enumerate()
            cmd_text = ["<xml>"]
            append = cmd_text.append
            for thread in threads:
                # The debugger's own daemon threads are not reported.
                if is_thread_alive(thread) and not getattr(thread, 'is_pydev_daemon_thread', False):
                    append(self._thread_to_xml(thread))
            append("</xml>")
            return NetCommand(CMD_RETURN, seq, ''.join(cmd_text))
        except:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_get_thread_stack_message(self, seq, thread_id, topmost_frame):
        """Returns thread stack as XML """
        try:
            # If frame is None, the return is an empty frame list.
            cmd_text = ['<xml><thread id="%s">' % (thread_id,)]

            if topmost_frame is not None:
                try:
                    # Note: if we detect that we're already stopped in a given place within
                    # the debugger, use that stack instead of creating a new one with the
                    # current position (this is needed because when an uncaught exception
                    # is reported for a given frame we are actually stopped in a different
                    # place within the debugger).
                    frame = topmost_frame
                    thread_stack_str = ''
                    while frame is not None:
                        if frame.f_code.co_name == 'do_wait_suspend' and frame.f_code.co_filename.endswith('pydevd.py'):
                            thread_stack_str = frame.f_locals.get('thread_stack_str')
                            break
                        frame = frame.f_back
                    cmd_text.append(thread_stack_str or self.make_thread_stack_str(topmost_frame))
                finally:
                    # Avoid keeping the frame (and its locals) alive.
                    topmost_frame = None
            cmd_text.append('</thread></xml>')
            return NetCommand(CMD_GET_THREAD_STACK, seq, ''.join(cmd_text))
        except:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_variable_changed_message(self, seq, payload):
        # notify debugger that value was changed successfully
        return NetCommand(CMD_RETURN, seq, payload)

    def make_io_message(self, v, ctx):
        '''
        @param v: the message to pass to the debug server
        @param ctx: 1 for stdio 2 for stderr
        '''
        try:
            if len(v) > MAX_IO_MSG_SIZE:
                v = v[0:MAX_IO_MSG_SIZE]
                v += '...'

            v = pydevd_xml.make_valid_xml_value(quote(v, '/>_= '))
            # Fix: pass the int command id (previously str(CMD_WRITE_TO_CONSOLE)
            # made cmd.id a string here, unlike every other factory method).
            return NetCommand(CMD_WRITE_TO_CONSOLE, 0, '<xml><io s="%s" ctx="%s"/></xml>' % (v, ctx))
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_version_message(self, seq):
        try:
            return NetCommand(CMD_VERSION, seq, VERSION_STRING)
        except:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_thread_killed_message(self, id):
        try:
            return NetCommand(CMD_THREAD_KILL, 0, str(id))
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_thread_stack_str(self, frame):
        """Render one <frame .../> element per non-pydevd frame in the stack."""
        make_valid_xml_value = pydevd_xml.make_valid_xml_value
        cmd_text_list = []
        append = cmd_text_list.append

        curr_frame = frame
        frame = None # Clear frame reference
        try:
            while curr_frame:
                my_id = id(curr_frame)

                if curr_frame.f_code is None:
                    break # Iron Python sometimes does not have it!

                method_name = curr_frame.f_code.co_name # method name (if in method) or ? if global
                if method_name is None:
                    break # Iron Python sometimes does not have it!

                abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(curr_frame)
                if get_file_type(abs_path_real_path_and_base[2]) == PYDEV_FILE:
                    # Skip pydevd files.
                    curr_frame = curr_frame.f_back
                    continue

                filename_in_utf8 = pydevd_file_utils.norm_file_to_client(abs_path_real_path_and_base[0])
                if not filesystem_encoding_is_utf8 and hasattr(filename_in_utf8, "decode"):
                    # filename_in_utf8 is a byte string encoded using the file system encoding
                    # convert it to utf8
                    filename_in_utf8 = filename_in_utf8.decode(file_system_encoding).encode("utf-8")

                # print("file is ", filename_in_utf8)
                lineno = str(curr_frame.f_lineno)
                # print("line is ", lineno)

                # Note: variables are all gotten 'on-demand'.
                append('<frame id="%s" name="%s" ' % (my_id , make_valid_xml_value(method_name)))
                append('file="%s" line="%s">' % (quote(make_valid_xml_value(filename_in_utf8), '/>_= \t'), lineno))
                append("</frame>")
                curr_frame = curr_frame.f_back
        except:
            traceback.print_exc()

        curr_frame = None # Clear frame reference
        return ''.join(cmd_text_list)

    def make_thread_suspend_str(
            self,
            thread_id,
            frame,
            stop_reason=None,
            message=None,
            suspend_type="trace",
        ):
        """
        :return tuple(str,str):
            Returns tuple(thread_suspended_str, thread_stack_str).

            i.e.:
            (
                '''
                    <xml>
                        <thread id="id" stop_reason="reason">
                            <frame id="id" name="functionName " file="file" line="line">
                            </frame>
                        </thread>
                    </xml>
                '''
                ,
                '''
                <frame id="id" name="functionName " file="file" line="line">
                </frame>
                '''
            )
        """
        make_valid_xml_value = pydevd_xml.make_valid_xml_value
        cmd_text_list = []
        append = cmd_text_list.append

        cmd_text_list.append('<xml>')
        if message:
            message = make_valid_xml_value(message)

        append('<thread id="%s"' % (thread_id,))
        if stop_reason is not None:
            append(' stop_reason="%s"' % (stop_reason,))
        if message is not None:
            append(' message="%s"' % (message,))
        if suspend_type is not None:
            append(' suspend_type="%s"' % (suspend_type,))
        append('>')
        thread_stack_str = self.make_thread_stack_str(frame)
        append(thread_stack_str)
        append("</thread></xml>")

        return ''.join(cmd_text_list), thread_stack_str

    def make_thread_suspend_message(self, thread_id, frame, stop_reason, message, suspend_type):
        try:
            thread_suspend_str, thread_stack_str = self.make_thread_suspend_str(
                thread_id, frame, stop_reason, message, suspend_type)
            cmd = NetCommand(CMD_THREAD_SUSPEND, 0, thread_suspend_str)
            # The raw stack is kept on the command for later reuse
            # (e.g. make_get_thread_stack_message finds it in f_locals).
            cmd.thread_stack_str = thread_stack_str
            return cmd
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_thread_run_message(self, id, reason):
        try:
            return NetCommand(CMD_THREAD_RUN, 0, str(id) + "\t" + str(reason))
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_get_variable_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_VARIABLE, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_get_array_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_ARRAY, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_get_description_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_DESCRIPTION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_get_frame_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_FRAME, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_evaluate_expression_message(self, seq, payload):
        try:
            return NetCommand(CMD_EVALUATE_EXPRESSION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_get_completions_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_COMPLETIONS, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_get_file_contents(self, seq, payload):
        try:
            return NetCommand(CMD_GET_FILE_CONTENTS, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_send_breakpoint_exception_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_BREAKPOINT_EXCEPTION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def _make_send_curr_exception_trace_str(self, thread_id, exc_type, exc_desc, trace_obj):
        # Walk to the innermost traceback entry (where the exception was raised).
        while trace_obj.tb_next is not None:
            trace_obj = trace_obj.tb_next

        exc_type = pydevd_xml.make_valid_xml_value(str(exc_type)).replace('\t', '  ') or 'exception: type unknown'
        exc_desc = pydevd_xml.make_valid_xml_value(str(exc_desc)).replace('\t', '  ') or 'exception: no description'

        thread_suspend_str, thread_stack_str = self.make_thread_suspend_str(
            thread_id, trace_obj.tb_frame, CMD_SEND_CURR_EXCEPTION_TRACE, '')
        return exc_type, exc_desc, thread_suspend_str, thread_stack_str

    def make_send_curr_exception_trace_message(self, seq, thread_id, curr_frame_id, exc_type, exc_desc, trace_obj):
        try:
            exc_type, exc_desc, thread_suspend_str, _thread_stack_str = self._make_send_curr_exception_trace_str(
                thread_id, exc_type, exc_desc, trace_obj)
            payload = str(curr_frame_id) + '\t' + exc_type + "\t" + exc_desc + "\t" + thread_suspend_str
            return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_get_exception_details_message(self, seq, thread_id, topmost_frame):
        """Returns exception details as XML """
        try:
            # If the debugger is not suspended, just return the thread and its id.
            cmd_text = ['<xml><thread id="%s" ' % (thread_id,)]

            if topmost_frame is not None:
                try:
                    frame = topmost_frame
                    topmost_frame = None
                    while frame is not None:
                        if frame.f_code.co_name == 'do_wait_suspend' and frame.f_code.co_filename.endswith('pydevd.py'):
                            arg = frame.f_locals.get('arg', None)
                            if arg is not None:
                                exc_type, exc_desc, _thread_suspend_str, thread_stack_str = self._make_send_curr_exception_trace_str(
                                    thread_id, *arg)
                                cmd_text.append('exc_type="%s" ' % (exc_type,))
                                cmd_text.append('exc_desc="%s" ' % (exc_desc,))
                                cmd_text.append('>')
                                cmd_text.append(thread_stack_str)
                                break
                        frame = frame.f_back
                    else:
                        # No suspended-in-exception frame found: close the tag empty.
                        cmd_text.append('>')
                finally:
                    frame = None
            cmd_text.append('</thread></xml>')
            return NetCommand(CMD_GET_EXCEPTION_DETAILS, seq, ''.join(cmd_text))
        except:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_send_curr_exception_trace_proceeded_message(self, seq, thread_id):
        try:
            return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, 0, str(thread_id))
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_send_console_message(self, seq, payload):
        try:
            return NetCommand(CMD_EVALUATE_CONSOLE_EXPRESSION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_custom_operation_message(self, seq, payload):
        try:
            return NetCommand(CMD_RUN_CUSTOM_OPERATION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_load_source_message(self, seq, source, dbg=None):
        try:
            net = NetCommand(CMD_LOAD_SOURCE, seq, '%s' % source)
        except:
            net = self.make_error_message(0, get_exception_traceback_str())

        if dbg:
            # Optionally queue the command directly on the debugger's writer.
            dbg.writer.add_command(net)
        return net

    def make_show_console_message(self, thread_id, frame):
        try:
            thread_suspended_str, _thread_stack_str = self.make_thread_suspend_str(thread_id, frame, CMD_SHOW_CONSOLE, '')
            return NetCommand(CMD_SHOW_CONSOLE, 0, thread_suspended_str)
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_input_requested_message(self, started):
        try:
            return NetCommand(CMD_INPUT_REQUESTED, 0, str(started))
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_set_next_stmnt_status_message(self, seq, is_success, exception_msg):
        try:
            message = str(is_success) + '\t' + exception_msg
            return NetCommand(CMD_SET_NEXT_STATEMENT, int(seq), message)
        except:
            # seq may itself be unusable here (int(seq) can be what failed),
            # so the error response uses a fresh daemon sequence.
            return self.make_error_message(0, get_exception_traceback_str())

    def make_load_full_value_message(self, seq, payload):
        try:
            return NetCommand(CMD_LOAD_FULL_VALUE, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_exit_message(self):
        try:
            net = NetCommand(CMD_EXIT, 0, '')
        except:
            net = self.make_error_message(0, get_exception_traceback_str())

        return net

    def make_get_next_statement_targets_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_NEXT_STATEMENT_TARGETS, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())
# Kinds of internal commands posted to PyDB.cmdQueue (see InternalThreadCommand below).
INTERNAL_TERMINATE_THREAD = 1
INTERNAL_SUSPEND_THREAD = 2
#=======================================================================================================================
# InternalThreadCommand
#=======================================================================================================================
class InternalThreadCommand:
    """Base class for commands generated/executed by the debugger itself.

    Some commands must run on a specific thread, so they are posted to
    PyDB.cmdQueue and each thread picks up only the commands it is
    allowed to execute (see can_be_executed_by).
    """

    def can_be_executed_by(self, thread_id):
        """Return True when `thread_id` may execute this command.

        By default only the command's own thread qualifies (either an
        exact id match or a composite id ending in '|<thread_id>').
        """
        target = self.thread_id
        return target == thread_id or target.endswith('|' + thread_id)

    def do_it(self, dbg):
        """Execute the command; subclasses must provide the behavior."""
        raise NotImplementedError("you have to override do_it")
class ReloadCodeCommand(InternalThreadCommand):
    """Reload a module in place (via pydevd_reload.xreload) on a specific
    thread, or on any thread when thread_id is '*'."""

    def __init__(self, module_name, thread_id):
        self.thread_id = thread_id
        self.module_name = module_name
        # Guarded by `lock` so the reload runs at most once even if several
        # threads pick this command up.
        self.executed = False
        self.lock = thread.allocate_lock()

    def can_be_executed_by(self, thread_id):
        if self.thread_id == '*':
            return True #Any thread can execute it!

        return InternalThreadCommand.can_be_executed_by(self, thread_id)

    def do_it(self, dbg):
        self.lock.acquire()
        try:
            if self.executed:
                return
            self.executed = True
        finally:
            self.lock.release()
        module_name = self.module_name
        if module_name not in sys.modules:
            if '.' in module_name:
                # Fall back to the last dotted component (e.g. 'pkg.mod' -> 'mod').
                new_module_name = module_name.split('.')[-1]
                if new_module_name in sys.modules:
                    module_name = new_module_name

        if module_name not in sys.modules:
            sys.stderr.write('pydev debugger: Unable to find module to reload: "' + module_name + '".\n')
            # Too much info...
            # sys.stderr.write('pydev debugger: This usually means you are trying to reload the __main__ module (which cannot be reloaded).\n')

        else:
            sys.stderr.write('pydev debugger: Start reloading module: "' + module_name + '" ... \n')
            from _pydevd_bundle import pydevd_reload
            if pydevd_reload.xreload(sys.modules[module_name]):
                sys.stderr.write('pydev debugger: reload finished\n')
            else:
                sys.stderr.write('pydev debugger: reload finished without applying any change\n')
#=======================================================================================================================
# InternalTerminateThread
#=======================================================================================================================
class InternalTerminateThread(InternalThreadCommand):
    """Command that notifies the client that a thread was killed."""

    def __init__(self, thread_id):
        self.thread_id = thread_id

    def do_it(self, dbg):
        """Log the kill and send the thread-killed message to the client."""
        pydevd_log(1, "killing ", str(self.thread_id))
        killed_cmd = dbg.cmd_factory.make_thread_killed_message(self.thread_id)
        dbg.writer.add_command(killed_cmd)
#=======================================================================================================================
# InternalRunThread
#=======================================================================================================================
class InternalRunThread(InternalThreadCommand):
    """Command that resumes a suspended thread."""

    def __init__(self, thread_id):
        self.thread_id = thread_id

    def do_it(self, dbg):
        """Clear any pending step state on the thread and mark it as running."""
        target = pydevd_find_thread_by_id(self.thread_id)
        if target is None:
            return
        info = target.additional_info
        info.pydev_step_cmd = -1
        info.pydev_step_stop = None
        info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalStepThread
#=======================================================================================================================
class InternalStepThread(InternalThreadCommand):
    """Command that makes a thread perform a step operation."""

    def __init__(self, thread_id, cmd_id):
        self.thread_id = thread_id
        self.cmd_id = cmd_id  # which step command (into/over/return/...)

    def do_it(self, dbg):
        """Record the requested step command on the thread's info and resume it."""
        target = pydevd_find_thread_by_id(self.thread_id)
        if target is None:
            return
        info = target.additional_info
        info.pydev_step_cmd = self.cmd_id
        info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalSetNextStatementThread
#=======================================================================================================================
class InternalSetNextStatementThread(InternalThreadCommand):
    """Command that makes a thread jump to a given line (set next statement)."""

    def __init__(self, thread_id, cmd_id, line, func_name, seq=0):
        self.thread_id = thread_id
        self.cmd_id = cmd_id
        self.line = line
        # seq is echoed back via pydev_message so the client can match the reply.
        self.seq = seq
        if IS_PY2:
            if isinstance(func_name, unicode):
                # On cython with python 2.X it requires an str, not unicode (but on python 3.3 it should be a str, not bytes).
                func_name = func_name.encode('utf-8')
        self.func_name = func_name

    def do_it(self, dbg):
        """Store the jump target on the thread's additional_info and mark it as running."""
        t = pydevd_find_thread_by_id(self.thread_id)
        if t:
            t.additional_info.pydev_step_cmd = self.cmd_id
            t.additional_info.pydev_next_line = int(self.line)
            t.additional_info.pydev_func_name = self.func_name
            t.additional_info.pydev_state = STATE_RUN
            t.additional_info.pydev_message = str(self.seq)
#=======================================================================================================================
# InternalGetVariable
#=======================================================================================================================
class InternalGetVariable(InternalThreadCommand):
    """ gets the value of a variable

    Resolves the children of a compound variable in a remote frame and sends
    them back to the client as XML.
    """

    def __init__(self, seq, thread_id, frame_id, scope, attrs):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.attributes = attrs

    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            xml = StringIO.StringIO()
            xml.write("<xml>")
            _typeName, val_dict = pydevd_vars.resolve_compound_variable_fields(self.thread_id, self.frame_id, self.scope, self.attributes)
            if val_dict is None:
                val_dict = {}
            # assume properly ordered if resolver returns 'OrderedDict'
            # check type as string to support OrderedDict backport for older Python
            keys = dict_keys(val_dict)
            # On Python < 3.6 plain dicts are unordered, so sort for a stable
            # display order unless the resolver already guarantees ordering.
            if not (_typeName == "OrderedDict" or val_dict.__class__.__name__ == "OrderedDict" or IS_PY36_OR_GREATER):
                keys.sort(key=compare_object_attrs_key)
            for k in keys:
                val = val_dict[k]
                evaluate_full_value = pydevd_xml.should_evaluate_full_value(val)
                xml.write(pydevd_xml.var_to_xml(val, k, evaluate_full_value=evaluate_full_value))
            xml.write("</xml>")
            cmd = dbg.cmd_factory.make_get_variable_message(self.sequence, xml.getvalue())
            xml.close()
            dbg.writer.add_command(cmd)
        except Exception:
            cmd = dbg.cmd_factory.make_error_message(
                self.sequence, "Error resolving variables %s" % (get_exception_traceback_str(),))
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetArray
#=======================================================================================================================
class InternalGetArray(InternalThreadCommand):
    """Fetches a 2-d slice of an array-like variable as an XML table."""

    def __init__(self, seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        # 'attrs' is a tab-separated attribute path; the last element is the
        # name/expression to evaluate in the frame.
        self.name = attrs.split("\t")[-1]
        self.attrs = attrs
        self.roffset = int(roffset)  # row offset of the requested slice
        self.coffset = int(coffset)  # column offset of the requested slice
        self.rows = int(rows)
        self.cols = int(cols)
        self.format = format

    def do_it(self, dbg):
        """Evaluate the array in the target frame and reply with its XML table."""
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            var = pydevd_vars.eval_in_context(self.name, frame.f_globals, frame.f_locals)
            xml = pydevd_vars.table_like_struct_to_xml(var, self.name, self.roffset, self.coffset, self.rows, self.cols, self.format)
            cmd = dbg.cmd_factory.make_get_array_message(self.sequence, xml)
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
            # not swallowed while building the reply.
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving array: " + get_exception_traceback_str())
        dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalChangeVariable
#=======================================================================================================================
class InternalChangeVariable(InternalThreadCommand):
    """Command that assigns a new value to a variable in a remote frame."""

    def __init__(self, seq, thread_id, frame_id, scope, attr, expression):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.attr = attr              # attribute/variable to change
        self.expression = expression  # expression producing the new value

    def do_it(self, dbg):
        """Apply the assignment and report the resulting value back as XML."""
        try:
            result = pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.attr, self.expression, dbg)
            body = pydevd_xml.var_to_xml(result, "")
            reply = dbg.cmd_factory.make_variable_changed_message(self.sequence, "<xml>" + body + "</xml>")
        except Exception:
            reply = dbg.cmd_factory.make_error_message(self.sequence, "Error changing variable attr:%s expression:%s traceback:%s" % (self.attr, self.expression, get_exception_traceback_str()))
        dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalGetFrame
#=======================================================================================================================
class InternalGetFrame(InternalThreadCommand):
    """Fetches the local variables of a remote frame as XML."""

    def __init__(self, seq, thread_id, frame_id):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id

    def do_it(self, dbg):
        """Resolve the frame and reply with its locals (or an error message)."""
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            if frame is not None:
                hidden_ns = pydevconsole.get_ipython_hidden_vars()
                xml = "<xml>"
                xml += pydevd_xml.frame_vars_to_xml(frame.f_locals, hidden_ns)
                del frame  # release the frame reference as soon as possible
                xml += "</xml>"
                cmd = dbg.cmd_factory.make_get_frame_message(self.sequence, xml)
                dbg.writer.add_command(cmd)
            else:
                #pydevd_vars.dump_frames(self.thread_id)
                #don't print this error: frame not found: means that the client is not synchronized (but that's ok)
                cmd = dbg.cmd_factory.make_error_message(self.sequence, "Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
                dbg.writer.add_command(cmd)
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt propagate.
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving frame: %s from thread: %s" % (self.frame_id, self.thread_id))
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetNextStatementTargets
#=======================================================================================================================
class InternalGetNextStatementTargets(InternalThreadCommand):
    """ gets the valid line numbers for use with set next statement """

    def __init__(self, seq, thread_id, frame_id):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id

    def do_it(self, dbg):
        """ Converts request into set of line numbers """
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            if frame is not None:
                code = frame.f_code
                xml = "<xml>"
                if hasattr(code, 'co_lnotab'):
                    # co_lnotab entries alternate (bytecode delta, line delta);
                    # islice(..., 1, len, 2) keeps only the line deltas, which are
                    # accumulated onto co_firstlineno to enumerate the lines.
                    # NOTE(review): since CPython 3.6 line deltas may be negative
                    # (signed bytes); this accumulation treats them as unsigned —
                    # confirm whether that matters for the supported interpreters.
                    lineno = code.co_firstlineno
                    lnotab = code.co_lnotab
                    for i in itertools.islice(lnotab, 1, len(lnotab), 2):
                        if isinstance(i, int):
                            # Python 3: iterating bytes yields ints.
                            lineno = lineno + i
                        else:
                            # in python 2 elements in co_lnotab are of type str
                            lineno = lineno + ord(i)
                        xml += "<line>%d</line>" % (lineno,)
                else:
                    # No co_lnotab available: fall back to the current line only.
                    xml += "<line>%d</line>" % (frame.f_lineno,)
                del frame
                xml += "</xml>"
                cmd = dbg.cmd_factory.make_get_next_statement_targets_message(self.sequence, xml)
                dbg.writer.add_command(cmd)
            else:
                cmd = dbg.cmd_factory.make_error_message(self.sequence, "Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
                dbg.writer.add_command(cmd)
        except:
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving frame: %s from thread: %s" % (self.frame_id, self.thread_id))
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalEvaluateExpression
#=======================================================================================================================
class InternalEvaluateExpression(InternalThreadCommand):
    """Evaluates an expression in a remote frame and replies with its XML."""

    def __init__(self, seq, thread_id, frame_id, expression, doExec, doTrim, temp_name):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.expression = expression
        self.doExec = doExec        # exec instead of eval (for statements)
        self.doTrim = doTrim        # trim long representations in the reply
        self.temp_name = temp_name  # if non-empty, also bind the result to this name

    def do_it(self, dbg):
        """Evaluate self.expression in the target frame and send the result as XML."""
        try:
            result = pydevd_vars.evaluate_expression(self.thread_id, self.frame_id, self.expression, self.doExec)
            if self.temp_name != "":
                pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.temp_name, self.expression, dbg, result)
            xml = "<xml>"
            xml += pydevd_xml.var_to_xml(result, self.expression, self.doTrim)
            xml += "</xml>"
            cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt propagate.
            exc = get_exception_traceback_str()
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
        dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetCompletions
#=======================================================================================================================
class InternalGetCompletions(InternalThreadCommand):
    """Computes code-completion proposals in a given frame."""

    def __init__(self, seq, thread_id, frame_id, act_tok):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.act_tok = act_tok  # the (partial) token being completed

    def do_it(self, dbg):
        """Generate completions for the activation token and reply as XML."""
        try:
            # The original had a dead 'remove_path' try/finally here (the variable
            # was never set to anything but None); it has been removed.
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            if frame is not None:
                msg = _pydev_completer.generate_completions_as_xml(frame, self.act_tok)
                cmd = dbg.cmd_factory.make_get_completions_message(self.sequence, msg)
            else:
                cmd = dbg.cmd_factory.make_error_message(self.sequence, "InternalGetCompletions: Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
            dbg.writer.add_command(cmd)
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt propagate.
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
            dbg.writer.add_command(cmd)
# =======================================================================================================================
# InternalGetDescription
# =======================================================================================================================
class InternalGetDescription(InternalThreadCommand):
    """ Fetch the variable description stub from the debug console
    """

    def __init__(self, seq, thread_id, frame_id, expression):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.expression = expression

    def do_it(self, dbg):
        """Evaluate the description of self.expression in the target frame and
        send it back as a single-variable XML stub."""
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            description = pydevd_console.get_description(frame, self.thread_id, self.frame_id, self.expression)
            # URL-quote (keeping a few safe characters) then escape so the text
            # can be embedded in an XML attribute value.
            description = pydevd_xml.make_valid_xml_value(quote(description, '/>_= \t'))
            description_xml = '<xml><var name="" type="" value="%s"/></xml>' % description
            cmd = dbg.cmd_factory.make_get_description_message(self.sequence, description_xml)
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt propagate.
            exc = get_exception_traceback_str()
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching description" + exc)
        dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetBreakpointException
#=======================================================================================================================
class InternalGetBreakpointException(InternalThreadCommand):
    """ Send details of exception raised while evaluating conditional breakpoint """

    def __init__(self, thread_id, exc_type, stacktrace):
        # Unsolicited notification, so there is no client sequence id (0).
        self.sequence = 0
        self.thread_id = thread_id
        self.stacktrace = stacktrace  # iterable of (filename, line, methodname, methodobj)
        self.exc_type = exc_type

    def do_it(self, dbg):
        try:
            callstack = "<xml>"
            makeValid = pydevd_xml.make_valid_xml_value
            for filename, line, methodname, methodobj in self.stacktrace:
                if file_system_encoding.lower() != "utf-8" and hasattr(filename, "decode"):
                    # filename is a byte string encoded using the file system encoding
                    # convert it to utf8
                    filename = filename.decode(file_system_encoding).encode("utf-8")
                callstack += '<frame thread_id = "%s" file="%s" line="%s" name="%s" obj="%s" />' \
                            % (self.thread_id, makeValid(filename), line, makeValid(methodname), makeValid(methodobj))
            callstack += "</xml>"
            # Payload: exception type, then the XML callstack, tab-separated.
            cmd = dbg.cmd_factory.make_send_breakpoint_exception_message(self.sequence, self.exc_type + "\t" + callstack)
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Exception: " + exc)
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTrace
#=======================================================================================================================
class InternalSendCurrExceptionTrace(InternalThreadCommand):
    """ Send details of the exception that was caught and where we've broken in.
    """

    def __init__(self, thread_id, arg, curr_frame_id):
        '''
        :param arg: exception type, description, traceback object
        '''
        # Unsolicited notification, so there is no client sequence id (0).
        self.sequence = 0
        self.thread_id = thread_id
        self.curr_frame_id = curr_frame_id
        self.arg = arg

    def do_it(self, dbg):
        try:
            cmd = dbg.cmd_factory.make_send_curr_exception_trace_message(self.sequence, self.thread_id, self.curr_frame_id, *self.arg)
            # Drop the (type, description, traceback) tuple once the command is
            # built — presumably so the traceback/frames aren't kept alive.
            del self.arg
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace: " + exc)
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTraceProceeded
#=======================================================================================================================
class InternalSendCurrExceptionTraceProceeded(InternalThreadCommand):
    """Notifies the client that handling of the current exception has proceeded."""

    def __init__(self, thread_id):
        self.sequence = 0  # unsolicited notification, not a reply
        self.thread_id = thread_id

    def do_it(self, dbg):
        try:
            proceeded_cmd = dbg.cmd_factory.make_send_curr_exception_trace_proceeded_message(self.sequence, self.thread_id)
            dbg.writer.add_command(proceeded_cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            error_cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace Proceeded: " + exc)
            dbg.writer.add_command(error_cmd)
#=======================================================================================================================
# InternalEvaluateConsoleExpression
#=======================================================================================================================
class InternalEvaluateConsoleExpression(InternalThreadCommand):
    """ Execute the given command in the debug console """

    def __init__(self, seq, thread_id, frame_id, line, buffer_output=True):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.line = line  # the console input line to execute
        self.buffer_output = buffer_output

    def do_it(self, dbg):
        """ Create an XML for console output, error and more (true/false)
        <xml>
            <output message=output_message></output>
            <error message=error_message></error>
            <more>true/false</more>
        </xml>
        """
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            if frame is not None:
                console_message = pydevd_console.execute_console_command(
                    frame, self.thread_id, self.frame_id, self.line, self.buffer_output)
                cmd = dbg.cmd_factory.make_send_console_message(self.sequence, console_message.to_xml())
            else:
                # Frame not found: report an error message built from a fresh
                # ConsoleMessage rather than executing anything.
                from _pydevd_bundle.pydevd_console import ConsoleMessage
                console_message = ConsoleMessage()
                console_message.add_console_message(
                    pydevd_console.CONSOLE_ERROR,
                    "Select the valid frame in the debug view (thread: %s, frame: %s invalid)" % (self.thread_id, self.frame_id),
                )
                cmd = dbg.cmd_factory.make_error_message(self.sequence, console_message.to_xml())
        except:
            exc = get_exception_traceback_str()
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
        # Whichever branch ran above, ship the resulting command to the client.
        dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalRunCustomOperation
#=======================================================================================================================
class InternalRunCustomOperation(InternalThreadCommand):
    """ Run a custom command on an expression
    """

    def __init__(self, seq, thread_id, frame_id, scope, attrs, style, encoded_code_or_file, fnname):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.attrs = attrs
        self.style = style
        # The code (or file reference) arrives URL-encoded; decode it up front.
        self.code_or_file = unquote_plus(encoded_code_or_file)
        self.fnname = fnname

    def do_it(self, dbg):
        """Run the custom operation and send its URL-encoded result back."""
        try:
            res = pydevd_vars.custom_operation(self.thread_id, self.frame_id, self.scope, self.attrs,
                                               self.style, self.code_or_file, self.fnname)
            reply = dbg.cmd_factory.make_custom_operation_message(self.sequence, quote_plus(res))
        except:
            exc = get_exception_traceback_str()
            reply = dbg.cmd_factory.make_error_message(self.sequence, "Error in running custom operation" + exc)
        dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalConsoleGetCompletions
#=======================================================================================================================
class InternalConsoleGetCompletions(InternalThreadCommand):
    """ Fetch the completions in the debug console
    """

    def __init__(self, seq, thread_id, frame_id, act_tok):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.act_tok = act_tok

    def do_it(self, dbg):
        """Compute console completions and write them back to the client."""
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            completions_xml = pydevd_console.get_completions(frame, self.act_tok)
            reply = dbg.cmd_factory.make_send_console_message(self.sequence, completions_xml)
        except:
            reply = dbg.cmd_factory.make_error_message(
                self.sequence, "Error in fetching completions" + get_exception_traceback_str())
        dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalConsoleExec
#=======================================================================================================================
class InternalConsoleExec(InternalThreadCommand):
    """ gets the value of a variable

    Executes a console expression in a remote frame and replies with the
    result as XML.
    """

    def __init__(self, seq, thread_id, frame_id, expression):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.expression = expression

    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            try:
                #don't trace new threads created by console command
                disable_trace_thread_modules()
                result = pydevconsole.console_exec(self.thread_id, self.frame_id, self.expression, dbg)
                xml = "<xml>"
                xml += pydevd_xml.var_to_xml(result, "")
                xml += "</xml>"
                cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
                dbg.writer.add_command(cmd)
            except:
                exc = get_exception_traceback_str()
                sys.stderr.write('%s\n' % (exc,))
                cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating console expression " + exc)
                dbg.writer.add_command(cmd)
        finally:
            # Re-enable tracing even if the exec failed, and flush so any
            # console output reaches its destination promptly.
            enable_trace_thread_modules()
            sys.stderr.flush()
            sys.stdout.flush()
#=======================================================================================================================
# InternalLoadFullValue
#=======================================================================================================================
class InternalLoadFullValue(InternalThreadCommand):
    """
    Loads values asynchronously
    """

    def __init__(self, seq, thread_id, frame_id, vars):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.vars = vars  # list of '<scope>\t<attrs>' (or plain name) strings

    def do_it(self, dbg):
        """Starts a thread that will load values asynchronously"""
        try:
            var_objects = []
            for variable in self.vars:
                variable = variable.strip()
                if len(variable) > 0:
                    if '\t' in variable:  # there are attributes beyond scope
                        scope, attrs = variable.split('\t', 1)
                        # NOTE(review): attrs[0] is the FIRST CHARACTER of the
                        # remaining tab-separated attrs string — it looks like an
                        # attribute name may have been intended here; confirm
                        # against consumers of make_load_full_value_message.
                        name = attrs[0]
                    else:
                        scope, attrs = (variable, None)
                        name = scope
                    var_obj = pydevd_vars.getVariable(self.thread_id, self.frame_id, scope, attrs)
                    var_objects.append((var_obj, name))
            # The evaluation itself happens on a daemon thread; the reply is sent
            # from there (see GetValueAsyncThreadDebug.send_result).
            t = GetValueAsyncThreadDebug(dbg, self.sequence, var_objects)
            t.start()
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating variable %s " % exc)
            dbg.writer.add_command(cmd)
class AbstractGetValueAsyncThread(PyDBDaemonThread):
    """
    Abstract class for a thread, which evaluates values for async variables
    """

    def __init__(self, frame_accessor, seq, var_objects):
        PyDBDaemonThread.__init__(self)
        self.frame_accessor = frame_accessor  # where results are delivered (see subclasses)
        self.seq = seq
        self.var_objs = var_objects  # list of (value, name) pairs to serialize
        self.cancel_event = threading.Event()

    def send_result(self, xml):
        """Deliver the accumulated XML; implemented by subclasses."""
        raise NotImplementedError()

    def _on_run(self):
        start = time.time()
        xml = StringIO.StringIO()
        xml.write("<xml>")
        for (var_obj, name) in self.var_objs:
            current_time = time.time()
            # Stop early when cancelled or when the overall time budget is
            # exceeded; a partial (still well-formed) document is sent then.
            if current_time - start > ASYNC_EVAL_TIMEOUT_SEC or self.cancel_event.is_set():
                break
            xml.write(pydevd_xml.var_to_xml(var_obj, name, evaluate_full_value=True))
        xml.write("</xml>")
        # send_result reads the buffer via getvalue(), so close only afterwards.
        self.send_result(xml)
        xml.close()
class GetValueAsyncThreadDebug(AbstractGetValueAsyncThread):
    """
    A thread for evaluation async values, which returns result for debugger
    Create message and send it via writer thread
    """

    def send_result(self, xml):
        accessor = self.frame_accessor
        if accessor is None:
            return
        cmd = accessor.cmd_factory.make_load_full_value_message(self.seq, xml.getvalue())
        accessor.writer.add_command(cmd)
class GetValueAsyncThreadConsole(AbstractGetValueAsyncThread):
    """
    A thread for evaluation async values, which returns result for Console
    Send result directly to Console's server
    """

    def send_result(self, xml):
        accessor = self.frame_accessor
        if accessor is not None:
            accessor.ReturnFullValue(self.seq, xml.getvalue())
#=======================================================================================================================
# pydevd_find_thread_by_id
#=======================================================================================================================
def pydevd_find_thread_by_id(thread_id):
    """Return the threading.Thread whose pydevd id matches thread_id, or None."""
    try:
        # there was a deadlock here when I did not remove the tracing function
        # when thread was dead, so we enumerate live threads every time.
        alive = threading.enumerate()
        for candidate in alive:
            tid = get_thread_id(candidate)
            if tid == thread_id or thread_id.endswith('|' + tid):
                return candidate
        sys.stderr.write("Could not find thread %s\n" % thread_id)
        sys.stderr.write("Available: %s\n" % [get_thread_id(t) for t in alive])
        sys.stderr.flush()
    except:
        traceback.print_exc()
    return None
|
{
"content_hash": "12ae408f9e00787d6e16ec52bfb243ab",
"timestamp": "",
"source": "github",
"line_count": 1790,
"max_line_length": 191,
"avg_line_length": 42.84804469273743,
"alnum_prop": 0.5118125635609794,
"repo_name": "SlicerRt/SlicerDebuggingTools",
"id": "0bb122b2d37a6eeb1177c15026b945b08aa1020c",
"size": "76698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_comm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "406"
},
{
"name": "C",
"bytes": "13361"
},
{
"name": "C++",
"bytes": "105521"
},
{
"name": "CMake",
"bytes": "21408"
},
{
"name": "Cython",
"bytes": "69580"
},
{
"name": "Makefile",
"bytes": "2063"
},
{
"name": "Python",
"bytes": "3900091"
},
{
"name": "Shell",
"bytes": "737"
}
],
"symlink_target": ""
}
|
"""
Station Plot
============
Make a station plot, complete with sky cover and weather symbols.
The station plot itself is pretty straightforward, but there is a bit of code to perform the
data-wrangling (hopefully that situation will improve in the future). Certainly, if you have
existing point data in a format you can work with trivially, the station plot will be simple.
"""
import cartopy.crs as ccrs
import cartopy.feature as feat
import matplotlib.pyplot as plt
import numpy as np
from metpy.calc import get_wind_components
from metpy.cbook import get_test_data
from metpy.plots import StationPlot
from metpy.plots.wx_symbols import current_weather, sky_cover
from metpy.units import units
###########################################
# The setup
# ---------
#
# First read in the data. We use `numpy.loadtxt` to read in the data and use a structured
# `numpy.dtype` to allow different types for the various columns. This allows us to handle
# the columns with string data.
f = get_test_data('station_data.txt')
# NOTE(review): '3S'/'16S' are fixed-width byte-string fields and 'f' is float32;
# the byte strings are decoded to ASCII further below.
all_data = np.loadtxt(f, skiprows=1, delimiter=',',
                      usecols=(1, 2, 3, 4, 5, 6, 7, 17, 18, 19),
                      dtype=np.dtype([('stid', '3S'), ('lat', 'f'), ('lon', 'f'),
                                      ('slp', 'f'), ('air_temperature', 'f'),
                                      ('cloud_fraction', 'f'), ('dewpoint', 'f'),
                                      ('weather', '16S'),
                                      ('wind_dir', 'f'), ('wind_speed', 'f')]))

###########################################
# This sample data has *way* too many stations to plot all of them. Instead, we just select
# a few from around the U.S. and pull those out of the data file.

# Get the full list of stations in the data
all_stids = [s.decode('ascii') for s in all_data['stid']]

# Pull out these specific stations
whitelist = ['OKC', 'ICT', 'GLD', 'MEM', 'BOS', 'MIA', 'MOB', 'ABQ', 'PHX', 'TTF',
             'ORD', 'BIL', 'BIS', 'CPR', 'LAX', 'ATL', 'MSP', 'SLC', 'DFW', 'NYC', 'PHL',
             'PIT', 'IND', 'OLY', 'SYR', 'LEX', 'CHS', 'TLH', 'HOU', 'GJT', 'LBB', 'LSV',
             'GRB', 'CLT', 'LNK', 'DSM', 'BOI', 'FSD', 'RAP', 'RIC', 'JAN', 'HSV', 'CRW',
             'SAT', 'BUY', '0CO', 'ZPC', 'VIH']

# Loop over all the whitelisted sites, grab the first data, and concatenate them
data = np.concatenate([all_data[all_stids.index(site)].reshape(1,) for site in whitelist])

###########################################
# Now that we have the data we want, we need to perform some conversions:
#
# - Get a list of strings for the station IDs
# - Get wind components from speed and direction
# - Convert cloud fraction values to integer codes [0 - 8]
# - Map METAR weather codes to WMO codes for weather symbols

# Get all of the station IDs as a list of strings
stid = [s.decode('ascii') for s in data['stid']]

# Get the wind components, converting from m/s to knots as will be appropriate
# for the station plot
# NOTE(review): `get_wind_components` is the MetPy 0.x API; it was renamed to
# `wind_components` in later MetPy releases.
u, v = get_wind_components((data['wind_speed'] * units('m/s')).to('knots'),
                           data['wind_dir'] * units.degree)

# Convert the fraction value into a code of 0-8, which can be used to pull out
# the appropriate symbol
cloud_frac = (8 * data['cloud_fraction']).astype(int)

# Map weather strings to WMO codes, which we can use to convert to symbols
# Only use the first symbol if there are multiple
wx_text = [s.decode('ascii') for s in data['weather']]
wx_codes = {'': 0, 'HZ': 5, 'BR': 10, '-DZ': 51, 'DZ': 53, '+DZ': 55,
            '-RA': 61, 'RA': 63, '+RA': 65, '-SN': 71, 'SN': 73, '+SN': 75}
wx = [wx_codes[s.split()[0] if ' ' in s else s] for s in wx_text]

###########################################
# Now all the data wrangling is finished, just need to set up plotting and go

# Set up the map projection and set up a cartopy feature for state borders
proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=35,
                             standard_parallels=[35])
state_boundaries = feat.NaturalEarthFeature(category='cultural',
                                            name='admin_1_states_provinces_lines',
                                            scale='110m', facecolor='none')
###########################################
# The payoff
# ----------

# Change the DPI of the resulting figure. Higher DPI drastically improves the
# look of the text rendering
plt.rcParams['savefig.dpi'] = 255

# Create the figure and an axes set to the projection
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(1, 1, 1, projection=proj)

# Add some various map elements to the plot to make it recognizable
# (zorder=-1 keeps the filled land/ocean/lakes beneath the station data)
ax.add_feature(feat.LAND, zorder=-1)
ax.add_feature(feat.OCEAN, zorder=-1)
ax.add_feature(feat.LAKES, zorder=-1)
ax.coastlines(resolution='110m', zorder=2, color='black')
ax.add_feature(state_boundaries)
ax.add_feature(feat.BORDERS, linewidth='2', edgecolor='black')

# Set plot bounds (roughly the contiguous U.S.: lon -118..-73, lat 23..50)
ax.set_extent((-118, -73, 23, 50))

#
# Here's the actual station plot
#

# Start the station plot by specifying the axes to draw on, as well as the
# lon/lat of the stations (with transform). We also set the fontsize to 12 pt.
stationplot = StationPlot(ax, data['lon'], data['lat'], transform=ccrs.PlateCarree(),
                          fontsize=12)

# Plot the temperature and dew point to the upper and lower left, respectively, of
# the center point. Each one uses a different color.
stationplot.plot_parameter('NW', data['air_temperature'], color='red')
stationplot.plot_parameter('SW', data['dewpoint'], color='darkgreen')

# A more complex example uses a custom formatter to control how the sea-level pressure
# values are plotted. This uses the standard trailing 3-digits of the pressure value
# in tenths of millibars.
stationplot.plot_parameter('NE', data['slp'],
                           formatter=lambda v: format(10 * v, '.0f')[-3:])

# Plot the cloud cover symbols in the center location. This uses the codes made above and
# uses the `sky_cover` mapper to convert these values to font codes for the
# weather symbol font.
stationplot.plot_symbol('C', cloud_frac, sky_cover)

# Same this time, but plot current weather to the left of center, using the
# `current_weather` mapper to convert symbols to the right glyphs.
stationplot.plot_symbol('W', wx, current_weather)

# Add wind barbs
stationplot.plot_barb(u, v)

# Also plot the actual text of the station id. Instead of cardinal directions,
# plot further out by specifying a location of 2 increments in x and 0 in y.
stationplot.plot_text((2, 0), stid)

plt.show()
|
{
"content_hash": "a8a402b23ce84fc3e57aa7af3dc3fae3",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 93,
"avg_line_length": 43.390728476821195,
"alnum_prop": 0.6292735042735043,
"repo_name": "metpy/MetPy",
"id": "ee839a853b33b39a636f2654c4f68624c630bf72",
"size": "6695",
"binary": false,
"copies": "2",
"ref": "refs/heads/gh-pages",
"path": "v0.4/_downloads/Station_Plot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "989941"
},
{
"name": "Python",
"bytes": "551868"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional `cellar` foreign key
    # to the Product model.
    dependencies = [
        ('venta', '0028_auto_20160408_1555'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='cellar',
            # blank/null: existing products need no cellar; CASCADE removes a
            # product row when its Cellar is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='venta.Cellar', verbose_name='Bodega'),
        ),
    ]
|
{
"content_hash": "23808528a65e66263898644d7aa2d0e5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 146,
"avg_line_length": 26.526315789473685,
"alnum_prop": 0.6388888888888888,
"repo_name": "vpadillar/pventa",
"id": "e7c6a078a683f3384b53414d5a82f335b5bf5468",
"size": "574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venta/migrations/0029_product_cellar.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "491"
},
{
"name": "CSS",
"bytes": "87140"
},
{
"name": "Groff",
"bytes": "76"
},
{
"name": "HTML",
"bytes": "47212"
},
{
"name": "JavaScript",
"bytes": "177804"
},
{
"name": "Python",
"bytes": "201594"
},
{
"name": "SQLPL",
"bytes": "1006"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes Study.comments optional
    # (blank in forms, NULL in the database).
    dependencies = [("studies", "0006_study_comments")]
    operations = [
        migrations.AlterField(
            model_name="study",
            name="comments",
            field=models.TextField(blank=True, null=True),
        )
    ]
|
{
"content_hash": "7830cc82ce09b1c514fb719f15b0d724",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 22.9375,
"alnum_prop": 0.6185286103542235,
"repo_name": "CenterForOpenScience/lookit-api",
"id": "0be17c4652d0c9b54e2c0e17d1c568a279a86258",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "studies/migrations/0007_auto_20170626_1504.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11022"
},
{
"name": "HTML",
"bytes": "185393"
},
{
"name": "Python",
"bytes": "481700"
},
{
"name": "Shell",
"bytes": "1166"
}
],
"symlink_target": ""
}
|
import logging
import os
import click
from platformio import fs
from platformio.package.manager.library import LibraryPackageManager
from platformio.package.manager.platform import PlatformPackageManager
from platformio.package.manager.tool import ToolPackageManager
from platformio.package.meta import PackageSpec
from platformio.project.config import ProjectConfig
from platformio.project.savedeps import pkg_to_save_spec, save_project_dependencies
@click.command(
    "update", short_help="Update the project dependencies or custom packages"
)
@click.option(
    "-d",
    "--project-dir",
    default=os.getcwd,
    type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
)
@click.option("-e", "--environment", "environments", multiple=True)
@click.option("-p", "--platform", "platforms", metavar="SPECIFICATION", multiple=True)
@click.option("-t", "--tool", "tools", metavar="SPECIFICATION", multiple=True)
@click.option("-l", "--library", "libraries", metavar="SPECIFICATION", multiple=True)
@click.option(
    "--no-save",
    is_flag=True,
    help="Prevent saving specified packages to `platformio.ini`",
)
@click.option("--skip-dependencies", is_flag=True, help="Skip package dependencies")
@click.option("-g", "--global", is_flag=True, help="Update global packages")
@click.option(
    "--storage-dir",
    default=None,
    type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
    help="Custom Package Manager storage for global packages",
)
@click.option("-s", "--silent", is_flag=True, help="Suppress progress reporting")
def package_update_cmd(**options):
    """Entry point for the `update` command.

    Dispatches to the global update flow when --global is given, otherwise
    updates the dependencies of the project in --project-dir.
    """
    if options.get("global"):
        update_global_dependencies(options)
    else:
        update_project_dependencies(options)
def update_global_dependencies(options):
    """Update globally installed platforms, tools, and libraries.

    Reads the package specs from the ``platforms``, ``tools``, and
    ``libraries`` CLI options and updates each spec in place via its
    package manager.  Fix: the ``platforms``/``tools`` lookups previously
    had no default, so a missing key yielded ``None`` and iterating it
    raised TypeError; they now default to an empty list, matching the
    ``libraries`` loop.
    """
    storage_dir = options.get("storage_dir")
    pm = PlatformPackageManager(storage_dir)
    tm = ToolPackageManager(storage_dir)
    lm = LibraryPackageManager(storage_dir)
    # One log level and one skip flag apply to all three managers.
    log_level = logging.WARN if options.get("silent") else logging.DEBUG
    skip_dependencies = options.get("skip_dependencies")
    for manager, specs in (
        (pm, options.get("platforms") or []),
        (tm, options.get("tools") or []),
        (lm, options.get("libraries") or []),
    ):
        manager.set_log_level(log_level)
        for spec in specs:
            manager.update(
                from_spec=spec,
                to_spec=spec,
                skip_dependencies=skip_dependencies,
            )
def update_project_dependencies(options):
    """Update dependencies for each selected environment of the project."""
    selected_envs = options["environments"]
    with fs.cd(options["project_dir"]):
        project_config = ProjectConfig.get_instance()
        project_config.validate(selected_envs)
        for env_name in project_config.envs():
            # honor the -e/--environment filter when one was given
            if selected_envs and env_name not in selected_envs:
                continue
            if not options["silent"]:
                click.echo(
                    "Resolving %s environment packages..."
                    % click.style(env_name, fg="cyan")
                )
            changed = update_project_env_dependencies(env_name, options)
            if not options["silent"] and not changed:
                click.secho("Already up-to-date.", fg="green")
def update_project_env_dependencies(project_env, options=None):
    """Update one environment's packages; return True if anything changed."""
    options = options or {}
    results = []
    # custom platforms
    if options.get("platforms"):
        results.append(_update_project_env_custom_platforms(project_env, options))
    # custom tools
    if options.get("tools"):
        results.append(_update_project_env_custom_tools(project_env, options))
    # custom libraries
    if options.get("libraries"):
        results.append(_update_project_env_custom_libraries(project_env, options))
    # no explicit specs given: fall back to the dependencies declared
    # in the project configuration
    if not results:
        results = [
            _update_project_env_platform(project_env, options),
            _update_project_env_libraries(project_env, options),
        ]
    return any(results)
def _update_project_env_platform(project_env, options):
    """Update the platform declared for *project_env*.

    Returns True if the platform package changed, False if it was already
    up to date, or None when no platform is declared or installed.
    """
    config = ProjectConfig.get_instance()
    pm = PlatformPackageManager()
    if options.get("silent"):
        pm.set_log_level(logging.WARN)
    spec = config.get(f"env:{project_env}", "platform")
    if not spec:
        return None
    cur_pkg = pm.get_package(spec)
    if not cur_pkg:
        return None
    # Fix: reuse `pm` (which carries the --silent log level) instead of
    # instantiating a second PlatformPackageManager whose log level was
    # never configured.
    new_pkg = pm.update(
        cur_pkg,
        to_spec=spec,
        project_env=project_env,
        skip_dependencies=options.get("skip_dependencies"),
    )
    return cur_pkg != new_pkg
def _update_project_env_custom_platforms(project_env, options):
    """Update user-specified platform packages; return True if any changed."""
    changed = False
    pm = PlatformPackageManager()
    if not options.get("silent"):
        pm.set_log_level(logging.DEBUG)
    for spec in options.get("platforms"):
        old_pkg = pm.get_package(spec)
        updated_pkg = pm.update(
            old_pkg,
            to_spec=spec,
            project_env=project_env,
            skip_dependencies=options.get("skip_dependencies"),
        )
        changed = changed or old_pkg != updated_pkg
    return changed
def _update_project_env_custom_tools(project_env, options):
    """Update user-specified tool packages, saving them to `platformio.ini`
    unless --no-save was given; return True if any package changed."""
    changed = False
    tm = ToolPackageManager()
    if not options.get("silent"):
        tm.set_log_level(logging.DEBUG)
    saved_specs = []
    for tool in options.get("tools"):
        spec = PackageSpec(tool)
        old_pkg = tm.get_package(spec)
        updated_pkg = tm.update(
            old_pkg,
            to_spec=spec,
            skip_dependencies=options.get("skip_dependencies"),
        )
        if old_pkg != updated_pkg:
            changed = True
        saved_specs.append(pkg_to_save_spec(updated_pkg, spec))
    if saved_specs and not options.get("no_save"):
        save_project_dependencies(
            os.getcwd(),
            saved_specs,
            scope="platform_packages",
            action="add",
            environments=[project_env],
        )
    return changed
def _update_project_env_libraries(project_env, options):
    """Update the libraries declared in `lib_deps` for *project_env*;
    return True if any installed library changed."""
    changed = False
    config = ProjectConfig.get_instance()
    lm = LibraryPackageManager(
        os.path.join(config.get("platformio", "libdeps_dir"), project_env)
    )
    if options.get("silent"):
        lm.set_log_level(logging.WARN)
    for library in config.get(f"env:{project_env}", "lib_deps"):
        spec = PackageSpec(library)
        # built-in dependencies have neither an external source nor an owner
        if not (spec.external or spec.owner):
            continue
        old_pkg = lm.get_package(spec)
        if not old_pkg:
            # nothing installed for this spec, nothing to update
            continue
        updated_pkg = lm.update(
            old_pkg,
            to_spec=spec,
            skip_dependencies=options.get("skip_dependencies"),
        )
        changed = changed or old_pkg != updated_pkg
    return changed
def _update_project_env_custom_libraries(project_env, options):
    """Update user-specified libraries, saving them to `platformio.ini`
    unless --no-save was given; return True if any package changed."""
    changed = False
    config = ProjectConfig.get_instance()
    lm = LibraryPackageManager(
        os.path.join(config.get("platformio", "libdeps_dir"), project_env)
    )
    if not options.get("silent"):
        lm.set_log_level(logging.DEBUG)
    saved_specs = []
    for library in options.get("libraries") or []:
        spec = PackageSpec(library)
        old_pkg = lm.get_package(spec)
        updated_pkg = lm.update(
            old_pkg,
            to_spec=spec,
            skip_dependencies=options.get("skip_dependencies"),
        )
        if old_pkg != updated_pkg:
            changed = True
        saved_specs.append(pkg_to_save_spec(updated_pkg, spec))
    if saved_specs and not options.get("no_save"):
        save_project_dependencies(
            os.getcwd(),
            saved_specs,
            scope="lib_deps",
            action="add",
            environments=[project_env],
        )
    return changed
|
{
"content_hash": "bfec1db5829c6c2458ad6f5473ad47b5",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 88,
"avg_line_length": 34.10084033613445,
"alnum_prop": 0.6232134056185313,
"repo_name": "platformio/platformio",
"id": "a520b7b3ffb4d6c40c3389db13c3acf6377890eb",
"size": "8727",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "platformio/package/commands/update.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "1826"
},
{
"name": "Makefile",
"bytes": "356"
},
{
"name": "Processing",
"bytes": "101"
},
{
"name": "Python",
"bytes": "333618"
},
{
"name": "Smarty",
"bytes": "45408"
}
],
"symlink_target": ""
}
|
import os
from binascii import hexlify
from random import randint
from unittest import TestCase
from six import text_type, PY3
from six.moves import xrange
from bson import dumps, loads
def populate(parent, howmany, max_children):
    """Recursively fill *parent* (a dict or list) with a random tree that
    ultimately holds *howmany* leaf values, each node having at most
    *max_children* children."""
    if howmany <= max_children:
        # small enough: attach the leaves directly to this node
        populate_with_leaves(parent, howmany)
        return
    children = randint(2, max_children)
    # start from a near-even split of `howmany` across the children
    distribution = []
    for _ in xrange(0, children - 1):
        distribution.append(int(howmany / children))
    distribution.append(howmany - sum(distribution, 0))
    # randomly move counts between pairs of children to vary the shape
    for i in xrange(0, children):
        steal_target = randint(0, children - 1)
        while steal_target == i:
            steal_target = randint(0, children - 1)
        steal_count = int(randint(-1 * distribution[i],
                                  distribution[steal_target]) / 2)
        distribution[i] += steal_count
        distribution[steal_target] -= steal_count
    for i in xrange(0, children):
        # each child is randomly a dict or a list
        baby = {} if randint(0, 1) else []
        populate(baby, distribution[i], max_children)
        if isinstance(parent, dict):
            key = os.urandom(8)
            key = "".join(chr(c) for c in hexlify(key)) if PY3 else key.encode("hex")
            parent[key] = baby
        else:
            parent.append(baby)
def populate_with_leaves(parent, howmany):
    """Attach *howmany* random hex-string leaves to *parent* (dict or list);
    each leaf is randomly left as bytes-ish text or promoted to unicode."""
    for _ in xrange(0, howmany):
        raw = os.urandom(4)
        leaf = "".join(chr(c) for c in hexlify(raw)) if PY3 else raw.encode("hex")
        if randint(0, 1):
            leaf = text_type(leaf)
        if isinstance(parent, dict):
            raw_key = os.urandom(4)
            key = "".join(chr(c) for c in hexlify(raw_key)) if PY3 else raw_key.encode("hex")
            parent[key] = leaf
        else:
            parent.append(leaf)
class TestRandomTree(TestCase):
    """Round-trip randomly generated nested structures through BSON."""
    def test_random_tree(self):
        """Serialize and deserialize 16 random trees; each must survive intact."""
        for i in xrange(0, 16):
            p = {}
            populate(p, 256, 4)
            sp = dumps(p)
            p2 = loads(sp)
            # Fix: assertEquals is a deprecated alias that was removed in
            # Python 3.12; use assertEqual.
            self.assertEqual(p, p2)
|
{
"content_hash": "222242314a388fcff45badc883062dbb",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 89,
"avg_line_length": 32.391304347826086,
"alnum_prop": 0.545413870246085,
"repo_name": "hisery/bson",
"id": "ea6acf98f6947b9290eb5503876f8e4928e86ad2",
"size": "2235",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bson/tests/test_random_tree.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26878"
}
],
"symlink_target": ""
}
|
import sequences.sequence as seq
from sequences.label_dictionary import *
import pdb
class _SequenceIterator(object):
def __init__(self, seq):
self.seq = seq
self.pos = 0
def __iter__(self):
return self
def next(self):
if self.pos >= len(self.seq):
raise StopIteration
r = self.seq[self.pos]
self.pos += 1
return r
class SequenceList(object):
    """Container for seq.Sequence objects sharing common label dictionaries."""
    def __init__(self, x_dict, pos_dict, ne_dict, stem_vocab=None):
        """Store the observation (x), POS, and named-entity (y) dictionaries.

        Fix: the previous default `stem_vocab={}` was a mutable default
        argument shared between all instances; use None and create a fresh
        dict per instance instead.
        """
        self.x_dict = x_dict      # observation (word) label dictionary
        self.y_dict = ne_dict     # state (named-entity) label dictionary
        self.pos_dict = pos_dict  # part-of-speech label dictionary
        self.stem_vocabulary = {} if stem_vocab is None else stem_vocab
        self.seq_list = []
    def __str__(self):
        return str(self.seq_list)
    def __repr__(self):
        return repr(self.seq_list)
    def __len__(self):
        return len(self.seq_list)
    def __getitem__(self, ix):
        return self.seq_list[ix]
    def __iter__(self):
        return _SequenceIterator(self)
    def size(self):
        """Returns the number of sequences in the list."""
        return len(self.seq_list)
    def get_num_tokens(self):
        """Returns the number of tokens in the sequence list, that is, the
        sum of the length of the sequences."""
        return sum(s.size() for s in self.seq_list)
    def add_sequence(self, x, y, pos, file_id='', br_pos_list=None):
        """Add a sequence to the list, where x is the sequence of
        observations, y the sequence of states, and pos the sequence of
        part-of-speech tags; all three are mapped to ids through the
        corresponding dictionaries.

        Fix: `br_pos_list=[]` was a mutable default argument; use None.
        """
        if br_pos_list is None:
            br_pos_list = []
        num_seqs = len(self.seq_list)
        x_ids = [self.x_dict.get_label_id(name) for name in x]
        y_ids = [self.y_dict.get_label_id(name) for name in y]
        pos_ids = [self.pos_dict.get_label_id(name) for name in pos]
        self.seq_list.append(
            seq.Sequence(self, x_ids, y_ids, pos_ids, num_seqs, file_id, br_pos_list))
    def save(self, file):
        """Write each sequence as one line of tab-separated "x_id:y_id" pairs.

        Fix: use `with` so the file is closed even on error, and rename the
        loop variable so it no longer shadows the imported `seq` module.
        """
        with open(file, "w") as seq_fn:
            for sequence in self.seq_list:
                txt = ""
                for pos, word in enumerate(sequence.x):
                    txt += "%i:%i\t" % (word, sequence.y[pos])
                seq_fn.write(txt.strip() + "\n")
    def load(self, file):
        """Load sequences previously written by save().

        Fix: the old code called add_sequence(seq_x, seq_y) without the
        required `pos` argument, which raised TypeError; an empty POS list
        is now supplied.  NOTE(review): save() stores ids while
        add_sequence expects label names and re-maps them through the
        dictionaries — confirm the round-trip against the label_dictionary
        implementation.
        """
        with open(file, "r") as seq_fn:
            for line in seq_fn:
                seq_x = []
                seq_y = []
                for entry in line.strip().split("\t"):
                    x, y = entry.split(":")
                    seq_x.append(int(x))
                    seq_y.append(int(y))
                self.add_sequence(seq_x, seq_y, [])
|
{
"content_hash": "956f1c7f9916aea20fc5ba9075b33cbd",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 102,
"avg_line_length": 29.406976744186046,
"alnum_prop": 0.534994068801898,
"repo_name": "ronaldahmed/labor-market-demand-analysis",
"id": "7e71b2035d3fe727123eadbcf72e230a2f87ebe1",
"size": "2529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shallow parsing models/sequences/sequence_list.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "125767"
},
{
"name": "CSS",
"bytes": "32608"
},
{
"name": "HTML",
"bytes": "235912337"
},
{
"name": "JavaScript",
"bytes": "23952"
},
{
"name": "Makefile",
"bytes": "1369"
},
{
"name": "Python",
"bytes": "1028412"
},
{
"name": "R",
"bytes": "38334"
}
],
"symlink_target": ""
}
|
""" Testing suite for the Longformer tokenizer. """
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Copied from transformers.tests.roberta.test_modeling_roberta.py with Roberta->Longformer
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for Longformer's byte-level BPE, exercising both the
    slow (Python) and fast (Rust) implementations against a tiny fixture
    vocabulary written to a temp directory in setUp()."""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        """Write a minimal vocab/merges fixture for the tokenizers under test."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        # slow (Python) tokenizer built from the fixture files
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        # fast (Rust-backed) tokenizer built from the same fixture files
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        # encode/decode round-trip pair used by the common mixin tests
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        """Tokenization and token->id conversion against the fixture vocab."""
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        # NOTE(review): not prefixed with `test_`, so unittest does not
        # collect or run this method automatically.
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        """build_inputs_with_special_tokens must match encode(add_special_tokens=True)."""
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        """Behavior of add_prefix_space and of lstrip-marked special tokens."""
        tokenizer = self.get_tokenizer()
        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        # intentionally overridden as a no-op: disables the mixin's
        # pretokenized-input test for this tokenizer
        pass
    def test_embeded_special_tokens(self):
        """Slow and fast tokenizers must agree on sequences containing special tokens."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        """The fast tokenizer's pre-tokenizer/post-processor state must reflect
        the add_prefix_space and trim_offsets constructor arguments."""
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                # repeat with a leading space in the input text
                text = f" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
|
{
"content_hash": "453fbb36951f6e448bb53c694459da63",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 113,
"avg_line_length": 48.491408934707906,
"alnum_prop": 0.592870810006378,
"repo_name": "huggingface/transformers",
"id": "2397a40bafa6b1fffe695593d1716cec32670c31",
"size": "14733",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/models/longformer/test_tokenization_longformer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
}
|
""" Write a program that reads 2 numbers from sys.argv
and prints their sum.
Bonus: Print error messages for invalid inputs.
To print error messages we'll have to use a concept not yet learned in the
course, and which will only be presented later: Exceptions.
We'll tap into Python's error handling and change its default
error message to something more meaningful.
"""
|
{
"content_hash": "1c55bbafe4fe2a26e135055fb8167fcc",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 74,
"avg_line_length": 37.2,
"alnum_prop": 0.782258064516129,
"repo_name": "ynonp/python-examples-verint-2016-07",
"id": "95f24ba61b506a4b86404955d3332a4b96e6af8a",
"size": "372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "06_modules_lab/02_bonus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32213"
}
],
"symlink_target": ""
}
|
"""
Tests specific to the extended etree API
Tests that apply to the general ElementTree API should go into
test_elementtree
"""
import os.path
import unittest
import copy
import sys
import re
import gc
import operator
import tempfile
import gzip
# Make this test's directory importable so the common_imports helper module
# can be found regardless of how the suite is launched.
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
    sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, StringIO, BytesIO, HelperTestCase, fileInTestDir, read_file
from common_imports import SillyFileLike, LargeFileLikeUnicode, doctest, make_doctest
from common_imports import canonicalize, sorted, _str, _bytes
# Report the exact lxml/libxml2/libxslt versions under test so failures can
# be tied to a specific build.
print("")
print("TESTED VERSION: %s" % etree.__version__)
print(" Python: " + repr(sys.version_info))
print(" lxml.etree: " + repr(etree.LXML_VERSION))
print(" libxml used: " + repr(etree.LIBXML_VERSION))
print(" libxml compiled: " + repr(etree.LIBXML_COMPILED_VERSION))
print(" libxslt used: " + repr(etree.LIBXSLT_VERSION))
print(" libxslt compiled: " + repr(etree.LIBXSLT_COMPILED_VERSION))
print("")
# `unicode` exists only on Python 2; fall back to `str` on Python 3.
try:
    _unicode = unicode
except NameError:
    # Python 3
    _unicode = str
class ETreeOnlyTestCase(HelperTestCase):
"""Tests only for etree, not ElementTree"""
etree = etree
def test_version(self):
self.assertTrue(isinstance(etree.__version__, _unicode))
self.assertTrue(isinstance(etree.LXML_VERSION, tuple))
self.assertEqual(len(etree.LXML_VERSION), 4)
self.assertTrue(isinstance(etree.LXML_VERSION[0], int))
self.assertTrue(isinstance(etree.LXML_VERSION[1], int))
self.assertTrue(isinstance(etree.LXML_VERSION[2], int))
self.assertTrue(isinstance(etree.LXML_VERSION[3], int))
self.assertTrue(etree.__version__.startswith(
str(etree.LXML_VERSION[0])))
def test_c_api(self):
if hasattr(self.etree, '__pyx_capi__'):
# newer Pyrex compatible C-API
self.assertTrue(isinstance(self.etree.__pyx_capi__, dict))
self.assertTrue(len(self.etree.__pyx_capi__) > 0)
else:
# older C-API mechanism
self.assertTrue(hasattr(self.etree, '_import_c_api'))
def test_element_names(self):
Element = self.etree.Element
el = Element('name')
self.assertEqual(el.tag, 'name')
el = Element('{}name')
self.assertEqual(el.tag, 'name')
def test_element_name_empty(self):
Element = self.etree.Element
el = Element('name')
self.assertRaises(ValueError, Element, '{}')
self.assertRaises(ValueError, setattr, el, 'tag', '{}')
self.assertRaises(ValueError, Element, '{test}')
self.assertRaises(ValueError, setattr, el, 'tag', '{test}')
def test_element_name_colon(self):
Element = self.etree.Element
self.assertRaises(ValueError, Element, 'p:name')
self.assertRaises(ValueError, Element, '{test}p:name')
el = Element('name')
self.assertRaises(ValueError, setattr, el, 'tag', 'p:name')
def test_element_name_quote(self):
Element = self.etree.Element
self.assertRaises(ValueError, Element, "p'name")
self.assertRaises(ValueError, Element, 'p"name')
self.assertRaises(ValueError, Element, "{test}p'name")
self.assertRaises(ValueError, Element, '{test}p"name')
el = Element('name')
self.assertRaises(ValueError, setattr, el, 'tag', "p'name")
self.assertRaises(ValueError, setattr, el, 'tag', 'p"name')
def test_element_name_space(self):
Element = self.etree.Element
self.assertRaises(ValueError, Element, ' name ')
self.assertRaises(ValueError, Element, 'na me')
self.assertRaises(ValueError, Element, '{test} name')
el = Element('name')
self.assertRaises(ValueError, setattr, el, 'tag', ' name ')
def test_subelement_name_empty(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, '{}')
self.assertRaises(ValueError, SubElement, el, '{test}')
def test_subelement_name_colon(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, 'p:name')
self.assertRaises(ValueError, SubElement, el, '{test}p:name')
def test_subelement_name_quote(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, "p'name")
self.assertRaises(ValueError, SubElement, el, "{test}p'name")
self.assertRaises(ValueError, SubElement, el, 'p"name')
self.assertRaises(ValueError, SubElement, el, '{test}p"name')
def test_subelement_name_space(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, ' name ')
self.assertRaises(ValueError, SubElement, el, 'na me')
self.assertRaises(ValueError, SubElement, el, '{test} name')
def test_subelement_attribute_invalid(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, 'name', {'a b c' : 'abc'})
self.assertRaises(ValueError, SubElement, el, 'name', {'a' : 'a\0\n'})
self.assertEqual(0, len(el))
def test_qname_empty(self):
QName = self.etree.QName
self.assertRaises(ValueError, QName, '')
self.assertRaises(ValueError, QName, 'test', '')
def test_qname_colon(self):
QName = self.etree.QName
self.assertRaises(ValueError, QName, 'p:name')
self.assertRaises(ValueError, QName, 'test', 'p:name')
def test_qname_space(self):
QName = self.etree.QName
self.assertRaises(ValueError, QName, ' name ')
self.assertRaises(ValueError, QName, 'na me')
self.assertRaises(ValueError, QName, 'test', ' name')
def test_qname_namespace_localname(self):
# ET doesn't have namespace/localname properties on QNames
QName = self.etree.QName
namespace, localname = 'http://myns', 'a'
qname = QName(namespace, localname)
self.assertEqual(namespace, qname.namespace)
self.assertEqual(localname, qname.localname)
def test_qname_element(self):
# ET doesn't have namespace/localname properties on QNames
QName = self.etree.QName
qname1 = QName('http://myns', 'a')
a = self.etree.Element(qname1, nsmap={'p' : 'http://myns'})
qname2 = QName(a)
self.assertEqual(a.tag, qname1.text)
self.assertEqual(qname1.text, qname2.text)
self.assertEqual(qname1, qname2)
def test_qname_text_resolve(self):
# ET doesn't resove QNames as text values
etree = self.etree
qname = etree.QName('http://myns', 'a')
a = etree.Element(qname, nsmap={'p' : 'http://myns'})
a.text = qname
self.assertEqual("p:a", a.text)
def test_nsmap_prefix_invalid(self):
etree = self.etree
self.assertRaises(ValueError,
etree.Element, "root", nsmap={'"' : 'testns'})
self.assertRaises(ValueError,
etree.Element, "root", nsmap={'&' : 'testns'})
self.assertRaises(ValueError,
etree.Element, "root", nsmap={'a:b' : 'testns'})
def test_attribute_has_key(self):
# ET in Py 3.x has no "attrib.has_key()" method
XML = self.etree.XML
root = XML(_bytes('<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />'))
self.assertEqual(
True, root.attrib.has_key('bar'))
self.assertEqual(
False, root.attrib.has_key('baz'))
self.assertEqual(
False, root.attrib.has_key('hah'))
self.assertEqual(
True,
root.attrib.has_key('{http://ns.codespeak.net/test}baz'))
    def test_attribute_set(self):
        # A string attribute value set via set() is retrievable via get().
        # NOTE(review): a second method with this exact name is defined
        # further down in this class and shadows this one, so this version
        # never runs under test discovery — consider renaming one of them.
        Element = self.etree.Element
        root = Element("root")
        root.set("attr", "TEST")
        self.assertEqual("TEST", root.get("attr"))
def test_attribute_set_invalid(self):
# ElementTree accepts arbitrary attribute values
# lxml.etree allows only strings
Element = self.etree.Element
root = Element("root")
self.assertRaises(TypeError, root.set, "newattr", 5)
self.assertRaises(TypeError, root.set, "newattr", None)
    def test_strip_attributes(self):
        # strip_attributes() removes the named attributes throughout the
        # subtree and leaves all other attributes untouched.
        XML = self.etree.XML
        xml = _bytes('<test a="5" b="10" c="20"><x a="4" b="2"/></test>')
        root = XML(xml)
        self.etree.strip_attributes(root, 'a')
        self.assertEqual(_bytes('<test b="10" c="20"><x b="2"></x></test>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_attributes(root, 'b', 'c')
        self.assertEqual(_bytes('<test a="5"><x a="4"></x></test>'),
                         self._writeElement(root))
    def test_strip_attributes_ns(self):
        # strip_attributes() with namespaced names: a plain name only
        # matches attributes with no namespace; '{ns}name' targets one
        # namespaced attribute and '{ns}*' all attributes in a namespace.
        XML = self.etree.XML
        xml = _bytes('<test xmlns:n="http://test/ns" a="6" b="10" c="20" n:a="5"><x a="4" n:b="2"/></test>')
        root = XML(xml)
        self.etree.strip_attributes(root, 'a')
        self.assertEqual(
            _bytes('<test xmlns:n="http://test/ns" b="10" c="20" n:a="5"><x n:b="2"></x></test>'),
            self._writeElement(root))
        root = XML(xml)
        self.etree.strip_attributes(root, '{http://test/ns}a', 'c')
        self.assertEqual(
            _bytes('<test xmlns:n="http://test/ns" a="6" b="10"><x a="4" n:b="2"></x></test>'),
            self._writeElement(root))
        root = XML(xml)
        self.etree.strip_attributes(root, '{http://test/ns}*')
        self.assertEqual(
            _bytes('<test xmlns:n="http://test/ns" a="6" b="10" c="20"><x a="4"></x></test>'),
            self._writeElement(root))
    def test_strip_elements(self):
        # strip_elements() deletes matching elements together with their
        # entire subtree; unmatched names are silently ignored.
        XML = self.etree.XML
        xml = _bytes('<test><a><b><c/></b></a><x><a><b/><c/></a></x></test>')
        root = XML(xml)
        self.etree.strip_elements(root, 'a')
        self.assertEqual(_bytes('<test><x></x></test>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_elements(root, 'b', 'c', 'X', 'Y', 'Z')
        self.assertEqual(_bytes('<test><a></a><x><a></a></x></test>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_elements(root, 'c')
        self.assertEqual(_bytes('<test><a><b></b></a><x><a><b></b></a></x></test>'),
                         self._writeElement(root))
    def test_strip_elements_ns(self):
        # Namespaced strip_elements(): plain names only match elements
        # without a namespace; '{ns}name'/'{ns}*' match within one
        # namespace.  By default the element's tail text is removed too;
        # with_tail=False keeps it.
        XML = self.etree.XML
        xml = _bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"/>C</b>BT</n:a>AT<x>X<a>A<b xmlns="urn:a"/>BT<c xmlns="urn:x"/>CT</a>AT</x>XT</test>')
        root = XML(xml)
        self.etree.strip_elements(root, 'a')
        self.assertEqual(_bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"></c>C</b>BT</n:a>AT<x>X</x>XT</test>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_elements(root, '{urn:a}b', 'c')
        self.assertEqual(_bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"></c>C</b>BT</n:a>AT<x>X<a>A<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_elements(root, '{urn:a}*', 'c')
        self.assertEqual(_bytes('<test>TEST<x>X<a>A<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_elements(root, '{urn:a}*', 'c', with_tail=False)
        self.assertEqual(_bytes('<test>TESTAT<x>X<a>ABT<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
                         self._writeElement(root))
    def test_strip_tags(self):
        # strip_tags() removes only the matching tags themselves, merging
        # their text/children/tail into the parent (unlike strip_elements,
        # which discards the whole subtree).
        XML = self.etree.XML
        xml = _bytes('<test>TEST<a>A<b>B<c/>CT</b>BT</a>AT<x>X<a>A<b/>BT<c/>CT</a>AT</x>XT</test>')
        root = XML(xml)
        self.etree.strip_tags(root, 'a')
        self.assertEqual(_bytes('<test>TESTA<b>B<c></c>CT</b>BTAT<x>XA<b></b>BT<c></c>CTAT</x>XT</test>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_tags(root, 'b', 'c', 'X', 'Y', 'Z')
        self.assertEqual(_bytes('<test>TEST<a>ABCTBT</a>AT<x>X<a>ABTCT</a>AT</x>XT</test>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_tags(root, 'c')
        self.assertEqual(_bytes('<test>TEST<a>A<b>BCT</b>BT</a>AT<x>X<a>A<b></b>BTCT</a>AT</x>XT</test>'),
                         self._writeElement(root))
    def test_strip_tags_pi_comment(self):
        # strip_tags() also accepts the ProcessingInstruction and Comment
        # node factories as "tags".  Called on an Element, it only strips
        # nodes inside the root — prolog/epilog siblings stay untouched.
        XML = self.etree.XML
        PI = self.etree.ProcessingInstruction
        Comment = self.etree.Comment
        xml = _bytes('<!--comment1-->\n<?PI1?>\n<test>TEST<!--comment2-->XT<?PI2?></test>\n<!--comment3-->\n<?PI1?>')
        root = XML(xml)
        self.etree.strip_tags(root, PI)
        self.assertEqual(_bytes('<!--comment1-->\n<?PI1?>\n<test>TEST<!--comment2-->XT</test>\n<!--comment3-->\n<?PI1?>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_tags(root, Comment)
        self.assertEqual(_bytes('<!--comment1-->\n<?PI1?>\n<test>TESTXT<?PI2?></test>\n<!--comment3-->\n<?PI1?>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_tags(root, PI, Comment)
        self.assertEqual(_bytes('<!--comment1-->\n<?PI1?>\n<test>TESTXT</test>\n<!--comment3-->\n<?PI1?>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_tags(root, Comment, PI)
        self.assertEqual(_bytes('<!--comment1-->\n<?PI1?>\n<test>TESTXT</test>\n<!--comment3-->\n<?PI1?>'),
                         self._writeElement(root))
    def test_strip_tags_pi_comment_all(self):
        # Unlike the Element variant above, calling strip_tags() on an
        # ElementTree also strips PI/Comment siblings of the root element.
        XML = self.etree.XML
        ElementTree = self.etree.ElementTree
        PI = self.etree.ProcessingInstruction
        Comment = self.etree.Comment
        xml = _bytes('<!--comment1-->\n<?PI1?>\n<test>TEST<!--comment2-->XT<?PI2?></test>\n<!--comment3-->\n<?PI1?>')
        root = XML(xml)
        self.etree.strip_tags(ElementTree(root), PI)
        self.assertEqual(_bytes('<!--comment1-->\n<test>TEST<!--comment2-->XT</test>\n<!--comment3-->'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_tags(ElementTree(root), Comment)
        self.assertEqual(_bytes('<?PI1?>\n<test>TESTXT<?PI2?></test>\n<?PI1?>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_tags(ElementTree(root), PI, Comment)
        self.assertEqual(_bytes('<test>TESTXT</test>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_tags(ElementTree(root), Comment, PI)
        self.assertEqual(_bytes('<test>TESTXT</test>'),
                         self._writeElement(root))
    def test_strip_tags_doc_style(self):
        # Document-style markup: stripping 'a' (and then 'br') must be
        # equivalent to deleting those tags textually from the input,
        # which is emulated here with regular expressions.
        XML = self.etree.XML
        xml = _bytes('''
        <div>
            <div>
                I like <strong>sheep</strong>.
                <br/>
                I like lots of <strong>sheep</strong>.
                <br/>
                Click <a href="http://www.sheep.com">here</a> for <a href="http://www.sheep.com">those</a> sheep.
                <br/>
            </div>
        </div>
        '''.strip())
        root = XML(xml)
        self.etree.strip_tags(root, 'a')
        self.assertEqual(re.sub(_bytes('</?a[^>]*>'), _bytes(''), xml).replace(_bytes('<br/>'), _bytes('<br></br>')),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_tags(root, 'a', 'br')
        self.assertEqual(re.sub(_bytes('</?a[^>]*>'), _bytes(''),
                                re.sub(_bytes('<br[^>]*>'), _bytes(''), xml)),
                         self._writeElement(root))
    def test_strip_tags_ns(self):
        # Namespaced strip_tags(): a plain name only matches tags without
        # a namespace; '{ns}name' and '{ns}*' match inside one namespace.
        XML = self.etree.XML
        xml = _bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"/>CT</b>BT</n:a>AT<x>X<a>A<b xmlns="urn:a"/>BT<c xmlns="urn:x"/>CT</a>AT</x>XT</test>')
        root = XML(xml)
        self.etree.strip_tags(root, 'a')
        self.assertEqual(_bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"></c>CT</b>BT</n:a>AT<x>XA<b xmlns="urn:a"></b>BT<c xmlns="urn:x"></c>CTAT</x>XT</test>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_tags(root, '{urn:a}b', 'c')
        self.assertEqual(_bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"></c>CT</b>BT</n:a>AT<x>X<a>ABT<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
                         self._writeElement(root))
        root = XML(xml)
        self.etree.strip_tags(root, '{urn:a}*', 'c')
        self.assertEqual(_bytes('<test>TESTA<b>B<c xmlns="urn:c"></c>CT</b>BTAT<x>X<a>ABT<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
                         self._writeElement(root))
    def test_strip_tags_and_remove(self):
        # previously crashed
        # Regression test: strip_tags() followed by remove() on the same
        # tree must not crash and must leave a consistent tree.
        HTML = self.etree.HTML
        root = HTML(_bytes('<div><h1>title</h1> <b>foo</b> <p>boo</p></div>'))[0][0]
        self.assertEqual(_bytes('<div><h1>title</h1> <b>foo</b> <p>boo</p></div>'),
                         self.etree.tostring(root))
        self.etree.strip_tags(root, 'b')
        self.assertEqual(_bytes('<div><h1>title</h1> foo <p>boo</p></div>'),
                         self.etree.tostring(root))
        root.remove(root[0])
        self.assertEqual(_bytes('<div><p>boo</p></div>'),
                         self.etree.tostring(root))
def test_pi(self):
# lxml.etree separates target and text
Element = self.etree.Element
SubElement = self.etree.SubElement
ProcessingInstruction = self.etree.ProcessingInstruction
a = Element('a')
a.append(ProcessingInstruction('foo', 'some more text'))
self.assertEqual(a[0].target, 'foo')
self.assertEqual(a[0].text, 'some more text')
def test_pi_parse(self):
XML = self.etree.XML
root = XML(_bytes("<test><?mypi my test ?></test>"))
self.assertEqual(root[0].target, "mypi")
self.assertEqual(root[0].text, "my test ")
def test_pi_pseudo_attributes_get(self):
XML = self.etree.XML
root = XML(_bytes("<test><?mypi my='1' test=\" abc \" quotes=\"' '\" only names ?></test>"))
self.assertEqual(root[0].target, "mypi")
self.assertEqual(root[0].get('my'), "1")
self.assertEqual(root[0].get('test'), " abc ")
self.assertEqual(root[0].get('quotes'), "' '")
self.assertEqual(root[0].get('only'), None)
self.assertEqual(root[0].get('names'), None)
self.assertEqual(root[0].get('nope'), None)
def test_pi_pseudo_attributes_attrib(self):
XML = self.etree.XML
root = XML(_bytes("<test><?mypi my='1' test=\" abc \" quotes=\"' '\" only names ?></test>"))
self.assertEqual(root[0].target, "mypi")
self.assertEqual(root[0].attrib['my'], "1")
self.assertEqual(root[0].attrib['test'], " abc ")
self.assertEqual(root[0].attrib['quotes'], "' '")
self.assertRaises(KeyError, root[0].attrib.__getitem__, 'only')
self.assertRaises(KeyError, root[0].attrib.__getitem__, 'names')
self.assertRaises(KeyError, root[0].attrib.__getitem__, 'nope')
def test_deepcopy_pi(self):
# previously caused a crash
ProcessingInstruction = self.etree.ProcessingInstruction
a = ProcessingInstruction("PI", "ONE")
b = copy.deepcopy(a)
b.text = "ANOTHER"
self.assertEqual('ONE', a.text)
self.assertEqual('ANOTHER', b.text)
    def test_deepcopy_elementtree_pi(self):
        # Deep-copying an ElementTree keeps the PI/comment siblings of the
        # root; deep-copying only the root element drops them.
        XML = self.etree.XML
        tostring = self.etree.tostring
        root = XML(_bytes("<?mypi my test ?><test/><!--comment -->"))
        tree1 = self.etree.ElementTree(root)
        self.assertEqual(_bytes("<?mypi my test ?><test/><!--comment -->"),
                         tostring(tree1))
        tree2 = copy.deepcopy(tree1)
        self.assertEqual(_bytes("<?mypi my test ?><test/><!--comment -->"),
                         tostring(tree2))
        root2 = copy.deepcopy(tree1.getroot())
        self.assertEqual(_bytes("<test/>"),
                         tostring(root2))
    def test_deepcopy_elementtree_dtd(self):
        # Deep-copying an ElementTree keeps the DOCTYPE; deep-copying only
        # the root element drops it.
        XML = self.etree.XML
        tostring = self.etree.tostring
        xml = _bytes('<!DOCTYPE test [\n<!ENTITY entity "tasty">\n]>\n<test/>')
        root = XML(xml)
        tree1 = self.etree.ElementTree(root)
        self.assertEqual(xml, tostring(tree1))
        tree2 = copy.deepcopy(tree1)
        self.assertEqual(xml, tostring(tree2))
        root2 = copy.deepcopy(tree1.getroot())
        self.assertEqual(_bytes("<test/>"),
                         tostring(root2))
    def test_attribute_set(self):
        # ElementTree accepts arbitrary attribute values
        # lxml.etree allows only strings
        # NOTE(review): this method name duplicates an earlier
        # test_attribute_set in this class and therefore shadows it;
        # consider renaming this one (e.g. test_attribute_set_int_value).
        Element = self.etree.Element
        root = Element("root")
        root.set("attr", "TEST")
        self.assertEqual("TEST", root.get("attr"))
        self.assertRaises(TypeError, root.set, "newattr", 5)
def test_parse_remove_comments(self):
fromstring = self.etree.fromstring
tostring = self.etree.tostring
XMLParser = self.etree.XMLParser
xml = _bytes('<a><!--A--><b><!-- B --><c/></b><!--C--></a>')
parser = XMLParser(remove_comments=True)
root = fromstring(xml, parser)
self.assertEqual(
_bytes('<a><b><c/></b></a>'),
tostring(root))
def test_parse_remove_pis(self):
parse = self.etree.parse
tostring = self.etree.tostring
XMLParser = self.etree.XMLParser
xml = _bytes('<?test?><a><?A?><b><?B?><c/></b><?C?></a><?tail?>')
f = BytesIO(xml)
tree = parse(f)
self.assertEqual(
xml,
tostring(tree))
parser = XMLParser(remove_pis=True)
tree = parse(f, parser)
self.assertEqual(
_bytes('<a><b><c/></b></a>'),
tostring(tree))
def test_parse_parser_type_error(self):
# ET raises IOError only
parse = self.etree.parse
self.assertRaises(TypeError, parse, 'notthere.xml', object())
def test_iterparse_tree_comments(self):
# ET removes comments
iterparse = self.etree.iterparse
tostring = self.etree.tostring
f = BytesIO('<a><!--A--><b><!-- B --><c/></b><!--C--></a>')
events = list(iterparse(f))
root = events[-1][1]
self.assertEqual(3, len(events))
self.assertEqual(
_bytes('<a><!--A--><b><!-- B --><c/></b><!--C--></a>'),
tostring(root))
def test_iterparse_comments(self):
# ET removes comments
iterparse = self.etree.iterparse
tostring = self.etree.tostring
def name(event, el):
if event == 'comment':
return el.text
else:
return el.tag
f = BytesIO('<a><!--A--><b><!-- B --><c/></b><!--C--></a>')
events = list(iterparse(f, events=('end', 'comment')))
root = events[-1][1]
self.assertEqual(6, len(events))
self.assertEqual(['A', ' B ', 'c', 'b', 'C', 'a'],
[ name(*item) for item in events ])
self.assertEqual(
_bytes('<a><!--A--><b><!-- B --><c/></b><!--C--></a>'),
tostring(root))
def test_iterparse_pis(self):
# ET removes pis
iterparse = self.etree.iterparse
tostring = self.etree.tostring
ElementTree = self.etree.ElementTree
def name(event, el):
if event == 'pi':
return (el.target, el.text)
else:
return el.tag
f = BytesIO('<?pia a?><a><?pib b?><b><?pic c?><c/></b><?pid d?></a><?pie e?>')
events = list(iterparse(f, events=('end', 'pi')))
root = events[-2][1]
self.assertEqual(8, len(events))
self.assertEqual([('pia','a'), ('pib','b'), ('pic','c'), 'c', 'b',
('pid','d'), 'a', ('pie','e')],
[ name(*item) for item in events ])
self.assertEqual(
_bytes('<?pia a?><a><?pib b?><b><?pic c?><c/></b><?pid d?></a><?pie e?>'),
tostring(ElementTree(root)))
def test_iterparse_remove_comments(self):
iterparse = self.etree.iterparse
tostring = self.etree.tostring
f = BytesIO('<a><!--A--><b><!-- B --><c/></b><!--C--></a>')
events = list(iterparse(f, remove_comments=True,
events=('end', 'comment')))
root = events[-1][1]
self.assertEqual(3, len(events))
self.assertEqual(['c', 'b', 'a'],
[ el.tag for (event, el) in events ])
self.assertEqual(
_bytes('<a><b><c/></b></a>'),
tostring(root))
def test_iterparse_broken(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><c/></a>')
# ET raises ExpatError, lxml raises XMLSyntaxError
self.assertRaises(self.etree.XMLSyntaxError, list, iterparse(f))
def test_iterparse_strip(self):
iterparse = self.etree.iterparse
f = BytesIO("""
<a> \n \n <b> b test </b> \n
\n\t <c> \n </c> </a> \n """)
iterator = iterparse(f, remove_blank_text=True)
text = [ (element.text, element.tail)
for event, element in iterator ]
self.assertEqual(
[(" b test ", None), (" \n ", None), (None, None)],
text)
def test_iterparse_tag(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><d/></b><c/></a>')
iterator = iterparse(f, tag="b", events=('start', 'end'))
events = list(iterator)
root = iterator.root
self.assertEqual(
[('start', root[0]), ('end', root[0])],
events)
def test_iterparse_tag_all(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><d/></b><c/></a>')
iterator = iterparse(f, tag="*", events=('start', 'end'))
events = list(iterator)
self.assertEqual(
8,
len(events))
def test_iterparse_tag_ns(self):
iterparse = self.etree.iterparse
f = BytesIO('<a xmlns="urn:test:1"><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{urn:test:1}b", events=('start', 'end'))
events = list(iterator)
root = iterator.root
self.assertEqual(
[('start', root[0]), ('end', root[0])],
events)
def test_iterparse_tag_ns_empty(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{}b", events=('start', 'end'))
events = list(iterator)
root = iterator.root
self.assertEqual(
[('start', root[0]), ('end', root[0])],
events)
f = BytesIO('<a xmlns="urn:test:1"><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{}b", events=('start', 'end'))
events = list(iterator)
root = iterator.root
self.assertEqual([], events)
def test_iterparse_tag_ns_all(self):
iterparse = self.etree.iterparse
f = BytesIO('<a xmlns="urn:test:1"><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{urn:test:1}*", events=('start', 'end'))
events = list(iterator)
self.assertEqual(8, len(events))
def test_iterparse_tag_ns_empty_all(self):
iterparse = self.etree.iterparse
f = BytesIO('<a xmlns="urn:test:1"><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{}*", events=('start', 'end'))
events = list(iterator)
self.assertEqual([], events)
f = BytesIO('<a><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{}*", events=('start', 'end'))
events = list(iterator)
self.assertEqual(8, len(events))
    def test_iterparse_encoding_error(self):
        # The XML declaration claims UTF-8 but the bytes are latin-1
        # encoded, so parsing the non-ASCII text must fail.
        text = _str('Søk på nettet')
        wrong_declaration = "<?xml version='1.0' encoding='UTF-8'?>"
        xml_latin1 = (_str('%s<a>%s</a>') % (wrong_declaration, text)
                      ).encode('iso-8859-1')
        self.assertRaises(self.etree.ParseError,
                          list, self.etree.iterparse(BytesIO(xml_latin1)))
    def test_iterparse_encoding_8bit_override(self):
        # An explicit encoding argument to iterparse() overrides the
        # (wrong) declaration in the document, so parsing succeeds.
        text = _str('Søk på nettet', encoding="UTF-8")
        wrong_declaration = "<?xml version='1.0' encoding='UTF-8'?>"
        xml_latin1 = (_str('%s<a>%s</a>') % (wrong_declaration, text)
                      ).encode('iso-8859-1')
        iterator = self.etree.iterparse(BytesIO(xml_latin1),
                                        encoding="iso-8859-1")
        self.assertEqual(1, len(list(iterator)))
        a = iterator.root
        self.assertEqual(a.text, text)
def test_iterparse_keep_cdata(self):
tostring = self.etree.tostring
f = BytesIO('<root><![CDATA[test]]></root>')
context = self.etree.iterparse(f, strip_cdata=False)
content = [ el.text for event,el in context ]
self.assertEqual(['test'], content)
self.assertEqual(_bytes('<root><![CDATA[test]]></root>'),
tostring(context.root))
def test_parser_encoding_unknown(self):
self.assertRaises(
LookupError, self.etree.XMLParser, encoding="hopefully unknown")
def test_parser_encoding(self):
self.etree.XMLParser(encoding="ascii")
self.etree.XMLParser(encoding="utf-8")
self.etree.XMLParser(encoding="iso-8859-1")
    def test_feed_parser_recover(self):
        # Incremental feed() parsing with recover=True: the input is fed
        # in arbitrary fragments and the unclosed <a> tag is repaired.
        parser = self.etree.XMLParser(recover=True)
        parser.feed('<?xml version=')
        parser.feed('"1.0"?><ro')
        parser.feed('ot><')
        parser.feed('a test="works"')
        parser.feed('><othertag/></root') # <a> not closed!
        parser.feed('>')
        root = parser.close()
        self.assertEqual(root.tag, "root")
        self.assertEqual(len(root), 1)
        self.assertEqual(root[0].tag, "a")
        self.assertEqual(root[0].get("test"), "works")
        self.assertEqual(len(root[0]), 1)
        self.assertEqual(root[0][0].tag, "othertag")
        # FIXME: would be nice to get some errors logged ...
        #self.assertTrue(len(parser.error_log) > 0, "error log is empty")
def test_elementtree_parser_target_type_error(self):
assertEqual = self.assertEqual
assertFalse = self.assertFalse
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start")
assertFalse(attrib)
assertEqual("TAG", tag)
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
return "DONE" # no Element!
parser = self.etree.XMLParser(target=Target())
tree = self.etree.ElementTree()
self.assertRaises(TypeError,
tree.parse, BytesIO("<TAG/>"), parser=parser)
self.assertEqual(["start", "end"], events)
    def test_parser_target_feed_exception(self):
        # ET doesn't call .close() on errors
        # lxml does: an exception raised in a target callback during
        # feed()/close() still triggers Target.close() and propagates.
        events = []
        class Target(object):
            def start(self, tag, attrib):
                events.append("start-" + tag)
            def end(self, tag):
                events.append("end-" + tag)
                if tag == 'a':
                    raise ValueError("dead and gone")
            def data(self, data):
                events.append("data-" + data)
            def close(self):
                events.append("close")
                return "DONE"
        parser = self.etree.XMLParser(target=Target())
        try:
            parser.feed(_bytes('<root>A<a>ca</a>B</root>'))
            done = parser.close()
            self.fail("error expected, but parsing succeeded")
        except ValueError:
            done = 'value error received as expected'
        self.assertEqual(["start-root", "data-A", "start-a",
                          "data-ca", "end-a", "close"],
                         events)
    def test_parser_target_fromstring_exception(self):
        # ET doesn't call .close() on errors
        # Same as the feed() variant above, but going through fromstring().
        events = []
        class Target(object):
            def start(self, tag, attrib):
                events.append("start-" + tag)
            def end(self, tag):
                events.append("end-" + tag)
                if tag == 'a':
                    raise ValueError("dead and gone")
            def data(self, data):
                events.append("data-" + data)
            def close(self):
                events.append("close")
                return "DONE"
        parser = self.etree.XMLParser(target=Target())
        try:
            done = self.etree.fromstring(_bytes('<root>A<a>ca</a>B</root>'),
                                         parser=parser)
            self.fail("error expected, but parsing succeeded")
        except ValueError:
            done = 'value error received as expected'
        self.assertEqual(["start-root", "data-A", "start-a",
                          "data-ca", "end-a", "close"],
                         events)
    def test_parser_target_comment(self):
        # A target with a comment() callback receives comment events,
        # including comments outside the root element.
        events = []
        class Target(object):
            def start(self, tag, attrib):
                events.append("start-" + tag)
            def end(self, tag):
                events.append("end-" + tag)
            def data(self, data):
                events.append("data-" + data)
            def comment(self, text):
                events.append("comment-" + text)
            def close(self):
                return "DONE"
        parser = self.etree.XMLParser(target=Target())
        parser.feed(_bytes('<!--a--><root>A<!--b--><sub/><!--c-->B</root><!--d-->'))
        done = parser.close()
        self.assertEqual("DONE", done)
        self.assertEqual(["comment-a", "start-root", "data-A", "comment-b",
                          "start-sub", "end-sub", "comment-c", "data-B",
                          "end-root", "comment-d"],
                         events)
    def test_parser_target_pi(self):
        # A target with a pi() callback receives PI events, including
        # processing instructions outside the root element.
        events = []
        class Target(object):
            def start(self, tag, attrib):
                events.append("start-" + tag)
            def end(self, tag):
                events.append("end-" + tag)
            def data(self, data):
                events.append("data-" + data)
            def pi(self, target, data):
                events.append("pi-" + target + "-" + data)
            def close(self):
                return "DONE"
        parser = self.etree.XMLParser(target=Target())
        parser.feed(_bytes('<?test a?><root>A<?test b?>B</root><?test c?>'))
        done = parser.close()
        self.assertEqual("DONE", done)
        self.assertEqual(["pi-test-a", "start-root", "data-A", "pi-test-b",
                          "data-B", "end-root", "pi-test-c"],
                         events)
    def test_parser_target_cdata(self):
        # With strip_cdata=False, CDATA content is still delivered to the
        # target through the regular data() callback.
        events = []
        class Target(object):
            def start(self, tag, attrib):
                events.append("start-" + tag)
            def end(self, tag):
                events.append("end-" + tag)
            def data(self, data):
                events.append("data-" + data)
            def close(self):
                return "DONE"
        parser = self.etree.XMLParser(target=Target(),
                                      strip_cdata=False)
        parser.feed(_bytes('<root>A<a><![CDATA[ca]]></a>B</root>'))
        done = parser.close()
        self.assertEqual("DONE", done)
        self.assertEqual(["start-root", "data-A", "start-a",
                          "data-ca", "end-a", "data-B", "end-root"],
                         events)
    def test_parser_target_recover(self):
        # With recover=True, the mismatched close tag is repaired and the
        # target still receives a complete, well-formed event sequence.
        events = []
        class Target(object):
            def start(self, tag, attrib):
                events.append("start-" + tag)
            def end(self, tag):
                events.append("end-" + tag)
            def data(self, data):
                events.append("data-" + data)
            def close(self):
                events.append("close")
                return "DONE"
        parser = self.etree.XMLParser(target=Target(),
                                      recover=True)
        parser.feed(_bytes('<root>A<a>ca</a>B</not-root>'))
        done = parser.close()
        self.assertEqual("DONE", done)
        self.assertEqual(["start-root", "data-A", "start-a",
                          "data-ca", "end-a", "data-B",
                          "end-root", "close"],
                         events)
def test_iterwalk_tag(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b><d/></b><c/></a>'))
iterator = iterwalk(root, tag="b", events=('start', 'end'))
events = list(iterator)
self.assertEqual(
[('start', root[0]), ('end', root[0])],
events)
def test_iterwalk_tag_all(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b><d/></b><c/></a>'))
iterator = iterwalk(root, tag="*", events=('start', 'end'))
events = list(iterator)
self.assertEqual(
8,
len(events))
def test_iterwalk(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/></a>'))
events = list(iterwalk(root))
self.assertEqual(
[('end', root[0]), ('end', root[1]), ('end', root)],
events)
def test_iterwalk_start(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/></a>'))
iterator = iterwalk(root, events=('start',))
events = list(iterator)
self.assertEqual(
[('start', root), ('start', root[0]), ('start', root[1])],
events)
def test_iterwalk_start_end(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/></a>'))
iterator = iterwalk(root, events=('start','end'))
events = list(iterator)
self.assertEqual(
[('start', root), ('start', root[0]), ('end', root[0]),
('start', root[1]), ('end', root[1]), ('end', root)],
events)
def test_iterwalk_clear(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/></a>'))
iterator = iterwalk(root)
for event, elem in iterator:
elem.clear()
self.assertEqual(0,
len(root))
    def test_iterwalk_attrib_ns(self):
        # Setting a namespaced attribute on elements during the walk is
        # safe; 'start-ns'/'end-ns' events bracket the namespace scopes.
        iterwalk = self.etree.iterwalk
        root = self.etree.XML(_bytes('<a xmlns="ns1"><b><c xmlns="ns2"/></b></a>'))
        attr_name = '{testns}bla'
        events = []
        iterator = iterwalk(root, events=('start','end','start-ns','end-ns'))
        for event, elem in iterator:
            events.append(event)
            if event == 'start':
                # The root element is deliberately left unmodified.
                if elem.tag != '{ns1}a':
                    elem.set(attr_name, 'value')
        self.assertEqual(
            ['start-ns', 'start', 'start', 'start-ns', 'start',
             'end', 'end-ns', 'end', 'end', 'end-ns'],
            events)
        self.assertEqual(
            None,
            root.get(attr_name))
        self.assertEqual(
            'value',
            root[0].get(attr_name))
def test_iterwalk_getiterator(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b><d/></b><c/></a>'))
counts = []
for event, elem in iterwalk(root):
counts.append(len(list(elem.getiterator())))
self.assertEqual(
[1,2,1,4],
counts)
    def test_resolve_string_dtd(self):
        # A custom Resolver can satisfy a DTD request from an in-memory
        # string via resolve_string(); the resolved entity is then usable
        # in the document.
        parse = self.etree.parse
        parser = self.etree.XMLParser(dtd_validation=True)
        assertEqual = self.assertEqual
        test_url = _str("__nosuch.dtd")
        class MyResolver(self.etree.Resolver):
            def resolve(self, url, id, context):
                assertEqual(url, test_url)
                return self.resolve_string(
                    _str('''<!ENTITY myentity "%s">
                    <!ELEMENT doc ANY>''') % url, context)
        parser.resolvers.add(MyResolver())
        xml = _str('<!DOCTYPE doc SYSTEM "%s"><doc>&myentity;</doc>') % test_url
        tree = parse(StringIO(xml), parser)
        root = tree.getroot()
        self.assertEqual(root.text, test_url)
    def test_resolve_bytes_dtd(self):
        # Same as the string-DTD test above, but resolve_string() is fed
        # a bytes payload instead of text.
        parse = self.etree.parse
        parser = self.etree.XMLParser(dtd_validation=True)
        assertEqual = self.assertEqual
        test_url = _str("__nosuch.dtd")
        class MyResolver(self.etree.Resolver):
            def resolve(self, url, id, context):
                assertEqual(url, test_url)
                return self.resolve_string(
                    (_str('''<!ENTITY myentity "%s">
                    <!ELEMENT doc ANY>''') % url).encode('utf-8'),
                    context)
        parser.resolvers.add(MyResolver())
        xml = _str('<!DOCTYPE doc SYSTEM "%s"><doc>&myentity;</doc>') % test_url
        tree = parse(StringIO(xml), parser)
        root = tree.getroot()
        self.assertEqual(root.text, test_url)
    def test_resolve_filelike_dtd(self):
        # A resolver may satisfy the DTD request from a file-like object
        # via resolve_file().
        parse = self.etree.parse
        parser = self.etree.XMLParser(dtd_validation=True)
        assertEqual = self.assertEqual
        test_url = _str("__nosuch.dtd")
        class MyResolver(self.etree.Resolver):
            def resolve(self, url, id, context):
                assertEqual(url, test_url)
                return self.resolve_file(
                    SillyFileLike(
                        _str('''<!ENTITY myentity "%s">
                        <!ELEMENT doc ANY>''') % url), context)
        parser.resolvers.add(MyResolver())
        xml = _str('<!DOCTYPE doc SYSTEM "%s"><doc>&myentity;</doc>') % test_url
        tree = parse(StringIO(xml), parser)
        root = tree.getroot()
        self.assertEqual(root.text, test_url)
    def test_resolve_filename_dtd(self):
        # A resolver may redirect the DTD request to a local file via
        # resolve_filename(); the DTD's default attributes then apply.
        parse = self.etree.parse
        parser = self.etree.XMLParser(attribute_defaults=True)
        assertEqual = self.assertEqual
        test_url = _str("__nosuch.dtd")
        class MyResolver(self.etree.Resolver):
            def resolve(self, url, id, context):
                assertEqual(url, test_url)
                return self.resolve_filename(
                    fileInTestDir('test.dtd'), context)
        parser.resolvers.add(MyResolver())
        xml = _str('<!DOCTYPE a SYSTEM "%s"><a><b/></a>') % test_url
        tree = parse(StringIO(xml), parser)
        root = tree.getroot()
        self.assertEqual(
            root.attrib,    {'default': 'valueA'})
        self.assertEqual(
            root[0].attrib, {'default': 'valueB'})
    def test_resolve_filename_dtd_relative(self):
        # With a base_url, the resolver sees the DTD URL resolved
        # relative to that base before being redirected to a local file.
        parse = self.etree.parse
        parser = self.etree.XMLParser(attribute_defaults=True)
        assertEqual = self.assertEqual
        test_url = _str("__nosuch.dtd")
        class MyResolver(self.etree.Resolver):
            def resolve(self, url, id, context):
                assertEqual(url, fileInTestDir(test_url))
                return self.resolve_filename(
                    fileInTestDir('test.dtd'), context)
        parser.resolvers.add(MyResolver())
        xml = _str('<!DOCTYPE a SYSTEM "%s"><a><b/></a>') % test_url
        tree = parse(StringIO(xml), parser,
                     base_url=fileInTestDir('__test.xml'))
        root = tree.getroot()
        self.assertEqual(
            root.attrib,    {'default': 'valueA'})
        self.assertEqual(
            root[0].attrib, {'default': 'valueB'})
    def test_resolve_file_dtd(self):
        # resolve_file() with a real open file object for the DTD.
        # NOTE(review): the file opened here is never explicitly closed;
        # presumably resolve_file()/the parser takes ownership — confirm.
        parse = self.etree.parse
        parser = self.etree.XMLParser(attribute_defaults=True)
        assertEqual = self.assertEqual
        test_url = _str("__nosuch.dtd")
        class MyResolver(self.etree.Resolver):
            def resolve(self, url, id, context):
                assertEqual(url, test_url)
                return self.resolve_file(
                    open(fileInTestDir('test.dtd'), 'rb'), context)
        parser.resolvers.add(MyResolver())
        xml = _str('<!DOCTYPE a SYSTEM "%s"><a><b/></a>') % test_url
        tree = parse(StringIO(xml), parser)
        root = tree.getroot()
        self.assertEqual(
            root.attrib,    {'default': 'valueA'})
        self.assertEqual(
            root[0].attrib, {'default': 'valueB'})
def test_resolve_empty(self):
parse = self.etree.parse
parser = self.etree.XMLParser(load_dtd=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class check(object):
resolved = False
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
check.resolved = True
return self.resolve_empty(context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE doc SYSTEM "%s"><doc>&myentity;</doc>') % test_url
self.assertRaises(etree.XMLSyntaxError, parse, StringIO(xml), parser)
self.assertTrue(check.resolved)
def test_resolve_error(self):
parse = self.etree.parse
parser = self.etree.XMLParser(dtd_validation=True)
class _LocalException(Exception):
pass
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
raise _LocalException
parser.resolvers.add(MyResolver())
xml = '<!DOCTYPE doc SYSTEM "test"><doc>&myentity;</doc>'
self.assertRaises(_LocalException, parse, BytesIO(xml), parser)
    # Guarded by libxml2 version: this test is only defined when the
    # installed libxml2 is newer than 2.6.20.
    if etree.LIBXML_VERSION > (2,6,20):
        def test_entity_parse(self):
            # With resolve_entities=False, entity references remain in the
            # tree as Entity nodes and serialise back unexpanded.
            parse = self.etree.parse
            tostring = self.etree.tostring
            parser = self.etree.XMLParser(resolve_entities=False)
            Entity = self.etree.Entity
            xml = _bytes('<!DOCTYPE doc SYSTEM "test"><doc>&myentity;</doc>')
            tree = parse(BytesIO(xml), parser)
            root = tree.getroot()
            self.assertEqual(root[0].tag, Entity)
            self.assertEqual(root[0].text, "&myentity;")
            self.assertEqual(root[0].tail, None)
            self.assertEqual(root[0].name, "myentity")
            self.assertEqual(_bytes('<doc>&myentity;</doc>'),
                             tostring(root))
def test_entity_restructure(self):
xml = _bytes('''<!DOCTYPE root [ <!ENTITY nbsp " "> ]>
<root>
<child1/>
<child2/>
<child3> </child3>
</root>''')
parser = self.etree.XMLParser(resolve_entities=False)
root = etree.fromstring(xml, parser)
self.assertEqual([ el.tag for el in root ],
['child1', 'child2', 'child3'])
root[0] = root[-1]
self.assertEqual([ el.tag for el in root ],
['child3', 'child2'])
self.assertEqual(root[0][0].text, ' ')
self.assertEqual(root[0][0].name, 'nbsp')
def test_entity_append(self):
Entity = self.etree.Entity
Element = self.etree.Element
tostring = self.etree.tostring
root = Element("root")
root.append( Entity("test") )
self.assertEqual(root[0].tag, Entity)
self.assertEqual(root[0].text, "&test;")
self.assertEqual(root[0].tail, None)
self.assertEqual(root[0].name, "test")
self.assertEqual(_bytes('<root>&test;</root>'),
tostring(root))
def test_entity_values(self):
Entity = self.etree.Entity
self.assertEqual(Entity("test").text, '&test;')
self.assertEqual(Entity("#17683").text, '䔓')
self.assertEqual(Entity("#x1768").text, 'ᝨ')
self.assertEqual(Entity("#x98AF").text, '颯')
def test_entity_error(self):
Entity = self.etree.Entity
self.assertRaises(ValueError, Entity, 'a b c')
self.assertRaises(ValueError, Entity, 'a,b')
self.assertRaises(ValueError, Entity, 'a\0b')
self.assertRaises(ValueError, Entity, '#abc')
self.assertRaises(ValueError, Entity, '#xxyz')
def test_cdata(self):
CDATA = self.etree.CDATA
Element = self.etree.Element
tostring = self.etree.tostring
root = Element("root")
root.text = CDATA('test')
self.assertEqual('test',
root.text)
self.assertEqual(_bytes('<root><![CDATA[test]]></root>'),
tostring(root))
def test_cdata_type(self):
CDATA = self.etree.CDATA
Element = self.etree.Element
root = Element("root")
root.text = CDATA("test")
self.assertEqual('test', root.text)
root.text = CDATA(_str("test"))
self.assertEqual('test', root.text)
self.assertRaises(TypeError, CDATA, 1)
def test_cdata_errors(self):
CDATA = self.etree.CDATA
Element = self.etree.Element
root = Element("root")
cdata = CDATA('test')
self.assertRaises(TypeError,
setattr, root, 'tail', cdata)
self.assertRaises(TypeError,
root.set, 'attr', cdata)
self.assertRaises(TypeError,
operator.setitem, root.attrib, 'attr', cdata)
def test_cdata_parser(self):
tostring = self.etree.tostring
parser = self.etree.XMLParser(strip_cdata=False)
root = self.etree.XML(_bytes('<root><![CDATA[test]]></root>'), parser)
self.assertEqual('test', root.text)
self.assertEqual(_bytes('<root><![CDATA[test]]></root>'),
tostring(root))
def test_cdata_xpath(self):
tostring = self.etree.tostring
parser = self.etree.XMLParser(strip_cdata=False)
root = self.etree.XML(_bytes('<root><![CDATA[test]]></root>'), parser)
self.assertEqual(_bytes('<root><![CDATA[test]]></root>'),
tostring(root))
self.assertEqual(['test'], root.xpath('//text()'))
# TypeError in etree, AssertionError in ElementTree;
def test_setitem_assert(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
self.assertRaises(TypeError,
a.__setitem__, 0, 'foo')
def test_append_error(self):
Element = self.etree.Element
root = Element('root')
# raises AssertionError in ElementTree
self.assertRaises(TypeError, root.append, None)
self.assertRaises(TypeError, root.extend, [None])
self.assertRaises(TypeError, root.extend, [Element('one'), None])
self.assertEqual('one', root[0].tag)
def test_addnext(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
SubElement(root, 'a')
SubElement(root, 'b')
self.assertEqual(['a', 'b'],
[c.tag for c in root])
root[1].addnext(root[0])
self.assertEqual(['b', 'a'],
[c.tag for c in root])
def test_addprevious(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
SubElement(root, 'a')
SubElement(root, 'b')
self.assertEqual(['a', 'b'],
[c.tag for c in root])
root[0].addprevious(root[1])
self.assertEqual(['b', 'a'],
[c.tag for c in root])
def test_addnext_root(self):
Element = self.etree.Element
a = Element('a')
b = Element('b')
self.assertRaises(TypeError, a.addnext, b)
def test_addnext_root(self):
Element = self.etree.Element
a = Element('a')
b = Element('b')
self.assertRaises(TypeError, a.addnext, b)
def test_addprevious_pi(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
PI = self.etree.PI
root = Element('root')
SubElement(root, 'a')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addprevious(pi)
self.assertEqual(_bytes('<root><?TARGET TEXT?>TAIL<a></a></root>'),
self._writeElement(root))
def test_addprevious_root_pi(self):
Element = self.etree.Element
PI = self.etree.PI
root = Element('root')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addprevious(pi)
self.assertEqual(_bytes('<?TARGET TEXT?>\n<root></root>'),
self._writeElement(root))
def test_addnext_pi(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
PI = self.etree.PI
root = Element('root')
SubElement(root, 'a')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addnext(pi)
self.assertEqual(_bytes('<root><a></a><?TARGET TEXT?>TAIL</root>'),
self._writeElement(root))
def test_addnext_root_pi(self):
Element = self.etree.Element
PI = self.etree.PI
root = Element('root')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addnext(pi)
self.assertEqual(_bytes('<root></root>\n<?TARGET TEXT?>'),
self._writeElement(root))
def test_addnext_comment(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
root = Element('root')
SubElement(root, 'a')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addnext(comment)
self.assertEqual(_bytes('<root><a></a><!--TEXT -->TAIL</root>'),
self._writeElement(root))
def test_addnext_root_comment(self):
Element = self.etree.Element
Comment = self.etree.Comment
root = Element('root')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addnext(comment)
self.assertEqual(_bytes('<root></root>\n<!--TEXT -->'),
self._writeElement(root))
def test_addprevious_comment(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
root = Element('root')
SubElement(root, 'a')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addprevious(comment)
self.assertEqual(_bytes('<root><!--TEXT -->TAIL<a></a></root>'),
self._writeElement(root))
def test_addprevious_root_comment(self):
Element = self.etree.Element
Comment = self.etree.Comment
root = Element('root')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addprevious(comment)
self.assertEqual(_bytes('<!--TEXT -->\n<root></root>'),
self._writeElement(root))
# ET's Elements have items() and key(), but not values()
def test_attribute_values(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>'))
values = root.values()
values.sort()
self.assertEqual(['Alpha', 'Beta', 'Gamma'], values)
# gives error in ElementTree
def test_comment_empty(self):
Element = self.etree.Element
Comment = self.etree.Comment
a = Element('a')
a.append(Comment())
self.assertEqual(
_bytes('<a><!----></a>'),
self._writeElement(a))
# ElementTree ignores comments
def test_comment_parse_empty(self):
ElementTree = self.etree.ElementTree
tostring = self.etree.tostring
xml = _bytes('<a><b/><!----><c/></a>')
f = BytesIO(xml)
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
'',
a[1].text)
self.assertEqual(
xml,
tostring(a))
# ElementTree ignores comments
def test_comment_no_proxy_yet(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a><b></b><!-- hoi --><c></c></a>')
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
' hoi ',
a[1].text)
# does not raise an exception in ElementTree
def test_comment_immutable(self):
Element = self.etree.Element
Comment = self.etree.Comment
c = Comment()
el = Element('myel')
self.assertRaises(TypeError, c.append, el)
self.assertRaises(TypeError, c.insert, 0, el)
self.assertRaises(TypeError, c.set, "myattr", "test")
# test passing 'None' to dump
def test_dump_none(self):
self.assertRaises(TypeError, self.etree.dump, None)
def test_prefix(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a xmlns:foo="http://www.infrae.com/ns/1"><foo:b/></a>')
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
None,
a.prefix)
self.assertEqual(
'foo',
a[0].prefix)
def test_prefix_default_ns(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a xmlns="http://www.infrae.com/ns/1"><b/></a>')
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
None,
a.prefix)
self.assertEqual(
None,
a[0].prefix)
def test_getparent(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
None,
a.getparent())
self.assertEqual(
a,
b.getparent())
self.assertEqual(
b.getparent(),
c.getparent())
self.assertEqual(
b,
d.getparent())
def test_iterchildren(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<three/></doc>'))
result = []
for el in root.iterchildren():
result.append(el.tag)
self.assertEqual(['one', 'two', 'three'], result)
def test_iterchildren_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<three/></doc>'))
result = []
for el in root.iterchildren(reversed=True):
result.append(el.tag)
self.assertEqual(['three', 'two', 'one'], result)
def test_iterchildren_tag(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two></doc>'))
result = []
for el in root.iterchildren(tag='two'):
result.append(el.text)
self.assertEqual(['Two', 'Bla'], result)
def test_iterchildren_tag_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two></doc>'))
result = []
for el in root.iterchildren(reversed=True, tag='two'):
result.append(el.text)
self.assertEqual(['Bla', 'Two'], result)
def test_iterchildren_tag_multiple(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two><three/></doc>'))
result = []
for el in root.iterchildren(tag=['two', 'three']):
result.append(el.text)
self.assertEqual(['Two', 'Bla', None], result)
def test_iterchildren_tag_multiple_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two><three/></doc>'))
result = []
for el in root.iterchildren(reversed=True, tag=['two', 'three']):
result.append(el.text)
self.assertEqual([None, 'Bla', 'Two'], result)
def test_iterancestors(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[],
list(a.iterancestors()))
self.assertEqual(
[a],
list(b.iterancestors()))
self.assertEqual(
[a],
list(c.iterancestors()))
self.assertEqual(
[b, a],
list(d.iterancestors()))
def test_iterancestors_tag(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[a],
list(d.iterancestors(tag='a')))
self.assertEqual(
[b, a],
list(d.iterancestors(tag='*')))
def test_iterancestors_tag_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[b, a],
list(d.iterancestors(tag=('a', 'b'))))
self.assertEqual(
[],
list(d.iterancestors(tag=('w', 'x', 'y', 'z'))))
self.assertEqual(
[],
list(d.iterancestors(tag=('d', 'x'))))
self.assertEqual(
[b, a],
list(d.iterancestors(tag=('b', '*'))))
self.assertEqual(
[b],
list(d.iterancestors(tag=('b', 'c'))))
def test_iterdescendants(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[b, d, c, e],
list(a.iterdescendants()))
self.assertEqual(
[],
list(d.iterdescendants()))
def test_iterdescendants_tag(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[],
list(a.iterdescendants('a')))
a2 = SubElement(e, 'a')
self.assertEqual(
[a2],
list(a.iterdescendants('a')))
self.assertEqual(
[a2],
list(c.iterdescendants('a')))
def test_iterdescendants_tag_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[b, e],
list(a.iterdescendants(tag=('a', 'b', 'e'))))
a2 = SubElement(e, 'a')
self.assertEqual(
[b, a2],
list(a.iterdescendants(tag=('a', 'b'))))
self.assertEqual(
[],
list(c.iterdescendants(tag=('x', 'y', 'z'))))
self.assertEqual(
[b, d, c, e, a2],
list(a.iterdescendants(tag=('x', 'y', 'z', '*'))))
def test_getroottree(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
a,
a.getroottree().getroot())
self.assertEqual(
a,
b.getroottree().getroot())
self.assertEqual(
a,
d.getroottree().getroot())
def test_getnext(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
self.assertEqual(
None,
a.getnext())
self.assertEqual(
c,
b.getnext())
self.assertEqual(
None,
c.getnext())
def test_getprevious(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
None,
a.getprevious())
self.assertEqual(
b,
c.getprevious())
self.assertEqual(
None,
b.getprevious())
def test_itersiblings(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[],
list(a.itersiblings()))
self.assertEqual(
[c],
list(b.itersiblings()))
self.assertEqual(
[],
list(c.itersiblings()))
self.assertEqual(
[b],
list(c.itersiblings(preceding=True)))
self.assertEqual(
[],
list(b.itersiblings(preceding=True)))
def test_itersiblings_tag(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[],
list(a.itersiblings(tag='XXX')))
self.assertEqual(
[c],
list(b.itersiblings(tag='c')))
self.assertEqual(
[c],
list(b.itersiblings(tag='*')))
self.assertEqual(
[b],
list(c.itersiblings(preceding=True, tag='b')))
self.assertEqual(
[],
list(c.itersiblings(preceding=True, tag='c')))
def test_itersiblings_tag_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(a, 'e')
self.assertEqual(
[],
list(a.itersiblings(tag=('XXX', 'YYY'))))
self.assertEqual(
[c, e],
list(b.itersiblings(tag=('c', 'd', 'e'))))
self.assertEqual(
[b],
list(c.itersiblings(preceding=True, tag=('b', 'b', 'c', 'd'))))
self.assertEqual(
[c, b],
list(e.itersiblings(preceding=True, tag=('c', '*'))))
    def test_parseid(self):
        # parseid() returns (tree, dict) where the dict maps the values of
        # DTD-declared ID attributes (and xml:id) to their elements.
        parseid = self.etree.parseid
        XML = self.etree.XML
        xml_text = _bytes('''
        <!DOCTYPE document [
        <!ELEMENT document (h1,p)*>
        <!ELEMENT h1 (#PCDATA)>
        <!ATTLIST h1 myid ID #REQUIRED>
        <!ELEMENT p (#PCDATA)>
        <!ATTLIST p someid ID #REQUIRED>
        ]>
        <document>
        <h1 myid="chapter1">...</h1>
        <p id="note1" class="note">...</p>
        <p>Regular paragraph.</p>
        <p xml:id="xmlid">XML:ID paragraph.</p>
        <p someid="warn1" class="warning">...</p>
        </document>
        ''')
        tree, dic = parseid(BytesIO(xml_text))
        root = tree.getroot()
        root2 = XML(xml_text)
        # same document content whether parsed with or without ID collection
        self.assertEqual(self._writeElement(root),
                         self._writeElement(root2))
        expected = {
            "chapter1" : root[0],
            "xmlid" : root[3],
            "warn1" : root[4]
            }
        # 'id="note1"' is NOT collected: 'id' is not declared as ID here
        self.assertTrue("chapter1" in dic)
        self.assertTrue("warn1" in dic)
        self.assertTrue("xmlid" in dic)
        self._checkIDDict(dic, expected)
    def test_XMLDTDID(self):
        # XMLDTDID() parses a string and returns (root, ID dict), the
        # string-input counterpart of parseid().
        XMLDTDID = self.etree.XMLDTDID
        XML = self.etree.XML
        xml_text = _bytes('''
        <!DOCTYPE document [
        <!ELEMENT document (h1,p)*>
        <!ELEMENT h1 (#PCDATA)>
        <!ATTLIST h1 myid ID #REQUIRED>
        <!ELEMENT p (#PCDATA)>
        <!ATTLIST p someid ID #REQUIRED>
        ]>
        <document>
        <h1 myid="chapter1">...</h1>
        <p id="note1" class="note">...</p>
        <p>Regular paragraph.</p>
        <p xml:id="xmlid">XML:ID paragraph.</p>
        <p someid="warn1" class="warning">...</p>
        </document>
        ''')
        root, dic = XMLDTDID(xml_text)
        root2 = XML(xml_text)
        self.assertEqual(self._writeElement(root),
                         self._writeElement(root2))
        expected = {
            "chapter1" : root[0],
            "xmlid" : root[3],
            "warn1" : root[4]
            }
        # 'id="note1"' is not declared as an ID attribute in the DTD
        self.assertTrue("chapter1" in dic)
        self.assertTrue("warn1" in dic)
        self.assertTrue("xmlid" in dic)
        self._checkIDDict(dic, expected)
    def test_XMLDTDID_empty(self):
        # Without a DTD, no attribute is of type ID, so the dict is empty.
        XMLDTDID = self.etree.XMLDTDID
        XML = self.etree.XML
        xml_text = _bytes('''
        <document>
        <h1 myid="chapter1">...</h1>
        <p id="note1" class="note">...</p>
        <p>Regular paragraph.</p>
        <p someid="warn1" class="warning">...</p>
        </document>
        ''')
        root, dic = XMLDTDID(xml_text)
        root2 = XML(xml_text)
        self.assertEqual(self._writeElement(root),
                         self._writeElement(root2))
        expected = {}
        self._checkIDDict(dic, expected)
    def _checkIDDict(self, dic, expected):
        # Helper: compare an lxml ID dict against a plain dict, covering
        # the whole dict protocol (the iter* variants exist on Py2 only).
        self.assertEqual(len(dic),
                         len(expected))
        self.assertEqual(sorted(dic.items()),
                         sorted(expected.items()))
        if sys.version_info < (3,):
            self.assertEqual(sorted(dic.iteritems()),
                             sorted(expected.iteritems()))
        self.assertEqual(sorted(dic.keys()),
                         sorted(expected.keys()))
        if sys.version_info < (3,):
            self.assertEqual(sorted(dic.iterkeys()),
                             sorted(expected.iterkeys()))
        if sys.version_info < (3,):
            # Elements are not orderable on Python 3, so sorting the value
            # lists is only possible on Python 2.
            self.assertEqual(sorted(dic.values()),
                             sorted(expected.values()))
            self.assertEqual(sorted(dic.itervalues()),
                             sorted(expected.itervalues()))
def test_namespaces(self):
etree = self.etree
r = {'foo': 'http://ns.infrae.com/foo'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
self.assertEqual(
'foo',
e.prefix)
self.assertEqual(
_bytes('<foo:bar xmlns:foo="http://ns.infrae.com/foo"></foo:bar>'),
self._writeElement(e))
def test_namespaces_default(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
self.assertEqual(
None,
e.prefix)
self.assertEqual(
'{http://ns.infrae.com/foo}bar',
e.tag)
self.assertEqual(
_bytes('<bar xmlns="http://ns.infrae.com/foo"></bar>'),
self._writeElement(e))
def test_namespaces_default_and_attr(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo',
'hoi': 'http://ns.infrae.com/hoi'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
e.set('{http://ns.infrae.com/hoi}test', 'value')
self.assertEqual(
_bytes('<bar xmlns="http://ns.infrae.com/foo" xmlns:hoi="http://ns.infrae.com/hoi" hoi:test="value"></bar>'),
self._writeElement(e))
    def test_attribute_keeps_namespace_prefix_on_merge(self):
        # When the moved element's namespace matches the parent's default
        # namespace, the element drops its prefix, but the attribute keeps
        # the explicit 'test' prefix (attributes cannot use a default ns).
        etree = self.etree
        root = etree.Element('{http://test/ns}root',
                             nsmap={None: 'http://test/ns'})
        sub = etree.Element('{http://test/ns}sub',
                            nsmap={'test': 'http://test/ns'})
        sub.attrib['{http://test/ns}attr'] = 'value'
        self.assertEqual(sub.attrib['{http://test/ns}attr'], 'value')
        self.assertEqual(
            _bytes('<test:sub xmlns:test="http://test/ns" test:attr="value"/>'),
            etree.tostring(sub))
        root.append(sub)
        self.assertEqual(
            _bytes('<root xmlns="http://test/ns">'
                   '<sub xmlns:test="http://test/ns" test:attr="value"/>'
                   '</root>'),
            etree.tostring(root))
    def test_attribute_keeps_namespace_prefix_on_merge_with_nons(self):
        # Moving into a parent without any namespace: both the element's
        # and the attribute's 'test' prefix survive unchanged.
        etree = self.etree
        root = etree.Element('root')
        sub = etree.Element('{http://test/ns}sub',
                            nsmap={'test': 'http://test/ns'})
        sub.attrib['{http://test/ns}attr'] = 'value'
        self.assertEqual(sub.attrib['{http://test/ns}attr'], 'value')
        self.assertEqual(
            _bytes('<test:sub xmlns:test="http://test/ns" test:attr="value"/>'),
            etree.tostring(sub))
        root.append(sub)
        self.assertEqual(
            _bytes('<root>'
                   '<test:sub xmlns:test="http://test/ns" test:attr="value"/>'
                   '</root>'),
            etree.tostring(root))
    def test_attribute_gets_namespace_prefix_on_merge_with_nons(self):
        # The element only declares a default namespace, so a prefix
        # ('ns0') is invented for the attribute, which cannot live in a
        # default namespace.
        etree = self.etree
        root = etree.Element('root')
        sub = etree.Element('{http://test/ns}sub',
                            nsmap={None: 'http://test/ns'})
        sub.attrib['{http://test/ns}attr'] = 'value'
        self.assertEqual(sub.attrib['{http://test/ns}attr'], 'value')
        self.assertEqual(
            _bytes('<sub xmlns="http://test/ns" '
                   'xmlns:ns0="http://test/ns" ns0:attr="value"/>'),
            etree.tostring(sub))
        root.append(sub)
        self.assertEqual(
            _bytes('<root>'
                   '<sub xmlns="http://test/ns"'
                   ' xmlns:ns0="http://test/ns" ns0:attr="value"/>'
                   '</root>'),
            etree.tostring(root))
    def test_attribute_gets_namespace_prefix_on_merge(self):
        # After the merge the parent's 'test' prefix is in scope, so the
        # invented 'ns0' prefix is replaced by 'test' on both the element
        # and the attribute.
        etree = self.etree
        root = etree.Element('{http://test/ns}root',
                             nsmap={'test': 'http://test/ns',
                                    None: 'http://test/ns'})
        sub = etree.Element('{http://test/ns}sub',
                            nsmap={None: 'http://test/ns'})
        sub.attrib['{http://test/ns}attr'] = 'value'
        self.assertEqual(sub.attrib['{http://test/ns}attr'], 'value')
        self.assertEqual(
            _bytes('<sub xmlns="http://test/ns" '
                   'xmlns:ns0="http://test/ns" ns0:attr="value"/>'),
            etree.tostring(sub))
        root.append(sub)
        self.assertEqual(
            _bytes('<test:root xmlns:test="http://test/ns" xmlns="http://test/ns">'
                   '<test:sub test:attr="value"/>'
                   '</test:root>'),
            etree.tostring(root))
def test_namespaces_elementtree(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo',
'hoi': 'http://ns.infrae.com/hoi'}
e = etree.Element('{http://ns.infrae.com/foo}z', nsmap=r)
tree = etree.ElementTree(element=e)
etree.SubElement(e, '{http://ns.infrae.com/hoi}x')
self.assertEqual(
_bytes('<z xmlns="http://ns.infrae.com/foo" xmlns:hoi="http://ns.infrae.com/hoi"><hoi:x></hoi:x></z>'),
self._writeElement(e))
def test_namespaces_default_copy_element(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo'}
e1 = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
e2 = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
e1.append(e2)
self.assertEqual(
None,
e1.prefix)
self.assertEqual(
None,
e1[0].prefix)
self.assertEqual(
'{http://ns.infrae.com/foo}bar',
e1.tag)
self.assertEqual(
'{http://ns.infrae.com/foo}bar',
e1[0].tag)
def test_namespaces_copy_element(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/BAR'}
e1 = etree.Element('{http://ns.infrae.com/BAR}bar', nsmap=r)
e2 = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
e1.append(e2)
self.assertEqual(
None,
e1.prefix)
self.assertNotEqual(
None,
e2.prefix)
self.assertEqual(
'{http://ns.infrae.com/BAR}bar',
e1.tag)
self.assertEqual(
'{http://ns.infrae.com/foo}bar',
e2.tag)
    def test_namespaces_reuse_after_move(self):
        # Moving an element between documents must keep its namespace
        # usable even after the source document is deallocated.
        ns_href = "http://a.b.c"
        one = self.etree.fromstring(
            _bytes('<foo><bar xmlns:ns="%s"><ns:baz/></bar></foo>' % ns_href))
        baz = one[0][0]
        two = self.etree.fromstring(
            _bytes('<root xmlns:ns="%s"/>' % ns_href))
        two.append(baz)
        del one # make sure the source document is deallocated
        self.assertEqual('{%s}baz' % ns_href, baz.tag)
        self.assertEqual(
            _bytes('<root xmlns:ns="%s"><ns:baz/></root>' % ns_href),
            self.etree.tostring(two))
    def test_namespace_cleanup(self):
        # cleanup_namespaces() removes only the unused declarations
        # ('x' and 'b'); 'F', 'NS' and 'B' are all in use and survive.
        xml = _bytes('<foo xmlns="F" xmlns:x="x"><bar xmlns:ns="NS" xmlns:b="b" xmlns="B"><ns:baz/></bar></foo>')
        root = self.etree.fromstring(xml)
        self.assertEqual(xml,
                         self.etree.tostring(root))
        self.etree.cleanup_namespaces(root)
        self.assertEqual(
            _bytes('<foo xmlns="F"><bar xmlns:ns="NS" xmlns="B"><ns:baz/></bar></foo>'),
            self.etree.tostring(root))
def test_element_nsmap(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo',
'hoi': 'http://ns.infrae.com/hoi'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
self.assertEqual(
r,
e.nsmap)
def test_subelement_nsmap(self):
etree = self.etree
re = {None: 'http://ns.infrae.com/foo',
'hoi': 'http://ns.infrae.com/hoi'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=re)
rs = {None: 'http://ns.infrae.com/honk',
'top': 'http://ns.infrae.com/top'}
s = etree.SubElement(e, '{http://ns.infrae.com/honk}bar', nsmap=rs)
r = re.copy()
r.update(rs)
self.assertEqual(re, e.nsmap)
self.assertEqual(r, s.nsmap)
    def test_html_prefix_nsmap(self):
        # The HTML parser keeps the bogus 'hha:' prefix as part of the
        # element handling; it shows up in nsmap with a None URI.
        # NOTE(review): newer libxml2 versions changed how such tags are
        # parsed -- confirm this still finds an element there.
        etree = self.etree
        el = etree.HTML('<hha:page-description>aa</hha:page-description>').find('.//page-description')
        self.assertEqual({'hha': None}, el.nsmap)
def test_getiterator_filter_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
f = SubElement(c, 'f')
self.assertEqual(
[a, b],
list(a.getiterator('a', 'b')))
self.assertEqual(
[],
list(a.getiterator('x', 'y')))
self.assertEqual(
[a, f],
list(a.getiterator('f', 'a')))
self.assertEqual(
[c, e, f],
list(c.getiterator('c', '*', 'a')))
self.assertEqual(
[],
list(a.getiterator( (), () )))
def test_getiterator_filter_multiple_tuple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
f = SubElement(c, 'f')
self.assertEqual(
[a, b],
list(a.getiterator( ('a', 'b') )))
self.assertEqual(
[],
list(a.getiterator( ('x', 'y') )))
self.assertEqual(
[a, f],
list(a.getiterator( ('f', 'a') )))
self.assertEqual(
[c, e, f],
list(c.getiterator( ('c', '*', 'a') )))
self.assertEqual(
[],
list(a.getiterator( () )))
def test_getiterator_filter_namespace(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('{a}a')
b = SubElement(a, '{a}b')
c = SubElement(a, '{a}c')
d = SubElement(b, '{b}d')
e = SubElement(c, '{a}e')
f = SubElement(c, '{b}f')
g = SubElement(c, 'g')
self.assertEqual(
[a],
list(a.getiterator('{a}a')))
self.assertEqual(
[],
list(a.getiterator('{b}a')))
self.assertEqual(
[],
list(a.getiterator('a')))
self.assertEqual(
[a,b,d,c,e,f,g],
list(a.getiterator('*')))
self.assertEqual(
[f],
list(c.getiterator('{b}*')))
self.assertEqual(
[d, f],
list(a.getiterator('{b}*')))
self.assertEqual(
[g],
list(a.getiterator('g')))
self.assertEqual(
[g],
list(a.getiterator('{}g')))
self.assertEqual(
[g],
list(a.getiterator('{}*')))
def test_getiterator_filter_local_name(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('{a}a')
b = SubElement(a, '{nsA}b')
c = SubElement(b, '{nsB}b')
d = SubElement(a, 'b')
e = SubElement(a, '{nsA}e')
f = SubElement(e, '{nsB}e')
g = SubElement(e, 'e')
self.assertEqual(
[b, c, d],
list(a.getiterator('{*}b')))
self.assertEqual(
[e, f, g],
list(a.getiterator('{*}e')))
self.assertEqual(
[a, b, c, d, e, f, g],
list(a.getiterator('{*}*')))
def test_getiterator_filter_entities(self):
Element = self.etree.Element
Entity = self.etree.Entity
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
entity_b = Entity("TEST-b")
b.append(entity_b)
self.assertEqual(
[entity_b],
list(a.getiterator(Entity)))
entity_a = Entity("TEST-a")
a.append(entity_a)
self.assertEqual(
[entity_b, entity_a],
list(a.getiterator(Entity)))
self.assertEqual(
[entity_b],
list(b.getiterator(Entity)))
def test_getiterator_filter_element(self):
Element = self.etree.Element
Comment = self.etree.Comment
PI = self.etree.PI
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
a.append(Comment("test"))
a.append(PI("pi", "content"))
c = SubElement(a, 'c')
self.assertEqual(
[a, b, c],
list(a.getiterator(Element)))
def test_getiterator_filter_all_comment_pi(self):
# ElementTree iterates over everything here
Element = self.etree.Element
Comment = self.etree.Comment
PI = self.etree.PI
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
a.append(Comment("test"))
a.append(PI("pi", "content"))
c = SubElement(a, 'c')
self.assertEqual(
[a, b, c],
list(a.getiterator('*')))
def test_elementtree_find_qname(self):
XML = self.etree.XML
ElementTree = self.etree.ElementTree
QName = self.etree.QName
tree = ElementTree(XML(_bytes('<a><b><c/></b><b/><c><b/></c></a>')))
self.assertEqual(tree.find(QName("c")), tree.getroot()[2])
def test_elementtree_findall_qname(self):
XML = self.etree.XML
ElementTree = self.etree.ElementTree
QName = self.etree.QName
tree = ElementTree(XML(_bytes('<a><b><c/></b><b/><c><b/></c></a>')))
self.assertEqual(len(list(tree.findall(QName("c")))), 1)
def test_elementtree_findall_ns_qname(self):
XML = self.etree.XML
ElementTree = self.etree.ElementTree
QName = self.etree.QName
tree = ElementTree(XML(
_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><b/></a>')))
self.assertEqual(len(list(tree.findall(QName("b")))), 2)
self.assertEqual(len(list(tree.findall(QName("X", "b")))), 1)
def test_findall_ns(self):
XML = self.etree.XML
root = XML(_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><b/></a>'))
self.assertEqual(len(root.findall(".//{X}b")), 2)
self.assertEqual(len(root.findall(".//{X}*")), 2)
self.assertEqual(len(root.findall(".//b")), 3)
    def test_findall_different_nsmaps(self):
        # NOTE(review): this method is defined twice with identical bodies;
        # this first definition is shadowed by the second one and never
        # runs -- one of the two copies should be removed.
        XML = self.etree.XML
        root = XML(_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><y:b/></a>'))
        nsmap = {'xx': 'X'}
        self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 2)
        self.assertEqual(len(root.findall(".//xx:*", namespaces=nsmap)), 2)
        self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
        nsmap = {'xx': 'Y'}
        self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 1)
        self.assertEqual(len(root.findall(".//xx:*", namespaces=nsmap)), 1)
        self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
    def test_findall_different_nsmaps(self):
        # findall() resolves path prefixes against the supplied nsmap, so
        # the same path finds different elements with different mappings.
        # NOTE(review): byte-for-byte duplicate of the definition right
        # above; the redundant copy should be removed.
        XML = self.etree.XML
        root = XML(_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><y:b/></a>'))
        nsmap = {'xx': 'X'}
        self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 2)
        self.assertEqual(len(root.findall(".//xx:*", namespaces=nsmap)), 2)
        self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
        nsmap = {'xx': 'Y'}
        self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 1)
        self.assertEqual(len(root.findall(".//xx:*", namespaces=nsmap)), 1)
        self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
def test_findall_syntax_error(self):
XML = self.etree.XML
root = XML(_bytes('<a><b><c/></b><b/><c><b/><b/></c><b/></a>'))
self.assertRaises(SyntaxError, root.findall, '')
self.assertRaises(SyntaxError, root.findall, '//') # absolute path on Element
self.assertRaises(SyntaxError, root.findall, './//')
def test_index(self):
etree = self.etree
e = etree.Element('foo')
for i in range(10):
etree.SubElement(e, 'a%s' % i)
for i in range(10):
self.assertEqual(
i,
e.index(e[i]))
self.assertEqual(
3, e.index(e[3], 3))
self.assertRaises(
ValueError, e.index, e[3], 4)
self.assertRaises(
ValueError, e.index, e[3], 0, 2)
self.assertRaises(
ValueError, e.index, e[8], 0, -3)
self.assertRaises(
ValueError, e.index, e[8], -5, -3)
self.assertEqual(
8, e.index(e[8], 0, -1))
self.assertEqual(
8, e.index(e[8], -12, -1))
self.assertEqual(
0, e.index(e[0], -12, -1))
def test_replace(self):
    """Element.replace() moves the replacement element into the old
    element's position; text and tail stay attached to the elements
    they belong to (they move with the element, not the position).
    """
    etree = self.etree
    e = etree.Element('foo')
    for i in range(10):
        el = etree.SubElement(e, 'a%s' % i)
        el.text = "text%d" % i
        el.tail = "tail%d" % i
    child0 = e[0]
    child1 = e[1]
    child2 = e[2]
    # replacing child0 with child1 *moves* child1, shrinking the tree
    e.replace(e[0], e[1])
    self.assertEqual(
        9, len(e))
    self.assertEqual(
        child1, e[0])
    # text/tail travel with the moved element ...
    self.assertEqual(
        child1.text, "text1")
    self.assertEqual(
        child1.tail, "tail1")
    # ... and the removed element keeps its own tail
    self.assertEqual(
        child0.tail, "tail0")
    self.assertEqual(
        child2, e[1])
    # replace also works with a target at a negative index
    e.replace(e[-1], e[0])
    self.assertEqual(
        child1, e[-1])
    self.assertEqual(
        child1.text, "text1")
    self.assertEqual(
        child1.tail, "tail1")
    self.assertEqual(
        child2, e[0])
def test_replace_new(self):
etree = self.etree
e = etree.Element('foo')
for i in range(10):
etree.SubElement(e, 'a%s' % i)
new_element = etree.Element("test")
new_element.text = "TESTTEXT"
new_element.tail = "TESTTAIL"
child1 = e[1]
e.replace(e[0], new_element)
self.assertEqual(
new_element, e[0])
self.assertEqual(
"TESTTEXT",
e[0].text)
self.assertEqual(
"TESTTAIL",
e[0].tail)
self.assertEqual(
child1, e[1])
def test_setslice_all_empty_reversed(self):
    """Assigning to a[::-1] stores the new children in reversed order."""
    Element = self.etree.Element
    SubElement = self.etree.SubElement
    root = Element('a')
    new_children = [Element(tag) for tag in ('e', 'f', 'g')]
    root[::-1] = new_children
    # the reversed slice writes g, f, e into positions 0, 1, 2
    self.assertEqual(list(reversed(new_children)), list(root))
def test_setslice_step(self):
    """Extended-slice assignment with a positive step replaces every
    step-th child in place.
    """
    Element = self.etree.Element
    SubElement = self.etree.SubElement
    root = Element('a')
    b, c, d, e = (SubElement(root, tag) for tag in ('b', 'c', 'd', 'e'))
    x, y = Element('x'), Element('y')
    # positions 1 and 3 are overwritten; 0 and 2 are untouched
    root[1::2] = [x, y]
    self.assertEqual([b, x, d, y], list(root))
def test_setslice_step_negative(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
x = Element('x')
y = Element('y')
a[1::-1] = [x, y]
self.assertEqual(
[y, x, d, e],
list(a))
def test_setslice_step_negative2(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
x = Element('x')
y = Element('y')
a[::-2] = [x, y]
self.assertEqual(
[b, y, d, x],
list(a))
def test_setslice_step_overrun(self):
    """Assigning to an extended slice must raise ValueError when the
    number of supplied items does not match the slice length, and the
    failed assignment must leave the children untouched.
    """
    Element = self.etree.Element
    SubElement = self.etree.SubElement
    # NOTE: the original wrapped a bare `slice` reference in
    # try/except NameError as a guard for ancient interpreters; the
    # slice() builtin exists in every supported Python, so the dead
    # guard was removed.
    a = Element('a')
    b = SubElement(a, 'b')
    c = SubElement(a, 'c')
    d = SubElement(a, 'd')
    e = SubElement(a, 'e')
    x = Element('x')
    y = Element('y')
    z = Element('z')
    # slice 1::2 selects two positions, but three items are supplied
    self.assertRaises(
        ValueError,
        operator.setitem, a, slice(1, None, 2), [x, y, z])
    # the tree must be unchanged after the failed assignment
    self.assertEqual(
        [b, c, d, e],
        list(a))
def test_sourceline_XML(self):
XML = self.etree.XML
root = XML(_bytes('''<?xml version="1.0"?>
<root><test>
<bla/></test>
</root>
'''))
self.assertEqual(
[2, 2, 4],
[ el.sourceline for el in root.getiterator() ])
def test_sourceline_parse(self):
parse = self.etree.parse
tree = parse(fileInTestDir('include/test_xinclude.xml'))
self.assertEqual(
[1, 2, 3],
[ el.sourceline for el in tree.getiterator() ])
def test_sourceline_iterparse_end(self):
iterparse = self.etree.iterparse
lines = [ el.sourceline for (event, el) in
iterparse(fileInTestDir('include/test_xinclude.xml')) ]
self.assertEqual(
[2, 3, 1],
lines)
def test_sourceline_iterparse_start(self):
iterparse = self.etree.iterparse
lines = [ el.sourceline for (event, el) in
iterparse(fileInTestDir('include/test_xinclude.xml'),
events=("start",)) ]
self.assertEqual(
[1, 2, 3],
lines)
def test_sourceline_element(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element("test")
self.assertEqual(None, el.sourceline)
child = SubElement(el, "test")
self.assertEqual(None, el.sourceline)
self.assertEqual(None, child.sourceline)
def test_XML_base_url_docinfo(self):
etree = self.etree
root = etree.XML(_bytes("<root/>"), base_url="http://no/such/url")
docinfo = root.getroottree().docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
def test_XML_set_base_url_docinfo(self):
etree = self.etree
root = etree.XML(_bytes("<root/>"), base_url="http://no/such/url")
docinfo = root.getroottree().docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
docinfo.URL = "https://secret/url"
self.assertEqual(docinfo.URL, "https://secret/url")
def test_parse_stringio_base_url(self):
    """A base_url passed to parse() must show up as docinfo.URL."""
    etree = self.etree
    # BUGFIX: BytesIO requires a bytes object; passing a plain str
    # raises TypeError on Python 3.  Every other test in this file
    # wraps literals with _bytes() -- do the same here.
    tree = etree.parse(BytesIO(_bytes("<root/>")),
                       base_url="http://no/such/url")
    docinfo = tree.docinfo
    self.assertEqual(docinfo.URL, "http://no/such/url")
def test_parse_base_url_docinfo(self):
etree = self.etree
tree = etree.parse(fileInTestDir('include/test_xinclude.xml'),
base_url="http://no/such/url")
docinfo = tree.docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
def test_HTML_base_url_docinfo(self):
etree = self.etree
root = etree.HTML(_bytes("<html/>"), base_url="http://no/such/url")
docinfo = root.getroottree().docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
def test_docinfo_public(self):
etree = self.etree
xml_header = '<?xml version="1.0" encoding="ascii"?>'
pub_id = "-//W3C//DTD XHTML 1.0 Transitional//EN"
sys_id = "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"
doctype_string = '<!DOCTYPE html PUBLIC "%s" "%s">' % (pub_id, sys_id)
xml = _bytes(xml_header + doctype_string + '<html><body></body></html>')
tree = etree.parse(BytesIO(xml))
docinfo = tree.docinfo
self.assertEqual(docinfo.encoding, "ascii")
self.assertEqual(docinfo.xml_version, "1.0")
self.assertEqual(docinfo.public_id, pub_id)
self.assertEqual(docinfo.system_url, sys_id)
self.assertEqual(docinfo.root_name, 'html')
self.assertEqual(docinfo.doctype, doctype_string)
def test_docinfo_system(self):
etree = self.etree
xml_header = '<?xml version="1.0" encoding="UTF-8"?>'
sys_id = "some.dtd"
doctype_string = '<!DOCTYPE html SYSTEM "%s">' % sys_id
xml = _bytes(xml_header + doctype_string + '<html><body></body></html>')
tree = etree.parse(BytesIO(xml))
docinfo = tree.docinfo
self.assertEqual(docinfo.encoding, "UTF-8")
self.assertEqual(docinfo.xml_version, "1.0")
self.assertEqual(docinfo.public_id, None)
self.assertEqual(docinfo.system_url, sys_id)
self.assertEqual(docinfo.root_name, 'html')
self.assertEqual(docinfo.doctype, doctype_string)
def test_docinfo_empty(self):
etree = self.etree
xml = _bytes('<html><body></body></html>')
tree = etree.parse(BytesIO(xml))
docinfo = tree.docinfo
self.assertEqual(docinfo.encoding, "UTF-8")
self.assertEqual(docinfo.xml_version, "1.0")
self.assertEqual(docinfo.public_id, None)
self.assertEqual(docinfo.system_url, None)
self.assertEqual(docinfo.root_name, 'html')
self.assertEqual(docinfo.doctype, '')
def test_docinfo_name_only(self):
etree = self.etree
xml = _bytes('<!DOCTYPE root><root></root>')
tree = etree.parse(BytesIO(xml))
docinfo = tree.docinfo
self.assertEqual(docinfo.encoding, "UTF-8")
self.assertEqual(docinfo.xml_version, "1.0")
self.assertEqual(docinfo.public_id, None)
self.assertEqual(docinfo.system_url, None)
self.assertEqual(docinfo.root_name, 'root')
self.assertEqual(docinfo.doctype, '<!DOCTYPE root>')
def test_doctype_name_only_roundtrip(self):
etree = self.etree
xml = _bytes('<!DOCTYPE root>\n<root/>')
tree = etree.parse(BytesIO(xml))
self.assertEqual(xml, etree.tostring(tree))
def test_doctype_output_override(self):
etree = self.etree
pub_id = "-//W3C//DTD XHTML 1.0 Transitional//EN"
sys_id = "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"
doctype_string = _bytes('<!DOCTYPE html PUBLIC "%s" "%s">' % (pub_id, sys_id))
xml = _bytes('<!DOCTYPE root>\n<root/>')
tree = etree.parse(BytesIO(xml))
self.assertEqual(xml.replace(_bytes('<!DOCTYPE root>'), doctype_string),
etree.tostring(tree, doctype=doctype_string))
def test_xml_base(self):
etree = self.etree
root = etree.XML(_bytes("<root/>"), base_url="http://no/such/url")
self.assertEqual(root.base, "http://no/such/url")
self.assertEqual(
root.get('{http://www.w3.org/XML/1998/namespace}base'), None)
root.base = "https://secret/url"
self.assertEqual(root.base, "https://secret/url")
self.assertEqual(
root.get('{http://www.w3.org/XML/1998/namespace}base'),
"https://secret/url")
def test_xml_base_attribute(self):
etree = self.etree
root = etree.XML(_bytes("<root/>"), base_url="http://no/such/url")
self.assertEqual(root.base, "http://no/such/url")
self.assertEqual(
root.get('{http://www.w3.org/XML/1998/namespace}base'), None)
root.set('{http://www.w3.org/XML/1998/namespace}base',
"https://secret/url")
self.assertEqual(root.base, "https://secret/url")
self.assertEqual(
root.get('{http://www.w3.org/XML/1998/namespace}base'),
"https://secret/url")
def test_html_base(self):
etree = self.etree
root = etree.HTML(_bytes("<html><body></body></html>"),
base_url="http://no/such/url")
self.assertEqual(root.base, "http://no/such/url")
def test_html_base_tag(self):
etree = self.etree
root = etree.HTML(_bytes('<html><head><base href="http://no/such/url"></head></html>'))
self.assertEqual(root.base, "http://no/such/url")
def test_parse_fileobject_unicode(self):
# parse from a file object that returns unicode strings
f = LargeFileLikeUnicode()
tree = self.etree.parse(f)
root = tree.getroot()
self.assertTrue(root.tag.endswith('root'))
def test_dtd_io(self):
# check that DTDs that go in also go back out
xml = _bytes('''\
<!DOCTYPE test SYSTEM "test.dtd" [
<!ENTITY entity "tasty">
<!ELEMENT test (a)>
<!ELEMENT a (#PCDATA)>
]>
<test><a>test-test</a></test>\
''')
tree = self.etree.parse(BytesIO(xml))
self.assertEqual(self.etree.tostring(tree).replace(_bytes(" "), _bytes("")),
xml.replace(_bytes(" "), _bytes("")))
def test_byte_zero(self):
    """NUL bytes are rejected in text, tail and tag names."""
    Element = self.etree.Element
    el = Element('a')
    for attribute in ("text", "tail"):
        self.assertRaises(ValueError, setattr, el, attribute, 'ha\0ho')
    self.assertRaises(ValueError, Element, 'ha\0ho')
def test_unicode_byte_zero(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\0ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\0ho'))
self.assertRaises(ValueError, Element,
_str('ha\0ho'))
def test_byte_invalid(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text", 'ha\x07ho')
self.assertRaises(ValueError, setattr, a, "text", 'ha\x02ho')
self.assertRaises(ValueError, setattr, a, "tail", 'ha\x07ho')
self.assertRaises(ValueError, setattr, a, "tail", 'ha\x02ho')
self.assertRaises(ValueError, Element, 'ha\x07ho')
self.assertRaises(ValueError, Element, 'ha\x02ho')
def test_unicode_byte_invalid(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\x07ho'))
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\x02ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\x07ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\x02ho'))
self.assertRaises(ValueError, Element,
_str('ha\x07ho'))
self.assertRaises(ValueError, Element,
_str('ha\x02ho'))
def test_unicode_byte_invalid_sequence(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\u1234\x07ho'))
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\u1234\x02ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\u1234\x07ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\u1234\x02ho'))
self.assertRaises(ValueError, Element,
_str('ha\u1234\x07ho'))
self.assertRaises(ValueError, Element,
_str('ha\u1234\x02ho'))
def test_encoding_tostring_utf16(self):
# ElementTree fails to serialize this
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
result = tostring(a, encoding='UTF-16')
self.assertEqual(_bytes('<a><b></b><c></c></a>'),
canonicalize(result))
def test_tostring_none(self):
    """Serialising None must raise TypeError.

    (ElementTree raises an AssertionError here; lxml deliberately
    raises TypeError instead.)
    """
    tostring = self.etree.tostring
    # use the local alias -- it was assigned but unused before
    self.assertRaises(TypeError, tostring, None)
def test_tostring_pretty(self):
    """pretty_print=True indents child elements onto their own lines;
    the default and pretty_print=False keep the compact form.
    """
    tostring = self.etree.tostring
    Element = self.etree.Element
    SubElement = self.etree.SubElement
    root = Element('a')
    b = SubElement(root, 'b')
    c = SubElement(root, 'c')
    compact = _bytes("<a><b/><c/></a>")
    self.assertEqual(compact, tostring(root))
    self.assertEqual(compact, tostring(root, pretty_print=False))
    self.assertEqual(_bytes("<a>\n  <b/>\n  <c/>\n</a>\n"),
                     tostring(root, pretty_print=True))
def test_tostring_with_tail(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.tail = "aTAIL"
b = SubElement(a, 'b')
b.tail = "bTAIL"
c = SubElement(a, 'c')
result = tostring(a)
self.assertEqual(result, _bytes("<a><b/>bTAIL<c/></a>aTAIL"))
result = tostring(a, with_tail=False)
self.assertEqual(result, _bytes("<a><b/>bTAIL<c/></a>"))
result = tostring(a, with_tail=True)
self.assertEqual(result, _bytes("<a><b/>bTAIL<c/></a>aTAIL"))
def test_standalone(self):
tostring = self.etree.tostring
XML = self.etree.XML
ElementTree = self.etree.ElementTree
Element = self.etree.Element
tree = Element("root").getroottree()
self.assertEqual(None, tree.docinfo.standalone)
tree = XML(_bytes("<root/>")).getroottree()
self.assertEqual(None, tree.docinfo.standalone)
tree = XML(_bytes(
"<?xml version='1.0' encoding='ASCII' standalone='yes'?>\n<root/>"
)).getroottree()
self.assertEqual(True, tree.docinfo.standalone)
tree = XML(_bytes(
"<?xml version='1.0' encoding='ASCII' standalone='no'?>\n<root/>"
)).getroottree()
self.assertEqual(False, tree.docinfo.standalone)
def test_tostring_standalone(self):
tostring = self.etree.tostring
XML = self.etree.XML
ElementTree = self.etree.ElementTree
root = XML(_bytes("<root/>"))
tree = ElementTree(root)
self.assertEqual(None, tree.docinfo.standalone)
result = tostring(root, xml_declaration=True, encoding="ASCII")
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII'?>\n<root/>"))
result = tostring(root, xml_declaration=True, encoding="ASCII",
standalone=True)
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII' standalone='yes'?>\n<root/>"))
tree = ElementTree(XML(result))
self.assertEqual(True, tree.docinfo.standalone)
result = tostring(root, xml_declaration=True, encoding="ASCII",
standalone=False)
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII' standalone='no'?>\n<root/>"))
tree = ElementTree(XML(result))
self.assertEqual(False, tree.docinfo.standalone)
def test_tostring_standalone_in_out(self):
tostring = self.etree.tostring
XML = self.etree.XML
ElementTree = self.etree.ElementTree
root = XML(_bytes(
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n<root/>"))
tree = ElementTree(root)
self.assertEqual(True, tree.docinfo.standalone)
result = tostring(root, xml_declaration=True, encoding="ASCII")
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII'?>\n<root/>"))
result = tostring(root, xml_declaration=True, encoding="ASCII",
standalone=True)
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII' standalone='yes'?>\n<root/>"))
def test_tostring_method_text_encoding(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = "A"
a.tail = "tail"
b = SubElement(a, 'b')
b.text = "B"
b.tail = _str("Søk på nettet")
c = SubElement(a, 'c')
c.text = "C"
result = tostring(a, method="text", encoding="UTF-16")
self.assertEqual(_str('ABSøk på nettetCtail').encode("UTF-16"),
result)
def test_tostring_method_text_unicode(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = _str('Søk på nettetA')
a.tail = "tail"
b = SubElement(a, 'b')
b.text = "B"
b.tail = _str('Søk på nettetB')
c = SubElement(a, 'c')
c.text = "C"
self.assertRaises(UnicodeEncodeError,
tostring, a, method="text")
self.assertEqual(
_str('Søk på nettetABSøk på nettetBCtail').encode('utf-8'),
tostring(a, encoding="UTF-8", method="text"))
def test_tounicode(self):
tounicode = self.etree.tounicode
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
self.assertTrue(isinstance(tounicode(a), _unicode))
self.assertEqual(_bytes('<a><b></b><c></c></a>'),
canonicalize(tounicode(a)))
def test_tounicode_element(self):
tounicode = self.etree.tounicode
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
self.assertTrue(isinstance(tounicode(b), _unicode))
self.assertTrue(isinstance(tounicode(c), _unicode))
self.assertEqual(_bytes('<b></b>'),
canonicalize(tounicode(b)))
self.assertEqual(_bytes('<c><d></d></c>'),
canonicalize(tounicode(c)))
def test_tounicode_none(self):
    """tounicode(None) must raise TypeError."""
    tounicode = self.etree.tounicode
    # use the local alias -- it was assigned but unused before
    self.assertRaises(TypeError, tounicode, None)
def test_tounicode_element_tail(self):
tounicode = self.etree.tounicode
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
b.tail = 'Foo'
self.assertTrue(isinstance(tounicode(b), _unicode))
self.assertTrue(tounicode(b) == '<b/>Foo' or
tounicode(b) == '<b />Foo')
def test_tounicode_pretty(self):
tounicode = self.etree.tounicode
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
result = tounicode(a)
self.assertEqual(result, "<a><b/><c/></a>")
result = tounicode(a, pretty_print=False)
self.assertEqual(result, "<a><b/><c/></a>")
result = tounicode(a, pretty_print=True)
self.assertEqual(result, "<a>\n <b/>\n <c/>\n</a>\n")
def test_tostring_unicode(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
self.assertTrue(isinstance(tostring(a, encoding=_unicode), _unicode))
self.assertEqual(_bytes('<a><b></b><c></c></a>'),
canonicalize(tostring(a, encoding=_unicode)))
def test_tostring_unicode_element(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
self.assertTrue(isinstance(tostring(b, encoding=_unicode), _unicode))
self.assertTrue(isinstance(tostring(c, encoding=_unicode), _unicode))
self.assertEqual(_bytes('<b></b>'),
canonicalize(tostring(b, encoding=_unicode)))
self.assertEqual(_bytes('<c><d></d></c>'),
canonicalize(tostring(c, encoding=_unicode)))
def test_tostring_unicode_none(self):
tostring = self.etree.tostring
self.assertRaises(TypeError, self.etree.tostring,
None, encoding=_unicode)
def test_tostring_unicode_element_tail(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
b.tail = 'Foo'
self.assertTrue(isinstance(tostring(b, encoding=_unicode), _unicode))
self.assertTrue(tostring(b, encoding=_unicode) == '<b/>Foo' or
tostring(b, encoding=_unicode) == '<b />Foo')
def test_tostring_unicode_pretty(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
result = tostring(a, encoding=_unicode)
self.assertEqual(result, "<a><b/><c/></a>")
result = tostring(a, encoding=_unicode, pretty_print=False)
self.assertEqual(result, "<a><b/><c/></a>")
result = tostring(a, encoding=_unicode, pretty_print=True)
self.assertEqual(result, "<a>\n <b/>\n <c/>\n</a>\n")
def test_pypy_proxy_collect(self):
root = etree.Element('parent')
etree.SubElement(root, 'child')
self.assertEqual(len(root), 1)
self.assertEqual(root[0].tag, 'child')
# in PyPy, GC used to kill the Python proxy instance without cleanup
gc.collect()
self.assertEqual(len(root), 1)
self.assertEqual(root[0].tag, 'child')
# helper methods
def _writeElement(self, element, encoding='us-ascii', compression=0):
    """Write out element for comparison.

    Serialises *element* via ElementTree.write() with the given
    encoding and compression level, decompresses the result when
    compression was requested, and returns the canonicalised bytes
    so tests can compare output independent of formatting details.
    """
    ElementTree = self.etree.ElementTree
    f = BytesIO()
    tree = ElementTree(element=element)
    tree.write(f, encoding=encoding, compression=compression)
    data = f.getvalue()
    if compression:
        # undo the gzip/zlib layer so callers compare plain XML
        data = zlib.decompress(data)
    return canonicalize(data)
class _XIncludeTestCase(HelperTestCase):
    """Shared XInclude tests; subclasses supply self.include() to pick
    the XInclude implementation under test (lxml native vs.
    ElementInclude).
    """

    def test_xinclude_text(self):
        """An xi:include with parse="text" splices the raw file content
        into the surrounding text.
        """
        filename = fileInTestDir('test_broken.xml')
        root = etree.XML(_bytes('''\
        <doc xmlns:xi="http://www.w3.org/2001/XInclude">
          <xi:include href="%s" parse="text"/>
        </doc>
        ''' % filename))
        old_text = root.text
        content = read_file(filename)
        old_tail = root[0].tail

        self.include( etree.ElementTree(root) )
        # the include element is replaced by its text content,
        # merged with the surrounding text/tail
        self.assertEqual(old_text + content + old_tail,
                         root.text)

    def test_xinclude(self):
        """Processing xincludes replaces the include element with the
        included document's elements.
        """
        tree = etree.parse(fileInTestDir('include/test_xinclude.xml'))
        self.assertNotEqual(
            'a',
            tree.getroot()[1].tag)
        # process xincludes
        self.include( tree )
        # check whether we find it replaced with included data
        self.assertEqual(
            'a',
            tree.getroot()[1].tag)

    def test_xinclude_resolver(self):
        """Custom resolvers must be consulted for DTDs and for the
        included documents during XInclude processing.
        """
        class res(etree.Resolver):
            include_text = read_file(fileInTestDir('test.xml'))
            called = {}  # records which resolver paths were exercised
            def resolve(self, url, id, context):
                if url.endswith(".dtd"):
                    self.called["dtd"] = True
                    return self.resolve_filename(
                        fileInTestDir('test.dtd'), context)
                elif url.endswith("test_xinclude.xml"):
                    self.called["input"] = True
                    return None # delegate to default resolver
                else:
                    self.called["include"] = True
                    return self.resolve_string(self.include_text, context)

        res_instance = res()
        parser = etree.XMLParser(load_dtd = True)
        parser.resolvers.add(res_instance)

        tree = etree.parse(fileInTestDir('include/test_xinclude.xml'),
                           parser = parser)

        self.include(tree)

        # all three resolver branches must have been hit
        called = list(res_instance.called.items())
        called.sort()
        self.assertEqual(
            [("dtd", True), ("include", True), ("input", True)],
            called)
class ETreeXIncludeTestCase(_XIncludeTestCase):
    """Runs the shared XInclude tests against lxml's native xinclude()."""

    def include(self, tree):
        tree.xinclude()
class ElementIncludeTestCase(_XIncludeTestCase):
    """Runs the shared XInclude tests against the pure-Python
    lxml.ElementInclude implementation.
    """
    from lxml import ElementInclude

    def include(self, tree):
        self.ElementInclude.include(tree.getroot())
class ETreeC14NTestCase(HelperTestCase):
def test_c14n(self):
tree = self.parse(_bytes('<a><b/></a>'))
f = BytesIO()
tree.write_c14n(f)
s = f.getvalue()
self.assertEqual(_bytes('<a><b></b></a>'),
s)
def test_c14n_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
f = BytesIO()
tree.write_c14n(f, compression=9)
gzfile = gzip.GzipFile(fileobj=BytesIO(f.getvalue()))
try:
s = gzfile.read()
finally:
gzfile.close()
self.assertEqual(_bytes('<a>'+'<b></b>'*200+'</a>'),
s)
def test_c14n_file(self):
tree = self.parse(_bytes('<a><b/></a>'))
handle, filename = tempfile.mkstemp()
try:
tree.write_c14n(filename)
data = read_file(filename, 'rb')
finally:
os.close(handle)
os.remove(filename)
self.assertEqual(_bytes('<a><b></b></a>'),
data)
def test_c14n_file_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
handle, filename = tempfile.mkstemp()
try:
tree.write_c14n(filename, compression=9)
f = gzip.open(filename, 'rb')
try:
data = f.read()
finally:
f.close()
finally:
os.close(handle)
os.remove(filename)
self.assertEqual(_bytes('<a>'+'<b></b>'*200+'</a>'),
data)
def test_c14n_with_comments(self):
tree = self.parse(_bytes('<!--hi--><a><!--ho--><b/></a><!--hu-->'))
f = BytesIO()
tree.write_c14n(f)
s = f.getvalue()
self.assertEqual(_bytes('<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->'),
s)
f = BytesIO()
tree.write_c14n(f, with_comments=True)
s = f.getvalue()
self.assertEqual(_bytes('<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->'),
s)
f = BytesIO()
tree.write_c14n(f, with_comments=False)
s = f.getvalue()
self.assertEqual(_bytes('<a><b></b></a>'),
s)
def test_c14n_tostring_with_comments(self):
tree = self.parse(_bytes('<!--hi--><a><!--ho--><b/></a><!--hu-->'))
s = etree.tostring(tree, method='c14n')
self.assertEqual(_bytes('<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->'),
s)
s = etree.tostring(tree, method='c14n', with_comments=True)
self.assertEqual(_bytes('<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->'),
s)
s = etree.tostring(tree, method='c14n', with_comments=False)
self.assertEqual(_bytes('<a><b></b></a>'),
s)
def test_c14n_element_tostring_with_comments(self):
tree = self.parse(_bytes('<!--hi--><a><!--ho--><b/></a><!--hu-->'))
s = etree.tostring(tree.getroot(), method='c14n')
self.assertEqual(_bytes('<a><!--ho--><b></b></a>'),
s)
s = etree.tostring(tree.getroot(), method='c14n', with_comments=True)
self.assertEqual(_bytes('<a><!--ho--><b></b></a>'),
s)
s = etree.tostring(tree.getroot(), method='c14n', with_comments=False)
self.assertEqual(_bytes('<a><b></b></a>'),
s)
def test_c14n_exclusive(self):
tree = self.parse(_bytes(
'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
f = BytesIO()
tree.write_c14n(f)
s = f.getvalue()
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
f = BytesIO()
tree.write_c14n(f, exclusive=False)
s = f.getvalue()
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
f = BytesIO()
tree.write_c14n(f, exclusive=True)
s = f.getvalue()
self.assertEqual(_bytes('<a xmlns="http://abc"><z:b xmlns:z="http://cde"></z:b></a>'),
s)
f = BytesIO()
tree.write_c14n(f, exclusive=True, inclusive_ns_prefixes=['z'])
s = f.getvalue()
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
def test_c14n_tostring_exclusive(self):
tree = self.parse(_bytes(
'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
s = etree.tostring(tree, method='c14n')
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
s = etree.tostring(tree, method='c14n', exclusive=False)
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
s = etree.tostring(tree, method='c14n', exclusive=True)
self.assertEqual(_bytes('<a xmlns="http://abc"><z:b xmlns:z="http://cde"></z:b></a>'),
s)
s = etree.tostring(tree, method='c14n', exclusive=True, inclusive_ns_prefixes=['y'])
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd"><z:b xmlns:z="http://cde"></z:b></a>'),
s)
def test_c14n_element_tostring_exclusive(self):
tree = self.parse(_bytes(
'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
s = etree.tostring(tree.getroot(), method='c14n')
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
s = etree.tostring(tree.getroot(), method='c14n', exclusive=False)
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
s = etree.tostring(tree.getroot(), method='c14n', exclusive=True)
self.assertEqual(_bytes('<a xmlns="http://abc"><z:b xmlns:z="http://cde"></z:b></a>'),
s)
s = etree.tostring(tree.getroot()[0], method='c14n', exclusive=False)
self.assertEqual(_bytes('<z:b xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"></z:b>'),
s)
s = etree.tostring(tree.getroot()[0], method='c14n', exclusive=True)
self.assertEqual(_bytes('<z:b xmlns:z="http://cde"></z:b>'),
s)
s = etree.tostring(tree.getroot()[0], method='c14n', exclusive=True, inclusive_ns_prefixes=['y'])
self.assertEqual(_bytes('<z:b xmlns:y="http://bcd" xmlns:z="http://cde"></z:b>'),
s)
def test_c14n_tostring_inclusive_ns_prefixes(self):
""" Regression test to fix memory allocation issues (use 3+ inclusive NS spaces)"""
tree = self.parse(_bytes(
'<a xmlns:x="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
s = etree.tostring(tree, method='c14n', exclusive=True, inclusive_ns_prefixes=['x', 'y', 'z'])
self.assertEqual(_bytes('<a xmlns:x="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
class ETreeWriteTestCase(HelperTestCase):
def test_write(self):
tree = self.parse(_bytes('<a><b/></a>'))
f = BytesIO()
tree.write(f)
s = f.getvalue()
self.assertEqual(_bytes('<a><b/></a>'),
s)
def test_write_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
f = BytesIO()
tree.write(f, compression=9)
gzfile = gzip.GzipFile(fileobj=BytesIO(f.getvalue()))
try:
s = gzfile.read()
finally:
gzfile.close()
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
s)
def test_write_gzip_level(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
f = BytesIO()
tree.write(f, compression=0)
s0 = f.getvalue()
f = BytesIO()
tree.write(f)
self.assertEqual(f.getvalue(), s0)
f = BytesIO()
tree.write(f, compression=1)
s = f.getvalue()
self.assertTrue(len(s) <= len(s0))
gzfile = gzip.GzipFile(fileobj=BytesIO(s))
try:
s1 = gzfile.read()
finally:
gzfile.close()
f = BytesIO()
tree.write(f, compression=9)
s = f.getvalue()
self.assertTrue(len(s) <= len(s0))
gzfile = gzip.GzipFile(fileobj=BytesIO(s))
try:
s9 = gzfile.read()
finally:
gzfile.close()
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
s0)
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
s1)
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
s9)
def test_write_file(self):
tree = self.parse(_bytes('<a><b/></a>'))
handle, filename = tempfile.mkstemp()
try:
tree.write(filename)
data = read_file(filename, 'rb')
finally:
os.close(handle)
os.remove(filename)
self.assertEqual(_bytes('<a><b/></a>'),
data)
def test_write_file_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
handle, filename = tempfile.mkstemp()
try:
tree.write(filename, compression=9)
f = gzip.open(filename, 'rb')
try:
data = f.read()
finally:
f.close()
finally:
os.close(handle)
os.remove(filename)
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
data)
def test_write_file_gzip_parse(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
handle, filename = tempfile.mkstemp()
try:
tree.write(filename, compression=9)
data = etree.tostring(etree.parse(filename))
finally:
os.close(handle)
os.remove(filename)
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
data)
def test_write_file_gzipfile_parse(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
handle, filename = tempfile.mkstemp()
try:
tree.write(filename, compression=9)
data = etree.tostring(etree.parse(
gzip.GzipFile(filename)))
finally:
os.close(handle)
os.remove(filename)
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
data)
class ETreeErrorLogTest(HelperTestCase):
    """Tests for the parser error log attached to SyntaxError instances."""
    etree = etree

    def test_parse_error_logging(self):
        """The error_log of a parse SyntaxError must describe the tag
        mismatch with message, domain, type, line and column details.
        """
        parse = self.etree.parse
        # BUGFIX: BytesIO requires bytes -- a plain str raises TypeError
        # on Python 3.  Wrap with _bytes() like the rest of the file.
        f = BytesIO(_bytes('<a><b></c></b></a>'))
        self.etree.clear_error_log()
        try:
            parse(f)
            logs = None
        except SyntaxError:
            e = sys.exc_info()[1]
            logs = e.error_log
        f.close()
        # at least one log entry must carry each expected detail
        self.assertTrue([ log for log in logs
                          if 'mismatch' in log.message ])
        self.assertTrue([ log for log in logs
                          if 'PARSER' in log.domain_name])
        self.assertTrue([ log for log in logs
                          if 'ERR_TAG_NAME_MISMATCH' in log.type_name ])
        self.assertTrue([ log for log in logs
                          if 1 == log.line ])
        self.assertTrue([ log for log in logs
                          if 15 == log.column ])

    def _test_python_error_logging(self):
        """This can't really be tested as long as there isn't a way to
        reset the logging setup ...

        (Leading underscore keeps the test runner from picking it up.)
        """
        parse = self.etree.parse

        messages = []
        class Logger(self.etree.PyErrorLog):
            def log(self, entry, message, *args):
                messages.append(message)

        self.etree.use_global_python_log(Logger())
        # BUGFIX: same str-vs-bytes issue as above
        f = BytesIO(_bytes('<a><b></c></b></a>'))
        try:
            parse(f)
        except SyntaxError:
            pass
        f.close()

        self.assertTrue([ message for message in messages
                          if 'mismatch' in message ])
        self.assertTrue([ message for message in messages
                          if ':PARSER:' in message])
        self.assertTrue([ message for message in messages
                          if ':ERR_TAG_NAME_MISMATCH:' in message ])
        self.assertTrue([ message for message in messages
                          if ':1:15:' in message ])
def test_suite():
    """Assemble the unit test cases plus the documentation doctests."""
    suite = unittest.TestSuite()
    for case in (ETreeOnlyTestCase, ETreeXIncludeTestCase,
                 ElementIncludeTestCase, ETreeC14NTestCase,
                 ETreeWriteTestCase, ETreeErrorLogTest):
        suite.addTests([unittest.makeSuite(case)])
    suite.addTests([make_doctest('../../../doc/tutorial.txt')])
    if sys.version_info >= (2, 6):
        # these documents use the 'with' statement in their examples
        for doc in ('../../../doc/api.txt',
                    '../../../doc/FAQ.txt',
                    '../../../doc/parsing.txt',
                    '../../../doc/resolvers.txt'):
            suite.addTests([make_doctest(doc)])
    return suite
if __name__ == '__main__':
    # Direct execution is not supported; point the user at the test runner.
    print('to test use test.py %s' % __file__)
|
{
"content_hash": "f6427355545f9aa54001812314c5f4b1",
"timestamp": "",
"source": "github",
"line_count": 3739,
"max_line_length": 175,
"avg_line_length": 35.35303557100829,
"alnum_prop": 0.5249460982713621,
"repo_name": "ioram7/keystone-federado-pgid2013",
"id": "860aee2c0f39078ac49a5471664bf67cf577116b",
"size": "132226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/lxml/src/lxml/tests/test_etree.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1841"
},
{
"name": "C",
"bytes": "10584735"
},
{
"name": "C++",
"bytes": "19231"
},
{
"name": "CSS",
"bytes": "172341"
},
{
"name": "JavaScript",
"bytes": "530938"
},
{
"name": "Python",
"bytes": "26306359"
},
{
"name": "Shell",
"bytes": "38138"
},
{
"name": "XSLT",
"bytes": "306125"
}
],
"symlink_target": ""
}
|
# Register the Item model with Django's default admin site so it can be
# managed through the auto-generated admin interface.
from django.contrib import admin
from api.models import Item

admin.site.register(Item)
|
{
"content_hash": "a97f615f75065919b0cb2c0baaa9ffdd",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 32,
"avg_line_length": 22,
"alnum_prop": 0.8181818181818182,
"repo_name": "ArbiterGames/instants",
"id": "0988c5a1099ce8675c17ca819d7c1aec4fece732",
"size": "88",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2668"
},
{
"name": "Shell",
"bytes": "2149"
}
],
"symlink_target": ""
}
|
# Test settings: start from the project base settings and override the
# handful of values the test suite depends on.
from .base import *  # noqa

# Re-raise exceptions from the request/response cycle so failures surface
# directly in tests instead of being rendered as 500 responses.
DEBUG_PROPAGATE_EXCEPTIONS = True
SITE_ID = 1
# Deterministic key for tests only; never used in a deployed environment.
SECRET_KEY = 'not very secret in tests'
USE_I18N = True
USE_L10N = True
|
{
"content_hash": "20b7d8bcdcb5b10c9b046f187f4f3f3b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 39,
"avg_line_length": 18.5,
"alnum_prop": 0.7027027027027027,
"repo_name": "toystori/v2",
"id": "37976df454272aad76cb6c3db5182426e723bb4f",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/settings/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "746"
},
{
"name": "HTML",
"bytes": "9473"
},
{
"name": "Makefile",
"bytes": "209"
},
{
"name": "Python",
"bytes": "12388"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Used to load README.rst for the long_description. The original version
    opened the file without ever closing it; a context manager now closes
    the handle deterministically instead of relying on garbage collection.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata consumed by setuptools / pip.
setup(
    name = "django-macaddress",
    version = "1.0.0",
    url = 'http://github.com/tubaman/django-macaddress',
    license = 'BSD',
    description = "MAC address model and form fields for Django apps.",
    # Reuse the README as the PyPI long description.
    long_description = read('README.rst'),

    author = 'Ryan Nowakowski',
    author_email = 'tubaman@fattuba.com',

    packages = ['macaddress'],

    install_requires = ['netaddr'],
    # Django is only needed to run the test suite, not at runtime.
    tests_require = ['django'],

    classifiers = [
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
|
{
"content_hash": "430d852adab2e636a368e8966b71b2fd",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 30.896551724137932,
"alnum_prop": 0.6116071428571429,
"repo_name": "jimfunk/django-macaddress",
"id": "08428ac28acff0c74b61e142947093bcef82d9f1",
"size": "896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3640"
}
],
"symlink_target": ""
}
|
from django.db import models
# Create your models here.
class News(models.Model):
    # A news article record.
    title = models.CharField(max_length=255)
    author = models.TextField(null=True)
    content = models.TextField(null=True)
    # Integer category/magazine identifier; indexed for filtering.
    # NOTE(review): presumably references another table by plain integer —
    # confirm against callers.
    mag = models.SmallIntegerField(
        db_index=True,
        null=True,
    )
    # Publication timestamp set explicitly by callers (both auto_now options
    # are disabled); indexed for date-range queries.
    date = models.DateTimeField(
        auto_now_add=False,
        auto_now=False,
        db_index=True,
    )

    def __unicode__(self):
        # Python 2 string representation: show the article title.
        return u'%s' % self.title
class OldNews(models.Model):
    # Legacy news table; the French field names (Titre=title, Texte=body,
    # Auteur=author) appear to mirror an older schema — kept as-is so the
    # column names match the existing database.
    Titre = models.TextField(null=True)
    Date = models.TextField(null=True)
    Texte = models.TextField(null=True)
    Auteur = models.TextField(null=True)
    # Content-type flags: analysis piece, results report, interview.
    Analyse = models.BooleanField(default=False)
    Resultats = models.BooleanField(default=False)
    Interview = models.BooleanField(default=False)

    def __unicode__(self):
        # Python 2 string representation: show the legacy title field.
        return u'%s' % self.Titre
|
{
"content_hash": "e8e857658cf29e91a5f6767208b80b2f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 50,
"avg_line_length": 25.794117647058822,
"alnum_prop": 0.6533637400228051,
"repo_name": "Tooskich/python_core",
"id": "5504facb7965275f2891096c5eea49ff92e77e32",
"size": "900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "news/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "75827"
}
],
"symlink_target": ""
}
|
from osc_lib.tests import utils as osc_utils
from unittest import mock
from saharaclient.api import cluster_templates as api_ct
from saharaclient.api import clusters as api_cl
from saharaclient.api import images as api_img
from saharaclient.api import node_group_templates as api_ngt
from saharaclient.osc.v2 import clusters as osc_cl
from saharaclient.tests.unit.osc.v1 import test_clusters as tc_v1
# Canned API payloads used as mock return values throughout these tests.

# Cluster description as returned by the data-processing v2 clusters API,
# including a nested verification (health check) section.
CLUSTER_INFO = {
    "description": "Cluster template for tests",
    "use_autoconfig": True,
    "is_default": False,
    "node_groups": [
        {
            "count": 2,
            "id": "ng_id",
            "name": "fakeng",
            "plugin_name": 'fake',
            "plugin_version": '0.1',
            "node_group_template_id": 'ngt_id'
        }
    ],
    "plugin_version": "0.1",
    "is_public": False,
    "plugin_name": "fake",
    "id": "cluster_id",
    "anti_affinity": [],
    "name": "fake",
    "is_protected": False,
    "cluster_template_id": "ct_id",
    "neutron_management_network": "net_id",
    "user_keypair_id": "test",
    "status": 'Active',
    "default_image_id": "img_id",
    'verification': {
        'status': 'GREEN',
        'id': 'ver_id',
        'cluster_id': 'cluster_id',
        'checks': [
            {
                'status': 'GREEN',
                'name': 'Some check'
            }
        ]
    }
}

# Minimal cluster template matching CLUSTER_INFO['cluster_template_id'].
CT_INFO = {
    "plugin_name": "fake",
    "plugin_version": "0.1",
    "name": '"template',
    "id": "ct_id"
}

# Minimal node group template matching the node_groups entry above.
NGT_INFO = {
    'id': 'ngt_id',
    'name': 'fakeng'
}
class TestClusters(tc_v1.TestClusters):
    """Base fixture for the v2 cluster command tests.

    Builds on the v1 fixture, switches the client to data-processing API
    v2 and exposes freshly reset mocks for the clusters, node group
    template, cluster template and image managers.
    """

    def setUp(self):
        super(TestClusters, self).setUp()
        # Force the v2 API so the v2 command implementations are exercised.
        self.app.api_version['data_processing'] = '2'
        self.cl_mock = (
            self.app.client_manager.data_processing.clusters)
        self.ngt_mock = (
            self.app.client_manager.data_processing.node_group_templates)
        self.ct_mock = (
            self.app.client_manager.data_processing.cluster_templates)
        self.img_mock = (
            self.app.client_manager.data_processing.images)
        # Drop any call history recorded while the base class set things up.
        self.cl_mock.reset_mock()
        self.ngt_mock.reset_mock()
        self.ct_mock.reset_mock()
        self.img_mock.reset_mock()
class TestCreateCluster(TestClusters):
    """Tests for the v2 CreateCluster command."""
    # TODO(apavlov): check for creation with --json

    def setUp(self):
        super(TestCreateCluster, self).setUp()
        # Resolve every lookup (cluster, template, image, network) to the
        # canned fixtures defined at module level.
        self.cl_mock.create.return_value = api_cl.Cluster(
            None, CLUSTER_INFO)
        self.cl_mock.find_unique.return_value = api_cl.Cluster(
            None, CLUSTER_INFO)
        self.ct_mock.find_unique.return_value = api_ct.ClusterTemplate(
            None, CT_INFO)
        self.img_mock.find_unique.return_value = api_img.Image(
            None, {'id': 'img_id'})
        self.net_mock = self.app.client_manager.network
        self.net_mock.find_network.return_value = mock.Mock(id='net_id')
        self.net_mock.reset_mock()

        # Command to test
        self.cmd = osc_cl.CreateCluster(self.app, None)

    def test_cluster_create_minimum_options(self):
        arglist = ['--name', 'fake', '--cluster-template', 'template',
                   '--image', 'ubuntu']
        verifylist = [('name', 'fake'), ('cluster_template', 'template'),
                      ('image', 'ubuntu')]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)

        # Check that correct arguments were passed: unspecified options must
        # fall back to None/False defaults.
        self.cl_mock.create.assert_called_once_with(
            cluster_template_id='ct_id', count=None, default_image_id='img_id',
            description=None, plugin_version='0.1', is_protected=False,
            is_public=False, is_transient=False, name='fake', net_id=None,
            plugin_name='fake', user_keypair_id=None)

    def test_cluster_create_all_options(self):
        arglist = ['--name', 'fake', '--cluster-template', 'template',
                   '--image', 'ubuntu', '--user-keypair', 'test',
                   '--neutron-network', 'net', '--description', 'descr',
                   '--transient', '--public', '--protected']

        verifylist = [('name', 'fake'), ('cluster_template', 'template'),
                      ('image', 'ubuntu'), ('user_keypair', 'test'),
                      ('neutron_network', 'net'), ('description', 'descr'),
                      ('transient', True), ('public', True),
                      ('protected', True)]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)

        # Check that correct arguments were passed
        self.cl_mock.create.assert_called_once_with(
            cluster_template_id='ct_id', count=None, default_image_id='img_id',
            description='descr', plugin_version='0.1', is_protected=True,
            is_public=True, is_transient=True, name='fake', net_id='net_id',
            plugin_name='fake', user_keypair_id='test')

        # Check that columns are correct
        expected_columns = ('Anti affinity', 'Cluster template id',
                            'Description', 'Id', 'Image',
                            'Is protected', 'Is public', 'Name',
                            'Neutron management network', 'Node groups',
                            'Plugin name', 'Plugin version', 'Status',
                            'Use autoconfig', 'User keypair id')
        self.assertEqual(expected_columns, columns)

        # Check that data is correct
        expected_data = ('', 'ct_id', 'Cluster template for tests',
                         'cluster_id', 'img_id', False, False, 'fake',
                         'net_id', 'fakeng:2', 'fake', '0.1', 'Active', True,
                         'test')
        self.assertEqual(expected_data, data)

    def test_cluster_create_with_count(self):
        # With --count the API returns a batch payload; the command output
        # collapses to a single name/id pair.
        clusters_mock = mock.Mock()
        clusters_mock.to_dict.return_value = {
            'clusters': [{'cluster': {'id': 'cluster1_id'}},
                         {'cluster': {'id': 'cluster2_id'}}]
        }
        self.cl_mock.create.return_value = clusters_mock

        arglist = ['--name', 'fake', '--cluster-template', 'template',
                   '--image', 'ubuntu', '--count', '2']
        verifylist = [('name', 'fake'), ('cluster_template', 'template'),
                      ('image', 'ubuntu'), ('count', 2)]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)

        # Check that correct arguments were passed
        self.cl_mock.create.assert_called_once_with(
            cluster_template_id='ct_id', count=2, default_image_id='img_id',
            description=None, plugin_version='0.1', is_protected=False,
            is_public=False, is_transient=False, name='fake', net_id=None,
            plugin_name='fake', user_keypair_id=None)

        # Check that columns are correct
        expected_columns = ('fake',)
        self.assertEqual(expected_columns, columns)

        # Check that data is correct
        expected_data = ('cluster_id',)
        self.assertEqual(expected_data, data)
class TestListClusters(TestClusters):
    """Tests for the v2 ListClusters command columns and rows."""

    def setUp(self):
        super(TestListClusters, self).setUp()
        self.cl_mock.list.return_value = [api_cl.Cluster(
            None, CLUSTER_INFO)]

        # Command to test
        self.cmd = osc_cl.ListClusters(self.app, None)

    def test_clusters_list_no_options(self):
        arglist = []
        verifylist = []

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)

        # Check that columns are correct
        expected_columns = ['Name', 'Id', 'Plugin name', 'Plugin version',
                            'Status']
        self.assertEqual(expected_columns, columns)

        # Check that data is correct
        expected_data = [('fake', 'cluster_id', 'fake', '0.1', 'Active')]
        self.assertEqual(expected_data, list(data))

    def test_clusters_list_long(self):
        # --long adds the description and image columns.
        arglist = ['--long']
        verifylist = [('long', True)]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)

        # Check that columns are correct
        expected_columns = ['Name', 'Id', 'Plugin name', 'Plugin version',
                            'Status', 'Description', 'Image']
        self.assertEqual(expected_columns, columns)

        # Check that data is correct
        expected_data = [('fake', 'cluster_id', 'fake', '0.1', 'Active',
                          'Cluster template for tests', 'img_id')]
        self.assertEqual(expected_data, list(data))

    def test_clusters_list_extra_search_opts(self):
        # Filtering options must parse and leave the output shape unchanged.
        arglist = ['--plugin', 'fake', '--plugin-version', '0.1', '--name',
                   'fake']
        verifylist = [('plugin', 'fake'), ('plugin_version', '0.1'),
                      ('name', 'fake')]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)

        # Check that columns are correct
        expected_columns = ['Name', 'Id', 'Plugin name', 'Plugin version',
                            'Status']
        self.assertEqual(expected_columns, columns)

        # Check that data is correct
        expected_data = [('fake', 'cluster_id', 'fake', '0.1', 'Active')]
        self.assertEqual(expected_data, list(data))
class TestShowCluster(TestClusters):
    """Tests for the v2 ShowCluster command."""

    def setUp(self):
        super(TestShowCluster, self).setUp()
        self.cl_mock.find_unique.return_value = api_cl.Cluster(
            None, CLUSTER_INFO)

        # Command to test
        self.cmd = osc_cl.ShowCluster(self.app, None)

    def test_cluster_show(self):
        arglist = ['fake']
        verifylist = [('cluster', 'fake')]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)

        # Check that correct arguments were passed
        self.cl_mock.find_unique.assert_called_once_with(name='fake')

        # Check that columns are correct
        expected_columns = ('Anti affinity', 'Cluster template id',
                            'Description', 'Id', 'Image',
                            'Is protected', 'Is public', 'Name',
                            'Neutron management network', 'Node groups',
                            'Plugin name', 'Plugin version', 'Status',
                            'Use autoconfig', 'User keypair id')
        self.assertEqual(expected_columns, columns)

        # Check that data is correct
        expected_data = ('', 'ct_id', 'Cluster template for tests',
                         'cluster_id', 'img_id', False, False, 'fake',
                         'net_id', 'fakeng:2', 'fake', '0.1', 'Active', True,
                         'test')
        self.assertEqual(expected_data, data)

    def test_cluster_show_verification(self):
        # --verification adds the per-check and overall status columns,
        # populated from CLUSTER_INFO['verification'].
        arglist = ['fake', '--verification']
        verifylist = [('cluster', 'fake'), ('verification', True)]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)

        # Check that correct arguments were passed
        self.cl_mock.find_unique.assert_called_once_with(name='fake')

        # Check that columns are correct
        expected_columns = ('Anti affinity', 'Cluster template id',
                            'Description', 'Health check (some check)', 'Id',
                            'Image', 'Is protected', 'Is public', 'Name',
                            'Neutron management network', 'Node groups',
                            'Plugin name', 'Plugin version', 'Status',
                            'Use autoconfig', 'User keypair id',
                            'Verification status')
        self.assertEqual(expected_columns, columns)

        # Check that data is correct
        expected_data = ('', 'ct_id', 'Cluster template for tests', 'GREEN',
                         'cluster_id', 'img_id', False, False, 'fake',
                         'net_id', 'fakeng:2', 'fake', '0.1', 'Active', True,
                         'test', 'GREEN')
        self.assertEqual(expected_data, data)
class TestDeleteCluster(TestClusters):
    """Tests for the v2 DeleteCluster command."""

    def setUp(self):
        super(TestDeleteCluster, self).setUp()
        self.cl_mock.find_unique.return_value = api_cl.Cluster(
            None, CLUSTER_INFO)

        # Command under test
        self.cmd = osc_cl.DeleteCluster(self.app, None)

    def test_cluster_delete(self):
        args = ['fake']
        expected_parse = [('cluster', ['fake'])]

        parsed = self.check_parser(self.cmd, args, expected_parse)
        self.cmd.take_action(parsed)

        # The delete call must use the id resolved from the cluster name.
        self.cl_mock.delete.assert_called_once_with('cluster_id')
class TestUpdateCluster(TestClusters):
    """Tests for the v2 UpdateCluster command."""

    def setUp(self):
        super(TestUpdateCluster, self).setUp()
        self.cl_mock.update.return_value = mock.Mock(
            cluster=CLUSTER_INFO.copy())
        self.cl_mock.find_unique.return_value = api_cl.Cluster(
            None, CLUSTER_INFO)

        # Command to test
        self.cmd = osc_cl.UpdateCluster(self.app, None)

    def test_cluster_update_no_options(self):
        arglist = []
        verifylist = []

        # The positional cluster argument is mandatory.
        self.assertRaises(osc_utils.ParserException, self.check_parser,
                          self.cmd, arglist, verifylist)

    def test_cluster_update_nothing_updated(self):
        arglist = ['fake']
        verifylist = [('cluster', 'fake')]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        self.cmd.take_action(parsed_args)

        # Check that correct arguments were passed: no update fields at all.
        self.cl_mock.update.assert_called_once_with('cluster_id')

    def test_cluster_update_all_options(self):
        arglist = ['fake', '--name', 'fake', '--description', 'descr',
                   '--public', '--protected']

        verifylist = [('cluster', 'fake'), ('name', 'fake'),
                      ('description', 'descr'), ('is_public', True),
                      ('is_protected', True)]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        columns, data = self.cmd.take_action(parsed_args)

        # Check that correct arguments were passed
        self.cl_mock.update.assert_called_once_with(
            'cluster_id', description='descr', is_protected=True,
            is_public=True, name='fake')

        # Check that columns are correct
        expected_columns = ('Anti affinity', 'Cluster template id',
                            'Description', 'Id', 'Image',
                            'Is protected', 'Is public', 'Name',
                            'Neutron management network', 'Node groups',
                            'Plugin name', 'Plugin version', 'Status',
                            'Use autoconfig', 'User keypair id')
        self.assertEqual(expected_columns, columns)

        # Check that data is correct
        expected_data = ('', 'ct_id', 'Cluster template for tests',
                         'cluster_id', 'img_id', False, False, 'fake',
                         'net_id', 'fakeng:2', 'fake', '0.1', 'Active', True,
                         'test')
        self.assertEqual(expected_data, data)

    def test_cluster_update_private_unprotected(self):
        # --private/--unprotected map to is_public/is_protected = False.
        arglist = ['fake', '--private', '--unprotected']

        verifylist = [('cluster', 'fake'), ('is_public', False),
                      ('is_protected', False)]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        columns, data = self.cmd.take_action(parsed_args)

        # Check that correct arguments were passed
        self.cl_mock.update.assert_called_once_with(
            'cluster_id', is_protected=False, is_public=False)
class TestScaleCluster(TestClusters):
    """Tests for the v2 ScaleCluster command."""

    def setUp(self):
        super(TestScaleCluster, self).setUp()
        self.cl_mock.scale.return_value = mock.Mock(
            cluster=CLUSTER_INFO.copy())
        self.cl_mock.find_unique.return_value = api_cl.Cluster(
            None, CLUSTER_INFO)

        # Command to test
        self.cmd = osc_cl.ScaleCluster(self.app, None)

    def test_cluster_scale_no_options(self):
        arglist = []
        verifylist = []

        # The positional cluster argument is mandatory.
        self.assertRaises(osc_utils.ParserException, self.check_parser,
                          self.cmd, arglist, verifylist)

    def test_cluster_scale_resize(self):
        # Scaling a node group that already exists in the cluster results
        # in a resize_node_groups request.
        self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
            None, NGT_INFO)
        arglist = ['fake', '--instances', 'fakeng:1']
        verifylist = [('cluster', 'fake'),
                      ('instances', ['fakeng:1'])]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        columns, data = self.cmd.take_action(parsed_args)

        # Check that correct arguments were passed
        self.cl_mock.scale.assert_called_once_with(
            'cluster_id',
            {'resize_node_groups': [
                {'count': 1,
                 'name': 'fakeng'}]}
        )

        # Check that columns are correct
        expected_columns = ('Anti affinity', 'Cluster template id',
                            'Description', 'Id', 'Image',
                            'Is protected', 'Is public', 'Name',
                            'Neutron management network', 'Node groups',
                            'Plugin name', 'Plugin version', 'Status',
                            'Use autoconfig', 'User keypair id')
        self.assertEqual(expected_columns, columns)

        # Check that data is correct
        expected_data = ('', 'ct_id', 'Cluster template for tests',
                         'cluster_id', 'img_id', False, False, 'fake',
                         'net_id', 'fakeng:2', 'fake', '0.1', 'Active', True,
                         'test')
        self.assertEqual(expected_data, data)

    def test_cluster_scale_add_ng(self):
        # A node group not present in the cluster yields an add_node_groups
        # request referencing the resolved template id.
        new_ng = {'name': 'new', 'id': 'new_id'}
        self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
            None, new_ng)
        arglist = ['fake', '--instances', 'new:1']
        verifylist = [('cluster', 'fake'), ('instances', ['new:1'])]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        self.cmd.take_action(parsed_args)

        # Check that correct arguments were passed
        self.cl_mock.scale.assert_called_once_with(
            'cluster_id',
            {'add_node_groups': [
                {'count': 1,
                 'node_group_template_id': 'new_id',
                 'name': 'new'}
            ]})
class TestVerificationUpdateCluster(TestClusters):
    """Tests for the v2 VerificationUpdateCluster command."""

    def setUp(self):
        super(TestVerificationUpdateCluster, self).setUp()
        self.cl_mock.find_unique.return_value = api_cl.Cluster(
            None, CLUSTER_INFO)
        self.cl_mock.verification_update.return_value = api_cl.Cluster(
            None, CLUSTER_INFO)

        # Command to test
        self.cmd = osc_cl.VerificationUpdateCluster(self.app, None)

    def test_verification_show(self):
        # --show only reads the current verification data; no update call.
        arglist = ['fake', '--show']
        verifylist = [('cluster', 'fake'), ('show', True)]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        columns, data = self.cmd.take_action(parsed_args)

        # Check that correct arguments were passed
        self.cl_mock.find_unique.assert_called_once_with(name='fake')

        # Check that columns are correct
        expected_columns = ('Health check (some check)', 'Verification status')
        self.assertEqual(expected_columns, columns)

        # Check that data is correct
        expected_data = ('GREEN', 'GREEN')
        self.assertEqual(expected_data, data)

    def test_verification_start(self):
        # --start triggers a verification_update with the START status.
        arglist = ['fake', '--start']
        verifylist = [('cluster', 'fake'), ('status', 'START')]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        self.cmd.take_action(parsed_args)

        # Check that correct arguments were passed
        self.cl_mock.verification_update.assert_called_once_with(
            'cluster_id', 'START')
|
{
"content_hash": "7850eb013a4fd38a457b60c8d12310e7",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 79,
"avg_line_length": 37.89325842696629,
"alnum_prop": 0.5587842846553002,
"repo_name": "openstack/python-saharaclient",
"id": "99c07b6ed3ed4c6ad349446f626eaa3a4e25644b",
"size": "20818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saharaclient/tests/unit/osc/v2/test_clusters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "628906"
}
],
"symlink_target": ""
}
|
"""This plugin provides test results in the standard XUnit XML format.
It's designed for the `Jenkins`_ (previously Hudson) continuous build
system, but will probably work for anything else that understands an
XUnit-formatted XML representation of test results.
Add this shell command to your builder ::
nosetests --with-xunit
And by default a file named nosetests.xml will be written to the
working directory.
In a Jenkins builder, tick the box named "Publish JUnit test result report"
under the Post-build Actions and enter this value for Test report XMLs::
**/nosetests.xml
If you need to change the name or location of the file, you can set the
``--xunit-file`` option.
Here is an abbreviated version of what an XML test report might look like::
<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="nosetests" tests="1" errors="1" failures="0" skip="0">
<testcase classname="path_to_test_suite.TestSomething"
name="test_it" time="0">
<error type="exceptions.TypeError" message="oops, wrong type">
Traceback (most recent call last):
...
TypeError: oops, wrong type
</error>
</testcase>
</testsuite>
.. _Jenkins: http://jenkins-ci.org/
"""
import codecs
import doctest
import os
import sys
import traceback
import re
import inspect
from StringIO import StringIO
from time import time
from xml.sax import saxutils
from nose.plugins.base import Plugin
from nose.exc import SkipTest
from nose.pyversion import force_unicode, format_exception
# Invalid XML characters, control characters 0-31 sans \t, \n and \r
CONTROL_CHARACTERS = re.compile(r"[\000-\010\013\014\016-\037]")
TEST_ID = re.compile(r'^(.*?)(\(.*\))$')
def xml_safe(value):
    """Replace characters that are invalid in XML documents with '?'."""
    # CONTROL_CHARACTERS is a precompiled pattern; re.sub accepts it directly.
    return re.sub(CONTROL_CHARACTERS, '?', value)
def escape_cdata(cdata):
    """Make *cdata* safe to embed inside an XML CDATA section."""
    # A literal ']]>' would end the section early; close it, emit the
    # escaped marker, and reopen a new section.
    sanitized = xml_safe(cdata)
    return sanitized.replace(']]>', ']]>]]><![CDATA[')
def id_split(idval):
    """Split a test id into [containing scope, test name (incl. call args)]."""
    match = TEST_ID.match(idval)
    if match is None:
        # No trailing "(...)" part: a plain dotted name.
        return idval.rsplit(".", 1)
    name, fargs = match.groups()
    head, tail = name.rsplit(".", 1)
    return [head, tail + fargs]
def nice_classname(obj):
"""Returns a nice name for class object or class instance.
>>> nice_classname(Exception()) # doctest: +ELLIPSIS
'...Exception'
>>> nice_classname(Exception) # doctest: +ELLIPSIS
'...Exception'
"""
if inspect.isclass(obj):
cls_name = obj.__name__
else:
cls_name = obj.__class__.__name__
mod = inspect.getmodule(obj)
if mod:
name = mod.__name__
# jython
if name.startswith('org.python.core.'):
name = name[len('org.python.core.'):]
return "%s.%s" % (name, cls_name)
else:
return cls_name
def exc_message(exc_info):
    """Return the exception's message as XML-safe text."""
    exc = exc_info[1]
    if exc is None:
        # Old-style string exception: the message sits in the "type" slot.
        return xml_safe(force_unicode(exc_info[0], 'UTF-8'))
    try:
        msg = str(exc)
    except UnicodeEncodeError:
        try:
            msg = unicode(exc)
        except UnicodeError:
            # Fallback to args as neither str nor
            # unicode(Exception(u'\xe6')) work in Python < 2.6
            msg = exc.args[0]
    return xml_safe(force_unicode(msg, 'UTF-8'))
class Tee(object):
    """Minimal file-like object that duplicates every write to a set of
    underlying streams, decoding input with a fixed encoding first."""

    def __init__(self, encoding, *args):
        self._encoding = encoding
        self._streams = args

    def write(self, data):
        text = force_unicode(data, self._encoding)
        for stream in self._streams:
            stream.write(text)

    def writelines(self, lines):
        for each in lines:
            self.write(each)

    def flush(self):
        for stream in self._streams:
            stream.flush()

    def isatty(self):
        # Never report being a terminal, regardless of the wrapped streams.
        return False
class Xunit(Plugin):
    """This plugin provides test results in the standard XUnit XML format."""
    name = 'xunit'
    # nose plugin ordering score -- presumably 1500 runs this plugin's hooks
    # ahead of most others; confirm against nose.plugins docs.
    score = 1500
    encoding = 'UTF-8'
    # File handle of the XML report; opened lazily in report().
    error_report_file = None

    def __init__(self):
        super(Xunit, self).__init__()
        # Stack of saved (stdout, stderr) pairs; supports nested contexts.
        self._capture_stack = []
        # Per-test capture buffers, reset after each test.
        self._currentStdout = None
        self._currentStderr = None

    def _timeTaken(self):
        # Elapsed wall-clock time since beforeTest() started the timer.
        if hasattr(self, '_timer'):
            taken = time() - self._timer
        else:
            # test died before it ran (probably error in setup())
            # or success/failure added before test started probably
            # due to custom TestResult munging
            taken = 0.0
        return taken

    def _quoteattr(self, attr):
        """Escape an XML attribute. Value can be unicode."""
        attr = xml_safe(attr)
        return saxutils.quoteattr(attr)

    def options(self, parser, env):
        """Sets additional command line options."""
        Plugin.options(self, parser, env)
        parser.add_option(
            '--xunit-file', action='store',
            dest='xunit_file', metavar="FILE",
            default=env.get('NOSE_XUNIT_FILE', 'nosetests.xml'),
            help=("Path to xml file to store the xunit report in. "
                  "Default is nosetests.xml in the working directory "
                  "[NOSE_XUNIT_FILE]"))

    def configure(self, options, config):
        """Configures the xunit plugin."""
        Plugin.configure(self, options, config)
        self.config = config
        if self.enabled:
            # Counters feeding the <testsuite> summary attributes.
            self.stats = {'errors': 0,
                          'failures': 0,
                          'passes': 0,
                          'skipped': 0
                          }
            # Pre-rendered <testcase> fragments, joined in report().
            self.errorlist = []
            self.error_report_file_name = os.path.realpath(options.xunit_file)

    def report(self, stream):
        """Writes an Xunit-formatted XML file

        The file includes a report of test errors and failures.

        """
        # 'replace' substitutes unencodable characters rather than raising.
        self.error_report_file = codecs.open(self.error_report_file_name, 'w',
                                             self.encoding, 'replace')
        self.stats['encoding'] = self.encoding
        self.stats['total'] = (self.stats['errors'] + self.stats['failures']
                               + self.stats['passes'] + self.stats['skipped'])
        self.error_report_file.write(
            u'<?xml version="1.0" encoding="%(encoding)s"?>'
            u'<testsuite name="nosetests" tests="%(total)d" '
            u'errors="%(errors)d" failures="%(failures)d" '
            u'skip="%(skipped)d">' % self.stats)
        self.error_report_file.write(u''.join([force_unicode(e, self.encoding)
                                               for e in self.errorlist]))
        self.error_report_file.write(u'</testsuite>')
        self.error_report_file.close()
        if self.config.verbosity > 1:
            stream.writeln("-" * 70)
            stream.writeln("XML: %s" % self.error_report_file.name)

    def _startCapture(self):
        # Save the real streams, then tee all output into per-test buffers
        # while still forwarding it to the originals.
        self._capture_stack.append((sys.stdout, sys.stderr))
        self._currentStdout = StringIO()
        self._currentStderr = StringIO()
        sys.stdout = Tee(self.encoding, self._currentStdout, sys.stdout)
        sys.stderr = Tee(self.encoding, self._currentStderr, sys.stderr)

    def startContext(self, context):
        self._startCapture()

    def stopContext(self, context):
        self._endCapture()

    def beforeTest(self, test):
        """Initializes a timer before starting a test."""
        self._timer = time()
        self._startCapture()

    def _endCapture(self):
        # Restore the most recently saved (stdout, stderr) pair, if any.
        if self._capture_stack:
            sys.stdout, sys.stderr = self._capture_stack.pop()

    def afterTest(self, test):
        self._endCapture()
        self._currentStdout = None
        self._currentStderr = None

    def finalize(self, test):
        # Unwind any capture levels left over from unbalanced contexts.
        while self._capture_stack:
            self._endCapture()

    def _getCapturedStdout(self):
        # Rendered <system-out> element, or '' when nothing was captured.
        if self._currentStdout:
            value = self._currentStdout.getvalue()
            if value:
                return '<system-out><![CDATA[%s]]></system-out>' % escape_cdata(
                    value)
        return ''

    def _getCapturedStderr(self):
        # Rendered <system-err> element, or '' when nothing was captured.
        if self._currentStderr:
            value = self._currentStderr.getvalue()
            if value:
                return '<system-err><![CDATA[%s]]></system-err>' % escape_cdata(
                    value)
        return ''

    def addError(self, test, err, capt=None):
        """Add error output to Xunit report.
        """
        taken = self._timeTaken()

        # SkipTest arrives through the error channel but is reported as a
        # skip, not an error.  (NB: 'type' and 'id' shadow builtins here.)
        if issubclass(err[0], SkipTest):
            type = 'skipped'
            self.stats['skipped'] += 1
        else:
            type = 'error'
            self.stats['errors'] += 1

        tb = format_exception(err, self.encoding)
        id = test.id()

        self.errorlist.append(
            u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
            u'<%(type)s type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
            u'</%(type)s>%(systemout)s%(systemerr)s</testcase>' %
            {'cls': self._quoteattr(id_split(id)[0]),
             'name': self._quoteattr(id_split(id)[-1]),
             'taken': taken,
             'type': type,
             'errtype': self._quoteattr(nice_classname(err[0])),
             'message': self._quoteattr(exc_message(err)),
             'tb': escape_cdata(tb),
             'systemout': self._getCapturedStdout(),
             'systemerr': self._getCapturedStderr(),
             })

    def addFailure(self, test, err, capt=None, tb_info=None):
        """Add failure output to Xunit report.
        """
        taken = self._timeTaken()
        tb = format_exception(err, self.encoding)
        self.stats['failures'] += 1
        id = test.id()

        self.errorlist.append(
            u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
            u'<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
            u'</failure>%(systemout)s%(systemerr)s</testcase>' %
            {'cls': self._quoteattr(id_split(id)[0]),
             'name': self._quoteattr(id_split(id)[-1]),
             'taken': taken,
             'errtype': self._quoteattr(nice_classname(err[0])),
             'message': self._quoteattr(exc_message(err)),
             'tb': escape_cdata(tb),
             'systemout': self._getCapturedStdout(),
             'systemerr': self._getCapturedStderr(),
             })

    def addSuccess(self, test, capt=None):
        """Add success output to Xunit report.
        """
        taken = self._timeTaken()
        self.stats['passes'] += 1
        id = test.id()
        self.errorlist.append(
            '<testcase classname=%(cls)s name=%(name)s '
            'time="%(taken).3f">%(systemout)s%(systemerr)s</testcase>' %
            {'cls': self._quoteattr(id_split(id)[0]),
             'name': self._quoteattr(id_split(id)[-1]),
             'taken': taken,
             'systemout': self._getCapturedStdout(),
             'systemerr': self._getCapturedStderr(),
             })
|
{
"content_hash": "71b5d653d510807608f8691db9d14272",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 80,
"avg_line_length": 33.641337386018236,
"alnum_prop": 0.56035417419588,
"repo_name": "kisel/trex-core",
"id": "e1ec0e1d1c3cd6c4f09703e2079c4de2d4482e9d",
"size": "11068",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "scripts/external_libs/nose-1.3.4/python2/nose/plugins/xunit.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "16355010"
},
{
"name": "C++",
"bytes": "4832431"
},
{
"name": "CMake",
"bytes": "8882"
},
{
"name": "CSS",
"bytes": "333"
},
{
"name": "HTML",
"bytes": "5012"
},
{
"name": "JavaScript",
"bytes": "1234"
},
{
"name": "Makefile",
"bytes": "163741"
},
{
"name": "Python",
"bytes": "12389428"
},
{
"name": "Shell",
"bytes": "22573"
}
],
"symlink_target": ""
}
|
import gzip
import math
import os
import re
import tempfile
from tempfile import TemporaryDirectory
import mxnet as mx
import numpy as np
import pytest
from sockeye import __version__
from sockeye import constants as C
from sockeye import utils
@pytest.mark.parametrize("some_list, expected", [
    ([1, 2, 3, 4, 5, 6, 7, 8], [[1, 2, 3], [4, 5, 6], [7, 8]]),
    ([1, 2], [[1, 2]]),
    ([1, 2, 3], [[1, 2, 3]]),
    ([1, 2, 3, 4], [[1, 2, 3], [4]]),
])
def test_chunks(some_list, expected):
    # chunks() yields consecutive slices of at most 3 elements; the last
    # chunk may be shorter.
    assert list(utils.chunks(some_list, 3)) == expected
def test_get_alignments():
    """utils.get_alignments yields the (i, j) cells whose attention exceeds the threshold."""
    attention_matrix = np.asarray([[0.1, 0.4, 0.5],
                                   [0.2, 0.8, 0.0],
                                   [0.4, 0.4, 0.2]])
    cases = [(0.5, [(1, 1)]),
             (0.8, []),
             (0.1, [(0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 2)])]
    for threshold, expected_alignment in cases:
        actual = list(utils.get_alignments(attention_matrix, threshold=threshold))
        assert actual == expected_alignment
# (requested ids, number of GPUs available, expected expanded id set)
device_params = [([-4, 3, 5], 6, [0, 1, 2, 3, 4, 5]),
                 ([-2, 3, -2, 5], 6, [0, 1, 2, 3, 4, 5]),
                 ([-1], 1, [0]),
                 ([1], 1, [1])]


@pytest.mark.parametrize("requested_device_ids, num_gpus_available, expected", device_params)
def test_expand_requested_device_ids(requested_device_ids, num_gpus_available, expected):
    """Expansion of device requests must yield exactly the expected id set."""
    expanded = utils._expand_requested_device_ids(requested_device_ids, num_gpus_available)
    assert set(expanded) == set(expected)
@pytest.mark.parametrize("requested_device_ids, num_gpus_available, expected", device_params)
def test_acquire_gpus(tmpdir, requested_device_ids, num_gpus_available, expected):
    """Acquiring GPUs yields the expected ids and leaves one lock file per acquired GPU."""
    with utils.acquire_gpus(requested_device_ids, lock_dir=str(tmpdir),
                            num_gpus_available=num_gpus_available) as acquired_gpus:
        assert set(acquired_gpus) == set(expected)
        # make sure the master lock does not exist anymore after acquiring
        # (but rather just one lock per acquired GPU)
        assert len(tmpdir.listdir()) == len(acquired_gpus)
# We expect the following settings to raise a ValueError
device_params_expected_exception = [
    # requesting the same gpu twice
    ([-4, 3, 3, 5], 5),
    # too few GPUs available
    ([-4, 3, 5], 5),
    ([3, 5], 1),
    ([-2], 1),
    ([-1, -1], 1)]


@pytest.mark.parametrize("requested_device_ids, num_gpus_available", device_params_expected_exception)
def test_expand_requested_device_ids_exception(requested_device_ids, num_gpus_available):
    """Duplicate or over-subscribed GPU requests must raise ValueError."""
    with pytest.raises(ValueError):
        utils._expand_requested_device_ids(requested_device_ids, num_gpus_available)
@pytest.mark.parametrize("requested_device_ids, num_gpus_available", device_params_expected_exception)
def test_acquire_gpus_exception(tmpdir, requested_device_ids, num_gpus_available):
    """acquire_gpus must reject the same invalid requests as _expand_requested_device_ids."""
    with pytest.raises(ValueError):
        with utils.acquire_gpus(requested_device_ids, lock_dir=str(tmpdir),
                                num_gpus_available=num_gpus_available) as _:
            pass
# Let's assume GPU 1 is locked already: expansion must skip it.
device_params_1_locked = [([-4, 3, 5], 7, [0, 2, 3, 4, 5, 6]),
                          ([-2, 3, -2, 5], 7, [0, 2, 3, 4, 5, 6])]


@pytest.mark.parametrize("requested_device_ids, num_gpus_available, expected", device_params_1_locked)
def test_acquire_gpus_1_locked(tmpdir, requested_device_ids, num_gpus_available, expected):
    """With GPU 1 held by another lock, it must never appear among the acquired ids."""
    with utils.GpuFileLock([1], str(tmpdir)) as _held_lock:
        with utils.acquire_gpus(requested_device_ids, lock_dir=str(tmpdir),
                                num_gpus_available=num_gpus_available) as acquired_gpus:
            assert set(acquired_gpus) == set(expected)
def test_acquire_gpus_exception_propagation(tmpdir):
    """An exception raised inside the acquire_gpus context must reach the caller unchanged."""
    error = RuntimeError("This exception should be propagated properly.")
    with pytest.raises(RuntimeError) as exc_info:
        with utils.acquire_gpus([-1, 4, -1], lock_dir=str(tmpdir), num_gpus_available=12) as _:
            raise error
    # the very same exception object must propagate (no wrapping, no swallowing)
    assert exc_info.value is error
def test_gpu_file_lock_cleanup(tmpdir):
    """The per-GPU lock file exists while the lock is held and is removed afterwards."""
    lock_file = tmpdir.join("sockeye.gpu0.lock")
    with utils.GpuFileLock([0], str(tmpdir)) as acquired:
        assert acquired == 0
        assert lock_file.check(), "Lock file did not exist."
        assert not tmpdir.join("sockeye.gpu1.lock").check(), "Unrelated lock file did exist"
    assert not lock_file.check(), "Lock file was not cleaned up."
def test_gpu_file_lock_exception_propagation(tmpdir):
    """An exception raised while holding a GpuFileLock must reach the caller unchanged."""
    error = RuntimeError("This exception should be propagated properly.")
    with pytest.raises(RuntimeError) as exc_info:
        with utils.GpuFileLock([0], str(tmpdir)):
            raise error
    # identity check: the lock must not replace or swallow the exception
    assert exc_info.value is error
def test_gpu_file_lock_locking(tmpdir):
    """A second lock attempt on an already-held device must yield None."""
    with utils.GpuFileLock([0], str(tmpdir)) as first_lock:
        assert first_lock == 0
        with utils.GpuFileLock([0], str(tmpdir)) as second_lock:
            assert second_lock is None
def test_gpu_file_lock_permission_exception(tmpdir):
    """A lock file we lack permission to open is skipped; the next GPU is acquired."""
    tmpdir = tmpdir.mkdir("sub")
    # create GPU 0's lock file, then strip all permissions from it
    unreadable_lock = tmpdir.join("sockeye.gpu0.lock")
    unreadable_lock.write("")
    unreadable_lock.chmod(0)
    with utils.GpuFileLock([0, 1], str(tmpdir)) as acquired_lock:
        assert acquired_lock == 1
def test_check_condition_true():
    """A true condition must pass silently (no exception)."""
    utils.check_condition(1 == 1, "Nice")
def test_check_condition_false():
    """A false condition must raise SockeyeError carrying the given message."""
    with pytest.raises(utils.SockeyeError) as e:
        utils.check_condition(1 == 2, "Wrong")
    assert "Wrong" == str(e.value)
@pytest.mark.parametrize("version_string,expected_version", [("1.0.3", ("1", "0", "3")),
                                                             ("1.0.2.3", ("1", "0", "2.3"))])
def test_parse_version(version_string, expected_version):
    """parse_version yields (release, major, minor); extra dots stay in the minor part."""
    assert utils.parse_version(version_string) == expected_version
def test_check_version_disregards_minor():
    """check_version must accept a version differing only in the minor component."""
    release, major, minor = utils.parse_version(__version__)
    bumped_minor_version = "%s.%s.%d" % (release, major, int(minor) + 1)
    utils.check_version(bumped_minor_version)
def _get_later_major_version():
    """Return the current sockeye version string with its major component incremented."""
    release, major, minor = utils.parse_version(__version__)
    return "%s.%d.%s" % (release, int(major) + 1, minor)
def test_check_version_checks_major():
    """A mismatched major version must raise SockeyeError with the exact message."""
    version = _get_later_major_version()
    with pytest.raises(utils.SockeyeError) as e:
        utils.check_version(version)
    expected_message = "Given major version (%s) does not match major code version (%s)" % (version, __version__)
    assert str(e.value) == expected_message
def test_version_matches_changelog():
    """
    Tests whether the last version mentioned in CHANGELOG.md matches the sockeye version (sockeye/__init__.py).
    """
    pattern = re.compile(r'''## \[([0-9.]+)\]''')
    changelog_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "CHANGELOG.md")
    # FIX: read via a context manager — the old code leaked the file handle
    with open(changelog_path) as changelog_file:
        changelog = changelog_file.read()
    # the first "## [x.y.z]" heading is the most recent release
    last_changelog_version = pattern.findall(changelog)[0]
    assert __version__ == last_changelog_version
@pytest.mark.parametrize("samples,expected_mean, expected_variance",
                         [
                             ([1, 2], 1.5, 0.25),
                             ([4., 100., 12., -3, 1000, 1., -200], 130.57142857142858, 132975.38775510204),
                         ])
def test_online_mean_and_variance(samples, expected_mean, expected_variance):
    """Feeding samples one at a time must reproduce the batch mean and variance."""
    mean_and_variance = utils.OnlineMeanAndVariance()
    for sample in samples:
        mean_and_variance.update(sample)
    assert np.isclose(mean_and_variance.mean, expected_mean)
    assert np.isclose(mean_and_variance.variance, expected_variance)
@pytest.mark.parametrize("samples,expected_mean",
                         [
                             ([], 0.),
                             ([5.], 5.),
                         ])
def test_online_mean_and_variance_nan(samples, expected_mean):
    """With fewer than two samples the variance is undefined and must be NaN."""
    mean_and_variance = utils.OnlineMeanAndVariance()
    for sample in samples:
        mean_and_variance.update(sample)
    assert np.isclose(mean_and_variance.mean, expected_mean)
    assert math.isnan(mean_and_variance.variance)
# (raw input line, expected tokens after whitespace splitting)
get_tokens_tests = [("this is a line \n", ["this", "is", "a", "line"]),
                    (" a \tb \r \n", ["a", "b"])]


@pytest.mark.parametrize("line, expected_tokens", get_tokens_tests)
def test_get_tokens(line, expected_tokens):
    """Tokens are split on any whitespace; empty tokens are dropped."""
    assert list(utils.get_tokens(line)) == expected_tokens
def test_average_arrays():
    """Element-wise average of n same-shape arrays; mismatched shapes must raise."""
    n = 4
    shape = (12, 14)
    # FIX: reuse `shape` (was a duplicated literal) so the test stays consistent
    arrays = [np.random.uniform(0, 1, shape) for _ in range(n)]
    expected_average = np.zeros(shape)
    for array in arrays:
        expected_average += array
    # FIX: divide by n (was a hard-coded 4 silently duplicating n)
    expected_average /= n

    mx_arrays = [mx.nd.array(a) for a in arrays]
    assert np.isclose(utils.average_arrays(mx_arrays).asnumpy(), expected_average).all()

    with pytest.raises(utils.SockeyeError) as e:
        other_shape = (12, 13)
        utils.average_arrays(mx_arrays + [mx.nd.zeros(other_shape)])
    assert "nd array shapes do not match" == str(e.value)
def test_save_and_load_params():
    """save_params writes 'arg:'/'aux:'-prefixed arrays; load_params strips the prefixes."""
    array = mx.nd.uniform(0, 1, (10, 12))
    arg_params = {"array": array}
    aux_params = {"array": array}
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, "params")
        utils.save_params(arg_params, path, aux_params=aux_params)
        # raw mxnet load shows the prefixed on-disk naming convention
        params = mx.nd.load(path)
        assert len(params.keys()) == 2
        assert "arg:array" in params.keys()
        assert "aux:array" in params.keys()
        loaded_arg_params, loaded_aux_params = utils.load_params(path)
        assert "array" in loaded_arg_params
        assert "array" in loaded_aux_params
        # values must round-trip unchanged
        assert np.isclose(loaded_arg_params['array'].asnumpy(), array.asnumpy()).all()
        assert np.isclose(loaded_aux_params['array'].asnumpy(), array.asnumpy()).all()
def test_print_value():
    """Inserting the PrintValue custom op must not change forward or backward results."""
    data = mx.sym.Variable("data")
    weights = mx.sym.Variable("weights")
    softmax_label = mx.sym.Variable("softmax_label")
    # baseline graph: FC -> softmax
    fc = mx.sym.FullyConnected(data=data, num_hidden=128, weight=weights, no_bias=True)
    out = mx.sym.SoftmaxOutput(data=fc, label=softmax_label, name="softmax")
    # identical graph with the PrintValue op spliced in after the FC layer
    fc_print = mx.sym.Custom(op_type="PrintValue", data=fc, print_name="FullyConnected")
    out_print = mx.sym.SoftmaxOutput(data=fc_print, label=softmax_label, name="softmax")
    # shared random inputs so both executors see identical data
    data_np = np.random.rand(1, 256)
    weights_np = np.random.rand(128, 256)
    label_np = np.random.rand(1, 128)
    executor_base = out.simple_bind(mx.cpu(), data=(1, 256), softmax_label=(1, 128), weights=(128, 256))
    executor_base.arg_dict["data"][:] = data_np
    executor_base.arg_dict["weights"][:] = weights_np
    executor_base.arg_dict["softmax_label"][:] = label_np
    executor_print = out_print.simple_bind(mx.cpu(), data=(1, 256), softmax_label=(1, 128), weights=(128, 256))
    executor_print.arg_dict["data"][:] = data_np
    executor_print.arg_dict["weights"][:] = weights_np
    executor_print.arg_dict["softmax_label"][:] = label_np
    # forward pass: outputs must match
    output_base = executor_base.forward(is_train=True)[0]
    output_print = executor_print.forward(is_train=True)[0]
    assert np.isclose(output_base.asnumpy(), output_print.asnumpy()).all()
    # backward pass: weight gradients must match too
    executor_base.backward()
    executor_print.backward()
    assert np.isclose(executor_base.grad_arrays[1].asnumpy(), executor_print.grad_arrays[1].asnumpy()).all()
# perplexity improves downwards, BLEU improves upwards; ties are never "better"
@pytest.mark.parametrize("new, old, metric, result",
                         [(0, 0, C.PERPLEXITY, False),
                          (1.0, 1.0, C.PERPLEXITY, False),
                          (1.0, 0.9, C.PERPLEXITY, False),
                          (0.99, 1.0, C.PERPLEXITY, True),
                          (C.LARGE_POSITIVE_VALUE, np.inf, C.PERPLEXITY, True),
                          (0, 0, C.BLEU, False),
                          (1.0, 1.0, C.BLEU, False),
                          (1.0, 0.9, C.BLEU, True),
                          (0.99, 1.0, C.BLEU, False),
                          (C.LARGE_POSITIVE_VALUE, np.inf, C.BLEU, False),
                          ])
def test_metric_value_is_better(new, old, metric, result):
    """Direction of improvement depends on the metric (see table above)."""
    assert utils.metric_value_is_better(new, old, metric) == result
@pytest.mark.parametrize("num_factors", [1, 2, 3])
def test_split(num_factors):
    """Splitting factored input along axis 2 yields a list of squeezed 2D arrays."""
    batch_size, bucket_key = 4, 10
    # Simulates splitting factored input
    data = mx.nd.random.normal(shape=(batch_size, bucket_key, num_factors))
    pieces = utils.split(data, num_outputs=num_factors, axis=2, squeeze_axis=True)
    assert isinstance(pieces, list)
    assert pieces[0].shape == (batch_size, bucket_key)
def test_get_num_gpus():
    """GPU count must be non-negative on any machine (zero is fine on CPU-only hosts)."""
    assert utils.get_num_gpus() >= 0
def _touch_file(fname, compressed: bool, empty: bool) -> str:
if compressed:
open_func = gzip.open
else:
open_func = open
with open_func(fname, encoding='utf8', mode='wt') as f:
if not empty:
for i in range(10):
print(str(i), file=f)
return fname
def test_is_gzip_file():
    """is_gzip_file must classify gzip vs plain files, empty or not."""
    with TemporaryDirectory() as temp:
        fname = os.path.join(temp, 'test')
        for empty in (True, False):
            assert utils.is_gzip_file(_touch_file(fname, compressed=True, empty=empty))
            assert not utils.is_gzip_file(_touch_file(fname, compressed=False, empty=empty))
def test_smart_open_without_suffix():
    """smart_open must read gzip'd and plain files even without a telling suffix."""
    with TemporaryDirectory() as temp:
        fname = os.path.join(temp, 'test')
        for compressed in (True, False):
            _touch_file(fname, compressed=compressed, empty=False)
            with utils.smart_open(fname) as fin:
                assert len(fin.readlines()) == 10
@pytest.mark.parametrize("data,expected_lengths", [
    # rows are zero-padded sequences; length = number of non-zero entries
    (mx.nd.array([[1, 2, 0], [1, 0, 0], [0, 0, 0]]), mx.nd.array([2, 1, 0]))
])
def test_compute_lengths(data, expected_lengths):
    """The symbolic length op must count non-padding symbols per row."""
    lengths = utils.compute_lengths(mx.sym.Variable('data')).eval(data=data)[0]
    assert (lengths.asnumpy() == expected_lengths.asnumpy()).all()
|
{
"content_hash": "6754e6632c1fca5c32aa5a03f749b8e8",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 118,
"avg_line_length": 38.69895287958115,
"alnum_prop": 0.621254143272678,
"repo_name": "artemsok/sockeye",
"id": "caee1408f8df9d786b2ea2ca4166342bb3ec8396",
"size": "15349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "824"
},
{
"name": "Dockerfile",
"bytes": "1028"
},
{
"name": "JavaScript",
"bytes": "4196"
},
{
"name": "Python",
"bytes": "1548538"
},
{
"name": "Shell",
"bytes": "7456"
}
],
"symlink_target": ""
}
|
__author__ = 'Timo Boldt'
__version__ = '1.0.0'
import time
import libraries.Zlib as Zlib
class Server:
    """Wraps a Zlib (ZeroMQ) server that periodically pushes shared data to clients.

    Host, port and the send interval come from the supplied config object;
    all diagnostics go through the supplied logger.
    """

    def __init__(self, data, logger, config):
        """Store collaborators, read network settings and prepare the server.

        :param data: shared global data object
        :param logger: logger exposing log(message, level)
        :param config: provider exposing get_config() and get_network()
        """
        # get global data
        self.__data = data
        # load configuration
        self.__config = config.get_config()
        self.__config_network = config.get_network()
        # set logging options and create logger
        self.__logger = logger
        # wait length (time.sleep(x)) between send attempts
        self.__sleep_interval = float(self.__config_network["interval"])
        # networking options
        self.__host = self.__config_network["host"]
        self.__port = self.__config_network["port"]
        # server instance (instantiated in __prepare)
        self.__app = None
        # lets prepare our server instance
        self.__prepare()

    def __prepare(self):
        """Create and start the Zlib server if host and port are configured."""
        # create instance of ZeroMQ Server with defined configuration
        self.__logger.log("create server instance", "debug")
        if self.__host and self.__port:
            self.__app = Zlib.Zlib(self.__host, self.__port)
            self.__app.set_logger(self.__logger)
            try:
                self.__app.server()
            except Exception as e:
                # FIX: report startup failures through the configured logger
                # instead of a bare print() to stdout
                self.__logger.log("server start failed: %s" % e, "debug")

    def run(self, data, run_event):
        """Send ``data`` every sleep_interval until ``run_event`` is cleared.

        Send failures are deliberately ignored: this is a best-effort
        broadcast loop that must keep running.
        """
        while run_event.is_set():
            try:
                # send response to client
                self.__app.send(data)
            except Exception:
                # best-effort: swallow send errors and try again next tick
                pass
            time.sleep(self.__sleep_interval)
def worker(data, logger, config, run_event):
    """Thread entry point: build a Server and run its broadcast loop until stopped."""
    Server(data, logger, config).run(data, run_event)
|
{
"content_hash": "2efe033e5642952b20e3b45e7af641e5",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 72,
"avg_line_length": 28.603448275862068,
"alnum_prop": 0.5581675708257987,
"repo_name": "vibe-x/robotic",
"id": "e4ec506c633f17a7b89d4f2c795c6df1ec30c938",
"size": "1659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/Server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "272840"
}
],
"symlink_target": ""
}
|
import pylab
from random import normalvariate, uniform
def generate_data(n, generator_type):
    """Draw n samples by calling ``generator_type(0, 1)`` once per sample."""
    return [generator_type(0, 1) for _ in range(n)]
# Draw 100 uniform(0, 1) samples and plot them as a blue line.
data = generate_data(100, uniform)
pylab.plot(data, 'b-')
pylab.show()
|
{
"content_hash": "842502ad57478bef0b7b65c18126c0fd",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 41,
"avg_line_length": 22.071428571428573,
"alnum_prop": 0.6634304207119741,
"repo_name": "28ideas/quant-econ",
"id": "f452650854e4008e035c258fb47bfa08d62ac999",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/test_program_6.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import re
import sys
import logging
from pynsett.discourse import Discourse
from pynsett.drt import Drs, DrsRule
from pynsett.knowledge.drs_ner_cleaner import DrsNERCleaner
from pynsett.metric import MetricFactory
_logger = logging.getLogger(__name__)
def _get_list_of_rules_from_text(text):
lines = []
for line in text.split('\n'):
if not line.strip() or line.strip()[0] == '#':
continue
lines.append(line)
lines = '\n'.join(lines).split(';')
return lines
def _substitute_text_in_match_statement_with_graph(text, substitution_triggers):
    """Replace quoted natural-language text inside MATCH clauses with its DRS graph.

    Tries double-quoted text first, then single-quoted if none is found.
    Each quoted snippet is parsed (via Discourse, falling back to
    Drs.create_from_natural_language when Discourse yields no components),
    cleaned with DrsNERCleaner, and substituted back into the rule text.
    """
    drs_cleaner = DrsNERCleaner(substitution_triggers)
    p = re.compile('MATCH.*\"(.*)\"')
    lst = p.findall(text)
    if not lst:
        # fall back to single-quoted MATCH text
        p = re.compile('MATCH.*\'(.*)\'')
        lst = p.findall(text)
    for item in lst:
        try:
            drs = Discourse(item).connected_components[0]
        except IndexError:
            # Discourse produced no components; parse the raw text directly
            _logger.warning('Cannot use Discourse on %s' % item[:200])
            drs = Drs.create_from_natural_language(item)
        drs = drs.apply(drs_cleaner)
        # NOTE(review): the replacement always assumes double quotes, so a
        # single-quoted match may not be replaced here — confirm intended.
        text = text.replace('"' + item + '"', str(drs))
    return text
def _substitute_relationship_code_in_create_statement_with_graph(text):
matching_variables = []
p = re.compile('CREATE.*\((.*)\)')
lst = p.findall(text)
for item in lst:
try:
elements = item.split()
relation_name = elements[0]
source = elements[1]
target = elements[2]
matching_variables += [source, target]
new_text = "{}(%s), {'type': 'relation', 'text': '%s'}(%s,%s), {}(%s)" \
% (source, relation_name, source, target, target)
text = text.replace('(' + item + ')', new_text)
except:
_logger.warning('Text ' + text + " cannot parse to a relation")
return text, matching_variables
def _get_substition_rule(line):
p = re.compile('DEFINE(.*)AS(.*)')
lst = p.findall(line)
for item1, item2 in lst:
return item1, item2
def _create_list_from_string(string):
string = string.strip()
if not string or len(string) < 2:
return []
string = string[1:-1]
lst = string.split(',')
lst = [item.strip() for item in lst]
return lst
def _looks_like_list(string):
string = string.strip()
if string[0] == '[' and string[-1] == ']':
return True
return False
def _substitute_list_into_metric(metric, substitution):
    """Register a DEFINE'd list substitution on the metric and return the metric."""
    name = substitution[0].strip()
    value = substitution[1]
    if _looks_like_list(value):
        metric.add_substitution(name, _create_list_from_string(value))
    return metric
def _substitute_string_into_rule(rule_str, substitution):
subst_from = substitution[0].strip()
subst_to = substitution[1].strip()
rule_str = rule_str.replace(subst_from, subst_to)
return rule_str
class Knowledge:
    """Holds weighted DRS rules parsed from rule text, with DEFINE substitutions."""

    def __init__(self, metric=MetricFactory.get_best_available_metric()):
        # NOTE(review): the default metric is evaluated once at class-definition
        # time and shared by all instances created without an explicit metric.
        self._rules = []
        self._metric = metric
        self._substitution_list = []

    def add_drs(self, drs, sentence_number=None):
        """Not implemented; kept for interface compatibility."""
        pass

    def add_rule(self, rule, weight=1.0):
        """Append (rule, weight) to the rule list."""
        self._rules.append((rule, weight))

    def add_rules(self, text):
        """Parse rule text: apply DEFINE substitutions, expand MATCH/CREATE
        clauses into graphs, and compile each remaining rule into a DrsRule.

        Syntax errors are reported to stderr with the offending line number;
        parsing then continues with the next rule.
        """
        from ..auxiliary import LineFinder
        line_finder = LineFinder(text)
        substitution_triggers = []
        rules_lines = _get_list_of_rules_from_text(text)
        for rule_text in rules_lines:
            # keep the raw text for error reporting before any rewriting
            original_rule_text = rule_text
            if not rule_text.strip():
                continue
            # apply previously collected plain-string DEFINEs
            for s in self._substitution_list:
                rule_text = _substitute_string_into_rule(rule_text, s)
            rule_text = _substitute_text_in_match_statement_with_graph(rule_text, substitution_triggers)
            rule_text, matching_variables = _substitute_relationship_code_in_create_statement_with_graph(rule_text)
            substitution = _get_substition_rule(rule_text)
            if substitution:
                # DEFINE line: record it instead of compiling a rule.
                # NOTE(review): this assigns self.metric (new attribute) while
                # state lives in self._metric — possibly a typo; confirm.
                self.metric = _substitute_list_into_metric(self._metric, substitution)
                substitution_triggers.append(substitution[0].strip())
                if not _looks_like_list(substitution[1]):
                    self._substitution_list.append(substitution)
                continue
            try:
                rule = DrsRule(rule_text, self._metric, matching_variables)
                rule.test()
                self.add_rule(rule)
            except SyntaxError:
                sys.stderr.write('Error in line ' + str(line_finder.get_line_number(original_rule_text)) + ':\n')
                sys.stderr.write(original_rule_text + '\n')
                sys.stderr.flush()
            finally:
                pass

    def ask_drs(self, drs):
        """Not implemented; kept for interface compatibility."""
        pass

    def ask_rule(self, drs):
        """Return all rules (no filtering by the query drs)."""
        return self._rules

    def ask_rule_fw(self, drs):
        """Return all rules (forward direction; no filtering)."""
        return self._rules

    def ask_rule_bw(self, drs):
        """Return all rules (backward direction; no filtering)."""
        return self._rules
|
{
"content_hash": "ebe590c4dfeeb850fb9b0dea2e72cee6",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 115,
"avg_line_length": 33.8421052631579,
"alnum_prop": 0.5713452566096423,
"repo_name": "fractalego/pynsett",
"id": "92736d0978132d3f98d8adbf29f7be3edd18371f",
"size": "5144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pynsett/knowledge/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "874"
},
{
"name": "Dockerfile",
"bytes": "654"
},
{
"name": "HTML",
"bytes": "3274"
},
{
"name": "JavaScript",
"bytes": "3075"
},
{
"name": "Python",
"bytes": "93655"
}
],
"symlink_target": ""
}
|
import unittest
import asa_modules.ssh
from asa_tests.mock_ssh import MockSsh
# Patch the real SSH transport with the mock BEFORE importing Asa, so the
# library binds to the fake session class during tests.
asa_modules.ssh.Ssh = MockSsh
from asa_lib import Asa
# Also patch the reference Asa itself holds, in case it was bound at import time.
Asa.Ssh = MockSsh
class TestAsaBase(unittest.TestCase):
    """Basic Asa behaviour exercised against the mocked SSH transport.

    FIX: all assertEquals calls replaced with assertEqual — assertEquals is a
    deprecated alias that was removed in Python 3.12.
    """

    def setUp(self):
        # Fresh, logged-in, enabled session with paging disabled for each test.
        self.asa = Asa('192.168.0.1', 'john', 'uber_secure_pw', '')
        self.asa.login()
        self.asa.set_enable_mode()
        self.asa.set_terminal_pager(0)
        self.asa.get_configuration()

    def test_hostname(self):
        self.assertEqual(
            '192.168.0.1', self.asa.ssh_session.hostname
        )

    def test_username(self):
        self.assertEqual(
            'john', self.asa.ssh_session.username
        )

    def test_password(self):
        self.assertEqual(
            'uber_secure_pw', self.asa.ssh_session.password
        )

    def test_set_enable_call(self):
        # rebuild a non-enabled session so set_enable_mode() is exercised fresh
        self.asa = Asa('192.168.0.1', 'john', 'uber_secure_pw', '')
        self.asa.login()
        self.assertEqual(
            True, self.asa.set_enable_mode()
        )

    def test_unset_enable_call(self):
        self.assertEqual(
            False, self.asa.unset_enable_mode()
        )

    def test_is_enable_returns_bool(self):
        self.assertEqual(
            bool, type(self.asa.is_enable_mode())
        )

    def test_write_config_sends_save_command(self):
        self.assertEqual(
            'written', self.asa.save_running_configuration()
        )
|
{
"content_hash": "297736bde99b3360026d2204c199412b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 67,
"avg_line_length": 25.509090909090908,
"alnum_prop": 0.5880256593014968,
"repo_name": "Flexin1981/AsaLib",
"id": "dd4800e841c5c1652f88789f59f36d34b31061ef",
"size": "1403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asa_tests/test_asa_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34628"
}
],
"symlink_target": ""
}
|
# MicroPython coverage-build test script: output is compared against a
# .exp file, so every print below is part of the expected behaviour.
# Requires the C-level extra_coverage() helper; skip on other builds.
try:
    extra_coverage
except NameError:
    print("SKIP")
    raise SystemExit
import uerrno
import uio
data = extra_coverage()
# test hashing of str/bytes that have an invalid hash
print(data[0], data[1])
print(hash(data[0]))
print(hash(data[1]))
print(hash(bytes(data[0], 'utf8')))
print(hash(str(data[1], 'utf8')))
# test streams
stream = data[2] # has set_error and set_buf. Write always returns error
stream.set_error(uerrno.EAGAIN) # non-blocking error
print(stream.read()) # read all encounters non-blocking error
print(stream.read(1)) # read 1 byte encounters non-blocking error
print(stream.readline()) # readline encounters non-blocking error
print(stream.readinto(bytearray(10))) # readinto encounters non-blocking error
print(stream.write(b'1')) # write encounters non-blocking error
print(stream.write1(b'1')) # write1 encounters non-blocking error
stream.set_buf(b'123')
print(stream.read(4)) # read encounters non-blocking error after successful reads
stream.set_buf(b'123')
print(stream.read1(4)) # read1 encounters non-blocking error after successful reads
stream.set_buf(b'123')
print(stream.readline(4)) # readline encounters non-blocking error after successful reads
try:
    print(stream.ioctl(0, 0)) # ioctl encounters non-blocking error; raises OSError
except OSError:
    print('OSError')
stream.set_error(0)
print(stream.ioctl(0, bytearray(10))) # successful ioctl call
stream2 = data[3] # is textio
print(stream2.read(1)) # read 1 byte encounters non-blocking error with textio stream
# test BufferedWriter with stream errors
stream.set_error(uerrno.EAGAIN)
buf = uio.BufferedWriter(stream, 8)
print(buf.write(bytearray(16)))
# test basic import of frozen scripts
import frzstr1
import frzmpy1
# test import of frozen packages with __init__.py
import frzstr_pkg1
print(frzstr_pkg1.x)
import frzmpy_pkg1
print(frzmpy_pkg1.x)
# test import of frozen packages without __init__.py
from frzstr_pkg2.mod import Foo
print(Foo.x)
from frzmpy_pkg2.mod import Foo
print(Foo.x)
# test raising exception in frozen script
try:
    import frzmpy2
except ZeroDivisionError:
    print('ZeroDivisionError')
# test loading a resource from a frozen string
import uio
buf = uio.resource_stream('frzstr_pkg2', 'mod.py')
print(buf.read(21))
|
{
"content_hash": "7aa3d30a1b4e3be8b914ba82ff2f9383",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 89,
"avg_line_length": 30.513513513513512,
"alnum_prop": 0.7559787422497786,
"repo_name": "tralamazza/micropython",
"id": "13721f1f479c3f90e592253fb86d7b14b1cc0160",
"size": "2258",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/unix/extra_coverage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "122184"
},
{
"name": "C",
"bytes": "12839094"
},
{
"name": "C++",
"bytes": "582442"
},
{
"name": "CMake",
"bytes": "800"
},
{
"name": "Makefile",
"bytes": "129015"
},
{
"name": "Objective-C",
"bytes": "7450"
},
{
"name": "Python",
"bytes": "901335"
},
{
"name": "Shell",
"bytes": "15745"
}
],
"symlink_target": ""
}
|
"""
Makes a Batch Processing Request to Document AI
Creates request with full directory in Cloud Storage

Flow: build a batch request over a GCS input prefix, wait for the
long-running operation, then download and parse the JSON outputs.
"""
import re
from typing import List
from google.api_core.client_options import ClientOptions
from google.cloud import documentai_v1 as documentai
from google.cloud import storage
# Placeholders below must be replaced with real project values before running.
PROJECT_ID = "YOUR_PROJECT_ID"
LOCATION = "YOUR_PROJECT_LOCATION"  # Format is 'us' or 'eu'
PROCESSOR_ID = "YOUR_PROCESSOR_ID"  # Create processor in Cloud Console
# Format 'gs://input_bucket/directory'
GCS_INPUT_PREFIX = "gs://cloud-samples-data/documentai/codelabs/ocr/multi-document"
# Format 'gs://output_bucket/directory'
GCS_OUTPUT_URI = "YOUR_OUTPUT_BUCKET_URI"
# Instantiates a client (endpoint is region-specific)
docai_client = documentai.DocumentProcessorServiceClient(
    client_options=ClientOptions(api_endpoint=f"{LOCATION}-documentai.googleapis.com")
)
# The full resource name of the processor, e.g.:
# projects/project-id/locations/location/processor/processor-id
# You must create new processors in the Cloud Console first
RESOURCE_NAME = docai_client.processor_path(PROJECT_ID, LOCATION, PROCESSOR_ID)
# Cloud Storage URI for the Input Directory
gcs_prefix = documentai.GcsPrefix(gcs_uri_prefix=GCS_INPUT_PREFIX)
# Load GCS Input URI into Batch Input Config
input_config = documentai.BatchDocumentsInputConfig(gcs_prefix=gcs_prefix)
# Cloud Storage URI for Output directory
gcs_output_config = documentai.DocumentOutputConfig.GcsOutputConfig(
    gcs_uri=GCS_OUTPUT_URI
)
# Load GCS Output URI into OutputConfig object
output_config = documentai.DocumentOutputConfig(gcs_output_config=gcs_output_config)
# Configure Process Request
request = documentai.BatchProcessRequest(
    name=RESOURCE_NAME,
    input_documents=input_config,
    document_output_config=output_config,
)
# Batch Process returns a Long Running Operation (LRO)
operation = docai_client.batch_process_documents(request)
# Continually polls the operation until it is complete.
# This could take some time for larger files
# Format: projects/PROJECT_NUMBER/locations/LOCATION/operations/OPERATION_ID
print(f"Waiting for operation {operation.operation.name} to complete...")
operation.result()
# NOTE: Can also use callbacks for asynchronous processing
#
# def my_callback(future):
#   result = future.result()
#
# operation.add_done_callback(my_callback)
print("Document processing complete.")
# Once the operation is complete,
# get output document information from operation metadata
metadata = documentai.BatchProcessMetadata(operation.metadata)
if metadata.state != documentai.BatchProcessMetadata.State.SUCCEEDED:
    raise ValueError(f"Batch Process Failed: {metadata.state_message}")
documents: List[documentai.Document] = []
# Storage Client to retrieve the output files from GCS
storage_client = storage.Client()
# One process per Input Document
# pylint: disable=not-an-iterable
for process in metadata.individual_process_statuses:
    # output_gcs_destination format: gs://BUCKET/PREFIX/OPERATION_NUMBER/0
    # The GCS API requires the bucket name and URI prefix separately
    output_bucket, output_prefix = re.match(
        r"gs://(.*?)/(.*)", process.output_gcs_destination
    ).groups()
    # Get List of Document Objects from the Output Bucket
    output_blobs = storage_client.list_blobs(output_bucket, prefix=output_prefix)
    # DocAI may output multiple JSON files per source file
    for blob in output_blobs:
        # Document AI should only output JSON files to GCS
        if ".json" not in blob.name:
            print(f"Skipping non-supported file type {blob.name}")
            continue
        print(f"Fetching {blob.name}")
        # Download JSON File and Convert to Document Object
        document = documentai.Document.from_json(
            blob.download_as_bytes(), ignore_unknown_fields=True
        )
        documents.append(document)
# Print Text from all documents
# Truncated at 100 characters for brevity
for document in documents:
    print(document.text[:100])
|
{
"content_hash": "06d9f8c1af8dd419ee4781caea9e3951",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 86,
"avg_line_length": 34.31896551724138,
"alnum_prop": 0.7548354684752575,
"repo_name": "GoogleCloudPlatform/document-ai-samples",
"id": "3be720563e5bae11c5471e556c53f0c946403230",
"size": "3999",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "community/codelabs/docai-ocr/batch_processing_directory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8873"
},
{
"name": "Dockerfile",
"bytes": "1815"
},
{
"name": "HTML",
"bytes": "18246"
},
{
"name": "JavaScript",
"bytes": "20404"
},
{
"name": "Jupyter Notebook",
"bytes": "24192"
},
{
"name": "Procfile",
"bytes": "72"
},
{
"name": "Python",
"bytes": "148860"
},
{
"name": "SCSS",
"bytes": "2034"
},
{
"name": "Shell",
"bytes": "3321"
},
{
"name": "TypeScript",
"bytes": "65655"
}
],
"symlink_target": ""
}
|
import json
from sys import stdin

# Read a JSON document from stdin and emit only its first block as "notes".
document = json.loads(stdin.read())
print(json.dumps({"notes": document["blocks"][0]}, indent=4))
|
{
"content_hash": "d7c1b1c6f604770e86e6cffe7c87bd98",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 50,
"avg_line_length": 20.166666666666668,
"alnum_prop": 0.6528925619834711,
"repo_name": "alexander-liao/hack-the-north-2017",
"id": "f4cf399cae63207ab95c93c86cc0ac988edc2099",
"size": "231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acc-gen/selector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1631"
},
{
"name": "HTML",
"bytes": "1994"
},
{
"name": "JavaScript",
"bytes": "16252"
},
{
"name": "Python",
"bytes": "19583"
},
{
"name": "Shell",
"bytes": "550"
}
],
"symlink_target": ""
}
|
import braintree
from braintree.error_result import ErrorResult
from braintree.successful_result import SuccessfulResult
from braintree.exceptions.not_found_error import NotFoundError
from braintree.oauth_credentials import OAuthCredentials
import sys
from urllib.parse import quote_plus
from functools import reduce
class OAuthGateway(object):
    """Gateway for Braintree OAuth flows: token creation/revocation and
    building the connect URL a merchant visits to authorize an application."""

    def __init__(self, gateway):
        self.gateway = gateway
        self.config = gateway.config

    def create_token_from_code(self, params):
        """Exchange an authorization code for an access token."""
        params["grant_type"] = "authorization_code"
        return self._create_token(params)

    def create_token_from_refresh_token(self, params):
        """Exchange a refresh token for a new access token."""
        params["grant_type"] = "refresh_token"
        return self._create_token(params)

    def revoke_access_token(self, access_token):
        """Revoke an access token; requires client credentials."""
        self.config.assert_has_client_credentials()
        response = self.config.http().post("/oauth/revoke_access_token", {
            "token": access_token
        })
        if "result" in response and response["result"]["success"]:
            # FIX: return an instance, not the class object itself — the bare
            # class was accidentally truthy, masking the bug for callers.
            return SuccessfulResult()
        else:
            return ErrorResult(self.gateway, "could not revoke access token")

    def _create_token(self, params):
        """POST the credential params and wrap the response in a result object."""
        self.config.assert_has_client_credentials()
        response = self.config.http().post("/oauth/access_tokens", {
            "credentials": params
        })
        if "credentials" in response:
            return SuccessfulResult({"credentials": OAuthCredentials(self.gateway, response["credentials"])})
        else:
            return ErrorResult(self.gateway, response["api_error_response"])

    def connect_url(self, raw_params):
        """Build the OAuth connect URL from top-level, user[...] and business[...] params.

        List values become repeated `key[]` query parameters.
        """
        params = {"client_id": self.config.client_id}
        params.update(raw_params)
        user_params = self._sub_query(params, "user")
        business_params = self._sub_query(params, "business")

        def clean_values(accumulator, kv_pair):
            # expand list values into repeated "key[]" pairs
            key, value = kv_pair
            if isinstance(value, list):
                accumulator += [(key + "[]", v) for v in value]
            else:
                accumulator += [(key, value)]
            return accumulator

        params = reduce(clean_values, params.items(), [])
        query = params + user_params + business_params
        query_string = "&".join(quote_plus(key) + "=" + quote_plus(value) for key, value in query)
        return self.config.environment.base_url + "/oauth/connect?" + query_string

    def _sub_query(self, params, root):
        """Pop params[root] (a dict) and flatten it into [('root[key]', str(value))] pairs."""
        if root in params:
            sub_query = params.pop(root)
        else:
            sub_query = {}
        query = [(root + "[" + key + "]", str(value)) for key, value in sub_query.items()]
        return query
|
{
"content_hash": "e226313f4d4aa24c1ea7d7258554ae8b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 109,
"avg_line_length": 37.732394366197184,
"alnum_prop": 0.6252332960059723,
"repo_name": "braintree/braintree_python",
"id": "5b6b25c286ad49da95d2203ff786e34bdbc45aa7",
"size": "2679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "braintree/oauth_gateway.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "252"
},
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "1338636"
},
{
"name": "Ruby",
"bytes": "2099"
},
{
"name": "Shell",
"bytes": "193"
}
],
"symlink_target": ""
}
|
"""
tests.unit.test_virtualname
~~~~~~~~~~~~~~~~~~~~
"""
import importlib.util
import logging
import os
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class FakeEntry:
    """Minimal stand-in for an os.DirEntry-like object used by the tests."""
    def __init__(self, name, path, is_file=True):
        # Store everything as given; is_file() just echoes the flag back.
        self.name, self.path, self._is_file = name, path, is_file
    def is_file(self):
        """Mimic os.DirEntry.is_file() by returning the stored flag."""
        return self._is_file
class VirtualNameTestCase(TestCase):
    """
    Test that the virtualname is in the module name, to speed up lookup of
    modules.
    """
    maxDiff = None
    @staticmethod
    def _import_module(testpath):
        # Import the file at *testpath* as a throwaway module named
        # "tmpmodule" so its __virtualname__ attribute can be inspected.
        spec = importlib.util.spec_from_file_location("tmpmodule", testpath)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module
    def _check_modules(self, path):
        """
        check modules in directory

        Returns a list of error strings for modules whose __virtualname__
        is not contained in their filename.
        """
        ret = []
        for entry in os.listdir(path):
            # NOTE(review): `path` is rebound here, shadowing the parameter,
            # so later iterations use the previous entry's value — confirm
            # this is intended.
            name, path = os.path.splitext(os.path.basename(entry))[0], entry
            if name.startswith(".") or name.startswith("_"):
                continue
            # NOTE(review): `name` had its extension stripped above, so
            # `name.endswith(".py")` is always False and every regular file
            # is skipped; presumably `path.endswith(".py")` was intended.
            if os.path.isfile(path) and not name.endswith(".py"):
                continue
            # Packages are inspected via their __init__.py.
            testpath = (
                path if os.path.isfile(path) else os.path.join(path, "__init__.py")
            )
            module = self._import_module(testpath)
            if hasattr(module, "__virtualname__"):
                if module.__virtualname__ not in name:
                    ret.append(
                        'Virtual name "{}" is not in the module filename "{}": {}'.format(
                            module.__virtualname__, name, path
                        )
                    )
        return ret
    def test_check_virtualname(self):
        """
        Test that the virtualname is in __name__ of the module
        """
        errors = []
        for entry in os.listdir(RUNTIME_VARS.SALT_CODE_DIR):
            name, path = os.path.splitext(os.path.basename(entry))[0], entry
            if name.startswith(".") or name.startswith("_") or not os.path.isdir(path):
                continue
            # Directories that legitimately contain non-virtual modules.
            if name in ("cli", "defaults", "spm", "daemons", "ext", "templates"):
                continue
            if name == "cloud":
                # Cloud modules live one level deeper.
                entry = os.path.join(RUNTIME_VARS.SALT_CODE_DIR, "cloud", "clouds")
            errors.extend(self._check_modules(entry))
        for error in errors:
            log.critical(error)
        assert not errors
|
{
"content_hash": "dcbfa3704fd57df8113250fb4924aba3",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 90,
"avg_line_length": 31.493827160493826,
"alnum_prop": 0.5413563308506468,
"repo_name": "saltstack/salt",
"id": "031abd4e80b14976c7faf44cd4b5239db7c4910e",
"size": "2551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_virtualname.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe, json
import frappe.widgets.form.meta
import frappe.widgets.form.load
from frappe import _
@frappe.whitelist()
def remove_attach():
	"""Remove the file attachment whose id is passed as `fid` in the request."""
	import frappe.utils.file_manager
	fid = frappe.form_dict.get('fid')
	return frappe.utils.file_manager.remove_file(fid)
@frappe.whitelist()
def get_fields():
	"""Fetch one row of the comma-separated fields named in
	`frappe.form_dict.fields`, using the select/from/where fragments
	supplied in the request, and place the result dict in
	`frappe.response['message']`.

	NOTE(review): the SQL is assembled by raw string interpolation of
	request-supplied values (select/from/where) — this is SQL-injectable;
	confirm callers are trusted or parameterize the query.
	"""
	r = {}
	args = {
		'select':frappe.form_dict.get('select')
		,'from':frappe.form_dict.get('from')
		,'where':frappe.form_dict.get('where')
	}
	ret = frappe.db.sql("select %(select)s from `%(from)s` where %(where)s limit 1" % args)
	if ret:
		# Zip requested field names with the columns of the first row.
		fl, i = frappe.form_dict.get('fields').split(','), 0
		for f in fl:
			r[f], i = ret[0][i], i+1
	frappe.response['message']=r
@frappe.whitelist()
def validate_link():
	"""validate link when updated by user"""
	import frappe
	import frappe.utils
	value, options, fetch = frappe.form_dict.get('value'), frappe.form_dict.get('options'), frappe.form_dict.get('fetch')
	# no options, don't validate
	if not options or options=='null' or options=='undefined':
		frappe.response['message'] = 'Ok'
		return
	# NOTE(review): `options` (a doctype name) is interpolated directly into
	# the table name; only `value` is parameterized. Confirm `options`
	# cannot be attacker-controlled.
	if frappe.db.sql("select name from `tab%s` where name=%s" % (options, '%s'), (value,)):
		# get fetch values
		if fetch:
			# escape with "`"
			fetch = ", ".join(("`{0}`".format(f.strip()) for f in fetch.split(",")))
			frappe.response['fetch_values'] = [frappe.utils.parse_val(c) \
				for c in frappe.db.sql("select %s from `tab%s` where name=%s" \
					% (fetch, options, '%s'), (value,))[0]]
		frappe.response['message'] = 'Ok'
@frappe.whitelist()
def add_comment(doc):
	"""allow any logged user to post a comment"""
	doc = frappe.get_doc(json.loads(doc))
	# ignore_permissions so that any logged-in user may comment
	doc.insert(ignore_permissions = True)
	return doc.as_dict()
@frappe.whitelist()
def get_next(doctype, value, prev, filters=None, order_by="modified desc"):
	"""Return the name of the document adjacent to *value* in the sorted
	list of *doctype* documents, or None if there is no further record.

	Args:
		doctype: DocType to navigate.
		value: sort-field value of the current document.
		prev: int-like flag; truthy ("1") means navigate backwards.
		filters: optional list (or JSON string) of report-view filters.
		order_by: "<fieldname> <asc|desc>" sort specification.
	"""
	import frappe.widgets.reportview
	prev = not int(prev)
	sort_field, sort_order = order_by.split(" ")
	if not filters: filters = []
	if isinstance(filters, basestring):
		filters = json.loads(filters)
	# condition based on sort order
	condition = ">" if sort_order.lower()=="desc" else "<"
	# switch the condition
	if prev:
		# BUG FIX: the else branch previously yielded "<" as well, so the
		# condition was never flipped to ">" when navigating backwards on
		# an ascending sort.
		condition = "<" if condition==">" else ">"
	else:
		sort_order = "asc" if sort_order.lower()=="desc" else "desc"
	# add condition for next or prev item
	# NOTE(review): `order_by[0]` is the first *character* of the order_by
	# string; this probably should compare `sort_field` — confirm intent.
	if not order_by[0] in [f[1] for f in filters]:
		filters.append([doctype, sort_field, condition, value])
	res = frappe.widgets.reportview.execute(doctype,
		fields = ["name"],
		filters = filters,
		order_by = sort_field + " " + sort_order,
		limit_start=0, limit_page_length=1, as_list=True)
	if not res:
		frappe.msgprint(_("No further records"))
		return None
	else:
		return res[0][0]
@frappe.whitelist()
def get_linked_docs(doctype, name, metadata_loaded=None, no_metadata=False):
	"""Return documents of other doctypes that link to the given document.

	Args:
		doctype: DocType of the target document.
		name: name of the target document.
		metadata_loaded: doctypes whose meta the client already holds.
		no_metadata: when True, do not append meta bundles to the response.
	"""
	if not metadata_loaded: metadata_loaded = []
	meta = frappe.widgets.form.meta.get_meta(doctype)
	linkinfo = meta.get("__linked_with")
	results = {}
	# NOTE(review): if "__linked_with" is absent, `linkinfo` is None and
	# `.items()` raises — presumably it is always set; confirm.
	for dt, link in linkinfo.items():
		link["doctype"] = dt
		link_meta_bundle = frappe.widgets.form.load.get_meta_bundle(dt)
		linkmeta = link_meta_bundle[0]
		if not linkmeta.get("issingle"):
			# Only fetch list-view fields plus the standard columns.
			fields = [d.fieldname for d in linkmeta.get("fields", {"in_list_view":1,
				"fieldtype": ["not in", ["Image", "HTML", "Button", "Table"]]})] \
				+ ["name", "modified", "docstatus"]
			fields = ["`tab{dt}`.`{fn}`".format(dt=dt, fn=sf.strip()) for sf in fields if sf]
			if link.get("child_doctype"):
				ret = frappe.get_list(doctype=dt, fields=fields,
					filters=[[link.get('child_doctype'), link.get("fieldname"), '=', name]])
			else:
				ret = frappe.get_list(doctype=dt, fields=fields,
					filters=[[dt, link.get("fieldname"), '=', name]])
			if ret:
				results[dt] = ret
			if not no_metadata and not dt in metadata_loaded:
				frappe.local.response.docs.extend(link_meta_bundle)
	return results
|
{
"content_hash": "c17878be5934fb077b8611efced3b559",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 118,
"avg_line_length": 29.270676691729324,
"alnum_prop": 0.6609298741330594,
"repo_name": "gangadhar-kadam/lgnlvefrape",
"id": "1440671cc60c18d75cf7dac57d50d602405094dd",
"size": "3997",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "frappe/widgets/form/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87202"
},
{
"name": "HTML",
"bytes": "77840"
},
{
"name": "JavaScript",
"bytes": "1555841"
},
{
"name": "Python",
"bytes": "972928"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import object
from unidecode import unidecode
from .utils import get_html_from_dynamic_site
from .utils import _get_search_url
from bs4 import BeautifulSoup
class CalculatorResult(object):
    """Represents a result returned from google calculator."""
    def __init__(self):
        self.value = None  # Result value (eg. 157300.0)
        self.from_value = None  # Initial value (eg. 157.3)
        self.unit = None  # Result unit (eg. u'grams') (NOT implemented yet)
        # Initial unit (eg. u'kilograms') (NOT implemented yet)
        self.from_unit = None
        # Initial expression (eg. u'157.3 grams') (NOT implemented yet)
        self.expr = None
        # Result expression (eg. u'157300 kilograms') (NOT implemented yet)
        self.result = None
        # Complete expression (eg. u'157.3 kilograms = 157300 grams') (NOT
        # implemented yet)
        self.fullstring = None
    def __repr__(self):
        # BUG FIX: self.value is a float (see calculate()) or None, but
        # unidecode() expects a string — convert first to avoid a TypeError.
        return unidecode(str(self.value))
# PUBLIC
def calculate(expr):
    """Search for a calculation expression in google.
    Attempts to search google calculator for the result of an expression.
    Returns a `CalculatorResult` if successful or `None` if it fails.
    Args:
        expr: Calculation expression (eg. "cos(25 pi) / 17.4" or
            "157.3kg in grams")
    Returns:
        CalculatorResult object."""
    # Build the search URL and fetch the fully-rendered (JS-executed) page.
    url = _get_search_url(expr)
    html = get_html_from_dynamic_site(url)
    # NOTE(review): BeautifulSoup is called without an explicit parser;
    # newer bs4 versions warn and the chosen parser can vary by install.
    bs = BeautifulSoup(html)
    # Populate each field via the private scrapers below; several of them
    # are placeholders that currently return None.
    cr = CalculatorResult()
    cr.value = _get_to_value(bs)
    cr.from_value = _get_from_value(bs)
    cr.unit = _get_to_unit(bs)
    cr.from_unit = _get_from_unit(bs)
    cr.expr = _get_expr(bs)
    cr.result = _get_result(bs)
    cr.fullstring = _get_fullstring(bs)
    return cr
# PRIVATE
def _get_to_value(bs):
    # Parse the converted ("to") value out of the calculator result widget.
    input_node = bs.find("div", {"id": "_Cif"})
    return float(input_node.find("input")["value"])
def _get_from_value(bs):
    # Parse the source ("from") value out of the calculator result widget.
    input_node = bs.find("div", {"id": "_Aif"})
    return float(input_node.find("input")["value"])
def _get_to_unit(bs):
    # Placeholder (see CalculatorResult.unit: NOT implemented yet).
    return None
def _get_from_unit(bs):
    # Placeholder (see CalculatorResult.from_unit: NOT implemented yet).
    return None
def _get_expr(bs):
    # Placeholder (see CalculatorResult.expr: NOT implemented yet).
    return None
def _get_result(bs):
    # Placeholder (see CalculatorResult.result: NOT implemented yet).
    return None
def _get_fullstring(bs):
    # Placeholder (see CalculatorResult.fullstring: NOT implemented yet).
    return None
|
{
"content_hash": "6e0fbda81c30bd58bf2c0e4e6776ae24",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 76,
"avg_line_length": 25.285714285714285,
"alnum_prop": 0.6366797044763146,
"repo_name": "Kronosfear/BEANBot",
"id": "2a10f1db7fcf9b49264aec9735fb177f5e824721",
"size": "2301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google/modules/calculator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70591"
}
],
"symlink_target": ""
}
|
import django_filters as filters
from pdc.apps.common.filters import MultiValueFilter
from . import models
class RepoFilter(filters.FilterSet):
    """FilterSet for Repo querysets: maps flat query parameters to the
    related-model lookups declared below.

    NOTE(review): uses the pre-1.0 django-filter API (`name=` keyword);
    newer releases renamed it to `field_name=` — confirm pinned version.
    """
    arch = MultiValueFilter(name='variant_arch__arch__name')
    content_category = MultiValueFilter(name='content_category__name')
    content_format = MultiValueFilter(name='content_format__name')
    release_id = MultiValueFilter(name='variant_arch__variant__release__release_id')
    variant_uid = MultiValueFilter(name='variant_arch__variant__variant_uid')
    repo_family = MultiValueFilter(name='repo_family__name')
    service = MultiValueFilter(name='service__name')
    shadow = filters.BooleanFilter()
    product_id = MultiValueFilter()
    class Meta:
        model = models.Repo
        fields = ('arch', 'content_category', 'content_format', 'name', 'release_id',
                  'repo_family', 'service', 'shadow', 'variant_uid', 'product_id')
class RepoFamilyFilter(filters.FilterSet):
    """FilterSet for RepoFamily: case-insensitive substring match on name.

    NOTE(review): `lookup_type=` is the old django-filter keyword
    (renamed to `lookup_expr=` in later releases) — confirm pinned version.
    """
    name = filters.CharFilter(lookup_type="icontains")
    class Meta:
        model = models.RepoFamily
        fields = ('name',)
|
{
"content_hash": "e54f0a1a7a64e4d9e3cd9b997a774f52",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 37.51724137931034,
"alnum_prop": 0.6948529411764706,
"repo_name": "puiterwijk/product-definition-center",
"id": "820d66b2a63a1d2c0c4ff789bdaec7d71728f23c",
"size": "1197",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pdc/apps/repository/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1553"
},
{
"name": "Groff",
"bytes": "1766"
},
{
"name": "HTML",
"bytes": "48323"
},
{
"name": "JavaScript",
"bytes": "6629"
},
{
"name": "Makefile",
"bytes": "2677"
},
{
"name": "Python",
"bytes": "1213188"
}
],
"symlink_target": ""
}
|
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from techson_server.settings import BASE_DIR
# Load the training data CSV (path relative to the Django project root).
path = BASE_DIR + '/db/dataset/data.csv'
train_data = pd.read_csv(path)
# The 'label' column is the target; every other column is a feature.
y_train = train_data['label']
x_train = train_data.drop('label', axis=1)
# Train a 200-tree random forest using all available CPU cores.
RFC = RandomForestClassifier(n_estimators=200, n_jobs=-1)
RFC.fit(x_train, y_train)
# Persist the fitted model for later use by the server.
path = BASE_DIR + '/classifiers/random_forest_classifier.pkl'
with open(path, 'wb') as f:
    pickle.dump(RFC, f)
|
{
"content_hash": "f17bf20cad9dab8c065cb0090e189e46",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 61,
"avg_line_length": 24.05,
"alnum_prop": 0.7318087318087318,
"repo_name": "KirovVerst/techson_server",
"id": "e80433109a94d8a3cc86e144a3919548faa0cbf6",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mlscripts/random_forest_classifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5915"
}
],
"symlink_target": ""
}
|
class SimpleStorlet(object):
    """Echo storlet: forwards the first input object's metadata (adding a
    'test' marker) and streams its data to the first output object."""
    def __init__(self, logger):
        self.logger = logger
    def __call__(self, in_files, out_files, params):
        """
        The function called for storlet invocation

        :param in_files: a list of StorletInputFile
        :param out_files: a list of StorletOutputFile
        :param params: a dict of request parameters
        """
        src, dst = in_files[0], out_files[0]
        self.logger.debug('Returning metadata')
        meta = src.get_metadata()
        meta['test'] = 'simple'
        dst.set_metadata(meta)
        self.logger.debug('Start to return object data')
        # Stream the payload through in 16-byte chunks until EOF.
        chunk = src.read(16)
        while chunk:
            self.logger.debug('Received %d bytes' % len(chunk))
            self.logger.debug('Writing back %d bytes' % len(chunk))
            dst.write(chunk)
            chunk = src.read(16)
        self.logger.debug('Complete')
        src.close()
        dst.close()
|
{
"content_hash": "6f7ffcf6c4aaf8946b93b81f01392585",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 65,
"avg_line_length": 34.785714285714285,
"alnum_prop": 0.5698151950718686,
"repo_name": "openstack/storlets",
"id": "b5b4c4c91a64ea03da809f23a255f9778bcc33d3",
"size": "1570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "StorletSamples/python/storlet_samples/simple/simple.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "31430"
},
{
"name": "Java",
"bytes": "184917"
},
{
"name": "Jupyter Notebook",
"bytes": "7689"
},
{
"name": "Makefile",
"bytes": "347"
},
{
"name": "Python",
"bytes": "579917"
},
{
"name": "Shell",
"bytes": "20127"
}
],
"symlink_target": ""
}
|
# from winbase.h
# Standard-handle identifiers accepted by GetStdHandle.
STDOUT = -11
STDERR = -12
try:
    import ctypes
    from ctypes import LibraryLoader
    windll = LibraryLoader(ctypes.WinDLL)
    from ctypes import wintypes
except (AttributeError, ImportError):
    # Non-Windows platforms (or very old ctypes): expose no-op stand-ins so
    # importers can call these names unconditionally.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
    winapi_test = lambda *_: None
else:
    from ctypes import byref, Structure, c_char, POINTER
    COORD = wintypes._COORD
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            # Flat tuple-style rendering of all fields, for debugging.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
_SetConsoleTitleW.argtypes = [
wintypes.LPCWSTR
]
_SetConsoleTitleW.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
    def _winapi_test(handle):
        # True if GetConsoleScreenBufferInfo succeeds on *handle*, i.e. the
        # handle refers to a real console.
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        return bool(success)
    def winapi_test():
        """Return True if either stdout or stderr is an actual console."""
        return any(_winapi_test(h) for h in handles.values())
    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
        """Return the CONSOLE_SCREEN_BUFFER_INFO for the given stream."""
        handle = handles[stream_id]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        # NOTE(review): `success` is ignored; on failure a zeroed struct is
        # returned — confirm callers tolerate that.
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        return csbi
    def SetConsoleTextAttribute(stream_id, attrs):
        """Set the console text attributes (colors) for the given stream."""
        handle = handles[stream_id]
        return _SetConsoleTextAttribute(handle, attrs)
    def SetConsoleCursorPosition(stream_id, position, adjust=True):
        """Move the console cursor; *position* is an ANSI-style (row, col)."""
        position = COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        # 1. being 0-based, while ANSI is 1-based.
        # 2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = COORD(position.Y - 1, position.X - 1)
        if adjust:
            # Adjust for viewport's scroll position
            sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
            adjusted_position.Y += sr.Top
            adjusted_position.X += sr.Left
        # Resume normal processing
        handle = handles[stream_id]
        return _SetConsoleCursorPosition(handle, adjusted_position)
    def FillConsoleOutputCharacter(stream_id, char, length, start):
        """Write *char* *length* times starting at COORD *start*; returns
        the number of cells actually written."""
        handle = handles[stream_id]
        char = c_char(char.encode())
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        success = _FillConsoleOutputCharacterA(
            handle, char, length, start, byref(num_written))
        return num_written.value
    def FillConsoleOutputAttribute(stream_id, attr, length, start):
        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
        handle = handles[stream_id]
        attribute = wintypes.WORD(attr)
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        return _FillConsoleOutputAttribute(
            handle, attribute, length, start, byref(num_written))
    def SetConsoleTitle(title):
        """Set the console window title."""
        return _SetConsoleTitleW(title)
|
{
"content_hash": "2634cdb86e5e40c1704cf2c9b59f0f24",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 111,
"avg_line_length": 35.52258064516129,
"alnum_prop": 0.624772974936433,
"repo_name": "neoscoin/neos-core",
"id": "1485e69f4c7cd655017139e99cfc68f01abc2b3e",
"size": "5582",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/ledger/lib/python2.7/site-packages/pip/_vendor/colorama/win32.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "1007002"
},
{
"name": "C++",
"bytes": "4497329"
},
{
"name": "CSS",
"bytes": "43189"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "157578"
},
{
"name": "Makefile",
"bytes": "98161"
},
{
"name": "Objective-C",
"bytes": "2023"
},
{
"name": "Objective-C++",
"bytes": "7276"
},
{
"name": "Python",
"bytes": "1479873"
},
{
"name": "QMake",
"bytes": "26220"
},
{
"name": "Roff",
"bytes": "17862"
},
{
"name": "Shell",
"bytes": "51973"
}
],
"symlink_target": ""
}
|
""" Test using marathon on marathon (MoM).
This test suite imports all common tests found in marathon_common.py which are
to be tested on root marathon and MoM.
In addition it contains tests which are specific to MoM environments only.
"""
import apps
import common
import pytest
import retrying
import scripts
import shakedown
import time
import logging
from datetime import timedelta
import shakedown.dcos.service
from shakedown.clients import mesos
from shakedown.dcos.agent import restart_agent
from shakedown.dcos.command import run_command_on_master
from shakedown.dcos.marathon import deployment_wait, marathon_on_marathon
from shakedown.dcos.package import uninstall_package_and_wait
from shakedown.dcos.service import wait_for_service_endpoint
from shakedown.dcos.task import wait_for_task
from shakedown.dcos.zookeeper import delete_zk_node
# the following lines essentially do:
# from marathon_common_tests import test_*
# (re-export every common test so pytest collects it against MoM too)
import marathon_common_tests
for attribute in dir(marathon_common_tests):
    if attribute.startswith('test_'):
        exec("from marathon_common_tests import {}".format(attribute))
from shakedown.dcos.agent import required_private_agents # NOQA
from fixtures import wait_for_marathon_user_and_cleanup # NOQA
logger = logging.getLogger(__name__)
# Every test in this module waits for MoM availability and cleans up after.
pytestmark = [pytest.mark.usefixtures('wait_for_marathon_user_and_cleanup')]
@pytest.fixture(scope="function")
def marathon_service_name():
    # Common tests parameterize on this; under MoM the service is
    # "marathon-user" rather than the root "marathon".
    return "marathon-user"
def setup_module(module):
    """Ensure MoM is installed and start from a clean MoM state."""
    common.ensure_mom()
    common.cluster_info()
    with marathon_on_marathon() as client:
        common.clean_up_marathon(client=client)
def teardown_module(module):
    """Best-effort cleanup: wipe MoM apps, uninstall MoM, clear ZK state."""
    with marathon_on_marathon() as client:
        try:
            common.clean_up_marathon(client=client)
        except Exception:
            # Cleanup is best-effort; proceed with uninstall regardless.
            pass
    uninstall_package_and_wait('marathon')
    delete_zk_node('universe/marathon-user')
    # Remove everything from root marathon
    common.clean_up_marathon()
#################################################
# MoM only tests
#################################################
def test_ui_registration_requirement():
    """ Testing the UI is a challenge with this toolchain. The UI team has the
    best tooling for testing it. This test verifies that the required configurations
    for the service endpoint and ability to launch to the service UI are present.
    """
    tasks = mesos.get_master().tasks()
    for task in tasks:
        if task['name'] == 'marathon-user':
            # These DCOS_* labels are what drive service/UI registration.
            for label in task['labels']:
                if label['key'] == 'DCOS_PACKAGE_NAME':
                    assert label['value'] == 'marathon'
                if label['key'] == 'DCOS_SERVICE_NAME':
                    assert label['value'] == 'marathon-user'
@shakedown.dcos.agent.private_agents(2)
def test_mom_when_mom_agent_bounced():
    """Launch an app from MoM and restart the node MoM is on."""
    app_def = apps.sleep_app()
    app_id = app_def["id"]
    mom_ip = common.ip_of_mom()
    # Pin the app to a different node so restarting MoM's node does not
    # also kill the app's task.
    host = common.ip_other_than_mom()
    common.pin_to_host(app_def, host)
    with marathon_on_marathon() as client:
        client.add_app(app_def)
        deployment_wait(service_id=app_id, client=client)
        tasks = client.get_tasks(app_id)
        original_task_id = tasks[0]['id']
        restart_agent(mom_ip)
        # Poll until MoM is reachable again and still reports the same task.
        @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
        def check_task_is_back():
            tasks = client.get_tasks(app_id)
            assert tasks[0]['id'] == original_task_id, "The task ID has changed"
        check_task_is_back()
@shakedown.dcos.agent.private_agents(2)
def test_mom_when_mom_process_killed():
    """Launched a task from MoM then killed MoM."""
    app_def = apps.sleep_app()
    app_id = app_def["id"]
    # Keep the app off MoM's node so killing MoM cannot affect the task.
    host = common.ip_other_than_mom()
    common.pin_to_host(app_def, host)
    with marathon_on_marathon() as client:
        client.add_app(app_def)
        deployment_wait(service_id=app_id, client=client)
        tasks = client.get_tasks(app_id)
        original_task_id = tasks[0]['id']
        common.kill_process_on_host(common.ip_of_mom(), 'marathon-assembly')
        # MoM should be restarted by Marathon; wait for it to come back.
        wait_for_task('marathon', 'marathon-user', 300)
        wait_for_service_endpoint('marathon-user', path="ping")
        # The app's task must have survived MoM's restart unchanged.
        @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
        def check_task_is_back():
            tasks = client.get_tasks(app_id)
            assert tasks[0]['id'] == original_task_id, "The task ID has changed"
        check_task_is_back()
@shakedown.dcos.agent.private_agents(2)
def test_mom_with_network_failure():
    """Marathon on Marathon (MoM) tests for DC/OS with network failures simulated by knocking out ports."""
    mom_ip = common.ip_of_mom()
    logger.info("MoM IP: {}".format(mom_ip))
    app_def = apps.sleep_app()
    app_id = app_def["id"]
    with marathon_on_marathon() as client:
        client.add_app(app_def)
        wait_for_task("marathon-user", app_id.lstrip('/'))
        tasks = client.get_tasks(app_id)
        original_task_id = tasks[0]["id"]
        task_ip = tasks[0]['host']
    # PR for network partitioning in shakedown makes this better
    # take out the net
    partition_agent(mom_ip)
    partition_agent(task_ip)
    # wait for a min
    time.sleep(timedelta(minutes=1).total_seconds())
    # bring the net up
    reconnect_agent(mom_ip)
    reconnect_agent(task_ip)
    time.sleep(timedelta(minutes=1).total_seconds())
    wait_for_service_endpoint('marathon-user', timedelta(minutes=5).total_seconds(), path="ping")
    wait_for_task("marathon-user", app_id.lstrip('/'))
    with marathon_on_marathon() as client:
        wait_for_task("marathon-user", app_id.lstrip('/'))
        # The partition must not have replaced the task.
        @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
        def check_task_is_back():
            tasks = client.get_tasks(app_id)
            assert tasks[0]['id'] == original_task_id, "The task ID has changed"
        check_task_is_back()
@shakedown.dcos.cluster.dcos_1_9
@shakedown.dcos.agent.private_agents(2)
def test_mom_with_network_failure_bounce_master():
    """Marathon on Marathon (MoM) tests for DC/OS with network failures simulated by knocking out ports."""
    # get MoM ip
    mom_ip = common.ip_of_mom()
    logger.info("MoM IP: {}".format(mom_ip))
    app_def = apps.sleep_app()
    app_id = app_def["id"]
    with marathon_on_marathon() as client:
        client.add_app(app_def)
        wait_for_task("marathon-user", app_id.lstrip('/'))
        tasks = client.get_tasks(app_id)
        original_task_id = tasks[0]["id"]
        task_ip = tasks[0]['host']
        logger.info("\nTask IP: " + task_ip)
    # PR for network partitioning in shakedown makes this better
    # take out the net
    partition_agent(mom_ip)
    partition_agent(task_ip)
    # wait for a min
    time.sleep(timedelta(minutes=1).total_seconds())
    # bounce master
    run_command_on_master("sudo systemctl restart dcos-mesos-master")
    # bring the net up
    reconnect_agent(mom_ip)
    reconnect_agent(task_ip)
    time.sleep(timedelta(minutes=1).total_seconds())
    wait_for_service_endpoint('marathon-user', timedelta(minutes=10).total_seconds(), path="ping")
    with marathon_on_marathon() as client:
        wait_for_task("marathon-user", app_id.lstrip('/'), timedelta(minutes=10).total_seconds())
        # Neither the partition nor the master bounce may replace the task.
        @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
        def check_task_is_back():
            tasks = client.get_tasks(app_id)
            assert tasks[0]['id'] == original_task_id, "The task ID has changed"
        check_task_is_back()
def test_framework_unavailable_on_mom():
    """Launches an app that has elements necessary to create a service endpoint in DCOS.
    This test confirms that the endpoint is not created when launched with MoM.
    """
    app_def = apps.fake_framework()
    app_id = app_def["id"]
    with marathon_on_marathon() as client:
        client.add_app(app_def)
        deployment_wait(service_id=app_id, client=client)
    try:
        wait_for_service_endpoint('pyfw', 15)
    except Exception:
        # Expected: the endpoint should never appear under MoM.
        pass
    else:
        # BUG FIX: corrected typo "shoud" in the failure message.
        assert False, 'MoM should NOT create a service endpoint'
def partition_agent(hostname):
    """Partition a node from all network traffic except for SSH and loopback"""
    # Ship the helper script to the agent, then run it in 'fail' mode.
    shakedown.dcos.file.copy_file_to_agent(hostname, "{}/net-services-agent.sh".format(scripts.scripts_dir()))
    logger.info("partitioning {}".format(hostname))
    shakedown.dcos.command.run_command_on_agent(hostname, 'sh net-services-agent.sh fail')
def reconnect_agent(hostname):
    """Reconnect a node to cluster"""
    # Running the script without the 'fail' argument restores networking.
    shakedown.dcos.command.run_command_on_agent(hostname, 'sh net-services-agent.sh')
|
{
"content_hash": "06bb7c86ca4c573bb875c57bcc756598",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 112,
"avg_line_length": 33.545112781954884,
"alnum_prop": 0.6600918973439426,
"repo_name": "gsantovena/marathon",
"id": "f2fb379fc9eb5440b2498ca093f5076e1c0b6142",
"size": "8923",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/system/test_marathon_on_marathon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "59278"
},
{
"name": "Dockerfile",
"bytes": "6958"
},
{
"name": "Groovy",
"bytes": "17238"
},
{
"name": "HTML",
"bytes": "16356"
},
{
"name": "Java",
"bytes": "36549"
},
{
"name": "Liquid",
"bytes": "1484"
},
{
"name": "Makefile",
"bytes": "9396"
},
{
"name": "Python",
"bytes": "425193"
},
{
"name": "RAML",
"bytes": "356"
},
{
"name": "Ruby",
"bytes": "772"
},
{
"name": "Scala",
"bytes": "4614713"
},
{
"name": "Shell",
"bytes": "47966"
}
],
"symlink_target": ""
}
|
# Rabin-Karp substring search with a rolling polynomial hash.
prime = 101
def pattern_matching(text, pattern):
    """Return the 0-based index of the first occurrence of *pattern* in
    *text*, or -1 if it does not occur (Rabin-Karp algorithm)."""
    m = len(pattern)
    n = len(text)
    pattern_hash = create_hash(pattern, m - 1)
    text_hash = create_hash(text, m - 1)
    for i in range(1, n - m + 2):
        if pattern_hash == text_hash:
            # Hashes match; confirm with a direct comparison (collisions
            # are possible).
            if check_equal(text[i-1:i+m-1], pattern[0:]):
                return i - 1
        if i < n - m + 1:
            # Roll the window hash one position to the right.
            text_hash = recalculate_hash(text, i-1, i+m-1, text_hash, m)
    return -1
def check_equal(str1, str2):
    """Character-by-character equality check between two strings."""
    if len(str1) != len(str2):
        return False
    for a, b in zip(str1, str2):
        if a != b:
            return False
    return True
def create_hash(input, end):
    """Hash of input[0..end] as sum(ord(input[i]) * prime**i)."""
    hash = 0
    for i in range(end + 1):
        hash = hash + ord(input[i])*pow(prime, i)
    return hash
def recalculate_hash(input, old_index, new_index, old_hash, pattern_len):
    """Roll the hash: drop input[old_index], append input[new_index]."""
    new_hash = old_hash - ord(input[old_index])
    # BUG FIX: use integer floor division. True division ('/') converted
    # the hash to float, losing precision for longer patterns/texts and
    # causing matches to be missed. The subtraction above makes new_hash
    # exactly divisible by prime, so '//' is lossless.
    new_hash //= prime
    new_hash += ord(input[new_index])*pow(prime, pattern_len - 1)
    return new_hash
index = pattern_matching("TusharRoy", "sharRoy")
print("Index ", index)
index = pattern_matching("TusharRoy", "Roy")
print("Index ", index)
index = pattern_matching("TusharRoy", "shar")
print("Index ", index)
index = pattern_matching("TusharRoy", "usha")
print("Index ", index)
index = pattern_matching("TusharRoy", "Tus")
print("Index ", index)
index = pattern_matching("TusharRoy", "Roa")
print("Index ", index)
|
{
"content_hash": "b75dc2be7d06efb0805ebeabdf93447d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 73,
"avg_line_length": 29.24,
"alnum_prop": 0.5902872777017784,
"repo_name": "mission-peace/interview",
"id": "fa2daf37dd0ff1ec3977606b3362ae55811ce149",
"size": "1598",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/string/rabinkarp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "26234"
},
{
"name": "Java",
"bytes": "1111425"
},
{
"name": "Python",
"bytes": "105849"
}
],
"symlink_target": ""
}
|
from projects.model.Tasklog import Tasklog
from projects.model.Timelog import Timelog
from projects.model.Date import Date
from projects.model.Buglog import Buglog
from projects.model.Generallog import Generallog
class TimesheetsParser:
    """This class parses the json response for Time sheets."""

    def _fill_common_fields(self, log, value):
        """Copy the attributes shared by task, bug and general logs.

        Args:
            log(instance): Tasklog, Buglog or Generallog to populate.
            value(dict): json object holding the log attributes.
        """
        if 'owner_name' in value:
            log.set_owner_name(value['owner_name'])
        if 'hours' in value:
            log.set_hours(value['hours'])
        if 'total_minutes' in value:
            log.set_total_minutes(value['total_minutes'])
        if 'bill_status' in value:
            log.set_bill_status(value['bill_status'])
        if 'link' in value:
            if 'self' in value['link']:
                # BUG FIX: test for the 'url' KEY in the dict. The original
                # checked "'url' in value['link']['self']['url']" (substring
                # search inside the URL string), which raises KeyError when
                # the key is absent.
                if 'url' in value['link']['self']:
                    log.set_url(value['link']['self']['url'])
        if 'hours_display' in value:
            log.set_hours_display(value['hours_display'])
        if 'id' in value:
            log.set_id(value['id'])
        if 'log_date_format' in value:
            log.set_log_date_format(value['log_date_format'])
        if 'log_date' in value:
            log.set_log_date(value['log_date'])
        if 'notes' in value:
            log.set_notes(value['notes'])
        if 'log_date_long' in value:
            log.set_log_date_long(value['log_date_long'])
        if 'minutes' in value:
            log.set_minutes(value['minutes'])
        if 'owner_id' in value:
            log.set_owner_id(value['owner_id'])

    def get_task_logs(self, resp):
        """This method parses the given response and returns a task log object.

        Args:
            resp(dict): Response containing json object for task log.

        Returns:
            instance: Task log object built from the last 'task_logs' entry,
                or None if the list is empty (the original raised
                UnboundLocalError in that case).
        """
        tasklog = None
        for value in resp['timelogs']['task_logs']:
            tasklog = self.json_to_tasklog(value)
        return tasklog

    def get_message(self, resp):
        """This method parses the given response and returns a string message.

        Args:
            resp(dict): Response containing json object for message.

        Returns:
            str: Success message.
        """
        return resp['response']

    def get_time_logs(self, resp):
        """This method parses the given response and returns a time log object.

        Args:
            resp(dict): Response containing json object for time logs.

        Returns:
            instance: Time log object with per-date task/bug/general logs.
        """
        timelog = Timelog()
        resp = resp['timelogs']
        if 'grandtotal' in resp:
            timelog.set_grandtotal(resp['grandtotal'])
        if 'role' in resp:
            timelog.set_role(resp['role'])
        if 'date' in resp:
            for value in resp['date']:
                date = Date()
                if 'total_hours' in value:
                    date.set_total_hours(value['total_hours'])
                if 'display_format' in value:
                    date.set_display_format(value['display_format'])
                if 'date_long' in value:
                    date.set_date_long(value['date_long'])
                if 'task_logs' in value:
                    for task_log in value['task_logs']:
                        date.set_task_logs(self.json_to_tasklog(task_log))
                if 'bug_logs' in value:
                    for bug_log in value['bug_logs']:
                        date.set_bug_logs(self.json_to_buglog(bug_log))
                if 'general_logs' in value:
                    for general_log in value['general_logs']:
                        date.set_general_logs(self.json_to_generallog(general_log))
                timelog.set_date(date)
        return timelog

    def get_bug_log(self, resp):
        """This method parses the given response and returns a bug log object.

        Args:
            resp(dict): Response containing json object for bug log.

        Returns:
            instance: Bug log object built from the last 'bug_logs' entry,
                or None if the list is empty.
        """
        buglog = None
        for value in resp['timelogs']['bug_logs']:
            buglog = self.json_to_buglog(value)
        return buglog

    def get_general_log(self, resp):
        """This method parses the given response and returns a general log object.

        Args:
            resp(dict): Response containing json object for general log.

        Returns:
            instance: General log object built from the last 'general_logs'
                entry, or None if the list is empty.
        """
        general_log = None
        for value in resp['timelogs']['general_logs']:
            general_log = self.json_to_generallog(value)
        return general_log

    def json_to_tasklog(self, value):
        """This method parses the given json dict and returns a task log object.

        Args:
            value(dict): json object for a task log.

        Returns:
            instance: Task log object.
        """
        tasklog = Tasklog()
        self._fill_common_fields(tasklog, value)
        if 'task' in value:
            if 'id' in value['task']:
                tasklog.set_task_id(value['task']['id'])
            if 'name' in value['task']:
                tasklog.set_task_name(value['task']['name'])
        return tasklog

    def json_to_buglog(self, value):
        """This method parses the given json dict and returns a bug log object.

        Args:
            value(dict): json object for a bug log.

        Returns:
            instance: Bug log object.
        """
        buglog = Buglog()
        self._fill_common_fields(buglog, value)
        if 'bug' in value:
            if 'id' in value['bug']:
                buglog.set_bug_id(value['bug']['id'])
            if 'title' in value['bug']:
                buglog.set_bug_title(value['bug']['title'])
        return buglog

    def json_to_generallog(self, value):
        """This method parses the given json dict and returns a general log object.

        Args:
            value(dict): json object for a general log.

        Returns:
            instance: General log object.
        """
        generallog = Generallog()
        self._fill_common_fields(generallog, value)
        if 'name' in value:
            generallog.set_name(value['name'])
        return generallog

    def to_json(self, log):
        """This method is used to convert a time log object to json format.

        Args:
            log(instance): Time log object.

        Returns:
            dict: json-serializable dict of the log's non-empty attributes.
        """
        data = {}
        # Only empty strings are treated as "unset" — presumably the getters
        # return "" for missing values; verify against the model classes.
        if log.get_log_date() != "":
            data['date'] = log.get_log_date()
        if log.get_bill_status() != "":
            data['bill_status'] = log.get_bill_status()
        if log.get_hours() != "":
            data['hours'] = log.get_hours()
        if log.get_notes() != "":
            data['notes'] = log.get_notes()
        return data
|
{
"content_hash": "22f3a32d6aead19b08378b3e6ce5244f",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 86,
"avg_line_length": 35.71923076923077,
"alnum_prop": 0.5422633789167653,
"repo_name": "zoho/projects-python-wrappers",
"id": "ccbc79a526aefa1c38fea8af10aa633b547e3e2a",
"size": "9294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/parser/TimesheetsParser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "239674"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from .views import welcome, prequestionnaire, preinstructions, pretask, precomparison,\
strategy, task, question, postcomparison, thanks
# Routes for the study workflow, listed roughly in the order a participant
# encounters them. The numeric capture group passes question_index to the view.
urlpatterns = [
    url(r'^$', welcome),
    url(r'^welcome/', welcome, name='welcome'),
    url(r'^prequestionnaire/', prequestionnaire, name='prequestionnaire'),
    url(r'^pretask/', pretask, name='pretask'),
    url(r'^preinstructions/', preinstructions, name='preinstructions'),
    url(r'^precomparison/', precomparison, name='precomparison'),
    url(r'^strategy/(?P<question_index>[0-9]+)/$', strategy, name='strategy'),
    url(r'^task/(?P<question_index>[0-9]+)/$', task, name='task'),
    url(r'^question/(?P<question_index>[0-9]+)/$', question, name='question'),
    url(r'^postcomparison/', postcomparison, name='postcomparison'),
    url(r'^thanks/', thanks, name='thanks'),
]
|
{
"content_hash": "a40963bf651bba55a262542ecbc7fa34",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 87,
"avg_line_length": 45.68421052631579,
"alnum_prop": 0.6728110599078341,
"repo_name": "andrewhead/Search-Task-Logger",
"id": "f0478aaba0556d27759e6e36a84a5c6935abdacc",
"size": "916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "searchlogger/form/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2993"
},
{
"name": "DIGITAL Command Language",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "11766"
},
{
"name": "Python",
"bytes": "100170"
},
{
"name": "Shell",
"bytes": "212"
}
],
"symlink_target": ""
}
|
'''
Service support for Solaris 10 and 11, should work with other systems
that use SMF also. (e.g. SmartOS)
.. important::
If you feel that Salt should be using this module to manage services on a
minion, and it is using a different module (or gives an error similar to
*'service.start' is not available*), see :ref:`here
<module-provider-override>`.
'''
# Import Python libs
from __future__ import absolute_import
# Salt exposes 'reload_' under the public name 'reload' (which would
# otherwise shadow the Python builtin) via this alias map.
__func_alias__ = {
    'reload_': 'reload'
}
# Define the module's virtual name
__virtualname__ = 'service'
def __virtual__():
    '''
    Only work on systems which default to SMF
    '''
    if 'Solaris' not in __grains__['os_family']:
        return (False, 'The smf execution module failed to load: only available on Solaris.')
    # Don't let this work on Solaris 9 since SMF doesn't exist on it.
    if __grains__['kernelrelease'] == "5.9":
        return (False, 'The smf execution module failed to load: SMF not available on Solaris 9.')
    return __virtualname__
def _get_enabled_disabled(enabled_prop="true"):
    '''
    DRY: Get all service FMRIs and their enabled property
    '''
    cmd = '/usr/bin/svcprop -c -p general/enabled "*"'
    output = __salt__['cmd.run_stdout'](cmd, python_shell=False)
    fmris = set()
    for entry in output.splitlines():
        fields = entry.split()
        if fields and fields[2] == enabled_prop:
            # Strip the "/:properties" suffix to recover the bare FMRI.
            fmris.add(fields[0].split("/:properties")[0])
    return sorted(fmris)
def get_running():
    '''
    Return the running services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_running
    '''
    cmd = '/usr/bin/svcs -H -o FMRI,STATE -s FMRI'
    running = set()
    for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
        fields = line.split()
        if fields and 'online' in line:
            running.add(fields[0])
    return sorted(running)
def get_stopped():
    '''
    Return the stopped services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_stopped
    '''
    cmd = '/usr/bin/svcs -aH -o FMRI,STATE -s FMRI'
    stopped = set()
    for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
        fields = line.split()
        if fields and 'online' not in line and 'legacy_run' not in line:
            stopped.add(fields[0])
    return sorted(stopped)
def available(name):
    '''
    Returns ``True`` if the specified service is available, otherwise returns
    ``False``.

    We look up the name with the svcs command to get back the FMRI;
    this allows users to use simpler service names.

    CLI Example:

    .. code-block:: bash

        salt '*' service.available net-snmp
    '''
    fmri = __salt__['cmd.run'](
        '/usr/bin/svcs -H -o FMRI {0}'.format(name), python_shell=False)
    return fmri in get_all()
def missing(name):
    '''
    The inverse of service.available.

    Returns ``True`` if the specified service is not available, otherwise
    returns ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing net-snmp
    '''
    fmri = __salt__['cmd.run'](
        '/usr/bin/svcs -H -o FMRI {0}'.format(name), python_shell=False)
    return fmri not in get_all()
def get_all():
    '''
    Return all installed services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    ret = set()
    cmd = '/usr/bin/svcs -aH -o FMRI,STATE -s FMRI'
    # python_shell=False for consistency with every other svcs invocation in
    # this module — no shell features are needed (this was the one call in
    # the module that omitted it).
    lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    for line in lines:
        comps = line.split()
        if not comps:
            continue
        ret.add(comps[0])
    return sorted(ret)
def start(name):
    '''
    Start the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.start <service name>
    '''
    cmd = '/usr/sbin/svcadm enable -s -t {0}'.format(name)
    retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
    if retcode == 0:
        return True
    if retcode == 3:
        # Return code 3 means there was a problem with the service
        # A common case is being in the 'maintenance' state
        # Attempt a clear and try one more time
        __salt__['cmd.retcode'](
            '/usr/sbin/svcadm clear {0}'.format(name), python_shell=False)
        return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
    return False
def stop(name):
    '''
    Stop the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop <service name>
    '''
    retcode = __salt__['cmd.retcode'](
        '/usr/sbin/svcadm disable -s -t {0}'.format(name), python_shell=False)
    return retcode == 0
def restart(name):
    '''
    Restart the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    '''
    cmd = '/usr/sbin/svcadm restart {0}'.format(name)
    restarted = __salt__['cmd.retcode'](cmd, python_shell=False) == 0
    if restarted:
        # calling restart doesn't clear maintenance
        # or tell us that the service is in the 'online' state
        return start(name)
    return False
def reload_(name):
    '''
    Reload the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.reload <service name>
    '''
    cmd = '/usr/sbin/svcadm refresh {0}'.format(name)
    refreshed = __salt__['cmd.retcode'](cmd, python_shell=False) == 0
    if refreshed:
        # calling reload doesn't clear maintenance
        # or tell us that the service is in the 'online' state
        return start(name)
    return False
def status(name, sig=None):
    '''
    Return the status for a service, returns a bool whether the service is
    running.

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name>
    '''
    cmd = '/usr/bin/svcs -H -o STATE {0}'.format(name)
    state = __salt__['cmd.run'](cmd, python_shell=False)
    return state == 'online'
def enable(name, **kwargs):
    '''
    Enable the named service to start at boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable <service name>
    '''
    retcode = __salt__['cmd.retcode'](
        '/usr/sbin/svcadm enable {0}'.format(name), python_shell=False)
    return retcode == 0
def disable(name, **kwargs):
    '''
    Disable the named service to start at boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable <service name>
    '''
    retcode = __salt__['cmd.retcode'](
        '/usr/sbin/svcadm disable {0}'.format(name), python_shell=False)
    return retcode == 0
def enabled(name, **kwargs):
    '''
    Check to see if the named service is enabled to start on boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled <service name>
    '''
    # The property that reveals whether a service is enabled
    # can only be queried using the full FMRI
    # We extract the FMRI and then do the query
    fmri = __salt__['cmd.run'](
        '/usr/bin/svcs -H -o FMRI {0}'.format(name), python_shell=False)
    cmd = '/usr/sbin/svccfg -s {0} listprop general/enabled'.format(fmri)
    comps = __salt__['cmd.run'](cmd, python_shell=False).split()
    return comps[2] == 'true'
def disabled(name):
    '''
    Check to see if the named service is disabled to start on boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled <service name>
    '''
    if enabled(name):
        return False
    return True
def get_enabled():
    '''
    Return the enabled services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    '''
    # Note that this returns the full FMRI
    return _get_enabled_disabled(enabled_prop="true")
def get_disabled():
    '''
    Return the disabled services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_disabled
    '''
    # Note that this returns the full FMRI
    return _get_enabled_disabled(enabled_prop="false")
|
{
"content_hash": "44e197180e737d3941ca2ae966d14d2c",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 102,
"avg_line_length": 24.0748502994012,
"alnum_prop": 0.5942047009078473,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "0fb8e55f687666edc206f0cbae3965dc2ff3fb70",
"size": "8065",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.3/salt/modules/smf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
import datetime
from dateutil import parser as date_parser
import inspect
import warnings
from tinymodel.internals.field_def_validation import __substitute_class_refs
from tinymodel.utils import ValidationError
def __validate_field_value(tinymodel, this_field, original_value, allowed_types, value):
    """
    A field-level validation method that checks the value of the field against the field's allowed_types.

    Recurses into collection values by sampling one element/key/value.
    NOTE(review): uses Python 2 idioms (``.keys()[0]``, ``iter(x).next()``) —
    this module is Python 2 only.

    :param Field this_field: The field whose validity we are checking
    :param original_value: The data we are validating. This is so we can keep its structure across recursive calls
    :param [class | {class: class} | [class] | (class,) | {class,}] allowed_types: The allowed data types, as an array of Python class definitions
    :param object value: The value of the field

    :rtype bool: True indicates the field's value is valid, False indicates that it is not
    """
    valid = False
    if type(value) in tinymodel.COLLECTION_TYPES:
        # Keep only the allowed-type templates of the same collection kind as value.
        valid_allowed_types = [x for x in allowed_types if isinstance(x, type(value))]
        if valid_allowed_types:
            if value and isinstance(value, dict):
                # Validate one sample key and one sample value against the
                # corresponding key/value types of each dict template.
                key_valid = __validate_field_value(tinymodel, this_field, original_value, map(lambda x: x.keys()[0], valid_allowed_types), value.keys()[0])
                value_valid = __validate_field_value(tinymodel, this_field, original_value, map(lambda x: x.values()[0], valid_allowed_types), value.values()[0])
                valid = key_valid and value_valid
            elif value:
                # Non-dict collection: validate one sample element against the
                # element type of each template.
                valid = __validate_field_value(tinymodel, this_field, original_value, map(lambda x: iter(x).next(), valid_allowed_types), iter(value).next())
            else:
                # value is an empty collection type, but this is allowed
                valid = True
    else:
        if type(value) in allowed_types:
            valid = True
    if valid:
        # Record the successful validation so is_valid() can short-circuit later.
        this_field.last_validated_value = original_value
        this_field.was_validated = valid
    return valid
def validate(tinymodel, prior_errors=None, warning_only=False):
    """
    A model-level validation function which checks the following:
    1) All fields defined with required=True exist
    2) All existing fields contain valid values
    A ValidationError is raised (or a warning emitted) if any check fails.

    :param [str] prior_errors: Optional list of prior errors to append to any validation errors generated
    :param bool warning_only: If True, this validation will raise only warnings instead of Exceptions
    """
    # None sentinel instead of a mutable default list (a list default would be
    # shared across all calls).
    if prior_errors is None:
        prior_errors = []
    data_validation_errors = []
    # Test missing required fields
    for field_def in tinymodel.FIELD_DEFS:
        if field_def.required and not hasattr(tinymodel, field_def.title):
            data_validation_errors.append("Missing required field: " + field_def.title)
    # Test invalid field values
    for field in tinymodel.FIELDS:
        if field.field_def.validate and not field.is_valid():
            if not __validate_field_value(tinymodel, this_field=field, original_value=field.value, allowed_types=field.field_def.allowed_types, value=field.value):
                data_validation_errors.append("Invalid field: " + field.field_def.title + " has value of type " + str(type(field.value)) + " but allowed types are " + str(field.field_def.allowed_types))
    errors = prior_errors + data_validation_errors
    if errors:
        if warning_only:
            warnings.warn("Validation Errors on " + str(tinymodel) + ":\n" + "\n".join(errors))
        else:
            raise ValidationError("Validation Errors on " + str(tinymodel) + ":\n" + "\n".join(errors))
def __match_field_value(cls, name, value):
    """Validate a single keyword value against cls's field definitions.

    Raises ValidationError when the value does not match any allowed type.
    NOTE(review): Python 2 only (``long``, ``unicode``, list-returning
    ``filter``/``map``, ``.keys()[0]``). Mutates ``field_def.allowed_types``
    in place via __substitute_class_refs.
    """
    from tinymodel import TinyModel
    def new_validation_error(value, field_name, allowed_types):
        error = '<%s %r> is not a valid value for "%s". Allowed types are: %r'
        return ValidationError(error % (type(value).__name__, value, field_name, allowed_types))
    value_type = type(value)
    if value_type in cls.COLLECTION_TYPES:
        if name.endswith('_ids') and value_type in (list, tuple, set):
            # Foreign-key id collections: every element must be an int/long or
            # a numeric string.
            for v in value:
                if type(v) not in (long, int, str, unicode):
                    raise new_validation_error(value, name, '[list(int|long,str|unicode), tuple(int|long,str|unicode)], set(int|long,str|unicode)')
                if type(v) in (str, unicode):
                    try:
                        long(v)
                    except ValueError:
                        raise new_validation_error(value, name, '[list(int|long,str|unicode), tuple(int|long,str|unicode)], set(int|long,str|unicode)')
        else:
            try:
                field_def = filter(lambda f: f.title == name, cls.FIELD_DEFS)[0]
            except IndexError:
                # name can be fk with '_id' at the end
                field_def = filter(lambda f: f.title == name[:-3], cls.FIELD_DEFS)[0]
            # Resolve string class references into real classes (in place).
            for index, allowed_type in enumerate(field_def.allowed_types):
                field_def.allowed_types[index] = __substitute_class_refs(cls, field_name=field_def.title, required=field_def.required, field_type=allowed_type)
            if value and isinstance(value, dict):
                if is_lookup_dict(value):
                    # Range lookup such as {'gte': ..., 'lt': ...}.
                    validate_range_lookup(value, field_def.allowed_types)
                else:
                    # Validate a sample key and a sample value of the dict.
                    key_valid = __match_field_value(cls, map(lambda x: x.keys()[0], field_def.allowed_types), value.keys()[0])
                    value_valid = __match_field_value(cls, map(lambda x: x.values()[0], field_def.allowed_types), value.values()[0])
                    if not (key_valid and value_valid):
                        raise new_validation_error(value, name, field_def.allowed_types)
            elif value:
                # Non-dict collection: every element must match at least one
                # allowed type (TinyModel subclasses also accept ids/strings).
                for v in value:
                    valid = False
                    for allowed_type in field_def.allowed_types:
                        if allowed_type in (list, tuple, set):
                            if type(v) == allowed_type:
                                valid = True
                        elif type(allowed_type) in (list, tuple, set):
                            if type(v) in allowed_type:
                                valid = True
                        elif inspect.isclass(allowed_type) and \
                                issubclass(allowed_type, TinyModel):
                            if type(v) in (long, int, unicode, str):
                                valid = True
                        elif isinstance(v, allowed_type):
                            valid = True
                    if not valid:
                        raise new_validation_error(value, name, field_def.allowed_types)
    else:
        try:
            field_def = filter(lambda f: f.title == name, cls.FIELD_DEFS)[0]
        except IndexError:  # the id is expanded with _id, remove _id
            field_def = filter(lambda f: f.title == name[:-3], cls.FIELD_DEFS)[0]
        if name.endswith('_id') and field_def.relationship != 'attribute':
            # Scalar foreign key: must be an int/long or numeric string.
            if value_type not in set(field_def.allowed_types[1:] + [long, int, str, unicode]):
                raise new_validation_error(value, name, [long, int, str, unicode])
            if value_type in (str, unicode) and not value == None:
                try:
                    long(value)
                except ValueError:
                    raise new_validation_error(value, name, [long, int, str, unicode])
        else:
            field_def = filter(lambda f: f.title == name, cls.FIELD_DEFS)[0]
            for index, allowed_type in enumerate(field_def.allowed_types):
                field_def.allowed_types[index] = __substitute_class_refs(cls, field_name=field_def.title, required=field_def.required, field_type=allowed_type)
            if value_type not in field_def.allowed_types:
                raise new_validation_error(value, name, field_def.allowed_types)
def match_field_values(cls, **kwargs):
    """Validate every supplied keyword value against cls's field definitions."""
    for field_name in kwargs:
        __match_field_value(cls, field_name, kwargs[field_name])
def __remove_values(cls, condition, **kwargs):
    """Delete from kwargs one matching entry (title or alias) for every
    field_def satisfying ``condition``; return the pruned kwargs.

    Note: the key set is snapshotted once up front, matching the original's
    behavior.
    """
    snapshot = set(kwargs)
    for field_def in cls.FIELD_DEFS:
        matched = {field_def.title, field_def.alias} & snapshot
        if condition(field_def) and matched:
            del kwargs[matched.pop()]
    return kwargs
def validate_order_by(cls, order_by):
    """Validate an order_by mapping of field title -> sort direction.

    :param class cls: Model class whose FIELD_DEFS define the searchable fields.
    :param dict order_by: Maps field titles to 'ascending', 'descending' or None.
    :raises ValidationError: on an unknown field or an invalid direction.
    """
    ORDER_BY_VALUES = ['ascending', 'descending', None]
    # Hoist the title lookup out of the loop; the original rebuilt a
    # redundantly nested list on every iteration.
    valid_titles = set(field_def.title for field_def in cls.FIELD_DEFS)
    for key, value in order_by.items():
        if key not in valid_titles:
            raise ValidationError(str(key) + " is not valid searchable field")
        if value not in ORDER_BY_VALUES:
            raise ValidationError(str(value) + " is not a valid ordering option, valid options are: " + str(ORDER_BY_VALUES))
def validate_fuzzy_fields(cls, fields=()):
    """Check that every name in ``fields`` is a text field of cls.

    :param class cls: Model class whose FIELD_DEFS are inspected.
    :param fields: iterable of field titles requested for fuzzy search.
        (Default changed from a mutable ``[]`` to ``()`` — same membership
        semantics, no shared-mutable-default pitfall.)
    :raises ValidationError: if no requested field exists on cls, or a
        requested field is not a str/unicode field.
    """
    fuzzy_fields = [f for f in cls.FIELD_DEFS if f.title in fields]
    if not fuzzy_fields:
        raise ValidationError('One or more fields indicated for fuzzy search, is not a field of %r' % cls)
    allowed_types = set([unicode, str])  # Python 2: text fields are str/unicode
    for field_def in fuzzy_fields:
        if not set(field_def.allowed_types) & allowed_types:
            raise ValidationError('%r is not a text field. Field not compatible with fuzzy search!' % field_def.title)
def validate_range_lookup(lookup_dict, allowed_types):
    """
    Validates the contents of a dictionary meant for looking up objects by a range of values.

    Dates are normalized to datetimes and strings are parsed as dates
    in place (this function mutates lookup_dict).

    :param dict lookup_dict: The dictionary containing the ranges to look up by.
    :param list allowed_types: The list of types that are allowed for the field being validated.
    :raises ValidationError: on unknown keys, conflicting bounds, bad value
        types, or an empty range.
    """
    lt_lookups = set(['lt', 'lte'])
    gt_lookups = set(['gt', 'gte'])
    lookup_keys = lt_lookups | gt_lookups
    if set(lookup_dict.keys()) - lookup_keys:
        raise ValidationError('Invalid lookup keys: %s' % (set(lookup_dict.keys()) - lookup_keys))
    if (set(lookup_dict.keys()) & lt_lookups) == lt_lookups:
        raise ValidationError('"lt" and "lte" cannot be used together:\n%s' % lookup_dict)
    if (set(lookup_dict.keys()) & gt_lookups) == gt_lookups:
        raise ValidationError('"gt" and "gte" cannot be used together:\n%s' % lookup_dict)
    for key in lookup_dict.keys():
        if type(lookup_dict[key]) == datetime.date:
            # Promote bare dates to midnight datetimes so comparisons work.
            lookup_dict[key] = datetime.datetime.combine(lookup_dict[key], datetime.time())
        elif isinstance(lookup_dict[key], str):
            try:
                lookup_dict[key] = date_parser.parse(lookup_dict[key])
            except (ValueError, OverflowError, TypeError):
                # Not a parseable date — leave the string for the type check
                # below. (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                pass
        if type(lookup_dict[key]) not in allowed_types:
            raise ValidationError('%r is not a valid value for this field. '
                'Valid types are: %r\n%s' % (lookup_dict[key], allowed_types, lookup_dict))
    gt_key = next(iter(set(lookup_dict.keys()) & gt_lookups), None)
    lt_key = next(iter(set(lookup_dict.keys()) & lt_lookups), None)
    if lt_key and gt_key:
        if lookup_dict[gt_key] > lookup_dict[lt_key]:
            raise ValidationError(
                '"%s" value (%s) must be less than "%s" value (%s).' % (
                    lt_key, lookup_dict[lt_key], gt_key, lookup_dict[gt_key]
                )
            )
# PEP 8 (E731): define these helpers with `def` instead of assigning lambdas
# to names; the public names and call signatures are unchanged.
def remove_calculated_values(cls, **kwargs):
    """Strip kwargs entries for calculated fields."""
    return __remove_values(cls, lambda f: f.calculated, **kwargs)

def remove_has_many_values(cls, **kwargs):
    """Strip kwargs entries for has_many relationship fields."""
    return __remove_values(cls, lambda f: f.relationship == 'has_many', **kwargs)

def remove_datetime_values(cls, **kwargs):
    """Strip kwargs entries for datetime fields."""
    return __remove_values(cls, lambda f: datetime.datetime in f.allowed_types, **kwargs)

def remove_float_values(cls, **kwargs):
    """Strip kwargs entries for float fields."""
    return __remove_values(cls, lambda f: float in f.allowed_types, **kwargs)

def is_lookup_dict(dict_obj):
    """Return the (truthy when non-empty) set of range-lookup keys in dict_obj."""
    return set(['lt', 'gt', 'lte', 'gte']) & set(dict_obj.keys())
|
{
"content_hash": "36ceef810d0be2edc97ddac44f26ace1",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 202,
"avg_line_length": 50.51914893617021,
"alnum_prop": 0.6116071428571429,
"repo_name": "buzzfeed/tinymodel",
"id": "70460e6a99afbd451bcb9d165410095daeb12587",
"size": "11872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tinymodel/internals/validation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106726"
}
],
"symlink_target": ""
}
|
"""
Tests the `todo_export.to_html_dicts() function by generate a webpage
containing their results.
"""
import os
import unittest
from datetime import datetime
from pathlib import Path
import prjct
from prjct.todo_export import to_html_dicts
class TestTodoExport(unittest.TestCase):
    """Render `to_html_dicts()` output into an HTML page and check the file
    was written."""

    # TODO: Add setup that removes all files in test/results directory
    def test_to_html_lists(self):
        p = Path(__file__)  # location of this file
        OUTPUT_FILE_NAME = 'test_todo_export.html'
        output_file = p.parent / 'results' / OUTPUT_FILE_NAME
        todos, dones = to_html_dicts(prjct.config.load())
        todos_split = "\n".join(["<h3>" + project + "</h3>" + todo_list for project, todo_list in todos.items()])
        dones_split = "\n".join(["<h3>" + project + "</h3>" + done_list for project, done_list in dones.items()])
        # Fixed the template: the <h1> misspelled the function name as
        # "toto_html_dicts", and the document closed with </head> instead
        # of </html> (invalid HTML).
        html = """\
<html>
<head>
<title>prjct -- todo_export.to_html_dicts() function test</title>
</head>
<body>
<h1>prjct -- todo_export.to_html_dicts() function test</h1>
<p>{}</p>
<h2>My To-do Items</h2>
{}
<h2>My Done Items</h2>
{}
<hr />
<p>prjct v.{} -- test run {}</p>
</body>
</html>
""".format(prjct.__doc__, todos_split, dones_split, prjct.__version__, datetime.now())
        output_file.write_text(html)
        self.assertTrue(os.path.isfile(str(output_file)))
# Allow running this test module directly: `python test_todo_export.py`.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "baa637801003e54592adb09aafadfb1f",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 113,
"avg_line_length": 30.615384615384617,
"alnum_prop": 0.5489949748743719,
"repo_name": "MinchinWeb/prjct",
"id": "7094df48e372b79a64dc858a15e3bcb288651d6c",
"size": "1641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_todo_export.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "395"
},
{
"name": "Python",
"bytes": "43369"
}
],
"symlink_target": ""
}
|
import cPickle
from gzip import GzipFile
import re
import Bio.SeqIO
import pandas as pd
from progressbar import ProgressBar
from common import (
int_or_seq, dataframe_from_counts, bad_amino_acids,
fetch_file, fetch_and_transform
)
# Ensembl release 75 (GRCh37) human peptide FASTA, fetched over FTP.
BASE_URL = "ftp://ftp.ensembl.org"
FTP_DIR = "/pub/release-75/fasta/homo_sapiens/pep/"
FASTA_FILENAME = "Homo_sapiens.GRCh37.75.pep.all.fa"
GZ_FILENAME = FASTA_FILENAME + ".gz"
FULL_URL = BASE_URL + FTP_DIR + GZ_FILENAME
def load_dataframe():
    """
    Loads the protein products of the reference genome
    in a dataframe with columns:
        - protein : amino acid string
        - protein_id
        - gene_id
        - transcript_id
    """
    filename = fetch_file(FASTA_FILENAME, FULL_URL)
    sequences = []
    protein_ids = []
    gene_ids = []
    transcript_ids = []
    # Raw strings for the regexes; Ensembl FASTA descriptions embed
    # "gene:ENSG..." and "transcript:ENST..." annotations.
    gene_pattern = re.compile(r'gene:(ENSG[0-9]*)')
    transcript_pattern = re.compile(r'transcript:(ENST[0-9]*)')
    with open(filename, 'r') as f:
        for record in Bio.SeqIO.parse(f, 'fasta'):
            protein_ids.append(record.id)
            sequences.append(str(record.seq))
            desc = record.description
            gene_matches = gene_pattern.findall(desc)
            # None when the description carries no gene/transcript annotation.
            gene_ids.append(gene_matches[0] if gene_matches else None)
            transcript_matches = transcript_pattern.findall(desc)
            transcript_ids.append(transcript_matches[0] if transcript_matches else None)
    return pd.DataFrame({
        'protein': sequences,
        'gene_id': gene_ids,
        'protein_id': protein_ids,
        'transcript_id': transcript_ids})
def _generate_counts(src_filename, peptide_lengths, nrows):
epitope_counts = {}
get_count = epitope_counts.get
with open(src_filename, 'r') as f:
seqs = [str(record.seq) for record in Bio.SeqIO.parse(f, "fasta")]
print "Generating substrings of length %s" % (peptide_lengths,)
pbar = ProgressBar(maxval = len(seqs)).start()
for seq_num, seq in enumerate(seqs):
seq_len = len(seq)
if nrows and seq_num > nrows:
break
for size in peptide_lengths:
for i in xrange(seq_len - size + 1):
epitope = seq[i:i+size]
if epitope in epitope_counts:
epitope_counts[epitope] += 1
else:
epitope_counts[epitope] = 1
pbar.update(seq_num+1)
pbar.finish()
return dataframe_from_counts(epitope_counts)
def _generate_set(src_filename, peptide_lengths, nrows):
peptides = set([])
with open(src_filename, 'r') as f:
seqs = [str(record.seq) for record in Bio.SeqIO.parse(f, "fasta")]
print "Generating substrings of length %s" % (peptide_lengths,)
pbar = ProgressBar(maxval = len(seqs)).start()
for seq_num, seq in enumerate(seqs):
if nrows and seq_num > nrows:
break
for size in peptide_lengths:
for i in xrange(len(seq) - size + 1):
peptides.add(seq[i:i+size])
pbar.update(seq_num+1)
pbar.finish()
return peptides
def load_peptide_counts(peptide_length = [8,9,10,11], nrows = None):
"""
List of all reference peptides encoded in a reference human exome
"""
peptide_lengths = int_or_seq(peptide_length)
lens = "_".join(str(n) for n in peptide_lengths)
cache_filename = \
"reference_peptide_counts_" + lens + "_nrows_" + str(nrows) + ".csv"
def save_counts(src_path, dst_path):
counts = _generate_counts(src_path, peptide_lengths, nrows)
print "Saving %s" % dst_path
counts.to_csv(dst_path)
return counts
return fetch_and_transform(
transformed_filename = cache_filename,
transformer = save_counts,
loader = pd.read_csv,
source_filename = FASTA_FILENAME,
source_url = FULL_URL)
def load_peptide_set(peptide_length = [8,9,10,11], nrows = None):
    """Set of all reference peptides of the given lengths, cached on disk
    as a gzipped pickle."""
    peptide_lengths = int_or_seq(peptide_length)
    length_tag = "_".join(str(n) for n in peptide_lengths)
    cache_filename = "reference_peptide_set_%s_nrows_%s.pickle.gz" % (
        length_tag, nrows)
    def save_set(src_path, dst_path):
        # Generate the peptide set and persist it for later loads.
        string_set = _generate_set(src_path, peptide_lengths, nrows)
        with GzipFile(dst_path, 'w') as out_file:
            out_file.write(cPickle.dumps(string_set))
        return string_set
    def load_set(path):
        # Inverse of save_set: unpickle the gzipped set.
        with GzipFile(path, 'r') as in_file:
            return cPickle.loads(in_file.read())
    return fetch_and_transform(
        transformed_filename = cache_filename,
        transformer = save_set,
        loader = load_set,
        source_filename = FASTA_FILENAME,
        source_url = FULL_URL)
|
{
"content_hash": "ae0e83a1cfb0eaf73864304c3683e1c7",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 79,
"avg_line_length": 34.211920529801326,
"alnum_prop": 0.5861401471157569,
"repo_name": "cpcloud/pepdata",
"id": "0b4be65f083d774f0e07f1919266e10e7657eb9a",
"size": "5766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pepdata/reference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "114025"
}
],
"symlink_target": ""
}
|
import pygame as pg
from settings import *
from tilemap import collide_hit_rect
vec = pg.math.Vector2
class Player(pg.sprite.Sprite):
    """The player sprite: rotates with left/right, moves forward/backward
    along its facing direction, and resolves wall collisions per axis."""

    def __init__(self, game, x, y):
        self.groups = game.all_sprites
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = game.player_img
        self.rect = self.image.get_rect()
        # Use a fixed-size rect for collisions so the rotated image's
        # bounding box doesn't change the hitbox.
        self.hit_rect = PLAYER_HIT_RECT
        self.hit_rect.center = self.rect.center
        self.vel = vec(0, 0)
        self.pos = vec(x, y) * TILESIZE
        self.rot = 0

    def get_keys(self):
        """Translate currently pressed keys into this frame's rotation
        speed and velocity."""
        self.rot_speed = 0
        self.vel = vec(0, 0)
        pressed = pg.key.get_pressed()
        if pressed[pg.K_LEFT] or pressed[pg.K_a]:
            self.rot_speed = PLAYER_ROT_SPEED
        if pressed[pg.K_RIGHT] or pressed[pg.K_d]:
            # Later checks win: right overrides left when both are held.
            self.rot_speed = -PLAYER_ROT_SPEED
        if pressed[pg.K_UP] or pressed[pg.K_w]:
            self.vel = vec(PLAYER_SPEED, 0).rotate(-self.rot)
        if pressed[pg.K_DOWN] or pressed[pg.K_s]:
            # Reverse at half speed.
            self.vel = vec(-PLAYER_SPEED / 2, 0).rotate(-self.rot)

    def collide_with_walls(self, dir):
        """Resolve wall collisions along one axis ('x' or 'y'): snap the
        position to the wall edge and zero that velocity component."""
        if dir == 'x':
            collisions = pg.sprite.spritecollide(
                self, self.game.walls, False, collide_hit_rect)
            if collisions:
                wall_rect = collisions[0].rect
                half_width = self.hit_rect.width / 2
                if self.vel.x > 0:
                    self.pos.x = wall_rect.left - half_width
                if self.vel.x < 0:
                    self.pos.x = wall_rect.right + half_width
                self.vel.x = 0
            self.hit_rect.centerx = self.pos.x
        if dir == 'y':
            collisions = pg.sprite.spritecollide(
                self, self.game.walls, False, collide_hit_rect)
            if collisions:
                wall_rect = collisions[0].rect
                half_height = self.hit_rect.height / 2
                if self.vel.y > 0:
                    self.pos.y = wall_rect.top - half_height
                if self.vel.y < 0:
                    self.pos.y = wall_rect.bottom + half_height
                self.vel.y = 0
            self.hit_rect.centery = self.pos.y

    def update(self):
        """Advance one frame: read input, rotate the image, move, and
        resolve collisions axis by axis."""
        self.get_keys()
        self.rot = (self.rot + self.rot_speed * self.game.dt) % 360
        self.image = pg.transform.rotate(self.game.player_img, self.rot)
        self.rect = self.image.get_rect()
        self.rect.center = self.pos
        self.pos += self.vel * self.game.dt
        self.hit_rect.centerx = self.pos.x
        self.collide_with_walls('x')
        self.hit_rect.centery = self.pos.y
        self.collide_with_walls('y')
        # The drawn rect follows the (possibly collision-corrected) hitbox.
        self.rect.center = self.hit_rect.center
class Mob(pg.sprite.Sprite):
    """An enemy sprite that continuously rotates to face the player."""

    def __init__(self, game, x, y):
        self.groups = game.all_sprites, game.mobs
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = game.mob_img
        self.rect = self.image.get_rect()
        self.pos = vec(x, y) * TILESIZE
        self.rect.center = self.pos
        self.rot = 0

    def update(self):
        """Point the mob's image at the player's current position."""
        # Angle of the mob->player vector measured against the +x axis.
        self.rot = (self.game.player.pos - self.pos).angle_to(vec(1, 0))
        self.image = pg.transform.rotate(self.game.mob_img, self.rot)
        self.rect = self.image.get_rect()
        self.rect.center = self.pos
class Wall(pg.sprite.Sprite):
    """A static wall tile placed on the map grid."""

    def __init__(self, game, x, y):
        self.groups = game.all_sprites, game.walls
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = game.wall_img
        self.rect = self.image.get_rect()
        # Keep both the grid coordinates and the derived pixel position.
        self.x = x
        self.y = y
        self.rect.x = x * TILESIZE
        self.rect.y = y * TILESIZE
|
{
"content_hash": "b2f1b36f36bbbaa14ecdd0c43704d9d2",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 90,
"avg_line_length": 37.98913043478261,
"alnum_prop": 0.5484978540772533,
"repo_name": "kidscancode/gamedev",
"id": "d830bc3d0bba189760a40cd8195d828334db1487",
"size": "3495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/tilemap/part 07/sprites.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1690942"
}
],
"symlink_target": ""
}
|
"""Tests for random-number generation ops in the XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.platform import googletest
class RandomOpsTest(xla_test.XLATestCase):
  """Test cases for random-number generating operators."""
  # Dtypes under test: all numeric types except the complex ones.
  def _random_types(self):
    return set(self.numeric_types) - set(self.complex_types)
  def _testRngIsNotConstant(self, rng, dtype):
    # Tests that 'rng' does not always return the same value.
    with self.test_session() as sess:
      with self.test_scope():
        x = rng(dtype)
      # The random-number generator, if working correctly, should produce the
      # same output multiple times with low probability.
      y = sess.run(x)
      z = sess.run(x)
      w = sess.run(x)
      # We use exact equality here. If the random-number generator is producing
      # deterministic output, all three outputs will be bitwise identical.
      self.assertTrue((not np.array_equal(y, z)) or
                      (not np.array_equal(z, w)) or (not np.array_equal(y, w)))
  def testRandomUniformIsNotConstant(self):
    def rng(dtype):
      return random_ops.random_uniform(shape=[2], dtype=dtype, maxval=1000000)
    for dtype in self._random_types():
      self._testRngIsNotConstant(rng, dtype)
  def testRandomNormalIsNotConstant(self):
    def rng(dtype):
      return random_ops.random_normal(shape=[2], dtype=dtype)
    # TODO(b/34339814): implement inverse erf support for non-F32 types.
    dtype = dtypes.float32
    self._testRngIsNotConstant(rng, dtype)
  def testRandomUniformIsInRange(self):
    for dtype in self._random_types():
      with self.test_session() as sess:
        with self.test_scope():
          x = random_ops.random_uniform(
              shape=[1000], dtype=dtype, minval=-2, maxval=33)
        y = sess.run(x)
        # Every one of the 1000 samples must fall in the half-open
        # interval [-2, 33).
        self.assertTrue((y >= -2).sum() == 1000)
        self.assertTrue((y < 33).sum() == 1000)
  def testTruncatedNormalIsNotConstant(self):
    def rng(dtype):
      return random_ops.truncated_normal(shape=[2], dtype=dtype)
    # TODO(b/34339814): implement inverse erf support for non-F32 types.
    self._testRngIsNotConstant(rng, dtypes.float32)
  def testTruncatedNormalIsInRange(self):
    count = 10000000
    # TODO(b/34339814): implement inverse erf support for non-F32 types.
    for dtype in [dtypes.float32]:
      with self.test_session() as sess:
        with self.test_scope():
          x = random_ops.truncated_normal(shape=[count], dtype=dtype, seed=42)
        y = sess.run(x)
        def normal_cdf(x):
          return .5 * math.erfc(-x / math.sqrt(2))
        def normal_pdf(x):
          return math.exp(-(x**2) / 2.) / math.sqrt(2 * math.pi)
        def probit(x, sess=sess):
          return sess.run(special_math.ndtri(x))
        # Truncation bounds [a, b] of a standard normal (mu=0, sigma=1).
        a = -2.
        b = 2.
        mu = 0.
        sigma = 1.
        alpha = (a - mu) / sigma
        beta = (b - mu) / sigma
        # z is the probability mass of the untruncated normal within [a, b].
        z = normal_cdf(beta) - normal_cdf(alpha)
        self.assertTrue((y >= a).sum() == count)
        self.assertTrue((y <= b).sum() == count)
        # For more information on these calculations, see:
        # Burkardt, John. "The Truncated Normal Distribution".
        # Department of Scientific Computing website. Florida State University.
        expected_mean = mu + (normal_pdf(alpha) - normal_pdf(beta)) / z * sigma
        actual_mean = np.mean(y)
        self.assertAllClose(actual_mean, expected_mean, atol=2e-4)
        expected_median = mu + probit(
            (normal_cdf(alpha) + normal_cdf(beta)) / 2.) * sigma
        actual_median = np.median(y)
        self.assertAllClose(actual_median, expected_median, atol=8e-4)
        expected_variance = sigma**2 * (1 + (
            (alpha * normal_pdf(alpha) - beta * normal_pdf(beta)) / z) - (
                (normal_pdf(alpha) - normal_pdf(beta)) / z)**2)
        actual_variance = np.var(y)
        self.assertAllClose(actual_variance, expected_variance, rtol=3e-4)
  def testShuffle1d(self):
    with self.test_session() as sess:
      with self.test_scope():
        x = math_ops.range(1 << 16)
        shuffle = random_ops.random_shuffle(x)
      result = sess.run(shuffle)
      # NOTE: under Python 3 this is a range object; set(expected) below
      # still yields the full value set.
      expected = range(1 << 16)
      # Compare sets to avoid randomness behavior changes but make sure still
      # have all the values.
      self.assertAllEqual(set(result), set(expected))
  def testShuffle2d(self):
    with self.test_session() as sess:
      with self.test_scope():
        x = array_ops.diag(math_ops.range(20))
        shuffle = random_ops.random_shuffle(x)
      result = sess.run(shuffle)
      expected = np.diag(range(20)).flatten()
      # Compare sets to avoid randomness behavior changes but make sure still
      # have all the values.
      self.assertAllEqual(len(result.flatten()), len(expected))
      self.assertAllEqual(set(result.flatten()), set(expected))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  googletest.main()
|
{
"content_hash": "f2a50f4ab71f6231941981c4c22c6ed6",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 79,
"avg_line_length": 35.145695364238414,
"alnum_prop": 0.6431128697946109,
"repo_name": "jart/tensorflow",
"id": "14c5e7a975e478ca6ceed37c28339b40612801c8",
"size": "5996",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/compiler/tests/random_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "310149"
},
{
"name": "C++",
"bytes": "44871792"
},
{
"name": "CMake",
"bytes": "206735"
},
{
"name": "Go",
"bytes": "1163781"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "799574"
},
{
"name": "Jupyter Notebook",
"bytes": "2455980"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52050"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99265"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "38792793"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "447966"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
}
|
if foo():
# <- keyword
pass
# <- keyword
elif bar():
# <- keyword
pass
else:
# <- keyword
foo
return
# ^ keyword
raise e
# ^ keyword
for i in foo():
# <- keyword
# ^ variable
# ^ operator
# ^ function
continue
# <- keyword
break
# <- keyword
a and b or c
# ^ operator
# ^ variable
# ^ operator
|
{
"content_hash": "1b9324a15c4ce3f1f54e6d322dffcd62",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 19,
"avg_line_length": 11.733333333333333,
"alnum_prop": 0.5113636363636364,
"repo_name": "tree-sitter/tree-sitter-python",
"id": "e80fac2241480f25db7ba1eb9c49fe580cdfdfe1",
"size": "352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/highlight/keywords.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "869"
},
{
"name": "JavaScript",
"bytes": "22768"
},
{
"name": "Python",
"bytes": "2084"
},
{
"name": "Rust",
"bytes": "3380"
},
{
"name": "Scheme",
"bytes": "2111"
},
{
"name": "Shell",
"bytes": "974"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
    # Point Django at the South-specific test settings before importing any
    # Django machinery; setdefault keeps an externally supplied
    # DJANGO_SETTINGS_MODULE intact.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE',
                          'aps_purchasing.tests.south_settings')
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
{
"content_hash": "9c9c6d0b26bb10aaa679159dc992095f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 64,
"avg_line_length": 27.3,
"alnum_prop": 0.652014652014652,
"repo_name": "bitmazk/django-aps-purchasing",
"id": "740633361800bbe76fcd9f0bfef764cf76619fbe",
"size": "295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4554"
},
{
"name": "Python",
"bytes": "75047"
},
{
"name": "Shell",
"bytes": "5131"
}
],
"symlink_target": ""
}
|
from rml2txt import parseString, parseNode
""" This engine is the minimalistic renderer of RML documents into text files,
using spaces and newlines to format.
It was needed in some special applications, where legal reports need to be
printed on special (dot-matrix) printers.
"""
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "b8068ba5df4e220d2c72e46118868d83",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 78,
"avg_line_length": 30.25,
"alnum_prop": 0.7630853994490359,
"repo_name": "diogocs1/comps",
"id": "c80d8376272b56e212447fa612b6ff49086b5991",
"size": "1351",
"binary": false,
"copies": "374",
"ref": "refs/heads/master",
"path": "web/openerp/report/render/rml2txt/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
}
|
import time
import warnings
import numpy as np
from numpy import newaxis
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
def plot_results_multiple(predicted_data, true_data, prediction_len):
    """Plot the true series plus each predicted sub-sequence.

    Each entry of `predicted_data` is a list of `prediction_len` values;
    it is left-padded with None so it lines up with its starting offset
    in the true series. Blocks until the plot window is closed.
    """
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Data')
    # Pad the list of predictions to shift it in the graph to its correct start
    for i, data in enumerate(predicted_data):
        # Removed a stray debug print('yo') left in the original.
        padding = [None] * (i * prediction_len)
        plt.plot(padding + data, label='Prediction')
    plt.legend()
    plt.show()
def load_data(filename, seq_len, normalise_window):
    """Load a newline-separated series and build train/test windows.

    Parameters
    ----------
    filename : str
        Path to a text file with one value per line.
    seq_len : int
        Window length; each sample is `seq_len` inputs plus one target.
    normalise_window : bool
        If True, rescale each window relative to its first value.

    Returns [x_train, y_train, x_test, y_test]: the first 90% of windows
    (shuffled) are train, the remainder test; inputs are reshaped to
    (samples, seq_len, 1).
    """
    # Context manager closes the handle (the original open(...).read()
    # leaked it).
    with open(filename, 'r') as f:
        data = f.read().split('\n')
    sequence_length = seq_len + 1
    result = []
    for index in range(len(data) - sequence_length):
        result.append(data[index: index + sequence_length])
    if normalise_window:
        result = normalise_windows(result)
    result = np.array(result)
    row = round(0.9 * result.shape[0])
    train = result[:int(row), :]
    # Shuffle only the training windows, in place.
    np.random.shuffle(train)
    x_train = train[:, :-1]
    y_train = train[:, -1]
    x_test = result[int(row):, :-1]
    y_test = result[int(row):, -1]
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    return [x_train, y_train, x_test, y_test]
def normalise_windows(window_data):
    """Rescale every window relative to its first element: p -> p/w0 - 1."""
    return [
        [(float(value) / float(window[0])) - 1 for value in window]
        for window in window_data
    ]
def build_model(layers):
    """Build and compile a stacked two-LSTM regression model.

    layers: sequence of four ints --
        [input feature dim, units of first LSTM, units of second LSTM,
         output dim of the final Dense layer].
    Returns the compiled Keras model (MSE loss, RMSprop optimizer).
    """
    model = Sequential()
    # First LSTM returns the full sequence so the second LSTM can consume it.
    model.add(LSTM(
        input_dim=layers[0],
        output_dim=layers[1],
        return_sequences=True))
    model.add(Dropout(0.2))
    # Second LSTM collapses the sequence to its final state.
    model.add(LSTM(
        layers[2],
        return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(
        output_dim=layers[3]))
    model.add(Activation("linear"))
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    # Compilation can be slow; report how long it took.
    print("Compilation Time : ", time.time() - start)
    return model
def predict_point_by_point(model, data):
    """Predict each timestep from true data, one step ahead at a time,
    and return the predictions flattened to a 1-D array."""
    raw = model.predict(data)
    return np.reshape(raw, (raw.size,))
def predict_sequence_full(model, data, window_size):
    """Predict the whole series starting from data[0], feeding each
    prediction back into the sliding input window.

    Returns a list of len(data) scalar predictions.
    """
    curr_frame = data[0]
    predicted = []
    # range instead of xrange: identical iteration under Python 2, and
    # xrange does not exist under Python 3 (NameError).
    for i in range(len(data)):
        predicted.append(model.predict(curr_frame[newaxis, :, :])[0, 0])
        # Drop the oldest timestep and append the newest prediction.
        curr_frame = curr_frame[1:]
        curr_frame = np.insert(curr_frame, [window_size - 1], predicted[-1], axis=0)
    return predicted
def predict_sequences_multiple(model, data, window_size, prediction_len):
    """Predict prediction_len steps at a time, then jump the input window
    forward by prediction_len true frames and repeat.

    Returns len(data) // prediction_len lists of scalar predictions.
    """
    prediction_seqs = []
    # Floor division: identical to `/` on ints under Python 2, and avoids
    # handing a float to range under Python 3 (TypeError). Likewise range
    # replaces the Python-2-only xrange.
    for i in range(len(data) // prediction_len):
        curr_frame = data[i * prediction_len]
        predicted = []
        for j in range(prediction_len):
            predicted.append(model.predict(curr_frame[newaxis, :, :])[0, 0])
            curr_frame = curr_frame[1:]
            curr_frame = np.insert(curr_frame, [window_size - 1], predicted[-1], axis=0)
        prediction_seqs.append(predicted)
    return prediction_seqs
|
{
"content_hash": "17709e865fd867c0a49368458fbd7036",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 113,
"avg_line_length": 33.38532110091743,
"alnum_prop": 0.6507282220390217,
"repo_name": "anandha2017/udacity",
"id": "9e4c46328efff69ebcd897f3c8958fb766f5aa6f",
"size": "3639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nd101 Deep Learning Nanodegree Foundation/DockerImages/22_Sirajs_stock_prediction/notebooks/How-to-Predict-Stock-Prices-Easily/lstm.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1556"
},
{
"name": "HTML",
"bytes": "7027054"
},
{
"name": "JavaScript",
"bytes": "29515"
},
{
"name": "Jupyter Notebook",
"bytes": "79801834"
},
{
"name": "Makefile",
"bytes": "6940"
},
{
"name": "Python",
"bytes": "435515"
}
],
"symlink_target": ""
}
|
"""Read the balance of your bank accounts via FinTS."""
from __future__ import annotations
from collections import namedtuple
from datetime import timedelta
import logging
from typing import Any
from fints.client import FinTS3PinTanClient
from fints.dialog import FinTSDialogError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_NAME, CONF_PIN, CONF_URL, CONF_USERNAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(hours=4)
ICON = "mdi:currency-eur"
BankCredentials = namedtuple("BankCredentials", "blz login pin url")
CONF_BIN = "bank_identification_number"
CONF_ACCOUNTS = "accounts"
CONF_HOLDINGS = "holdings"
CONF_ACCOUNT = "account"
ATTR_ACCOUNT = CONF_ACCOUNT
ATTR_BANK = "bank"
ATTR_ACCOUNT_TYPE = "account_type"
SCHEMA_ACCOUNTS = vol.Schema(
{
vol.Required(CONF_ACCOUNT): cv.string,
vol.Optional(CONF_NAME, default=None): vol.Any(None, cv.string),
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_BIN): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PIN): cv.string,
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ACCOUNTS, default=[]): cv.ensure_list(SCHEMA_ACCOUNTS),
vol.Optional(CONF_HOLDINGS, default=[]): cv.ensure_list(SCHEMA_ACCOUNTS),
}
)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the sensors.

    Login to the bank and get a list of existing accounts. Create a
    sensor for each account.
    """
    credentials = BankCredentials(
        config[CONF_BIN], config[CONF_USERNAME], config[CONF_PIN], config[CONF_URL]
    )
    fints_name = config.get(CONF_NAME, config[CONF_BIN])
    account_config = {
        entry[CONF_ACCOUNT]: entry[CONF_NAME] for entry in config[CONF_ACCOUNTS]
    }
    holdings_config = {
        entry[CONF_ACCOUNT]: entry[CONF_NAME] for entry in config[CONF_HOLDINGS]
    }
    client = FinTsClient(credentials, fints_name)
    balance_accounts, holdings_accounts = client.detect_accounts()
    accounts: list[SensorEntity] = []
    for account in balance_accounts:
        # A non-empty accounts list in the config acts as a whitelist.
        if config[CONF_ACCOUNTS] and account.iban not in account_config:
            _LOGGER.info("Skipping account %s for bank %s", account.iban, fints_name)
            continue
        if not (account_name := account_config.get(account.iban)):
            account_name = f"{fints_name} - {account.iban}"
        accounts.append(FinTsAccount(client, account, account_name))
        _LOGGER.debug("Creating account %s for bank %s", account.iban, fints_name)
    for account in holdings_accounts:
        # Same whitelist semantics for holdings accounts.
        if config[CONF_HOLDINGS] and account.accountnumber not in holdings_config:
            _LOGGER.info(
                "Skipping holdings %s for bank %s", account.accountnumber, fints_name
            )
            continue
        if not (account_name := holdings_config.get(account.accountnumber)):
            account_name = f"{fints_name} - {account.accountnumber}"
        accounts.append(FinTsHoldingsAccount(client, account, account_name))
        _LOGGER.debug(
            "Creating holdings %s for bank %s", account.accountnumber, fints_name
        )
    add_entities(accounts, True)
class FinTsClient:
    """Wrapper around the FinTS3PinTanClient.

    Use this class as Context Manager to get the FinTS3Client object.
    """

    def __init__(self, credentials: BankCredentials, name: str) -> None:
        """Initialize a FinTsClient."""
        self._credentials = credentials
        self.name = name

    @property
    def client(self):
        """Get the client object.

        As the fints library is stateless, there is no benefit in caching
        the client objects. If that ever changes, consider caching the client
        object and also think about potential concurrency problems.
        """
        return FinTS3PinTanClient(
            self._credentials.blz,
            self._credentials.login,
            self._credentials.pin,
            self._credentials.url,
        )

    def detect_accounts(self):
        """Identify the accounts of the bank.

        Returns a (balance_accounts, holdings_accounts) tuple; an account
        may appear in both lists.
        """
        balance_accounts = []
        holdings_accounts = []
        for account in self.client.get_sepa_accounts():
            try:
                self.client.get_balance(account)
                balance_accounts.append(account)
            except (IndexError, FinTSDialogError):
                # Merged the two identical handlers of the original:
                # either way, the account is not a balance account.
                pass
            try:
                self.client.get_holdings(account)
                holdings_accounts.append(account)
            except FinTSDialogError:
                # account is not a holdings account.
                pass
        return balance_accounts, holdings_accounts
class FinTsAccount(SensorEntity):
    """Sensor for a FinTS balance account.

    A balance account contains an amount of money (=balance). The amount may
    also be negative.
    """

    def __init__(self, client: FinTsClient, account, name: str) -> None:
        """Initialize a FinTs balance account."""
        self._client = client
        self._account = account
        self._attr_name = name
        self._attr_icon = ICON
        self._attr_extra_state_attributes = {
            ATTR_ACCOUNT: self._account.iban,
            ATTR_ACCOUNT_TYPE: "balance",
        }
        if self._client.name:
            self._attr_extra_state_attributes[ATTR_BANK] = self._client.name

    def update(self) -> None:
        """Get the current balance and currency for the account."""
        # Each access of `client` builds a fresh FinTS client (see
        # FinTsClient.client).
        bank = self._client.client
        balance = bank.get_balance(self._account)
        self._attr_native_value = balance.amount.amount
        self._attr_native_unit_of_measurement = balance.amount.currency
        _LOGGER.debug("updated balance of account %s", self.name)
class FinTsHoldingsAccount(SensorEntity):
    """Sensor for a FinTS holdings account.

    A holdings account does not contain money but rather some financial
    instruments, e.g. stocks.
    """

    def __init__(self, client: FinTsClient, account, name: str) -> None:
        """Initialize a FinTs holdings account."""
        self._client = client
        self._attr_name = name
        self._account = account
        # Latest holdings fetched by update(); read by extra_state_attributes.
        self._holdings: list[Any] = []
        self._attr_icon = ICON
        # NOTE(review): the unit is hard-coded to EUR -- confirm for
        # non-eurozone banks.
        self._attr_native_unit_of_measurement = "EUR"

    def update(self) -> None:
        """Get the current holdings for the account."""
        bank = self._client.client
        self._holdings = bank.get_holdings(self._account)
        # The sensor state is the summed total value of all holdings.
        self._attr_native_value = sum(h.total_value for h in self._holdings)

    @property
    def extra_state_attributes(self) -> dict:
        """Additional attributes of the sensor.

        Lists each holding of the account with the current value.
        """
        attributes = {
            ATTR_ACCOUNT: self._account.accountnumber,
            ATTR_ACCOUNT_TYPE: "holdings",
        }
        if self._client.name:
            attributes[ATTR_BANK] = self._client.name
        # One "<name> total/pieces/price" attribute triple per holding.
        for holding in self._holdings:
            total_name = f"{holding.name} total"
            attributes[total_name] = holding.total_value
            pieces_name = f"{holding.name} pieces"
            attributes[pieces_name] = holding.pieces
            price_name = f"{holding.name} price"
            attributes[price_name] = holding.market_value
        return attributes
|
{
"content_hash": "3ec23eec5ecdb2affbe4df314e60340b",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 85,
"avg_line_length": 33.766949152542374,
"alnum_prop": 0.6408583260133015,
"repo_name": "rohitranjan1991/home-assistant",
"id": "4b6cb336bdea23179b74c5123d027430035281df",
"size": "7969",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/fints/sensor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
from easyprocess import EasyProcess
from image_debug import img_debug
from nose.tools import eq_, with_setup
from pyvirtualdisplay import Display
from PIL import ImageChops
import pyscreenshot
from config import bbox_ls
def display_size():
    """Return the X display (width, height) parsed from `xdpyinfo`.

    http://www.cyberciti.biz/faq/how-do-i-find-out-screen-resolution-of-my-linux-desktop/
    Equivalent to: xdpyinfo | grep 'dimensions:'
    Implicitly returns None if no dimensions line is found.
    """
    for line in EasyProcess('xdpyinfo').call().stdout.splitlines():
        if 'dimensions:' not in line:
            continue
        width_str, height_str = line.strip().split()[1].split('x')
        return int(width_str), int(height_str)
def check_size(backend, bbox):
    """Grab a screenshot with `backend` (optionally cropped to `bbox`)
    and assert the resulting image has the expected dimensions."""
    for childprocess in [True]:
        grabbed = pyscreenshot.grab(
            bbox=bbox,
            backend=backend,
            childprocess=childprocess,
        )
        img_debug(grabbed, backend + str(bbox))
        if bbox:
            expected_width = bbox[2] - bbox[0]
            expected_height = bbox[3] - bbox[1]
        else:
            # No crop: the grab should cover the whole display.
            expected_width, expected_height = display_size()
        eq_(expected_width, grabbed.size[0])
        eq_(expected_height, grabbed.size[1])
def backend_size(backend):
    """Exercise check_size() for every bbox in bbox_ls inside a virtual
    800x600 X display."""
    virtual_display = Display(visible=0, size=(800, 600))
    with virtual_display:
        for region in bbox_ls:
            print('bbox: %s' % (region,))
            print('backend: %s' % backend)
            check_size(backend, region)
|
{
"content_hash": "956a784009b4b166f4fb54a4e2298392",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 91,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.5953488372093023,
"repo_name": "daodaoliang/pyscreenshot",
"id": "aeb61503174add0ac6294d6a73634ab38d0ef7da",
"size": "1290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/size.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "30137"
}
],
"symlink_target": ""
}
|
""" Wrapper for the foodlog system
This wrapper allows the foodlog cgi-bin programs to be run either by a
user who is logged in (with BASIC-AUTH) or one who is not. Those programs
can authorize appropriate operations (viewing and/or editing) the dieters
data. This is used particularly to allow a dietitian to view the data, but
not change it.
A copy (or a link to a copy) is placed in one directory which is protected
and one that is not.
The wrapper will call the actual top level of the program, which can use
os.environ.get('REMOTE_USER') to determine if someone is logged on, and if
so, whom.
This wrapper is kept intentionally tiny; all the work that can be is kept
in the included program
"""
from foodlog import Dispatch
Dispatch()
|
{
"content_hash": "ab1c5b3328221a23a0853d020759e15e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 36.13636363636363,
"alnum_prop": 0.7257861635220125,
"repo_name": "MikeBirdsall/food-log",
"id": "8510d827a5a4363c79584be60fb913868e957677",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foodlog/run.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "780"
},
{
"name": "HTML",
"bytes": "19657"
},
{
"name": "Python",
"bytes": "81766"
},
{
"name": "Shell",
"bytes": "272"
}
],
"symlink_target": ""
}
|
class DSet(object):
    """Disjoint set (union-find) using the quick-find strategy."""

    def __init__(self, data, getter=lambda p: p):
        # Map each item's key to a slot; initially every slot is its own root.
        self._map = {getter(item): i for i, item in enumerate(data)}
        self._index = list(range(len(data)))
        self._getter = getter

    def union(self, p, q):
        """Merge the components containing p and q.

        Quick-find: relabels every member of p's component, O(n) per union.
        """
        old_root = self._index[self._map[self._getter(p)]]
        new_root = self._index[self._map[self._getter(q)]]
        self._index = [new_root if root == old_root else root
                       for root in self._index]

    def connected(self, p, q):
        """Return True if p and q currently share a component root."""
        key_p, key_q = map(self._getter, [p, q])
        return self._index[self._map[key_p]] == self._index[self._map[key_q]]
if __name__ == '__main__':
    # Smoke test: chained unions join A, B, D, E into one component while C
    # stays alone.
    dset = DSet(['A', 'B', 'C', 'D', 'E'])
    dset.union('A', 'B')
    dset.union('B', 'E')
    dset.union('E', 'D')
    assert dset.connected('A', 'E') is True
    assert dset.connected('D', 'C') is False
|
{
"content_hash": "5acfcfab30f629298be06d65ff0c0b8d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 77,
"avg_line_length": 31.323529411764707,
"alnum_prop": 0.5333333333333333,
"repo_name": "prawn-cake/data_structures",
"id": "28360032602e9e43309b9b3de21084f94551982a",
"size": "1091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "structures/disjoint_set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50232"
}
],
"symlink_target": ""
}
|
import os
import signal
import subprocess
import sys
import psutil
from jupyter_helpers import notebook
def test_get_session():
    """Start a notebook session, then tear down its thread and process tree."""
    sm = notebook.SessionManager()
    session = sm.get_session()
    session.thread.stop()
    # Kill the session's child processes before the session process itself so
    # no orphaned kernels are left behind.
    process = psutil.Process(session.process.pid)
    for proc in process.children(recursive=True):
        proc.kill()
    process.kill()
    # NOTE(review): returning a value from a pytest test is unusual (pytest
    # ignores or warns about it) -- confirm whether anything calls this
    # directly and relies on the return value.
    return session
|
{
"content_hash": "ed6ef4d39cb2ed81da5b7f892b4bb47a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 49,
"avg_line_length": 21.166666666666668,
"alnum_prop": 0.7139107611548556,
"repo_name": "Lucaszw/jupyter-helpers",
"id": "2dc0f7dce768f86f48ebb66ca51c6a5689765d5c",
"size": "381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jupyter_helpers/tests/test_notebook.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1041"
},
{
"name": "Python",
"bytes": "18670"
},
{
"name": "Shell",
"bytes": "1041"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys, os, re
import contextlib
@contextlib.contextmanager
def cd(newpath):
    """
    Change the current working directory to `newpath`, temporarily.

    If the old current working directory no longer exists, do not return back.
    """
    previous = os.getcwd()
    os.chdir(newpath)
    try:
        yield
    finally:
        try:
            os.chdir(previous)
        except OSError:
            # `previous` vanished while we were away -- just stay put.
            pass
# Check Sphinx version numerically: the original compared version strings
# lexicographically, which would wrongly claim e.g. "1.10" < "1.3".
import sphinx
if tuple(int(p) for p in sphinx.__version__.split('.')[:2] if p.isdigit()) < (1, 3):
    raise RuntimeError("Sphinx 1.3 or newer required")
# Environment variable to know if the docs are being built on rtd.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

# Bug fix: with `from __future__ import print_function` in effect, a bare
# `print` statement merely evaluates the function object and prints nothing.
# Use print() to emit the intended blank lines.
print()
print("Building on ReadTheDocs: {}".format(on_rtd))
print()
print("Current working directory: {}".format(os.path.abspath(os.curdir)))
print("Python: {}".format(sys.executable))
if on_rtd:
    # Build is not via Makefile (yet).
    # So we manually build the examples and gallery.
    import subprocess
    with cd('..'):
        # The Makefile is run from networkx/doc, so we need to move there
        # from networkx/doc/source (which holds conf.py).
        # Use the same interpreter that is running Sphinx.
        py = sys.executable
        subprocess.call([py, 'make_gallery.py'])
        subprocess.call([py, 'make_examples_rst.py', '../examples', 'source'])
# If your extensions are in another directory, add it here.
# These locations are relative to conf.py
sys.path.append(os.path.abspath('../sphinxext'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): both 'sphinx.ext.pngmath' and 'sphinx.ext.mathjax' are listed,
# but Sphinx renders math with a single backend -- confirm which is intended.
extensions = [
    'sphinx.ext.autosummary',
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.pngmath',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    #'sphinxcontrib.bibtex',
    #'IPython.sphinxext.ipython_console_highlighting',
    #'IPython.sphinxext.ipython_directive',
]
# generate autosummary pages
autosummary_generate=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates','../rst_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'NetworkX'
copyright = '2015, NetworkX Developers'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
import networkx
version = networkx.__version__
# The full version, including dev info
release = networkx.__version__.replace('_','')
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = ['reference/pdf_reference']
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'friendly'
pygments_style = 'sphinx'
# A list of prefixs that are ignored when creating the module index. (new in Sphinx 0.6)
modindex_common_prefix=['networkx.']
doctest_global_setup="import networkx as nx"
# treat ``x, y : type`` as vars x and y instead of default ``y(x,) : type``
napoleon_use_param = False
# Options for HTML output
# -----------------------
# ReadTheDocs injects its own theme; only load sphinx_rtd_theme for local builds.
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme_options = {
#    "rightsidebar": "true",
#    "relbarbgcolor: "black"
#}
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'networkx.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = 'index.html'
html_index = 'contents.html'
# Custom sidebar templates, maps page names to templates.
#html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# templates.
#html_additional_pages = {'index': 'index.html','gallery':'gallery.html'}
html_additional_pages = {'gallery':'gallery.html'}
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
html_use_opensearch = 'http://networkx.github.io'
# Output file base name for HTML help builder.
htmlhelp_basename = 'NetworkX'
pngmath_use_preview = True
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [('tutorial/index', 'networkx_tutorial.tex',
                    'NetworkX Tutorial',
                    'Aric Hagberg, Dan Schult, Pieter Swart', 'howto', 1),
                   ('reference/pdf_reference', 'networkx_reference.tex',
                    'NetworkX Reference',
                    'Aric Hagberg, Dan Schult, Pieter Swart', 'manual', 1)]
#latex_appendices = ['installing']#,'legal'],'citing','credits','history']
#latex_appendices = ['credits']
# Intersphinx mapping
intersphinx_mapping = {'http://docs.python.org/': None,
                       'http://docs.scipy.org/doc/numpy/': None,
                       }
# For trac custom roles
default_role = 'obj'
trac_url = 'https://networkx.lanl.gov/trac/'
mathjax_path = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML'
numpydoc_show_class_members = False
|
{
"content_hash": "d865ce3b550cdbf6b3a300c24b782eec",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 88,
"avg_line_length": 30.70909090909091,
"alnum_prop": 0.6795441089402013,
"repo_name": "NvanAdrichem/networkx",
"id": "56fd48f78dd1e26d7ea5344304b0e377cf67e1f5",
"size": "7256",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "PowerShell",
"bytes": "3311"
},
{
"name": "Python",
"bytes": "3257845"
}
],
"symlink_target": ""
}
|
class Solution:
    def findOcurrences(self, text: str, first: str, second: str) -> List[str]:
        """Return every word that immediately follows the bigram `first second`."""
        tokens = text.split()
        # Slide a window of three consecutive words over the text.
        return [third
                for a, b, third in zip(tokens, tokens[1:], tokens[2:])
                if a == first and b == second]
|
{
"content_hash": "f60eb834a446874175e42018cd7df69e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 78,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.5288461538461539,
"repo_name": "jiadaizhao/LeetCode",
"id": "836fbcd1c04622d217d524f966a6f83e598e2ca4",
"size": "312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "1001-1100/1078-Occurrences After Bigram/1078-Occurrences After Bigram.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1140864"
},
{
"name": "Java",
"bytes": "34062"
},
{
"name": "Python",
"bytes": "758800"
},
{
"name": "Shell",
"bytes": "698"
},
{
"name": "TSQL",
"bytes": "774"
}
],
"symlink_target": ""
}
|
"""Run the EasyInstall command"""
if __name__ == '__main__':
from setuptools.command.easy_install import main
main()
|
{
"content_hash": "36551369ab8f3306ac2452f21fed89d7",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 52,
"avg_line_length": 21.166666666666668,
"alnum_prop": 0.6299212598425197,
"repo_name": "ppyordanov/HCI_4_Future_Cities",
"id": "080864cfb64911fb81fbd4a9acd06676d6e09515",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/src/virtualenv/Lib/site-packages/easy_install.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "427445"
},
{
"name": "C++",
"bytes": "21783"
},
{
"name": "CSS",
"bytes": "280650"
},
{
"name": "D",
"bytes": "9679"
},
{
"name": "HTML",
"bytes": "37335"
},
{
"name": "Java",
"bytes": "740594"
},
{
"name": "JavaScript",
"bytes": "1801741"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "2631176"
},
{
"name": "Shell",
"bytes": "12283"
}
],
"symlink_target": ""
}
|
# Integration-test server: each route exercises one covador/sanic decorator.
# Make the current directory importable so the local covador checkout is used.
import sys; sys.path.insert(0, '.')
import logging
logging.basicConfig(level=logging.INFO)
from sanic import Sanic, response
from covador import item
from covador.sanic import query_string, json_body, form, params, args
app = Sanic()
@app.route('/qs/')
@query_string(boo=str)
async def qs_view(request, boo):
    # Echo a single validated query-string parameter.
    return response.text(boo)
@app.route('/qs-multi/')
@query_string(boo=item(str, multi=True))
async def qs_multi_view(request, boo):
    # Echo a repeated query-string parameter, comma-joined.
    return response.text(','.join(boo))
@app.route('/form/', methods=['POST'])
@form(p1=str, p2=int)
async def form_view(request, p1, p2):
    # Validate two form fields (str and int) and echo them.
    return response.text('{0}.{1}'.format(p1, p2))
@app.route('/params/', methods=['POST'])
@params(p1=str, p2=int)
async def params_view(request, p1, p2):
    # Same as form_view but via the combined "params" source.
    return response.text('{0}.{1}'.format(p1, p2))
@app.route('/args/<boo>/')
@args(boo=str)
async def args_view(request, boo):
    # Echo a validated URL path segment.
    return response.text(boo)
@app.route('/json/', methods=['POST'])
@json_body(boo=str)
async def json_view(request, boo):
    # Echo a field from a validated JSON body.
    return response.text(boo)
def main():
    """Run the test server on localhost:5000."""
    app.run(host='127.0.0.1', port=5000, debug=False)
if __name__ == '__main__':
    main()
|
{
"content_hash": "89dc8c4743ae5abab0512cad8c20aea4",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 69,
"avg_line_length": 21.566037735849058,
"alnum_prop": 0.663167104111986,
"repo_name": "baverman/covador",
"id": "eadf6a9da7de8021276138a01e99df3b72bf67da",
"size": "1143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integration/sanic_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74500"
},
{
"name": "Shell",
"bytes": "3075"
}
],
"symlink_target": ""
}
|
# Example: walk every family (FAM) record of a GEDCOM file and print the
# spouses, the marriage date, and each child with its birth date.
# Usage: python example2.py <path-to-gedcom-file>
import sys
from ged4py.parser import GedcomReader
with GedcomReader(sys.argv[1]) as parser:
    # iterate over each FAM record in a file
    for i, fam in enumerate(parser.records0("FAM")):
        print(f"{i}:")
        # Get records for spouses, FAM record contains pointers to INDI
        # records but sub_tag knows how to follow the pointers and return
        # the referenced records instead.
        husband, wife = fam.sub_tag("HUSB"), fam.sub_tag("WIFE")
        if husband:
            print(f"    husband: {husband.name.format()}")
        if wife:
            print(f"    wife: {wife.name.format()}")
        # Get _value_ of the MARR/DATE tag
        marr_date = fam.sub_tag_value("MARR/DATE")
        if marr_date:
            print(f"    marriage date: {marr_date}")
        # access all CHIL records, sub_tags method returns list (possibly empty)
        children = fam.sub_tags("CHIL")
        for child in children:
            # print name and date of birth
            print(f"    child: {child.name.format()}")
            birth_date = child.sub_tag_value("BIRT/DATE")
            if birth_date:
                print(f"        birth date: {birth_date}")
|
{
"content_hash": "9be9a07b7bb3027fd226b35fabf73880",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 80,
"avg_line_length": 38.225806451612904,
"alnum_prop": 0.580590717299578,
"repo_name": "andy-z/ged4py",
"id": "68223ae43c5d1cc0ae971d1cb80d80bb2d43530a",
"size": "1185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/example_code/example2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2283"
},
{
"name": "Python",
"bytes": "118486"
}
],
"symlink_target": ""
}
|
import datetime
from dateutil.parser import parse
from sqlalchemy import Boolean, Integer, BigInteger, Float, Date, \
String
from sqlalchemy.dialects.postgresql import TIMESTAMP, TIME
NoneType = type(None)
NULL_VALUES = ('na', 'n/a', 'none', 'null', '.', '', ' ')
TRUE_VALUES = ('yes', 'y', 'true', 't',)
FALSE_VALUES = ('no', 'n', 'false', 'f',)
DEFAULT_DATETIME = datetime.datetime(2999, 12, 31, 0, 0, 0)
NULL_DATE = datetime.date(2999, 12, 31)
NULL_TIME = datetime.time(0, 0, 0)
def normalize_column_type(l):
    """
    Infer a SQLAlchemy column type for a list of raw string values.

    Candidate types are tried in order -- Boolean, Integer/BigInteger,
    Float, then date/time/timestamp -- and a type is chosen only when
    every non-null value satisfies it; otherwise String is returned.

    Side effect: entries matching NULL_VALUES are replaced with None
    in the input list.

    :param l: list of raw values (strings or None)
    :returns: (sqlalchemy_type, null_values_seen) tuple
    """
    null_values = False
    # Convert "NA", "N/A", etc. to null types (mutates the caller's list).
    for i, x in enumerate(l):
        if x is not None and x.lower() in NULL_VALUES:
            l[i] = None
            null_values = True
    # Are they boolean?
    try:
        for x in l:
            if x == '' or x is None:
                raise ValueError('Not boolean')
            elif x.lower() in TRUE_VALUES:
                continue
            elif x.lower() in FALSE_VALUES:
                continue
            else:
                raise ValueError('Not boolean')
        return Boolean, null_values
    except ValueError:
        pass
    # Are they integers?
    try:
        int_types = set()
        for x in l:
            if x == '' or x is None:
                continue
            int_x = int(x.replace(',', ''))
            if x[0] == '0' and int(x) != 0:
                raise TypeError('Integer is padded with 0s, so treat it as a string instead.')
            if x.isspace():
                raise TypeError('Integer is nothing but spaces so falling back to string')
            if 9000000000000000000 > int_x > 1000000000:
                int_types.add(BigInteger)
            elif 1000000000 > int_x:
                int_types.add(Integer)
            else:
                # Value does not fit in BIGINT; fall through to other types.
                raise ValueError('Integer out of supported range')
        if BigInteger in int_types:
            return BigInteger, null_values
        return Integer, null_values
    except (TypeError, ValueError):
        pass
    # Are they floats?  The verdict is only returned once the whole list has
    # been scanned -- previously Float was returned from inside the loop as
    # soon as the first value parsed, ignoring unparseable later values.
    try:
        for x in l:
            if x == '' or x is None:
                continue
            float(x.replace(',', ''))  # raises ValueError on non-numeric input
        return Float, null_values
    except ValueError:
        pass
    # Are they datetimes?
    try:
        temporal_types = set()
        ampm = False
        for x in l:
            if x == '' or x is None:
                temporal_types.add(NoneType)
                continue
            d = parse(x, default=DEFAULT_DATETIME)
            if d.date() == NULL_DATE:
                # Only a time was supplied (the date fell back to the sentinel).
                temporal_types.add(TIME)
            elif d.time() == NULL_TIME:
                # Only a date was supplied.
                temporal_types.add(Date)
            else:
                # Both a date and a time were supplied.
                temporal_types.add(TIMESTAMP)
            if 'am' in x.lower() or 'pm' in x.lower():
                ampm = True
        temporal_types.discard(NoneType)
        # If a mix of dates and datetimes, up-convert dates to datetimes
        if temporal_types == set([TIMESTAMP, Date]):
            temporal_types = set([TIMESTAMP])
        # Datetimes and times don't mix -- fallback to using strings
        elif temporal_types == set([TIMESTAMP, TIME]):
            temporal_types = set([String])
        # Dates and times don't mix -- fallback to using strings
        elif temporal_types == set([Date, TIME]):
            temporal_types = set([String])
        # Bare times with am/pm markers are ambiguous -- keep them as strings.
        elif temporal_types == set([TIME]) and ampm:
            temporal_types = set([String])
        return temporal_types.pop(), null_values
    except ValueError:
        pass
    except TypeError:  # https://bugs.launchpad.net/dateutil/+bug/1247643
        pass
    # Don't know what they are, so they must just be strings
    return String, null_values
|
{
"content_hash": "b98d188189424ec2bda4b53189d7a044",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 94,
"avg_line_length": 28.075862068965517,
"alnum_prop": 0.5163350528125767,
"repo_name": "tosseto/plenario",
"id": "3ea2e0bcadf369927339796193fbe5ae2acff080",
"size": "4094",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plenario/utils/typeinference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27575"
},
{
"name": "HTML",
"bytes": "203047"
},
{
"name": "JavaScript",
"bytes": "516939"
},
{
"name": "Python",
"bytes": "275028"
}
],
"symlink_target": ""
}
|
import datetime
import os
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from fccdata.load import importer
from fccdata.models import en, am, hd
from fccdata.parser import EN, AM, HD
class Command(BaseCommand):
    """Management command: import the FCC amateur-license dump (EN/AM/HD files)."""
    option_list = BaseCommand.option_list + (
        make_option('--path', default='', dest='path',
                    help='The directory where the FCC data is stored.'),
    )
    help = ("Imports data from the FCC amateur license data download.")
    def handle(self, *args, **options):
        # With DEBUG on, Django keeps every executed query in memory, so a bulk
        # import of this size would exhaust RAM -- refuse to run.
        if settings.DEBUG:
            print('You really should turn settings.DEBUG off, or else this script will eat a very large amount of your RAM.')
        else:
            input_path = options['path']
            start_time = datetime.datetime.now()
            # Each dump file is pipe-delimited with the record key in column 1;
            # import_above=None means "import every record".
            print('%s existing en objects' % en.objects.all().count())
            importer(os.path.join(input_path, 'EN.dat'), EN, id_column=1, delimiter='|', import_above=None)
            print('%s existing am objects' % am.objects.all().count())
            importer(os.path.join(input_path, 'AM.dat'), AM, id_column=1, delimiter='|', import_above=None)
            print('%s existing hd objects' % hd.objects.all().count())
            importer(os.path.join(input_path, 'HD.dat'), HD, id_column=1, delimiter='|', import_above=None)
            print('Took %s' % (datetime.datetime.now() - start_time))
|
{
"content_hash": "aed631e482e5f560c703b9ad642494ac",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 125,
"avg_line_length": 39.2972972972973,
"alnum_prop": 0.6416781292984869,
"repo_name": "adamfast/fccdata",
"id": "a3e90b2a1832998ed9fc6c54323440b7a02debb5",
"size": "1454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fccdata/management/commands/import_hams.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "85785"
}
],
"symlink_target": ""
}
|
"""
Incidents template page.
"""
from attrs import mutable
from ...page import Page
__all__ = ()
title = "Incidents"
@mutable(kw_only=True)
class IncidentsTemplatePage(Page):
"""
Incidents template page.
"""
name: str = title
|
{
"content_hash": "0625dc741a239dc00fa65155c6eb8e3d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 34,
"avg_line_length": 11.409090909090908,
"alnum_prop": 0.6294820717131474,
"repo_name": "burningmantech/ranger-ims-server",
"id": "dcc722cf576871f40df3ce1971c480ceeb1678ff",
"size": "856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ims/element/incident/incidents_template/_incidents_template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2278"
},
{
"name": "Dockerfile",
"bytes": "2155"
},
{
"name": "HTML",
"bytes": "31485"
},
{
"name": "JavaScript",
"bytes": "81644"
},
{
"name": "Python",
"bytes": "793543"
},
{
"name": "Shell",
"bytes": "12166"
}
],
"symlink_target": ""
}
|
class DS18X20(object):
    """Driver for DS18S20 (family 0x10) and DS18B20 (family 0x28) 1-Wire
    temperature sensors on a shared OneWire bus.

    On MicroPython builds without floating point, temperatures are returned
    as fixed-point integers in hundredths of a degree Celsius.
    """
    def __init__(self, onewire):
        self.ow = onewire
        # Keep only devices whose family code is DS18S20 (0x10) or DS18B20 (0x28).
        self.roms = [rom for rom in self.ow.scan() if rom[0] == 0x10 or rom[0] == 0x28]
        # Probe for floating-point support: builds without it raise TypeError
        # on true division.
        self.fp = True
        try:
            1/1
        except TypeError:
            self.fp = False  # floatingpoint not supported
    def isbusy(self):
        """
        Checks wether one of the DS18x20 devices on the bus is busy
        performing a temperature convertion
        """
        return not self.ow.read_bit()
    def start_conversion(self, rom=None):
        """
        Start the temp conversion on one DS18x20 device.
        Pass the 8-byte bytes object with the ROM of the specific device you want to read.
        If only one DS18x20 device is attached to the bus you may omit the rom parameter.
        """
        if rom is None and len(self.roms) > 0:
            rom = self.roms[0]
        if rom is not None:
            ow = self.ow
            # Every 1-Wire transaction must begin with a reset pulse before the
            # ROM-select command (this was missing here although present in
            # read_temp_async).
            ow.reset()
            ow.select_rom(rom)
            ow.write_byte(0x44)  # Convert Temp
    def read_temp_async(self, rom=None):
        """
        Read the temperature of one DS18x20 device if the convertion is complete,
        otherwise return None.
        """
        if self.isbusy():
            return None
        if rom is None and len(self.roms) > 0:
            rom = self.roms[0]
        if rom is None:
            return None
        ow = self.ow
        ow.reset()
        ow.select_rom(rom)
        ow.write_byte(0xbe)  # Read scratchpad
        data = ow.read_bytes(9)
        return self.convert_temp(rom[0], data)
    def convert_temp(self, rom0, data):
        """
        Convert raw scratchpad bytes into degrees Celsius.

        Returns a float when floating point is available, otherwise an int in
        hundredths of a degree (fixed point with 2 decimal places).
        """
        temp_lsb = data[0]
        temp_msb = data[1]
        if rom0 == 0x10:
            # DS18S20: 9-bit reading refined via the COUNT registers.
            if temp_msb != 0:
                # convert negative number
                temp_read = temp_lsb >> 1 | 0x80  # truncate bit 0 by shifting, fill high bit with 1.
                temp_read = -((~temp_read + 1) & 0xff)  # now convert from two's complement
            else:
                temp_read = temp_lsb >> 1  # truncate bit 0 by shifting
            count_remain = data[6]
            count_per_c = data[7]
            # Datasheet formula:
            #   T = TEMP_READ - 0.25 + (COUNT_PER_C - COUNT_REMAIN) / COUNT_PER_C
            # The original code subtracted 25 instead of 0.25 in the float
            # branch and forgot to scale the fraction by 100 in fixed point.
            if self.fp:
                return temp_read - 0.25 + (count_per_c - count_remain) / count_per_c
            else:
                return 100 * temp_read - 25 + (count_per_c - count_remain) * 100 // count_per_c
        elif rom0 == 0x28:
            # DS18B20: signed 12-bit value in units of 1/16 degree C.
            if self.fp:
                temp = (temp_msb << 8 | temp_lsb) / 16
            else:
                temp = (temp_msb << 8 | temp_lsb) * 100 // 16
            if (temp_msb & 0xf8) == 0xf8:  # for negative temperature
                # Two's-complement correction (0x10000 raw counts == 0x1000
                # degrees); in fixed point the value is scaled by 100, so the
                # correction must be scaled too (the original always
                # subtracted 0x1000).
                if self.fp:
                    temp -= 0x1000
                else:
                    temp -= 0x1000 * 100
            return temp
        else:
            assert False
|
{
"content_hash": "cec727386ff610b524965c90d3cd1f0e",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 112,
"avg_line_length": 36.3,
"alnum_prop": 0.512396694214876,
"repo_name": "Hiverize/Sensorbeuten",
"id": "e952f1fa0eb5cccf750db8d428abe5e3b58d6170",
"size": "3118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ESP32/MicroPython/sensors/ds18x20.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "9918"
},
{
"name": "C",
"bytes": "26513686"
},
{
"name": "C++",
"bytes": "1807957"
},
{
"name": "CSS",
"bytes": "362"
},
{
"name": "HTML",
"bytes": "5751"
},
{
"name": "Python",
"bytes": "117625"
},
{
"name": "Shell",
"bytes": "2940"
}
],
"symlink_target": ""
}
|
"""List and compare most used OpenStack cloud resources."""
import argparse
import json
import subprocess
import sys
from rally.cli import cliutils
from rally.common import objects
from rally.common.plugin import discover
from rally import consts
from rally import osclients
class ResourceManager(object):
    """Base class for per-service resource listers.

    Subclasses set REQUIRED_SERVICE and define `list_<resources>` methods;
    get_resources() discovers those methods and renders every resource as a
    list of (key, value) 2-tuples.
    """
    REQUIRED_SERVICE = None
    REPR_KEYS = ("id", "name", "tenant_id", "zone", "zoneName", "pool")
    def __init__(self, clients):
        self.clients = clients
    def is_available(self):
        """Whether this manager's OpenStack service exists in the cloud."""
        if not self.REQUIRED_SERVICE:
            return True
        return self.REQUIRED_SERVICE in self.clients.services().values()
    @property
    def client(self):
        # e.g. class Nova -> self.clients.nova()
        return getattr(self.clients, self.__class__.__name__.lower())()
    def get_resources(self):
        """Collect every resource from every list_* method of this manager."""
        collected = []
        manager_name = self.__class__.__name__.lower()
        for attr in dir(self):
            if not attr.startswith("list_"):
                continue
            resource_name = attr[5:][:-1]  # strip "list_" prefix and plural "s"
            for res in getattr(self, attr)() or []:
                # NOTE(stpierre): It'd be nice if we could make this a
                # dict, but then we get ordering issues. So a list of
                # 2-tuples it must be.
                res_repr = []
                for key in self.REPR_KEYS + (resource_name,):
                    if isinstance(res, dict):
                        value = res.get(key)
                    else:
                        value = getattr(res, key, None)
                    if value:
                        res_repr.append((key, value))
                if not res_repr:
                    raise ValueError("Failed to represent resource %r" % res)
                res_repr.extend([("class", manager_name),
                                 ("resource_name", resource_name)])
                collected.append(res_repr)
        return collected
class Keystone(ResourceManager):
    """Identity service resources: users, tenants/projects, roles."""
    REQUIRED_SERVICE = consts.Service.KEYSTONE
    def list_users(self):
        return self.client.users.list()
    def list_tenants(self):
        # keystoneclient v3 exposes "projects"; fall back to v2 "tenants".
        if hasattr(self.client, "projects"):
            return self.client.projects.list()  # V3
        return self.client.tenants.list()  # V2
    def list_roles(self):
        return self.client.roles.list()
class Magnum(ResourceManager):
    """Container-infrastructure service resources."""
    REQUIRED_SERVICE = consts.Service.MAGNUM
    def list_baymodels(self):
        return self.client.baymodels.list()
class Nova(ResourceManager):
    """Compute service resources (flavors, servers, keypairs, ...)."""
    REQUIRED_SERVICE = consts.Service.NOVA
    def list_flavors(self):
        return self.client.flavors.list()
    def list_floating_ip_pools(self):
        return self.client.floating_ip_pools.list()
    def list_floating_ips(self):
        return self.client.floating_ips.list()
    def list_images(self):
        return self.client.images.list()
    def list_keypairs(self):
        return self.client.keypairs.list()
    def list_networks(self):
        return self.client.networks.list()
    def list_security_groups(self):
        # all_tenants so admin sees every project's groups, not just its own.
        return self.client.security_groups.list(
            search_opts={"all_tenants": True})
    def list_servers(self):
        return self.client.servers.list(
            search_opts={"all_tenants": True})
    def list_services(self):
        return self.client.services.list()
    def list_availability_zones(self):
        return self.client.availability_zones.list()
class Neutron(ResourceManager):
    """Networking service resources; LBaaS listers are extension-gated."""
    REQUIRED_SERVICE = consts.Service.NEUTRON
    def has_extension(self, name):
        # True when the neutron deployment advertises extension alias `name`.
        extensions = self.client.list_extensions().get("extensions", [])
        return any(ext.get("alias") == name for ext in extensions)
    def list_networks(self):
        return self.client.list_networks()["networks"]
    def list_subnets(self):
        return self.client.list_subnets()["subnets"]
    def list_routers(self):
        return self.client.list_routers()["routers"]
    def list_ports(self):
        return self.client.list_ports()["ports"]
    def list_floatingips(self):
        return self.client.list_floatingips()["floatingips"]
    def list_security_groups(self):
        return self.client.list_security_groups()["security_groups"]
    def list_health_monitors(self):
        # Returns None when lbaas is absent; get_resources treats that as [].
        if self.has_extension("lbaas"):
            return self.client.list_health_monitors()["health_monitors"]
    def list_pools(self):
        if self.has_extension("lbaas"):
            return self.client.list_pools()["pools"]
    def list_vips(self):
        if self.has_extension("lbaas"):
            return self.client.list_vips()["vips"]
class Glance(ResourceManager):
    """Image service resources."""
    REQUIRED_SERVICE = consts.Service.GLANCE
    def list_images(self):
        return self.client.images.list()
class Heat(ResourceManager):
    """Orchestration service resources."""
    REQUIRED_SERVICE = consts.Service.HEAT
    def list_resource_types(self):
        return self.client.resource_types.list()
    def list_stacks(self):
        return self.client.stacks.list()
class Cinder(ResourceManager):
    """Block-storage service resources."""
    REQUIRED_SERVICE = consts.Service.CINDER
    def list_availability_zones(self):
        return self.client.availability_zones.list()
    def list_backups(self):
        return self.client.backups.list()
    def list_volume_snapshots(self):
        return self.client.volume_snapshots.list()
    def list_volume_types(self):
        return self.client.volume_types.list()
    def list_volumes(self):
        # all_tenants so admin sees every project's volumes.
        return self.client.volumes.list(
            search_opts={"all_tenants": True})
class Senlin(ResourceManager):
    """Clustering service resources; note its client uses callables, not .list()."""
    REQUIRED_SERVICE = consts.Service.SENLIN
    def list_clusters(self):
        return self.client.clusters()
    def list_profiles(self):
        return self.client.profiles()
class Watcher(ResourceManager):
    """Optimization service resources.

    Watcher objects expose "uuid" rather than "id", hence the narrower
    REPR_KEYS override.
    """
    REQUIRED_SERVICE = consts.Service.WATCHER
    REPR_KEYS = ("uuid", "name")
    def list_audits(self):
        return self.client.audit.list()
    def list_audit_templates(self):
        return self.client.audit_template.list()
    def list_goals(self):
        return self.client.goal.list()
    def list_strategies(self):
        return self.client.strategy.list()
    def list_action_plans(self):
        return self.client.action_plan.list()
class CloudResources(object):
    """List and compare cloud resources.
    resources = CloudResources(auth_url=..., ...)
    saved_list = resources.list()
    # Do something with the cloud ...
    changes = resources.compare(saved_list)
    has_changed = any(changes)
    removed, added = changes
    """
    def __init__(self, **kwargs):
        self.clients = osclients.Clients(objects.Credential(**kwargs))
    def _deduplicate(self, lst):
        """Change list duplicates to make all items unique.

        >>> resources._deduplicate(["a", "b", "c", "b", "b"])
        >>> ['a', 'b', 'c', 'b (duplicate 1)', 'b (duplicate 2)'
        """
        unique = []
        for item in lst:
            if item not in unique:
                unique.append(item)
                continue
            # Already present: find the first free "(duplicate N)" suffix.
            counter = 1
            candidate = "%s (duplicate %i)" % (item, counter)
            while candidate in unique:
                counter += 1
                candidate = "%s (duplicate %i)" % (item, counter)
            unique.append(candidate)
        return unique
    def list(self):
        """Collect resources from every available ResourceManager subclass."""
        resources = []
        for manager_cls in discover.itersubclasses(ResourceManager):
            manager = manager_cls(self.clients)
            if manager.is_available():
                resources.extend(manager.get_resources())
        return sorted(self._deduplicate(resources))
    def compare(self, with_list):
        # NOTE(stpierre): Each resource is either a list of 2-tuples,
        # or a list of lists. (JSON doesn't honor tuples, so when we
        # load data from JSON our tuples get turned into lists.) It's
        # easiest to do the comparison with sets, so we need to change
        # it to a tuple of tuples so that it's hashable.
        def freeze(resource_list):
            return set(tuple(tuple(pair) for pair in res)
                       for res in resource_list)
        saved = freeze(with_list)
        current = freeze(self.list())
        return (sorted(saved - current), sorted(current - saved))
def _print_tabular_resources(resources, table_label):
    """Pretty-print one group of resources as a table, then a blank line.

    Each resource is a sequence of (key, value) pairs; everything except the
    "class" and "resource_name" keys is folded into one "identifiers" column.
    """
    cliutils.print_list(
        objs=[dict(r) for r in resources],
        fields=("class", "resource_name", "identifiers"),
        field_labels=("service", "resource type", "identifiers"),
        table_label=table_label,
        formatters={"identifiers":
                    lambda d: " ".join("%s:%s" % (k, v)
                                       for k, v in d.items()
                                       if k not in ("class", "resource_name"))}
    )
    print("")
def main():
    """Dump the cloud's resource list to JSON, or diff it against a saved list."""
    parser = argparse.ArgumentParser(
        description=("Save list of OpenStack cloud resources or compare "
                     "with previously saved list."))
    parser.add_argument("--credentials",
                        type=argparse.FileType("r"),
                        metavar="<path/to/credentials.json>",
                        help="cloud credentials in JSON format")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--dump-list",
                       type=argparse.FileType("w"),
                       metavar="<path/to/output/list.json>",
                       help="dump resources to given file in JSON format")
    group.add_argument("--compare-with-list",
                       type=argparse.FileType("r"),
                       metavar="<path/to/existent/list.json>",
                       help=("compare current resources with a list from "
                             "given JSON file"))
    args = parser.parse_args()
    if args.credentials:
        config = json.load(args.credentials)
    else:
        # No credentials file: ask the active rally deployment for its config.
        config = json.loads(subprocess.check_output(["rally", "deployment",
                                                     "config"]))
    # Flatten the admin credentials and strip keys Credential doesn't accept.
    config.update(config.pop("admin"))
    del config["type"]
    if "users" in config:
        del config["users"]
    resources = CloudResources(**config)
    if args.dump_list:
        resources_list = resources.list()
        json.dump(resources_list, args.dump_list)
    elif args.compare_with_list:
        given_list = json.load(args.compare_with_list)
        changes = resources.compare(with_list=given_list)
        removed, added = changes
        # filter out expected additions
        expected = []
        for resource_tuple in added:
            resource = dict(resource_tuple)
            if ((resource["class"] == "keystone" and
                 resource["resource_name"] == "role" and
                 resource["name"] == "_member_") or
                (resource["class"] == "nova" and
                 resource["resource_name"] == "security_group" and
                 resource["name"] == "default")):
                expected.append(resource_tuple)
        for resource in expected:
            added.remove(resource)
        if removed:
            _print_tabular_resources(removed, "Removed resources")
        if added:
            _print_tabular_resources(added, "Added resources (unexpected)")
        if expected:
            _print_tabular_resources(expected, "Added resources (expected)")
        # Deliberately exit 0 even when changes were found: a non-zero status
        # would fail the CI gate job.
        if any(changes):
            return 0  # `1' will fail gate job
    return 0
|
{
"content_hash": "275b5df513aeb027670935aab344d432",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 79,
"avg_line_length": 30.522427440633244,
"alnum_prop": 0.5854080221300139,
"repo_name": "vganapath/rally",
"id": "a0981758fe6e071243d294bfc00ba8baba85127e",
"size": "12167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ci/osresources.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "52008"
},
{
"name": "JavaScript",
"bytes": "8550"
},
{
"name": "Mako",
"bytes": "18645"
},
{
"name": "Python",
"bytes": "3621510"
},
{
"name": "Shell",
"bytes": "43808"
}
],
"symlink_target": ""
}
|
from wrfpy.config import config
import csv
import os
import astral
from netCDF4 import Dataset
from netCDF4 import date2num
import numpy as np
import bisect
from datetime import datetime
import glob
from pathos.multiprocessing import ProcessPool as Pool
class readObsTemperature(config):
    """Collect station temperature observations valid at one model time.

    Reads per-station netCDF files (optionally filtered by station type and
    by day/night, via astral's solar elevation), caches the result as a CSV
    file in the WRF run directory, and re-reads that CSV on later runs.
    The result ends up in ``self.obs`` as tuples of
    (lat, lon, temperature, stationtype, stationname).
    NOTE(review): written for Python 2 (csv opened in 'wb' mode, zip()
    treated as a list) -- confirm before porting.
    """
    def __init__(self, dtobj, nstationtypes=None, dstationtypes=None):
        # dtobj: datetime of the model timestep to collect observations for
        config.__init__(self)
        # optional define station types to be used
        self.nstationtypes = nstationtypes # stationtypes at night
        self.dstationtypes = dstationtypes # stationtypes during daytime
        # define datestr
        datestr = datetime.strftime(dtobj, '%Y-%m-%d_%H:%M:%S')
        # define name of csv file
        self.wrf_rundir = self.config['filesystem']['work_dir']
        fname = 'obs_stations_' + datestr + '.csv'
        self.csvfile = os.path.join(self.wrf_rundir, fname)
        try:
            # try to read an existing csv file
            self.read_csv(datestr)
        except IOError:
            if self.config['options_urbantemps']['urban_stations']:
                # reading existing csv file failed, start from scratch
                self.urbStations = self.config['options_urbantemps']['urban_stations']
                self.verify_input()
                self.obs_temp_p(dtobj)
                self.write_csv(datestr)
            else:
                # no station source configured: propagate the original IOError
                raise
    def verify_input(self):
        '''
        verify input and create list of files

        Sets ``self.filelist``: either the single netCDF file configured in
        ``urban_stations``, or all ``*nc`` files if that path is a directory.
        NOTE(review): despite the comment below, a plain-text list of netCDF
        files is never actually parsed -- only a file or a directory works.
        '''
        try:
            f = Dataset(self.urbStations, 'r')
            f.close()
            self.filelist = [self.urbStations]
        except IOError:
            # file is not a netcdf file, assuming a txt file containing a
            # list of netcdf files
            if os.path.isdir(self.urbStations):
                # path is actually a directory, not a file
                self.filelist = glob.glob(os.path.join(self.urbStations, '*nc'))
            else:
                # re-raise error
                raise
    def obs_temp_p(self, dtobj):
        '''
        get observed temperature in amsterdam parallel

        Maps :meth:`obs_temp` over ``self.filelist`` with a pathos process
        pool and keeps only stations that returned a usable observation.
        '''
        # stash the target datetime so the mapped workers can read it
        self.dtobjP = dtobj
        pool = Pool()
        obs = pool.map(self.obs_temp, self.filelist)
        self.obs = [ob for ob in obs if ob is not None]
    def obs_temp(self, f):
        '''
        get observed temperature in amsterdam per station

        :param f: path of one station netCDF file
        :returns: (lat, lon, temperature, stationtype, stationname) or None
            when the station is filtered out, unreadable, or has no
            observation within 15 minutes of the model time
        '''
        try:
            obs = Dataset(f, 'r')
            obs_lon = obs.variables['longitude'][0]
            obs_lat = obs.variables['latitude'][0]
            elevation = 0
            try:
                stationtype = obs.stationtype
            except AttributeError:
                # station files without a stationtype attribute are allowed
                stationtype = None
            stobs = (obs_lat, obs_lon, elevation, stationtype)
            use_station = self.filter_stationtype(stobs, self.dtobjP)
            if use_station:
                dt = obs.variables['time']
                # convert datetime object to dt.units units
                dtobj_num = date2num(self.dtobjP, units=dt.units,
                                     calendar=dt.calendar)
                # make use of the property that the array is already
                # sorted to find the closest date
                try:
                    ind = bisect.bisect_left(dt[:], dtobj_num)
                except RuntimeError:
                    return
                if ((ind == 0) or (ind == len(dt))):
                    # model time outside the station's observation window
                    return None
                else:
                    # pick whichever neighbour is closer in time
                    am = np.argmin([abs(dt[ind]-dtobj_num),
                                    abs(dt[ind-1]-dtobj_num)])
                    if (am == 0):
                        idx = ind
                    else:
                        idx = ind - 1
                    if abs((dt[:]-dtobj_num)[idx]) > 900:
                        # ignore observation if time difference
                        # between model and observation is > 15 minutes
                        return None
                    temp = obs.variables['temperature'][idx]
                    sname = f[:] # stationname
                    obs.close()
                    # append results to lists
                    obs_temp = temp
                    obs_stype = stationtype
                    obs_sname= sname
        except IOError:
            return None
        except AttributeError:
            return None
        try:
            return (obs_lat, obs_lon, obs_temp, obs_stype, obs_sname)
        except UnboundLocalError:
            # station was filtered out: obs_temp etc. were never assigned
            return None
    def filter_stationtype(self, stobs, dtobj):
        '''
        check if it is day or night based on the solar angle
        construct location

        :param stobs: (lat, lon, elevation, stationtype) tuple
        :param dtobj: datetime to evaluate the solar elevation at
        :returns: True when the station should be used for this time of day
        '''
        lat = stobs[0]
        lon = stobs[1]
        elevation = 0  # placeholder
        loc = astral.Location(info=('name', 'region', lat, lon, 'UTC',
                                    elevation))
        solar_elevation = loc.solar_elevation(dtobj)
        # set stime according to day/night based on solar angle
        if (solar_elevation > 0):
            stime = 'day'
        else:
            stime = 'night'
        if ((stime == 'day') and self.dstationtypes):
            try:
                mask = any([x.lower() in stobs[3].lower() for
                            x in self.dstationtypes])
            except AttributeError:
                # stationtype is None: reject when a filter is configured
                mask = False
        elif ((stime == 'night') and self.nstationtypes):
            try:
                mask = any([x.lower() in stobs[3].lower() for
                            x in self.nstationtypes])
            except AttributeError:
                mask = False
        else:
            # no filter configured for this time of day: keep the station
            mask = True
        return mask
    def write_csv(self, datestr):
        '''
        write output of stations used to csv file

        NOTE(review): 'wb' mode is the Python 2 csv idiom -- confirm the
        interpreter version before changing.
        '''
        with open(self.csvfile, 'wb') as out:
            csv_out = csv.writer(out)
            csv_out.writerow(['lat', 'lon', 'temperature', 'stationtype',
                              'stationname'])
            for row in self.obs:
                csv_out.writerow(row)
    def read_csv(self, datestr):
        '''
        read station temperatures from csv file

        Populates ``self.obs`` with (lat, lon, temperature, stationtype,
        stationname) tuples; raises IOError when the cache file is missing.
        '''
        # initialize variables in csv file
        obs_lat = []
        obs_lon = []
        obs_temp = []
        obs_stype = []
        obs_sname = []
        # start reading csv file
        with open(self.csvfile, 'r') as inp:
            reader = csv.reader(inp)
            next(reader)  # skip header
            for row in reader:
                # append variables
                obs_lat.append(float(row[0]))
                obs_lon.append(float(row[1]))
                obs_temp.append(float(row[2]))
                obs_stype.append(str(row[3]))
                obs_sname.append(str(row[4]))
        # zip variables
        self.obs = zip(obs_lat, obs_lon, obs_temp, obs_stype, obs_sname)
|
{
"content_hash": "64b0efcc3b1018ab7c6d28c3aaa4be58",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 86,
"avg_line_length": 36.494623655913976,
"alnum_prop": 0.5064820271066588,
"repo_name": "rvanharen/wrfpy",
"id": "8f5c8c6c1761f09d0148fa63efec7261b08da586",
"size": "6814",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wrfpy/readObsTemperature.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "128498"
}
],
"symlink_target": ""
}
|
from what_apps.people.factories import UserFactory
from what_apps.people.models import UserProfile
def setup():
    """Create the demo user "rusty" together with an empty profile.

    Returns a (user, profile) tuple for use by test/config code.
    """
    user = UserFactory.create(
        password="password",
        first_name="rusty",
        last_name="spike",
        username="rspike",
    )
    profile = UserProfile.objects.create(user=user)
    return user, profile
|
{
"content_hash": "01370387bd151279c27003c96b6b9b91",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 58,
"avg_line_length": 31.846153846153847,
"alnum_prop": 0.5797101449275363,
"repo_name": "SlashRoot/WHAT",
"id": "4106b449cc8eed28c78479dd4893f0181cd60900",
"size": "519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "what_apps/people/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "91763"
},
{
"name": "CoffeeScript",
"bytes": "1746"
},
{
"name": "JavaScript",
"bytes": "486131"
},
{
"name": "Python",
"bytes": "707045"
}
],
"symlink_target": ""
}
|
import json
import os
from flask import url_for
def configure_manifest_files(app):
    """Register the ``url_for_asset`` template tag backed by the webpack manifest.

    :param app: the Flask application to configure
    :return: None
    """
    manifest = {}

    def load_manifest():
        # (Re)read static/dist/manifest.json and rewrite each target so it
        # points inside the "dist" folder served from static/.
        try:
            manifest_path = os.path.join(os.path.dirname(__file__), os.pardir, 'static/dist/manifest.json')
            with open(manifest_path) as fp:
                manifest.update(json.load(fp))
            for source, target in manifest.copy().items():
                manifest[source] = os.path.join("dist", target)
        except Exception:  # noqa pylint: disable=broad-except
            print("Please make sure to build the frontend in static/ directory and restart the server")

    def asset_url(filename):
        # In debug mode reload the manifest on every lookup so freshly
        # rebuilt assets are picked up without a server restart.
        if app.debug:
            load_manifest()
        return url_for('static', filename=manifest.get(filename, ''))

    load_manifest()

    @app.context_processor
    def get_url_for_asset():  # pylint: disable=unused-variable
        """Expose ``url_for_asset`` to Jinja templates.

        WebPack renders the assets after minification and modification
        under the static/dist folder; this tag maps a logical asset name
        from manifest.json to the generated file.
        """
        return dict(url_for_asset=asset_url)
|
{
"content_hash": "2739e78a3cdbf39e4fa05f8439fbb3be",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 107,
"avg_line_length": 32.23255813953488,
"alnum_prop": 0.6219336219336219,
"repo_name": "mrkm4ntr/incubator-airflow",
"id": "0294fc394905d9e492854405853c5498eb62c1cd",
"size": "2174",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/www/extensions/init_manifest_files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22581"
},
{
"name": "Dockerfile",
"bytes": "31475"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "221101"
},
{
"name": "JavaScript",
"bytes": "32643"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "14407542"
},
{
"name": "Shell",
"bytes": "541811"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Schema changes for checklist settings:
    #  * drop the "frequencybeforeexpiration" field,
    #  * add "lastremindercheck" to record when reminders were last evaluated,
    #  * restrict "frequencyonexpiration" to a fixed set of intervals
    #    (values are days; default is 30 = monthly).
    dependencies = [
        ('geokey_checklist', '0013_auto_20151101_0116'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='checklistsettings',
            name='frequencybeforeexpiration',
        ),
        migrations.AddField(
            model_name='checklistsettings',
            name='lastremindercheck',
            field=models.DateTimeField(null=True),
        ),
        migrations.AlterField(
            model_name='checklistsettings',
            name='frequencyonexpiration',
            field=models.CharField(default=30, max_length=100, choices=[(30, b'every month'), (60, b'every two months'), (90, b'every three months'), (180, b'every six months'), (365, b'once a year')]),
        ),
    ]
|
{
"content_hash": "dee169b070a0eccd1f09db4bfdb49ca2",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 202,
"avg_line_length": 32.25925925925926,
"alnum_prop": 0.6050516647531573,
"repo_name": "ExCiteS/geokey-checklist",
"id": "744d4576730237dc39db519d7b19e1916bd08893",
"size": "895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geokey_checklist/migrations/0014_auto_20151104_0111.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "24454"
},
{
"name": "JavaScript",
"bytes": "19750"
},
{
"name": "Python",
"bytes": "94766"
}
],
"symlink_target": ""
}
|
__author__ = "Rich Smith"
__version__ = "0.1"
import os
import ConfigParser
from vmware_guest_url_opener import RegisterHandler
class Uninstall(object):
"""
Remove VMWareGuestURLOpener
"""
def __init__(self, config_file = "~/.vmware_guest_url_opener.cfg"):
"""
"""
self.config = ConfigParser.ConfigParser()
self.config_src = os.path.expanduser(config_file)
def __call__(self):
"""
Pretty simple, read the config and restore the handlers as they were before VMWareGuestURLOpener was installed
"""
raw_input("\n\n ** About to uninstall VMWareGuestURLOpener as the default, hit enter to continue ....")
if not self.config.read(self.config_src):
print "[-] Problem reading configuration file %s"%(self.config_src)
return False
try:
orig_http_handler = self.config.get("config", "orig_http_handler")
orig_https_handler = self.config.get("config", "orig_https_handler")
except ConfigParser.NoOptionError, err:
print err
print "Uninstallation cannot continue, please reset your default handler manually."
return False
##Reset them as default handlers
RegisterHandler(orig_http_handler, orig_https_handler)
print "\nReset handlers to:\n\tHTTP: %s\n\tHTTPS: %s\n"%(orig_http_handler, orig_https_handler)
##Delete the config
ret = raw_input("Do you want to delete the config file (%s)? (yes/NO) "%(self.config_src))
if ret.lower() not in ["y", "yes"]:
print "[!] Skipping removal of config file"
else:
try:
os.unlink(self.config_src)
print "[+] Config file deleted"
except Exception, err:
print "[-] Error removing config file '%s' - '%s'"%(self.config_src, err)
print "\n**To complete the uninstallation you must MANUALLY delete the VMWareGuestURLOpener application package from wherever you copied it to after installation**\n]n"
return True
if __name__ == "__main__":
    # Entry point: run the interactive uninstaller.
    uninstaller = Uninstall()
    uninstaller()
|
{
"content_hash": "cb4fd0c7b86cede936357951b5966a12",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 176,
"avg_line_length": 34.74603174603175,
"alnum_prop": 0.6030150753768844,
"repo_name": "MyNameIsMeerkat/VMWareGuestURLOpener",
"id": "703cd01b782f84eb561eb2adacf752f11b9bc2f4",
"size": "2529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uninstall.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14801"
}
],
"symlink_target": ""
}
|
import sys
import click
from indico.cli.core import cli_group
from indico.core import signals
from indico.core.db import db
from indico.modules.events import Event
from indico.modules.events.export import export_event, import_event
from indico.modules.users.models.users import User
@cli_group()
def cli():
    # Container group for the event-related commands below (restore,
    # export, import). No docstring on purpose: click would surface it
    # as help text and change the CLI output.
    pass
@cli.command()
@click.argument('event_id', type=int)
@click.option('-u', '--user', 'user_id', type=int, default=None, metavar="USER_ID",
              help="The user which will be shown on the log as having restored the event (default: no user).")
@click.option('-m', '--message', 'message', metavar="MESSAGE", help="An additional message for the log")
def restore(event_id, user_id, message):
    """Restore a deleted event."""
    user = User.get(user_id) if user_id else None
    event = Event.get(event_id)
    # Validate before touching anything: the event must exist and be deleted.
    if event is None:
        click.secho('This event does not exist', fg='red')
        sys.exit(1)
    if not event.is_deleted:
        click.secho('This event is not deleted', fg='yellow')
        sys.exit(1)
    event.restore(message, user)
    signals.after_process.send()
    db.session.commit()
    click.secho(f'Event undeleted: "{event.title}"', fg='green')
@cli.command()
@click.argument('event_id', type=int)
@click.argument('target_file', type=click.File('wb'))
def export(event_id, target_file):
    """Export all data associated with an event.

    This exports the whole event as an archive which can be imported
    on another Indico instance. Importing an event is only
    guaranteed to work if it was exported on the same Indico version.
    """
    # FIX: help text previously read "on another other Indico instance".
    event = Event.get(event_id)
    if event is None:
        click.secho('This event does not exist', fg='red')
        sys.exit(1)
    elif event.is_deleted:
        # exporting a deleted event is allowed, but only after confirmation
        click.secho('This event has been deleted', fg='yellow')
        click.confirm('Export it anyway?', abort=True)
    export_event(event, target_file)
@cli.command('import')
@click.argument('source_file', type=click.File('rb'))
@click.option('--create-users/--no-create-users', default=None,
              help='Whether to create missing user or skip them. By default a confirmation prompt is shown when '
                   'the archive contains such users')
@click.option('--force', is_flag=True, help='Ignore Indico version mismatches (DANGER)')
@click.option('-v', '--verbose', is_flag=True, help='Show verbose information on what is being imported')
@click.option('-y', '--yes', is_flag=True, help='Always commit the imported event without prompting')
@click.option('-c', '--category', 'category_id', type=int, default=0, metavar='ID',
              help='ID of the target category. Defaults to the root category.')
def import_(source_file, create_users, force, verbose, yes, category_id):
    """Import an event exported from another Indico instance."""
    click.echo('Importing event...')
    event = import_event(source_file, category_id, create_users=create_users,
                         verbose=verbose, force=force)
    if event is None:
        click.secho('Import failed.', fg='red')
        sys.exit(1)
    # Unless -y was given, ask before committing; roll back on decline.
    if not yes:
        prompt = click.style('Import finished. Commit the changes?', fg='green')
        if not click.confirm(prompt, default=True):
            db.session.rollback()
            sys.exit(1)
    db.session.commit()
    click.secho(event.external_url, fg='green', bold=True)
|
{
"content_hash": "519c91696891c816db677635ff6dd850",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 116,
"avg_line_length": 41.525,
"alnum_prop": 0.6773028296207104,
"repo_name": "pferreir/indico",
"id": "35f7f2dec5cf231d76aab0b1f9faa3c93cbc7b12",
"size": "3536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/cli/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1394116"
},
{
"name": "JavaScript",
"bytes": "2078347"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "4993798"
},
{
"name": "SCSS",
"bytes": "475126"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
"""
This module provides functions for upgrading scripts from pymel 0.9 to 1.0. It
fixes two types non-compatible code:
- pymel.all is now the main entry-point for loading all pymel modules
- import pymel --> import pymel.all as pymel
- import pymel as pm --> import pymel.all as pm
- from pymel import * --> from pymel.all import *
- pymel.mayahook.versions.Version is now pymel.versions
To use, run this in a script editor tab::
import pymel.tools.upgradeScripts
pymel.tools.upgradeScripts.upgrade()
This will print out all the modules that will be upgraded. If everything looks good
run the following to perform the upgrade::
pymel.tools.upgradeScripts.upgrade(test=False)
Once you're sure that the upgrade went smoothly, run::
pymel.tools.upgradeScripts.cleanup()
This will delete the backup files.
If you need to undo the changes, run::
pymel.tools.upgradeScripts.undo()
Keep in mind that this will restore files to their state at the time that you ran
``upgrade``. If you made edits to the files after running ``upgrade`` they will
be lost.
"""
import sys, os.path, re, shutil
from collections import defaultdict
import pymel.core # we don't use this, but it ensures that maya and sys.path are properly initialized
# Earlier iterations of the import pattern, kept for reference:
#IMPORT_RE = re.compile( '(\s*import\s+(?:[a-zA-Z0-9_.,\s]+,\s*)?)(pymel(?:[.][a-zA-Z][a-zA-Z0-9_]+)*)((?:\s*,\s*[a-zA-Z][a-zA-Z0-9_.,\s]+)?(?:\s+as\s+([a-zA-Z][a-zA-Z0-9_]+))?(?:\s*))$' )
#IMPORT_RE = re.compile( r'(\s*import\s+(?:.*))(\bpymel(?:[.][a-zA-Z][a-zA-Z0-9_]+)*)(?:\s+as\s+([a-zA-Z][a-zA-Z0-9_]+))?(.*)$' )
# Matches "import ... pymel[.sub] ... [as alias]".
# Named groups: start (text before), pymel (module path), end (rest of the
# statement), details (the alias, when present).
IMPORT_RE = re.compile( r'(?P<start>\s*import\s+.*)(?P<pymel>\bpymel(?:[.][a-zA-Z][a-zA-Z0-9_]+)*\b)(?P<end>(?:\s+as\s+(?P<details>[a-zA-Z][a-zA-Z0-9_]+))?(?:.|\s)*)$' )
# Matches "from pymel[.sub] import name[, name] | *".
# Named groups as above; details holds the imported names (or "*").
FROM_RE = re.compile( r'(?P<start>\s*from\s+)(?P<pymel>pymel(?:[.][a-zA-Z][a-zA-Z0-9_]+)*)(?P<end>(?:\s+import\s+(?P<details>[*]|(?:[a-zA-Z0-9_.,\s]+)))(?:\s*))$' )
#([a-zA-Z][a-zA-Z_.]+)([a-zA-Z][a-zA-Z_.]+)
# default logfile name (created in MAYA_APP_DIR) and backup-file extension
LOGNAME = 'pymel_upgrade.log'
BACKUPEXT = '.pmbak'
# path of the logfile written by the most recent upgrade() in this session
last_logfile = None
def _getMayaAppDir():
if not os.environ.has_key('MAYA_APP_DIR') :
home = os.environ.get('HOME', os.environ.get('USERPROFILE', None) )
if not home :
return None
else :
if sys.platform == 'darwin':
return os.path.join(home, 'Library/Preferences/Autodesk/maya')
else:
return os.path.join(home, 'maya')
return os.environ['MAYA_APP_DIR']
# Table of pymel objects whose namespace moved between 0.9 and 1.0.
# Each entry is a tuple of:
#   (object name,
#    regex matching qualified uses of the object (3 groups: prefix
#    namespace, the object itself, trailing attribute),
#    namespaces the object used to live in,
#    replacement module name,
#    attribute-name remapping applied to the trailing attribute).
objects = [
    ( 'Version',
      re.compile('([a-zA-Z_][a-zA-Z0-9_.]+[.])?(Version[.])([a-zA-Z_][a-zA-Z0-9_]*)'),
      ('pymel',
       'pymel.version',
       'pymel.internal',
       'pymel.internal.version' ),
      'versions',
      { 'current' : 'current()',
        'v85sp1' : 'v85_SP1',
        'v2008sp1' : 'v2008_SP1',
        'v2008ext2' : 'v2008_EXT2',
        'v2009ext1' : 'v2009_EXT1',
        'v2009sp1a' : 'v2009_SP1A'
        }
      )
    ]
# Offsets of the regex groups within each 4-element chunk of re.split output.
PREFIX = 1
OBJECT = 2
SUFFIX = 3
# Raised when the upgrade logfile cannot be located.
class LogError(ValueError):pass
def _getLogfile(logfile, read=True):
    """Resolve the real path of the upgrade logfile.

    :param logfile: explicit logfile path, or None to fall back to the last
        logfile used in this session, then MAYA_APP_DIR, then os.curdir
    :param read: when True, require that the file already exists
    :raises LogError: when read is True and no existing logfile is found
    """
    if logfile is None:
        # fall back to the logfile written by the last upgrade() call
        # (reading a module global needs no `global` declaration)
        if last_logfile:
            logfile = last_logfile
    if logfile is None:
        baseDir = _getMayaAppDir()
        if not baseDir:
            baseDir = os.curdir
        # FIX: was the accidental double assignment "logfile = logfile = ..."
        logfile = os.path.join(baseDir, LOGNAME)
    if read and not os.path.isfile(logfile):
        # raise via call syntax (equivalent to the old `raise X, msg` form)
        raise LogError("could not find an existing %s. please pass the path to this file, which was generated during upgrade" % LOGNAME)
    return os.path.realpath(logfile)
def upgradeFile(filepath, test=True):
    """
    upgrade a single file

    Rewrites pymel import statements to go through ``pymel.all`` and
    requalifies references to relocated objects (see the module-level
    ``objects`` table).

    :param filepath: python source file to process
    :param test: when True, only analyse; the file on disk is untouched
    :returns: (modified, success) -- whether changes were (or would be)
        made, and whether processing/writing completed without error
    """
    try:
        f = open(filepath)
        lines = f.readlines()
        f.close()
    except Exception, e:
        print str(e)
        return False, False
    modified = False
    uses_pymel = False
    # pymel module -> local names it is bound to in this file, and reverse
    pymel_namespaces = defaultdict(set)
    rev_pymel_namespaces = defaultdict(set)
    for i, line in enumerate(lines):
        m = IMPORT_RE.match(line)
        mode = None
        if not m:
            m = FROM_RE.match(line)
            mode = 'from'
        else:
            mode = 'import'
        if m:
            #start, pymel_module, end, details = m.groups()
            d= m.groupdict()
            start = d['start']
            pymel_module = d['pymel']
            end = d['end']
            details = d['details']
            if pymel_module == 'pymel.all':
                # already converted (by hand or a previous run) -- bail out
                print "skipping. already uses 'pymel.all':", filepath
                return False, True
            uses_pymel = True
            if pymel_module == 'pymel':
                # import pymel, foo --> import pymel.all as pymel, foo
                # import pymel as pm, foo --> import pymel.all as pm, foo
                # from pymel import foo --> from pymel.all import foo
                as_name = ' as pymel' if mode == 'import' and not details else ''
                lines[i] = start + 'pymel.all' + as_name + end
                modified = True
            if details:
                details = details.strip()
            if mode == 'import':
                if details:
                    pymel_namespaces[pymel_module].add(details) # pymel.version -> version
                    # import pymel.internal as internal
                    # 'internal' -> 'pymel.internal'
                    rev_pymel_namespaces[details].add(pymel_module)
                else:
                    # 'import pymel'
                    pymel_namespaces[pymel_module].add(pymel_module)
                    # import pymel.internal
                    # 'pymel.internal' -> 'pymel.internal'
                    rev_pymel_namespaces[pymel_module].add(pymel_module)
            elif mode == 'from':
                details = '' if details == '*' else details
                for detail in details.split(','):
                    if detail:
                        module = pymel_module + '.' + detail
                    else:
                        module = pymel_module
                    pymel_namespaces[pymel_module].add(detail)
                    # from pymel import internal
                    # 'internal' -> 'pymel.internal'
                    # from pymel import *
                    # '' -> 'pymel'
                    rev_pymel_namespaces[detail].add(module)
        if uses_pymel:
            # requalify references to relocated objects on this line
            for obj, reg, obj_namespaces, replace, attr_remap in objects:
                parts = reg.split(line)
                if len(parts) > 1:
                    #print parts
                    # each regex in `objects` has 3 groups, so re.split
                    # yields chunks of 4 elements per match
                    for j in range(0, len(parts)-1, 4):
                        try:
                            ns = parts[j+PREFIX]
                        except IndexError, err:
                            pass
                        else:
                            ns = ns if ns else ''
                            #print '\t', `ns`
                            parts[j+PREFIX] = ns
                            #print "checking namespace", `ns`, 'against', dict(rev_pymel_namespaces)
                            for namespace, orig_namespaces in rev_pymel_namespaces.iteritems():
                                if namespace == '' or ns == namespace or ns.startswith(namespace + '.'):
                                    for orig_namespace in orig_namespaces:
                                        if namespace == '':
                                            expanded_ns = orig_namespace + '.' + ns
                                        else:
                                            expanded_ns = ns.replace(namespace, orig_namespace)
                                        #print 'expanded', expanded_ns
                                        if expanded_ns.rstrip('.') in obj_namespaces:
                                            #print "found namespace", `ns`, `expanded_ns`
                                            try:
                                                pmns = list(pymel_namespaces['pymel'])[0]
                                            except IndexError:
                                                print "warning: %s: no pymel namespace was found" % filepath
                                            else:
                                                if pmns =='':
                                                    parts[j+PREFIX] = replace + '.'
                                                else:
                                                    parts[j+PREFIX] = pmns + '.' + replace + '.'
                                                parts[j+OBJECT] = None
                                                attr = parts[j+SUFFIX]
                                                parts[j+SUFFIX] = attr_remap.get(attr, attr)
                                            break
                    lines[i] = ''.join( [ x for x in parts if x is not None] )
                    #print 'before:', `line`
                    #print 'after: ', `lines[i]`
    success = True
    if modified:
        if not test:
            # write to a temp file first, then swap it in while keeping a
            # .pmbak backup of the original; undo the swap on failure
            tmpfile = filepath + '.tmp'
            try:
                f = open(tmpfile, 'w')
                f.writelines(lines)
                f.close()
            except (IOError, OSError), err:
                print "error writing temporary file: %s: %s" % ( tmpfile, err)
                success = False
            if success:
                try:
                    os.rename(filepath, filepath + BACKUPEXT)
                except (IOError, OSError), err:
                    print "error backing up file %s to %s.pmbak: %s" % ( filepath, filepath, err)
                    success = False
                else:
                    try:
                        os.rename(tmpfile, filepath)
                    except (IOError, OSError), err:
                        print "error renaming temp file: %s" % ( err)
                        success = False
                        print "attempting to restore original file"
                        try:
                            os.rename(filepath + BACKUPEXT, filepath)
                        except OSError, err:
                            print "could not restore original: %s" % ( err)
    return modified, success
def upgrade(logdir=None, test=True, excludeFolderRegex=None, excludeFileRegex=None, verbose=False, force=False):
    """
    search PYTHONPATH (aka. sys.path) and MAYA_SCRIPT_PATH for python files using
    pymel that should be upgraded
    Keywords
    --------
    :param logdir:
        directory to which to write the log of modified files
    :param test:
        when run in test mode (default) no files are changed
    :param excludeFolderRegex:
        a regex string which should match against a directory's basename, without parent path
    :param excludeFileRegex:
        a regex string which should match against a file's basename, without parent path or extension
    :param verbose:
        print more information during conversion
    :param force:
        by default, `upgrade` will skip files which already have already been processed,
        as determined by the existence of a backup file with a .pmbak extension. setting
        force to True will ignore this precaution
    """
    if test:
        print "running in test mode. set test=False to enable file editing"
    if excludeFolderRegex:
        assert isinstance(excludeFolderRegex, basestring), "excludeFolderRegex must be a string"
    if excludeFileRegex:
        assert isinstance(excludeFileRegex, basestring), "excludeFileRegex must be a string"
    logfile = os.path.join(_getLogfile(logdir, read=False))
    try:
        log = open(logfile, 'w' )
    except (IOError, OSError), err:
        print "could not create log file at %s. please pass a writable directory to 'logdir' keyword: %s" % ( logdir, err)
        return
    # remember the logfile so undo()/cleanup() can find it in this session
    global last_logfile
    last_logfile = logfile
    completed = []
    try:
        for path in sys.path + os.environ['MAYA_SCRIPT_PATH'].split(os.pathsep):
            #for path in ['/Volumes/luma/_globalSoft/dev/chad/python/pymel']:
            for root, dirs, files in os.walk(path):
                for f in files:
                    if f.endswith('.py') and not f.startswith('.'):
                        if not excludeFileRegex or not re.match( excludeFileRegex, f[:-3] ):
                            fpath = os.path.realpath(os.path.join(root,f))
                            # paths can appear under several sys.path roots;
                            # process each real file only once
                            if fpath not in completed:
                                if os.path.exists(fpath+BACKUPEXT) and not force:
                                    print "file has already been converted. skipping: %s (use force=True to force conversion)" % fpath
                                    if not test:
                                        # keep as part of the log so that undo will work
                                        log.write( fpath + '\n' )
                                else:
                                    modified, stat = upgradeFile( fpath, test )
                                    if modified and stat:
                                        print 'needs upgrading:' if test else 'upgraded:', fpath
                                        if not test:
                                            log.write( fpath + '\n' )
                                completed.append(fpath)
                        elif verbose:
                            print "skipping", os.path.join(root,f)
                #print 'before', root, dirs
                # dirs must be modified in-place
                i = 0
                tmpdirs = dirs[:]
                for dir in tmpdirs:
                    #print '\t', `dir`
                    # prune hidden dirs, pymel itself, non-packages, and
                    # explicitly excluded folders from the walk
                    if dir.startswith('.') or dir == 'pymel' \
                        or not os.path.isfile(os.path.join(root, dir, '__init__.py')) \
                        or ( excludeFolderRegex and re.match( excludeFolderRegex, dir ) ):
                        del dirs[i]
                        if verbose:
                            print "skipping", os.path.join(root, dir)
                    else:
                        i += 1
                #print 'after', root, dirs
    except Exception, err:
        import traceback
        traceback.print_exc()
    finally:
        if not test:
            print "writing log to %s" % logfile
        log.close()
    if test:
        print "test complete"
        print "to upgrade the listed files run:\nupgrade(test=False)"
    else:
        print "upgrade complete. the original files have been renamed with a %s extension\n" % BACKUPEXT
        print "to remove the backed-up original files run:\ncleanup(%r)\n" % logfile
        print "to restore the original files run:\nundo(%r)" % logfile
def undoFile(filepath):
"""
undo a single file
"""
backup = filepath + BACKUPEXT
if os.path.isfile(backup):
try:
os.rename(backup, filepath )
print "restored", filepath
except (IOError, OSError), err:
print "error restoring file %s.pmbak to %s: %s" % ( filepath, filepath, err)
return False
else:
print "error restoring %s: backup file does not exist: %s. skipping" % ( filepath, backup)
return True
def _findbackups():
    """Scan sys.path and MAYA_SCRIPT_PATH for *.py backup files.

    :returns: list of real paths of the original files (with the backup
        extension stripped) for every ``*.py.pmbak`` found
    """
    undofiles = []
    for path in sys.path + os.environ['MAYA_SCRIPT_PATH'].split(os.pathsep):
        for root, dirs, files in os.walk(path):
            for f in files:
                if f.endswith('.py' + BACKUPEXT) and not f.startswith('.'):
                    # FIX: strip the suffix by slicing. str.rstrip(BACKUPEXT)
                    # removes *characters* from the set ".pmbak", not the
                    # suffix, and can mangle names ending in those letters.
                    orig = f[:-len(BACKUPEXT)]
                    fpath = os.path.realpath(os.path.join(root, orig))
                    undofiles.append(fpath)
            # prune hidden dirs, pymel itself, and non-packages; `dirs`
            # must be edited in place for os.walk to honour the pruning
            i = 0
            tmpdirs = dirs[:]
            for dir in tmpdirs:
                if dir.startswith('.') or dir == 'pymel' \
                        or not os.path.isfile(os.path.join(root, dir, '__init__.py')):
                    del dirs[i]
                else:
                    i += 1
    return undofiles
def _getbackups(logfile, force):
try:
log = open(_getLogfile(logfile), 'r' )
except LogError, e:
if force:
undofiles = _findbackups()
else:
raise LogError, str(e) + '.\nif you lost your logfile, set force=True to search sys.path for *.pmbak files to restore instead.'
else:
undofiles = [ x.rstrip() for x in log.readlines() if x]
log.close()
return undofiles
def undo(logfile=None, force=False):
"""
undo converted files to their original state and remove backups
:param logfile:
the logfile containing the list of files to restore. if None, the logfile
will be determined in this order:
1. last used logfile (module must have remained loaded since running upgrade)
2. MAYA_APP_DIR
3. current working directory
:param force:
if you've lost the original logfile, setting force to True will cause the function
to recurse sys.path looking for backup files to restore instead of using the log.
if your sys.path is setup exactly as it was during upgrade, all files should
be restored, but without the log it is impossible to be certain.
"""
undofiles = _getbackups(logfile, force)
try:
for file in undofiles:
undoFile(file)
print 'done'
except Exception, err:
import traceback
traceback.print_exc()
def cleanup(logfile=None, force=False):
"""
remove backed-up files. run this when you are certain that the upgrade went
smoothly and you no longer need the original backups.
:param logfile:
the logfile containing the list of files to restore. if None, the logfile
will be determined in this order:
1. last used logfile (module must have remained loaded since running upgrade)
2. MAYA_APP_DIR
3. current working directory
:param force:
if you've lost the original logfile, setting force to True will cause the function
to recurse sys.path looking for backup files to cleanup instead of using the log.
if your sys.path is setup exactly as it was during upgrade, all files should
be restored, but without the log it is impossible to be certain.
"""
undofiles = _getbackups(logfile, force)
try:
for file in undofiles:
bkup = file + BACKUPEXT
try:
print "removing", bkup
os.remove(bkup)
except (IOError, OSError), err:
print "error removing file %s: %s" % ( bkup, err)
print 'done'
except Exception, err:
import traceback
traceback.print_exc()
|
{
"content_hash": "40f32042cd0fb8153128f2b3b52de197",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 188,
"avg_line_length": 40.0531914893617,
"alnum_prop": 0.5097476759628154,
"repo_name": "CountZer0/PipelineConstructionSet",
"id": "18aea4879c3161544c3005608e4cd57803aab936",
"size": "18825",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/maya/site-packages/pymel-1.0.3/pymel/tools/upgradeScripts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "49130"
},
{
"name": "JavaScript",
"bytes": "21455"
},
{
"name": "Python",
"bytes": "24534027"
},
{
"name": "Shell",
"bytes": "784"
}
],
"symlink_target": ""
}
|
"""This module holds :class:`jukeboxcore.gui.treemodel.ItemData` subclasses that represent filesys data,
e.g. a :class:`jukeboxcore.filesys.TaskFileInfo`
"""
from PySide import QtCore
from jukeboxcore.gui.treemodel import ItemData
from jukeboxcore.filesys import JB_File
def taskfileinfo_element_data(tfi, role):
    """Return data for the element (e.g. the Asset or Shot) of the task file.

    :param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
    :type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: the element name for display/edit roles, otherwise None
    :raises: None
    """
    element = tfi.task.element
    if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
        return element.name
def taskfileinfo_task_data(tfi, role):
    """Return data for the task of the task file.

    :param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
    :type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: the task name for display/edit roles, otherwise None
    :raises: None
    """
    task = tfi.task
    if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
        return task.name
def taskfileinfo_descriptor_data(tfi, role):
    """Return data for the descriptor of the task file.

    :param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
    :type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: the descriptor for display/edit roles, otherwise None
    :raises: None
    """
    if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
        return tfi.descriptor
def taskfileinfo_path_data(tfi, role):
    """Return data for the full file path of the task file.

    :param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
    :type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: the full path (via :class:`JB_File`) for display/edit roles,
        otherwise None
    :raises: None
    """
    if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
        return JB_File(tfi).get_fullpath()
def taskfileinfo_version_data(tfi, role):
    """Return data for the version of the task file.

    :param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` that holds the data
    :type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: zero-padded version string (e.g. ``v001``) for the display
        role, otherwise None
    :raises: None
    """
    if role != QtCore.Qt.DisplayRole:
        return None
    return 'v%03i' % tfi.version
def taskfileinfo_rtype_data(tfi, role):
    """Return data for the releasetype of the task file.

    :param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` that holds the data
    :type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: the releasetype for the display role, otherwise None
    :raises: None
    """
    if role != QtCore.Qt.DisplayRole:
        return None
    return tfi.releasetype
class TaskFileInfoItemData(ItemData):
    """ItemData wrapping a :class:`jukeboxcore.filesys.TaskFileInfo` for use
    in a :class:`jukeboxcore.gui.treemodel.TreeItem`.

    Each entry of :data:`columns` is a function extracting one column's
    data from the wrapped taskfileinfo.
    """
    # column accessor functions, in display order
    columns = [taskfileinfo_element_data,
               taskfileinfo_task_data,
               taskfileinfo_descriptor_data,
               taskfileinfo_version_data,
               taskfileinfo_rtype_data,
               taskfileinfo_path_data,]

    def __init__(self, taskfileinfo):
        """Initialize the item data with the taskfileinfo to represent.

        :param taskfileinfo: the taskfileinfo to represent
        :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
        :raises: None
        """
        super(TaskFileInfoItemData, self).__init__()
        self._taskfileinfo = taskfileinfo

    def column_count(self, ):
        """Return how many columns can be queried for data.

        :returns: the number of columns
        :rtype: int
        :raises: None
        """
        return len(self.columns)

    def data(self, column, role):
        """Return the data for the given column and role.

        :param column: the data column
        :type column: int
        :param role: the data role
        :type role: QtCore.Qt.ItemDataRole
        :returns: data depending on the role
        :raises: None
        """
        accessor = self.columns[column]
        return accessor(self._taskfileinfo, role)

    def internal_data(self, ):
        """Return the wrapped taskfileinfo.

        :returns: the taskfileinfo
        :rtype: :class:`jukeboxcore.filesys.TaskFileInfo`
        :raises: None
        """
        return self._taskfileinfo
|
{
"content_hash": "f5358d27631b34b1e441fb05a3e9dff0",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 124,
"avg_line_length": 31.335483870967742,
"alnum_prop": 0.6627547869054973,
"repo_name": "JukeboxPipeline/jukebox-core",
"id": "2ac8e48bb75ca57adace93c7b62a1bda3290775e",
"size": "4857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/jukeboxcore/gui/filesysitemdata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1221"
},
{
"name": "Python",
"bytes": "890248"
},
{
"name": "Shell",
"bytes": "962"
}
],
"symlink_target": ""
}
|
import pathlib
from qiime2 import sdk
from qiime2.plugin import model
def identity_transformer(view):
    """Pass *view* through unchanged (the no-op transformer)."""
    return view
class ModelType:
    """Bridge between a view type and the plugin transformer registry.

    Subclasses specialize coercion/validation for formats vs. plain objects.
    """

    @staticmethod
    def from_view_type(view_type):
        """Construct the appropriate ModelType subclass for *view_type*."""
        if not issubclass(view_type, model.base.FormatBase):
            # TODO: supporting stdlib.typing may require an alternate
            # model type as `isinstance` is a meaningless operation
            # for them so validation would need to be handled differently
            return ObjectType(view_type)

        if issubclass(view_type, model.SingleFileDirectoryFormatBase):
            # HACK: this is necessary because we need to be able to "act"
            # like a FileFormat when looking up transformers, but our
            # input/output coercion still needs to bridge the
            # transformation as we do not have transitivity

            # In other words we have DX and we have transformers of X
            # In a perfect world we would automatically define DX -> X and
            # let transitivity handle it, but since that doesn't exist, we
            # need to treat DX as if it were X and coerce behind the scenes

            # TODO: redo this when transformers are transitive
            return SingleFileDirectoryFormatType(view_type)

        # Normal format type
        return FormatType(view_type)

    def __init__(self, view_type):
        self._view_type = view_type
        self._pm = sdk.PluginManager()

    def make_transformation(self, other, recorder=None):
        """Return a callable that converts a view of this type into *other*.

        Raises when no transformer (direct or wrapped) is registered.
        """
        # TODO: do something with the recorder.
        transformer = self._get_transformer_to(other)
        if transformer is None:
            raise Exception("No transformation from %r to %r" %
                            (self._view_type, other._view_type))

        def transformation(view):
            # Coerce and validate on the way in, transform, then coerce and
            # validate on the way out.
            source = self.coerce_view(view)
            self.validate(source)
            result = other.coerce_view(transformer(source))
            other.validate(result)
            if transformer is not identity_transformer:
                other.set_user_owned(result, False)
            return result

        return transformation

    def _get_transformer_to(self, other):
        direct = self._lookup_transformer(self._view_type, other._view_type)
        if direct is not None:
            return direct
        # No direct transformer: give the other side a chance (reflected
        # lookup, analogous to __radd__).
        return other._get_transformer_from(self)

    def has_transformation(self, other):
        """Return True when a transformer from self to *other* exists.

        Parameters
        ----------
        other : ModelType subclass
            The object being checked for transformer

        Returns
        -------
        bool
            Does the specified transformer exist for other?
        """
        return self._get_transformer_to(other) is not None

    def _get_transformer_from(self, other):
        # Base types contribute nothing to the reflected lookup.
        return None

    def coerce_view(self, view):
        return view

    def _lookup_transformer(self, from_, to_):
        if from_ == to_:
            return identity_transformer
        try:
            return self._pm.transformers[from_][to_].transformer
        except KeyError:
            return None

    def set_user_owned(self, view, value):
        pass
class FormatType(ModelType):
    """ModelType for file/directory formats backed by a path on disk."""

    def coerce_view(self, view):
        """Normalize *view* to a read-only instance of the format."""
        # A raw path (exact str, or pathlib.Path) becomes a fresh read-mode
        # format instance.
        if type(view) is str or isinstance(view, pathlib.Path):
            return self._view_type(view, mode='r')

        if isinstance(view, self._view_type):
            # wrap original path (inheriting the lifetime) and return a
            # read-only instance
            return self._view_type(view.path, mode='r')

        return view

    def validate(self, view):
        """Type-check *view*, then defer to the format's own validation."""
        if isinstance(view, self._view_type):
            # Formats have a validate method, so defer to it
            view.validate()
            return
        raise TypeError("%r is not an instance of %r."
                        % (view, self._view_type))

    def set_user_owned(self, view, value):
        view.path._user_owned = value
class SingleFileDirectoryFormatType(FormatType):
    """Format type for directory formats that contain exactly one file.

    Acts like the inner file's format during transformer lookup, wrapping
    input/output coercion so DX can be treated as X (see the HACK note in
    ``ModelType.from_view_type``).
    """

    def __init__(self, view_type):
        # Single file directory formats have only one file named `file`
        # allowing us construct a model type from the format of `file`
        self._wrapped_view_type = view_type.file.format
        super().__init__(view_type)

    def _get_transformer_to(self, other):
        """Find a transformer to *other*, trying each wrapping permutation.

        The lookup order below matters: direct first, then increasingly
        wrapped forms, finally the reflected lookup on *other*.
        """
        # Legend:
        # - Dx: single directory format of x
        # - Dy: single directory format of y
        # - x: input format x
        # - y: output format y
        # - ->: implicit transformer
        # - =>: registered transformer
        # - |: or, used when multiple situation are possible

        # It looks like all permutations because it is...

        # Dx -> y | Dy via Dx => y | Dy
        transformer = self._wrap_transformer(self, other)
        if transformer is not None:
            return transformer

        # Dx -> Dy via Dx -> x => y | Dy
        transformer = self._wrap_transformer(self, other, wrap_input=True)
        if transformer is not None:
            return transformer

        if type(other) is type(self):
            # Dx -> Dy via Dx -> x => y -> Dy
            transformer = self._wrap_transformer(
                self, other, wrap_input=True, wrap_output=True)
            if transformer is not None:
                return transformer

        # Out of options, try for Dx -> Dy via Dx => y -> Dy
        return other._get_transformer_from(self)

    def _get_transformer_from(self, other):
        """Reflected lookup: transformer from *other* into this type."""
        # x | Dx -> Dy via x | Dx => y -> Dy
        # IMPORTANT: reverse other and self, this method is like __radd__
        return self._wrap_transformer(other, self, wrap_output=True)

    def _wrap_transformer(self, in_, out_, wrap_input=False,
                          wrap_output=False):
        """Look up a transformer, optionally unwrapping the input directory
        format and/or re-wrapping the output into one."""
        input = in_._wrapped_view_type if wrap_input else in_._view_type
        output = out_._wrapped_view_type if wrap_output else out_._view_type

        transformer = self._lookup_transformer(input, output)
        if transformer is None:
            return None

        if wrap_input:
            transformer = in_._wrap_input(transformer)
        if wrap_output:
            transformer = out_._wrap_output(transformer)

        return transformer

    def _wrap_input(self, transformer):
        """Adapt *transformer* to accept the directory format by feeding it
        the inner file's view."""
        def wrapped(view):
            return transformer(view.file.view(self._wrapped_view_type))

        return wrapped

    def _wrap_output(self, transformer):
        """Adapt *transformer* to produce the directory format by writing its
        result into a fresh directory instance."""
        def wrapped(view):
            new_view = self._view_type()
            file_view = transformer(view)
            if transformer is not identity_transformer:
                self.set_user_owned(file_view, False)
            new_view.file.write_data(file_view, self._wrapped_view_type)
            return new_view

        return wrapped
class ObjectType(ModelType):
    """ModelType for plain in-memory (non-format) views."""

    def validate(self, view):
        """Raise TypeError unless *view* is an instance of the wrapped type."""
        if isinstance(view, self._view_type):
            return
        raise TypeError("%r is not of type %r, cannot transform further."
                        % (view, self._view_type))
|
{
"content_hash": "52baf9bd8bffdb0a1862da0f5437ad06",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 79,
"avg_line_length": 33.948356807511736,
"alnum_prop": 0.5848430369243535,
"repo_name": "ebolyen/qiime2",
"id": "791516b085ddabe5f47195dd94e61f6936280fd4",
"size": "7580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiime2/core/transform.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "492020"
}
],
"symlink_target": ""
}
|
import os
import re
import argparse as ap
import pandas as pd
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from retrying import retry
# Firefox profile used by selenium (pre-configured for automatic CSV download).
profile_folder = '/home/yangh/.mozilla/firefox/webdriver-profile'

# URL template; first %s is the statement path fragment, second is the ticker.
# NOTE(review): the '®ion' below looks like a mangled '&region' (HTML-entity
# corruption of '&reg') -- verify the URL template against morningstar.
url_morningstar = ('http://financials.morningstar.com/%s.html?'
                   't=%s®ion=USA&culture=en_US')

# URL path fragment per statement type (substituted into url_morningstar).
url_statement = {
    'income': 'income-statement/is',
    'balance_sheet': 'balance-sheet/bs',
    'cash_flow': 'cash-flow/cf',
    'ratio': 'ratios/r'
}

# File name morningstar gives the exported CSV for each statement type.
csv_filename = {
    'income': 'Income Statement',
    'balance_sheet': 'Balance Sheet',
    'cash_flow': 'Cash Flow',
    'ratio': 'Key Ratios'
}

# Report frequencies fetched for each statement (key ratios have no frequency).
freq = ['Quarterly', 'Annual']

# Prefix prepended to date-like column names (see rename_columns).
prefix = 'Y'
def start_firefox_webdriver(profile_folder):
    """Launch a selenium-controlled Firefox using the given profile folder."""
    firefox_profile = webdriver.FirefoxProfile(profile_folder)
    return webdriver.Firefox(firefox_profile=firefox_profile)
@retry(wait_fixed=200, stop_max_delay=10000)
def read_csv(csv, skip):
    """Read *csv* skipping *skip* header rows.

    Retried every 200ms for up to 10s because the browser may still be
    writing the downloaded file.
    """
    frame = pd.read_csv(csv, skiprows=skip)
    return frame
# Thin retrying wrapper around selenium's execute_script: retried every 200ms
# for up to 10s (presumably until the page's JS objects become available --
# TODO confirm against the morningstar page behavior).
@retry(wait_fixed=200, stop_max_delay=10000)
def execute_script(driver, script):
    driver.execute_script(script)
def download_statement_settings(driver, freq):
    """Configure the morningstar statement page and trigger the CSV export."""
    freq_scripts = {
        'Quarterly': "SRT_stocFund.ChangeFreq(3,'Quarterly')",
        'Annual': "SRT_stocFund.ChangeFreq(12,'Annual')",
    }
    if freq in freq_scripts:
        execute_script(driver, freq_scripts[freq])
    for script in ("SRT_stocFund.orderControl('asc','Ascending')",
                   "SRT_stocFund.ChangeRounding(-1)",  # in thousand
                   "SRT_stocFund.changeDataType('R','Restated')",
                   "SRT_stocFund.Export()"):
        execute_script(driver, script)
def download_keyratios_settings(driver):
    """Sort the key-ratios table ascending and trigger the CSV export."""
    for script in ("orderChange('asc','Ascending')",
                   "exportKeyStat2CSV()"):
        execute_script(driver, script)
def rename_columns(statement):
    """Rename date-like columns 'xxxx-xx' to 'Yxxxx_xx'.

    HDF5 keys cannot be only numbers or contain '-', so every date column is
    rewritten with the module-level prefix; the first column becomes 'report'.
    """
    new_cols = list(statement.columns)
    new_cols[0] = 'report'
    date_pat = re.compile(r'^([0-9].+)-([0-9].+$)')
    statement.columns = [date_pat.sub(prefix + r'\1' + '_' + r'\2', c)
                         for c in new_cols]
    return statement
def rename_columns_back(statement):
    """Undo :func:`rename_columns`: turn 'Yxxxx_xx' column keys back into
    'xxxx-xx' date form. Other columns are left untouched."""
    date_pat = re.compile(r'^Y([0-9].+)_([0-9].+$)')
    statement.columns = [date_pat.sub(r'\1' + '-' + r'\2', c)
                         for c in statement.columns]
    return statement
def store_statement(store, h5_node, statement):
    """Insert or update *statement* under *h5_node* in the HDF5 *store*.

    On first write the frame is appended as a new node. On later writes only
    columns not already stored (plus a refreshed 'TTM' column, if present)
    are merged into the existing frame, which is then rewritten.
    """
    # Fix: compare against None with identity, not equality ('== None' invokes
    # rich comparison on the storer object).
    if store.get_storer(h5_node) is None:
        store.append(h5_node, statement, data_columns=True)
    else:
        # python set is good to find the diff
        s_orig = store[h5_node]
        s_diff = list(set(statement.columns.tolist()) -
                      set(s_orig.columns.tolist()))
        # Single-argument print() form is valid in both Python 2 and 3.
        print(statement.columns.tolist())
        print(s_orig.columns.tolist())
        print(s_diff)
        if s_diff:
            s_orig[s_diff] = statement[s_diff]
            if 'TTM' in statement:
                s_orig['TTM'] = statement['TTM']
            store.put(h5_node, s_orig, table=True)
            print("update database")
        else:
            print("no update")
def keyratios_data(driver, csv, store, node):
    """Export the key-ratios CSV, store it under *node*, delete the file."""
    print(csv)
    download_keyratios_settings(driver)
    frame = rename_columns(read_csv(csv, 2))
    store_statement(store, node, frame)
    os.remove(csv)
def statement_data(driver, f, csv, store, node):
    """Export one statement CSV at frequency *f* and store it under node/f."""
    print(csv + ' ' + f)
    download_statement_settings(driver, f)
    frame = rename_columns(read_csv(csv, 1))
    store_statement(store, node + '/' + f, frame)
    os.remove(csv)
def download_single_statement(symbol, driver, data_dir, store, st_type):
    """Fetch one statement type for *symbol* and persist it into *store*."""
    csv = os.path.join(data_dir, '%s %s.csv' %(symbol, csv_filename[st_type]))
    node = '/%s/%s' %(symbol, st_type)
    driver.get(url_morningstar % (url_statement[st_type], symbol))
    if st_type == 'ratio':
        keyratios_data(driver, csv, store, node)
    else:
        # Key ratios have no frequency; statements are pulled per frequency.
        for f in freq:
            statement_data(driver, f, csv, store, node)
def download_financial(symbol, driver, data_dir):
    """Download every statement type for *symbol* into data_dir/financials.h5."""
    store = pd.HDFStore(data_dir + '/financials.h5')
    print(symbol + ' downloading...')
    for st_type in csv_filename.keys():
        download_single_statement(symbol, driver, data_dir, store, st_type)
    store.close()
def download_financial_morningstar(symbols, data_dir):
    """Download financials for every symbol reusing one browser session."""
    driver = start_firefox_webdriver(profile_folder)
    for symbol in symbols:
        download_financial(symbol, driver, data_dir)
def get_cmd_line():
    """Parse the command line; return the absolute, existing data directory.

    Exits the process when the directory does not exist.
    """
    parser = ap.ArgumentParser(description='update stock financial data')
    parser.add_argument("data_dir")
    data_dir = os.path.abspath(parser.parse_args().data_dir)
    if not os.path.exists(data_dir):
        print("%s does not exist!" % data_dir)
        os.sys.exit()
    return data_dir
def main():
    """Entry point: download financials for a fixed set of symbols."""
    data_dir = get_cmd_line()
    download_financial_morningstar(['QCOM', 'SWKS', 'INTC'], data_dir)


if __name__ == "__main__":
    main()
|
{
"content_hash": "c484411b40c019fc0a8ee393ffb5dda7",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 80,
"avg_line_length": 31.857142857142858,
"alnum_prop": 0.6437015898899306,
"repo_name": "sp500/stock",
"id": "3918ab1cf9b4e9583d46140d5a0267b2b8012110",
"size": "4929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stock_financial.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "13647"
},
{
"name": "Jupyter Notebook",
"bytes": "1408"
},
{
"name": "Python",
"bytes": "39968"
}
],
"symlink_target": ""
}
|
import re
import sys
import pytest
from _pytest.config import get_plugin_manager
from _jb_runner_tools import jb_start_tests, jb_patch_separator, jb_doc_args, JB_DISABLE_BUFFERING
from teamcity import pytest_plugin
if __name__ == '__main__':
    # Receive the test path/targets and extra args prepared by the IDE runner.
    path, targets, additional_args = jb_start_tests()
    sys.argv += additional_args
    # Convert IDE-style dotted targets into pytest "file.py::Class::test" ids.
    joined_targets = jb_patch_separator(targets, fs_glue="/", python_glue="::", fs_to_python_glue=".py::")
    # When file is launched in py.test it should be file.py: you can't provide it as bare module
    joined_targets = [t + ".py" if ":" not in t else t for t in joined_targets]
    # An explicit path takes precedence over individual targets.
    sys.argv += [path] if path else joined_targets
    # plugin is discovered automatically in 3, but not in 2
    # to prevent "plugin already registered" problem we check it first
    plugins_to_load = []
    if not get_plugin_manager().hasplugin("pytest-teamcity"):
        plugins_to_load.append(pytest_plugin)
    args = sys.argv[1:]
    # Output buffering would hide prints in the IDE console; force pytest -s.
    if JB_DISABLE_BUFFERING and "-s" not in args:
        args += ["-s"]
    jb_doc_args("py.test", args)
    pytest.main(args, plugins_to_load)
|
{
"content_hash": "543a740f9e4b1ec548b1b5e48fa872dd",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 106,
"avg_line_length": 36.766666666666666,
"alnum_prop": 0.6736174070716229,
"repo_name": "FHannes/intellij-community",
"id": "61f657f356c71fcbd9d42a1d17d0d5ef7566b96e",
"size": "1118",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/helpers/pycharm/_jb_pytest_runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "20665"
},
{
"name": "AspectJ",
"bytes": "182"
},
{
"name": "Batchfile",
"bytes": "60476"
},
{
"name": "C",
"bytes": "195249"
},
{
"name": "C#",
"bytes": "1264"
},
{
"name": "C++",
"bytes": "195362"
},
{
"name": "CMake",
"bytes": "1675"
},
{
"name": "CSS",
"bytes": "201445"
},
{
"name": "CoffeeScript",
"bytes": "1759"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groovy",
"bytes": "3139656"
},
{
"name": "HLSL",
"bytes": "57"
},
{
"name": "HTML",
"bytes": "1841771"
},
{
"name": "J",
"bytes": "5050"
},
{
"name": "Java",
"bytes": "161644445"
},
{
"name": "JavaScript",
"bytes": "570364"
},
{
"name": "Jupyter Notebook",
"bytes": "93222"
},
{
"name": "Kotlin",
"bytes": "3199479"
},
{
"name": "Lex",
"bytes": "185152"
},
{
"name": "Makefile",
"bytes": "2352"
},
{
"name": "NSIS",
"bytes": "49837"
},
{
"name": "Objective-C",
"bytes": "27941"
},
{
"name": "Perl",
"bytes": "903"
},
{
"name": "Perl6",
"bytes": "26"
},
{
"name": "Protocol Buffer",
"bytes": "6680"
},
{
"name": "Python",
"bytes": "24172584"
},
{
"name": "Roff",
"bytes": "35232"
},
{
"name": "Ruby",
"bytes": "1217"
},
{
"name": "Scala",
"bytes": "11698"
},
{
"name": "Shell",
"bytes": "64469"
},
{
"name": "Smalltalk",
"bytes": "338"
},
{
"name": "TeX",
"bytes": "25473"
},
{
"name": "Thrift",
"bytes": "1846"
},
{
"name": "TypeScript",
"bytes": "9469"
},
{
"name": "Visual Basic",
"bytes": "77"
},
{
"name": "XSLT",
"bytes": "113040"
}
],
"symlink_target": ""
}
|
import sys
import os
import shutil
import commands
import time
from glob import glob
import msct_qc
import sct_utils as sct
from sct_utils import add_suffix
from sct_image import set_orientation
from sct_register_multimodal import Paramreg, ParamregMultiStep, register
from msct_parser import Parser
from msct_image import Image, find_zmin_zmax
from shutil import move
from sct_label_utils import ProcessLabels
import numpy as np
# get path of the toolbox
# (path_sct is the parent of this script's directory -- assumes the script
#  lives one level below the toolbox root)
path_script = os.path.dirname(__file__)
path_sct = os.path.dirname(path_script)
# DEFAULT PARAMETERS
class Param:
    """Default parameters for registering an anatomical image to the template."""
    ## The constructor
    def __init__(self):
        self.debug = 0  # debug flag (0 = off)
        self.remove_temp_files = 1 # remove temporary files
        self.fname_mask = ''  # this field is needed in the function register@sct_register_multimodal
        self.padding = 10  # this field is needed in the function register@sct_register_multimodal
        self.verbose = 1  # verbose
        self.path_template = path_sct+'/data/PAM50'  # default template folder shipped with the toolbox
        self.path_qc = os.path.abspath(os.curdir)+'/qc/'  # QC output folder (created under the cwd)
        self.zsubsample = '0.25'  # z sub-sampling factor (string; used later in main -- confirm exact consumer)
        self.param_straighten = ''  # extra parameters forwarded to sct_straighten_spinalcord
# get default parameters
# Note: step0 is used as pre-registration (label-based affine alignment)
step0 = Paramreg(step='0', type='label', dof='Tx_Ty_Tz_Sz')  # if ref=template, we only need translations and z-scaling because the cord is already straight
step1 = Paramreg(step='1', type='seg', algo='centermassrot', smooth='2')
# alternative step2 kept for reference:
# step2 = Paramreg(step='2', type='seg', algo='columnwise', smooth='0', smoothWarpXY='2')
step2 = Paramreg(step='2', type='seg', algo='bsplinesyn', metric='MeanSquares', iter='3', smooth='1')
# step3 = Paramreg(step='3', type='im', algo='syn', metric='CC', iter='1')
paramreg = ParamregMultiStep([step0, step1, step2])
# PARSER
# ==========================================================================================
def get_parser():
    """Build and return the command-line parser for this registration script.

    :return: a configured :class:`msct_parser.Parser` describing all options
    """
    param = Param()
    parser = Parser(__file__)
    parser.usage.set_description('Register anatomical image to the template.')
    parser.add_option(name="-i",
                      type_value="file",
                      description="Anatomical image.",
                      mandatory=True,
                      example="anat.nii.gz")
    parser.add_option(name="-s",
                      type_value="file",
                      description="Spinal cord segmentation.",
                      mandatory=True,
                      example="anat_seg.nii.gz")
    parser.add_option(name="-l",
                      type_value="file",
                      description="Labels. See: http://sourceforge.net/p/spinalcordtoolbox/wiki/create_labels/",
                      mandatory=True,
                      default_value='',
                      example="anat_labels.nii.gz")
    parser.add_option(name="-ofolder",
                      type_value="folder_creation",
                      description="Output folder.",
                      mandatory=False,
                      default_value='')
    parser.add_option(name="-t",
                      type_value="folder",
                      description="Path to template.",
                      mandatory=False,
                      default_value=param.path_template)
    parser.add_option(name='-c',
                      type_value='multiple_choice',
                      description='Contrast to use for registration.',
                      mandatory=False,
                      default_value='t2',
                      example=['t1', 't2', 't2s'])
    parser.add_option(name='-ref',
                      type_value='multiple_choice',
                      description='Reference for registration: template: subject->template, subject: template->subject.',
                      mandatory=False,
                      default_value='template',
                      example=['template', 'subject'])
    # NOTE(review): the step=2 help text below ends with steps['1'].pca_eigenratio_th;
    # this probably should be steps['2'] -- verify before changing.
    parser.add_option(name="-param",
                      type_value=[[':'], 'str'],
                      description='Parameters for registration (see sct_register_multimodal). Default: \
\n--\nstep=0\ntype=' + paramreg.steps['0'].type + '\ndof=' + paramreg.steps['0'].dof + '\
\n--\nstep=1\ntype=' + paramreg.steps['1'].type + '\nalgo=' + paramreg.steps['1'].algo + '\nmetric=' + paramreg.steps['1'].metric + '\niter=' + paramreg.steps['1'].iter + '\nsmooth=' + paramreg.steps['1'].smooth + '\ngradStep=' + paramreg.steps['1'].gradStep + '\nslicewise=' + paramreg.steps['1'].slicewise + '\nsmoothWarpXY=' + paramreg.steps['1'].smoothWarpXY + '\npca_eigenratio_th=' + paramreg.steps['1'].pca_eigenratio_th + '\
\n--\nstep=2\ntype=' + paramreg.steps['2'].type + '\nalgo=' + paramreg.steps['2'].algo + '\nmetric=' + paramreg.steps['2'].metric + '\niter=' + paramreg.steps['2'].iter + '\nsmooth=' + paramreg.steps['2'].smooth + '\ngradStep=' + paramreg.steps['2'].gradStep + '\nslicewise=' + paramreg.steps['2'].slicewise + '\nsmoothWarpXY=' + paramreg.steps['2'].smoothWarpXY + '\npca_eigenratio_th=' + paramreg.steps['1'].pca_eigenratio_th,
                      mandatory=False)
    parser.add_option(name="-param-straighten",
                      type_value='str',
                      description="""Parameters for straightening (see sct_straighten_spinalcord).""",
                      mandatory=False,
                      default_value='')
    # parser.add_option(name="-cpu-nb",
    #                   type_value="int",
    #                   description="Number of CPU used for straightening. 0: no multiprocessing. By default, uses all the available cores.",
    #                   mandatory=False,
    #                   example="8")
    parser.add_option(name="-r",
                      type_value="multiple_choice",
                      description="""Remove temporary files.""",
                      mandatory=False,
                      default_value='1',
                      example=['0', '1'])
    parser.add_option(name="-v",
                      type_value="multiple_choice",
                      description="""Verbose. 0: nothing. 1: basic. 2: extended.""",
                      mandatory=False,
                      default_value=param.verbose,
                      example=['0', '1', '2'])
    parser.add_option(name="-param-qc",
                      type_value=[[','], 'str'],
                      description=msct_qc.Qc_Params.get_qc_params_description(["ofolder", "autoview", "generate"]),
                      mandatory=False)
    return parser
# MAIN
# ==========================================================================================
def main():
parser = get_parser()
param = Param()
arguments = parser.parse(sys.argv[1:])
# get arguments
fname_data = arguments['-i']
fname_seg = arguments['-s']
fname_landmarks = arguments['-l']
if '-ofolder' in arguments:
path_output = arguments['-ofolder']
else:
path_output = ''
path_template = sct.slash_at_the_end(arguments['-t'], 1)
contrast_template = arguments['-c']
ref = arguments['-ref']
remove_temp_files = int(arguments['-r'])
verbose = int(arguments['-v'])
param.verbose = verbose # TODO: not clean, unify verbose or param.verbose in code, but not both
if '-param-straighten' in arguments:
param.param_straighten = arguments['-param-straighten']
# if '-cpu-nb' in arguments:
# arg_cpu = ' -cpu-nb '+str(arguments['-cpu-nb'])
# else:
# arg_cpu = ''
# registration parameters
if '-param' in arguments:
# reset parameters but keep step=0 (might be overwritten if user specified step=0)
paramreg = ParamregMultiStep([step0])
if ref == 'subject':
paramreg.steps['0'].dof = 'Tx_Ty_Tz_Rx_Ry_Rz_Sz'
# add user parameters
for paramStep in arguments['-param']:
paramreg.addStep(paramStep)
else:
paramreg = ParamregMultiStep([step0, step1, step2])
# if ref=subject, initialize registration using different affine parameters
if ref == 'subject':
paramreg.steps['0'].dof = 'Tx_Ty_Tz_Rx_Ry_Rz_Sz'
# initialize other parameters
# file_template_label = param.file_template_label
zsubsample = param.zsubsample
template = os.path.basename(os.path.normpath(path_template))
# smoothing_sigma = param.smoothing_sigma
# retrieve template file names
from sct_warp_template import get_file_label
file_template_vertebral_labeling = get_file_label(path_template+'template/', 'vertebral')
file_template = get_file_label(path_template+'template/', contrast_template.upper()+'-weighted')
file_template_seg = get_file_label(path_template+'template/', 'spinal cord')
# start timer
start_time = time.time()
# get fname of the template + template objects
fname_template = path_template+'template/'+file_template
fname_template_vertebral_labeling = path_template+'template/'+file_template_vertebral_labeling
fname_template_seg = path_template+'template/'+file_template_seg
# check file existence
# TODO: no need to do that!
sct.printv('\nCheck template files...')
sct.check_file_exist(fname_template, verbose)
sct.check_file_exist(fname_template_vertebral_labeling, verbose)
sct.check_file_exist(fname_template_seg, verbose)
# print arguments
sct.printv('\nCheck parameters:', verbose)
sct.printv(' Data: '+fname_data, verbose)
sct.printv(' Landmarks: '+fname_landmarks, verbose)
sct.printv(' Segmentation: '+fname_seg, verbose)
sct.printv(' Path template: '+path_template, verbose)
sct.printv(' Remove temp files: '+str(remove_temp_files), verbose)
# create QC folder
sct.create_folder(param.path_qc)
#
# sct.printv('\nParameters for registration:')
# for pStep in range(0, len(paramreg.steps)):
# sct.printv('Step #'+paramreg.steps[str(pStep)].step, verbose)
# sct.printv(' Type .................... '+paramreg.steps[str(pStep)].type, verbose)
# sct.printv(' Algorithm ............... '+paramreg.steps[str(pStep)].algo, verbose)
# sct.printv(' Metric .................. '+paramreg.steps[str(pStep)].metric, verbose)
# sct.printv(' Number of iterations .... '+paramreg.steps[str(pStep)].iter, verbose)
# sct.printv(' Shrink factor ........... '+paramreg.steps[str(pStep)].shrink, verbose)
# sct.printv(' Smoothing factor......... '+paramreg.steps[str(pStep)].smooth, verbose)
# sct.printv(' Gradient step ........... '+paramreg.steps[str(pStep)].gradStep, verbose)
# sct.printv(' Degree of polynomial .... '+paramreg.steps[str(pStep)].poly, verbose)
path_data, file_data, ext_data = sct.extract_fname(fname_data)
sct.printv('\nCheck if data, segmentation and landmarks are in the same space...')
if not sct.check_if_same_space(fname_data, fname_seg):
sct.printv('ERROR: Data image and segmentation are not in the same space. Please check space and orientation of your files', verbose, 'error')
if not sct.check_if_same_space(fname_data, fname_landmarks):
sct.printv('ERROR: Data image and landmarks are not in the same space. Please check space and orientation of your files', verbose, 'error')
sct.printv('\nCheck input labels...')
# check if label image contains coherent labels
image_label = Image(fname_landmarks)
# -> all labels must be different
labels = image_label.getNonZeroCoordinates(sorting='value')
hasDifferentLabels = True
for lab in labels:
for otherlabel in labels:
if lab != otherlabel and lab.hasEqualValue(otherlabel):
hasDifferentLabels = False
break
if not hasDifferentLabels:
sct.printv('ERROR: Wrong landmarks input. All labels must be different.', verbose, 'error')
# create temporary folder
path_tmp = sct.tmp_create(verbose=verbose)
# set temporary file names
ftmp_data = 'data.nii'
ftmp_seg = 'seg.nii.gz'
ftmp_label = 'label.nii.gz'
ftmp_template = 'template.nii'
ftmp_template_seg = 'template_seg.nii.gz'
ftmp_template_label = 'template_label.nii.gz'
# copy files to temporary folder
sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose)
sct.run('sct_convert -i '+fname_data+' -o '+path_tmp+ftmp_data)
sct.run('sct_convert -i '+fname_seg+' -o '+path_tmp+ftmp_seg)
sct.run('sct_convert -i '+fname_landmarks+' -o '+path_tmp+ftmp_label)
sct.run('sct_convert -i '+fname_template+' -o '+path_tmp+ftmp_template)
sct.run('sct_convert -i '+fname_template_seg+' -o '+path_tmp+ftmp_template_seg)
# sct.run('sct_convert -i '+fname_template_label+' -o '+path_tmp+ftmp_template_label)
# go to tmp folder
os.chdir(path_tmp)
# Generate labels from template vertebral labeling
sct.printv('\nGenerate labels from template vertebral labeling', verbose)
sct.run('sct_label_utils -i '+fname_template_vertebral_labeling+' -vert-body 0 -o '+ftmp_template_label)
# check if provided labels are available in the template
sct.printv('\nCheck if provided labels are available in the template', verbose)
image_label_template = Image(ftmp_template_label)
labels_template = image_label_template.getNonZeroCoordinates(sorting='value')
if labels[-1].value > labels_template[-1].value:
sct.printv('ERROR: Wrong landmarks input. Labels must have correspondence in template space. \nLabel max '
'provided: ' + str(labels[-1].value) + '\nLabel max from template: ' +
str(labels_template[-1].value), verbose, 'error')
# binarize segmentation (in case it has values below 0 caused by manual editing)
sct.printv('\nBinarize segmentation', verbose)
sct.run('sct_maths -i seg.nii.gz -bin 0.5 -o seg.nii.gz')
# smooth segmentation (jcohenadad, issue #613)
# sct.printv('\nSmooth segmentation...', verbose)
# sct.run('sct_maths -i '+ftmp_seg+' -smooth 1.5 -o '+add_suffix(ftmp_seg, '_smooth'))
# jcohenadad: updated 2016-06-16: DO NOT smooth the seg anymore. Issue #
# sct.run('sct_maths -i '+ftmp_seg+' -smooth 0 -o '+add_suffix(ftmp_seg, '_smooth'))
# ftmp_seg = add_suffix(ftmp_seg, '_smooth')
# Switch between modes: subject->template or template->subject
if ref == 'template':
# resample data to 1mm isotropic
sct.printv('\nResample data to 1mm isotropic...', verbose)
sct.run('sct_resample -i '+ftmp_data+' -mm 1.0x1.0x1.0 -x linear -o '+add_suffix(ftmp_data, '_1mm'))
ftmp_data = add_suffix(ftmp_data, '_1mm')
sct.run('sct_resample -i '+ftmp_seg+' -mm 1.0x1.0x1.0 -x linear -o '+add_suffix(ftmp_seg, '_1mm'))
ftmp_seg = add_suffix(ftmp_seg, '_1mm')
# N.B. resampling of labels is more complicated, because they are single-point labels, therefore resampling with neighrest neighbour can make them disappear. Therefore a more clever approach is required.
resample_labels(ftmp_label, ftmp_data, add_suffix(ftmp_label, '_1mm'))
ftmp_label = add_suffix(ftmp_label, '_1mm')
# Change orientation of input images to RPI
sct.printv('\nChange orientation of input images to RPI...', verbose)
sct.run('sct_image -i '+ftmp_data+' -setorient RPI -o '+add_suffix(ftmp_data, '_rpi'))
ftmp_data = add_suffix(ftmp_data, '_rpi')
sct.run('sct_image -i '+ftmp_seg+' -setorient RPI -o '+add_suffix(ftmp_seg, '_rpi'))
ftmp_seg = add_suffix(ftmp_seg, '_rpi')
sct.run('sct_image -i '+ftmp_label+' -setorient RPI -o '+add_suffix(ftmp_label, '_rpi'))
ftmp_label = add_suffix(ftmp_label, '_rpi')
# get landmarks in native space
# crop segmentation
# output: segmentation_rpi_crop.nii.gz
status_crop, output_crop = sct.run('sct_crop_image -i '+ftmp_seg+' -o '+add_suffix(ftmp_seg, '_crop')+' -dim 2 -bzmax', verbose)
ftmp_seg = add_suffix(ftmp_seg, '_crop')
cropping_slices = output_crop.split('Dimension 2: ')[1].split('\n')[0].split(' ')
# straighten segmentation
sct.printv('\nStraighten the spinal cord using centerline/segmentation...', verbose)
# check if warp_curve2straight and warp_straight2curve already exist (i.e. no need to do it another time)
if os.path.isfile('../warp_curve2straight.nii.gz') and os.path.isfile('../warp_straight2curve.nii.gz') and os.path.isfile('../straight_ref.nii.gz'):
# if they exist, copy them into current folder
sct.printv('WARNING: Straightening was already run previously. Copying warping fields...', verbose, 'warning')
shutil.copy('../warp_curve2straight.nii.gz', 'warp_curve2straight.nii.gz')
shutil.copy('../warp_straight2curve.nii.gz', 'warp_straight2curve.nii.gz')
shutil.copy('../straight_ref.nii.gz', 'straight_ref.nii.gz')
# apply straightening
sct.run('sct_apply_transfo -i '+ftmp_seg+' -w warp_curve2straight.nii.gz -d straight_ref.nii.gz -o '+add_suffix(ftmp_seg, '_straight'))
else:
sct.run('sct_straighten_spinalcord -i '+ftmp_seg+' -s '+ftmp_seg+' -o '+add_suffix(ftmp_seg, '_straight')+' -qc 0 -r 0 -v '+str(verbose), verbose)
# N.B. DO NOT UPDATE VARIABLE ftmp_seg BECAUSE TEMPORARY USED LATER
# re-define warping field using non-cropped space (to avoid issue #367)
sct.run('sct_concat_transfo -w warp_straight2curve.nii.gz -d '+ftmp_data+' -o warp_straight2curve.nii.gz')
# Label preparation:
# --------------------------------------------------------------------------------
# Remove unused label on template. Keep only label present in the input label image
sct.printv('\nRemove unused label on template. Keep only label present in the input label image...', verbose)
sct.run('sct_label_utils -i '+ftmp_template_label+' -o '+ftmp_template_label+' -remove '+ftmp_label)
# Dilating the input label so they can be straighten without losing them
sct.printv('\nDilating input labels using 3vox ball radius')
sct.run('sct_maths -i '+ftmp_label+' -o '+add_suffix(ftmp_label, '_dilate')+' -dilate 3')
ftmp_label = add_suffix(ftmp_label, '_dilate')
# Apply straightening to labels
sct.printv('\nApply straightening to labels...', verbose)
sct.run('sct_apply_transfo -i '+ftmp_label+' -o '+add_suffix(ftmp_label, '_straight')+' -d '+add_suffix(ftmp_seg, '_straight')+' -w warp_curve2straight.nii.gz -x nn')
ftmp_label = add_suffix(ftmp_label, '_straight')
# Compute rigid transformation straight landmarks --> template landmarks
sct.printv('\nEstimate transformation for step #0...', verbose)
from msct_register_landmarks import register_landmarks
try:
register_landmarks(ftmp_label, ftmp_template_label, paramreg.steps['0'].dof, fname_affine='straight2templateAffine.txt', verbose=verbose)
except Exception:
sct.printv('ERROR: input labels do not seem to be at the right place. Please check the position of the labels. See documentation for more details: https://sourceforge.net/p/spinalcordtoolbox/wiki/create_labels/', verbose=verbose, type='error')
# Concatenate transformations: curve --> straight --> affine
sct.printv('\nConcatenate transformations: curve --> straight --> affine...', verbose)
sct.run('sct_concat_transfo -w warp_curve2straight.nii.gz,straight2templateAffine.txt -d template.nii -o warp_curve2straightAffine.nii.gz')
# Apply transformation
sct.printv('\nApply transformation...', verbose)
sct.run('sct_apply_transfo -i '+ftmp_data+' -o '+add_suffix(ftmp_data, '_straightAffine')+' -d '+ftmp_template+' -w warp_curve2straightAffine.nii.gz')
ftmp_data = add_suffix(ftmp_data, '_straightAffine')
sct.run('sct_apply_transfo -i '+ftmp_seg+' -o '+add_suffix(ftmp_seg, '_straightAffine')+' -d '+ftmp_template+' -w warp_curve2straightAffine.nii.gz -x linear')
ftmp_seg = add_suffix(ftmp_seg, '_straightAffine')
"""
# Benjamin: Issue from Allan Martin, about the z=0 slice that is screwed up, caused by the affine transform.
# Solution found: remove slices below and above landmarks to avoid rotation effects
points_straight = []
for coord in landmark_template:
points_straight.append(coord.z)
min_point, max_point = int(round(np.min(points_straight))), int(round(np.max(points_straight)))
sct.run('sct_crop_image -i ' + ftmp_seg + ' -start ' + str(min_point) + ' -end ' + str(max_point) + ' -dim 2 -b 0 -o ' + add_suffix(ftmp_seg, '_black'))
ftmp_seg = add_suffix(ftmp_seg, '_black')
"""
# binarize
sct.printv('\nBinarize segmentation...', verbose)
sct.run('sct_maths -i '+ftmp_seg+' -bin 0.5 -o '+add_suffix(ftmp_seg, '_bin'))
ftmp_seg = add_suffix(ftmp_seg, '_bin')
# find min-max of anat2template (for subsequent cropping)
zmin_template, zmax_template = find_zmin_zmax(ftmp_seg)
# crop template in z-direction (for faster processing)
sct.printv('\nCrop data in template space (for faster processing)...', verbose)
sct.run('sct_crop_image -i '+ftmp_template+' -o '+add_suffix(ftmp_template, '_crop')+' -dim 2 -start '+str(zmin_template)+' -end '+str(zmax_template))
ftmp_template = add_suffix(ftmp_template, '_crop')
sct.run('sct_crop_image -i '+ftmp_template_seg+' -o '+add_suffix(ftmp_template_seg, '_crop')+' -dim 2 -start '+str(zmin_template)+' -end '+str(zmax_template))
ftmp_template_seg = add_suffix(ftmp_template_seg, '_crop')
sct.run('sct_crop_image -i '+ftmp_data+' -o '+add_suffix(ftmp_data, '_crop')+' -dim 2 -start '+str(zmin_template)+' -end '+str(zmax_template))
ftmp_data = add_suffix(ftmp_data, '_crop')
sct.run('sct_crop_image -i '+ftmp_seg+' -o '+add_suffix(ftmp_seg, '_crop')+' -dim 2 -start '+str(zmin_template)+' -end '+str(zmax_template))
ftmp_seg = add_suffix(ftmp_seg, '_crop')
# sub-sample in z-direction
sct.printv('\nSub-sample in z-direction (for faster processing)...', verbose)
sct.run('sct_resample -i '+ftmp_template+' -o '+add_suffix(ftmp_template, '_sub')+' -f 1x1x'+zsubsample, verbose)
ftmp_template = add_suffix(ftmp_template, '_sub')
sct.run('sct_resample -i '+ftmp_template_seg+' -o '+add_suffix(ftmp_template_seg, '_sub')+' -f 1x1x'+zsubsample, verbose)
ftmp_template_seg = add_suffix(ftmp_template_seg, '_sub')
sct.run('sct_resample -i '+ftmp_data+' -o '+add_suffix(ftmp_data, '_sub')+' -f 1x1x'+zsubsample, verbose)
ftmp_data = add_suffix(ftmp_data, '_sub')
sct.run('sct_resample -i '+ftmp_seg+' -o '+add_suffix(ftmp_seg, '_sub')+' -f 1x1x'+zsubsample, verbose)
ftmp_seg = add_suffix(ftmp_seg, '_sub')
# Registration straight spinal cord to template
sct.printv('\nRegister straight spinal cord to template...', verbose)
# loop across registration steps
warp_forward = []
warp_inverse = []
for i_step in range(1, len(paramreg.steps)):
sct.printv('\nEstimate transformation for step #'+str(i_step)+'...', verbose)
# identify which is the src and dest
if paramreg.steps[str(i_step)].type == 'im':
src = ftmp_data
dest = ftmp_template
interp_step = 'linear'
elif paramreg.steps[str(i_step)].type == 'seg':
src = ftmp_seg
dest = ftmp_template_seg
interp_step = 'nn'
else:
sct.printv('ERROR: Wrong image type.', 1, 'error')
# if step>1, apply warp_forward_concat to the src image to be used
if i_step > 1:
# sct.run('sct_apply_transfo -i '+src+' -d '+dest+' -w '+','.join(warp_forward)+' -o '+sct.add_suffix(src, '_reg')+' -x '+interp_step, verbose)
# apply transformation from previous step, to use as new src for registration
sct.run('sct_apply_transfo -i '+src+' -d '+dest+' -w '+','.join(warp_forward)+' -o '+add_suffix(src, '_regStep'+str(i_step-1))+' -x '+interp_step, verbose)
src = add_suffix(src, '_regStep'+str(i_step-1))
# register src --> dest
# TODO: display param for debugging
warp_forward_out, warp_inverse_out = register(src, dest, paramreg, param, str(i_step))
warp_forward.append(warp_forward_out)
warp_inverse.append(warp_inverse_out)
# Concatenate transformations:
sct.printv('\nConcatenate transformations: anat --> template...', verbose)
sct.run('sct_concat_transfo -w warp_curve2straightAffine.nii.gz,'+','.join(warp_forward)+' -d template.nii -o warp_anat2template.nii.gz', verbose)
# sct.run('sct_concat_transfo -w warp_curve2straight.nii.gz,straight2templateAffine.txt,'+','.join(warp_forward)+' -d template.nii -o warp_anat2template.nii.gz', verbose)
sct.printv('\nConcatenate transformations: template --> anat...', verbose)
warp_inverse.reverse()
sct.run('sct_concat_transfo -w '+','.join(warp_inverse)+',-straight2templateAffine.txt,warp_straight2curve.nii.gz -d data.nii -o warp_template2anat.nii.gz', verbose)
# register template->subject
elif ref == 'subject':
# Change orientation of input images to RPI
sct.printv('\nChange orientation of input images to RPI...', verbose)
sct.run('sct_image -i ' + ftmp_data + ' -setorient RPI -o ' + add_suffix(ftmp_data, '_rpi'))
ftmp_data = add_suffix(ftmp_data, '_rpi')
sct.run('sct_image -i ' + ftmp_seg + ' -setorient RPI -o ' + add_suffix(ftmp_seg, '_rpi'))
ftmp_seg = add_suffix(ftmp_seg, '_rpi')
sct.run('sct_image -i ' + ftmp_label + ' -setorient RPI -o ' + add_suffix(ftmp_label, '_rpi'))
ftmp_label = add_suffix(ftmp_label, '_rpi')
# Remove unused label on template. Keep only label present in the input label image
sct.printv('\nRemove unused label on template. Keep only label present in the input label image...', verbose)
sct.run('sct_label_utils -i '+ftmp_template_label+' -o '+ftmp_template_label+' -remove '+ftmp_label)
# Add one label because at least 3 orthogonal labels are required to estimate an affine transformation. This new label is added at the level of the upper most label (lowest value), at 1cm to the right.
for i_file in [ftmp_label, ftmp_template_label]:
im_label = Image(i_file)
coord_label = im_label.getCoordinatesAveragedByValue() # N.B. landmarks are sorted by value
# Create new label
from copy import deepcopy
new_label = deepcopy(coord_label[0])
# move it 5mm to the left (orientation is RAS)
nx, ny, nz, nt, px, py, pz, pt = im_label.dim
new_label.x = round(coord_label[0].x + 5.0 / px)
# assign value 99
new_label.value = 99
# Add to existing image
im_label.data[new_label.x, new_label.y, new_label.z] = new_label.value
# Overwrite label file
# im_label.setFileName('label_rpi_modif.nii.gz')
im_label.save()
# Bring template to subject space using landmark-based transformation
sct.printv('\nEstimate transformation for step #0...', verbose)
from msct_register_landmarks import register_landmarks
warp_forward = ['template2subjectAffine.txt']
warp_inverse = ['-template2subjectAffine.txt']
try:
register_landmarks(ftmp_template_label, ftmp_label, paramreg.steps['0'].dof, fname_affine=warp_forward[0], verbose=verbose, path_qc=param.path_qc)
except Exception:
sct.printv('ERROR: input labels do not seem to be at the right place. Please check the position of the labels. See documentation for more details: https://sourceforge.net/p/spinalcordtoolbox/wiki/create_labels/', verbose=verbose, type='error')
# loop across registration steps
for i_step in range(1, len(paramreg.steps)):
sct.printv('\nEstimate transformation for step #'+str(i_step)+'...', verbose)
# identify which is the src and dest
if paramreg.steps[str(i_step)].type == 'im':
src = ftmp_template
dest = ftmp_data
interp_step = 'linear'
elif paramreg.steps[str(i_step)].type == 'seg':
src = ftmp_template_seg
dest = ftmp_seg
interp_step = 'nn'
else:
sct.printv('ERROR: Wrong image type.', 1, 'error')
# apply transformation from previous step, to use as new src for registration
sct.run('sct_apply_transfo -i '+src+' -d '+dest+' -w '+','.join(warp_forward)+' -o '+add_suffix(src, '_regStep'+str(i_step-1))+' -x '+interp_step, verbose)
src = add_suffix(src, '_regStep'+str(i_step-1))
# register src --> dest
# TODO: display param for debugging
warp_forward_out, warp_inverse_out = register(src, dest, paramreg, param, str(i_step))
warp_forward.append(warp_forward_out)
warp_inverse.insert(0, warp_inverse_out)
# Concatenate transformations:
sct.printv('\nConcatenate transformations: template --> subject...', verbose)
sct.run('sct_concat_transfo -w '+','.join(warp_forward)+' -d data.nii -o warp_template2anat.nii.gz', verbose)
sct.printv('\nConcatenate transformations: subject --> template...', verbose)
sct.run('sct_concat_transfo -w '+','.join(warp_inverse)+' -d template.nii -o warp_anat2template.nii.gz', verbose)
# Apply warping fields to anat and template
sct.run('sct_apply_transfo -i template.nii -o template2anat.nii.gz -d data.nii -w warp_template2anat.nii.gz -crop 1', verbose)
sct.run('sct_apply_transfo -i data.nii -o anat2template.nii.gz -d template.nii -w warp_anat2template.nii.gz -crop 1', verbose)
# come back to parent folder
os.chdir('..')
# Generate output files
sct.printv('\nGenerate output files...', verbose)
sct.generate_output_file(path_tmp+'warp_template2anat.nii.gz', path_output+'warp_template2anat.nii.gz', verbose)
sct.generate_output_file(path_tmp+'warp_anat2template.nii.gz', path_output+'warp_anat2template.nii.gz', verbose)
sct.generate_output_file(path_tmp+'template2anat.nii.gz', path_output+'template2anat'+ext_data, verbose)
sct.generate_output_file(path_tmp+'anat2template.nii.gz', path_output+'anat2template'+ext_data, verbose)
if ref == 'template':
# copy straightening files in case subsequent SCT functions need them
sct.generate_output_file(path_tmp+'warp_curve2straight.nii.gz', path_output+'warp_curve2straight.nii.gz', verbose)
sct.generate_output_file(path_tmp+'warp_straight2curve.nii.gz', path_output+'warp_straight2curve.nii.gz', verbose)
sct.generate_output_file(path_tmp+'straight_ref.nii.gz', path_output+'straight_ref.nii.gz', verbose)
# Delete temporary files
if remove_temp_files:
sct.printv('\nDelete temporary files...', verbose)
sct.run('rm -rf '+path_tmp)
# Decode the parameters of -param-qc, verification done here because if name of param-qc changes, easier to change here
qcParams = None
if '-param-qc' in arguments:
qcParams = msct_qc.Qc_Params(arguments['-param-qc'])
# Need to verify in the case that "generate" arg is provided and means false else we will generate qc
if qcParams is None or qcParams.generate_report is True:
sct.printv("\nPreparing QC Report...\n")
# Qc_Report generates and contains the useful infos for qc generation
qcReport = msct_qc.Qc_Report("sct_register_to_template", qcParams, sys.argv[1:], parser.usage.description)
# Create the Qc object that creates the images files to provide to the HTML
@msct_qc.Qc(qcReport, action_list=[msct_qc.Qc.no_seg_seg])
def template_2_anat_qc(sct_slice):
# Chosen axe to generate image
return sct_slice.single()
output_filename_t2a = 'template2anat'+ext_data
template_2_anat_qc(msct_qc.template2anat_sagittal(fname_data, output_filename_t2a, fname_seg))
# display elapsed time
elapsed_time = time.time() - start_time
sct.printv('\nFinished! Elapsed time: '+str(int(round(elapsed_time)))+'s', verbose)
# to view results
sct.printv('\nTo view results, type:', verbose)
sct.printv('fslview '+fname_data+' '+path_output+'template2anat -b 0,4000 &', verbose, 'info')
sct.printv('fslview '+fname_template+' -b 0,5000 '+path_output+'anat2template &\n', verbose, 'info')
# Resample labels
# ==========================================================================================
def resample_labels(fname_labels, fname_dest, fname_output):
    """Re-create point labels in a resampled space.

    Each label's voxel coordinates are scaled by the ratio between the
    source grid and the destination grid, then the rescaled labels are
    written into the destination image via sct_label_utils.
    """
    # Grid dimensions of the source label image and of the destination image.
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_labels).dim
    nxd, nyd, nzd, ntd, pxd, pyd, pzd, ptd = Image(fname_dest).dim
    scale = [float(nx) / nxd, float(ny) / nyd, float(nz) / nzd]
    # Read the labels from the source image.
    from sct_label_utils import ProcessLabels
    labels = ProcessLabels(fname_labels).display_voxel()
    # Rescale each label coordinate into the destination grid.
    rescaled = []
    for lbl in labels:
        coords = [str(int(round(int(lbl.x) / scale[0]))),
                  str(int(round(int(lbl.y) / scale[1]))),
                  str(int(round(int(lbl.z) / scale[2]))),
                  str(int(float(lbl.value)))]
        rescaled.append(','.join(coords))
    # Write the new labels into the destination image.
    sct.run('sct_label_utils -i '+fname_dest+' -create '+':'.join(rescaled)+' -v 1 -o '+fname_output)
# START PROGRAM
# ==========================================================================================
if __name__ == "__main__":
    # Entry point: run the registration pipeline only when executed as a
    # script, not when this module is imported.
    main()
|
{
"content_hash": "fd8bf94baef987221276a579a7743b57",
"timestamp": "",
"source": "github",
"line_count": 612,
"max_line_length": 454,
"avg_line_length": 56.25816993464052,
"alnum_prop": 0.6151321521928551,
"repo_name": "3324fr/spinalcordtoolbox",
"id": "f1a5a41fb01438d4fcbd4baf77fdb98dd9898ecc",
"size": "35107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/sct_register_to_template.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5961"
},
{
"name": "C++",
"bytes": "1025992"
},
{
"name": "CMake",
"bytes": "18919"
},
{
"name": "CSS",
"bytes": "1384"
},
{
"name": "Groff",
"bytes": "3141"
},
{
"name": "HTML",
"bytes": "5315"
},
{
"name": "JavaScript",
"bytes": "2505"
},
{
"name": "KiCad",
"bytes": "5522"
},
{
"name": "Matlab",
"bytes": "275100"
},
{
"name": "Python",
"bytes": "4808677"
},
{
"name": "Shell",
"bytes": "193192"
}
],
"symlink_target": ""
}
|
"""Setup file for easy installation"""
from os.path import join, dirname
from setuptools import setup
# Package version string (pre-release).
version = '1.0b'
# Fallback description used when README.rst is not available (see
# long_description() below).
LONG_DESCRIPTION = """
Smarter declarative style generic views for Django.
"""
def long_description():
    """Return the package's long description.

    Prefers the contents of README.rst next to this file; falls back to the
    short LONG_DESCRIPTION constant because the README doesn't get
    installed with the package.
    """
    try:
        # Use a context manager so the file handle is closed deterministically
        # (the original leaked the handle returned by open()).
        with open(join(dirname(__file__), 'README.rst')) as readme:
            return readme.read()
    except IOError:
        return LONG_DESCRIPTION
# Package metadata and installation configuration (setuptools).
setup(name='django-smarter',
      version=version,
      author='Alexey Kinyov',
      author_email='rudy@05bit.com',
      description='Smarter declarative style generic views for Django.',
      license='BSD',
      keywords='django, application, scaffolding, crud, views, utility',
      url='https://github.com/05bit/django-smarter',
      packages=['smarter',],
      include_package_data=True,
      long_description=long_description(),
      install_requires=['Django>=1.4',],
      classifiers=['Development Status :: 4 - Beta',
                   'Operating System :: OS Independent',
                   'License :: OSI Approved :: BSD License',
                   'Intended Audience :: Developers',
                   'Environment :: Web Environment',
                   'Programming Language :: Python :: 2.5',
                   'Programming Language :: Python :: 2.6',
                   'Programming Language :: Python :: 2.7'])
|
{
"content_hash": "0e0ac63380088a3d1fa4ab88a9acfa95",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 36.86842105263158,
"alnum_prop": 0.6102783725910065,
"repo_name": "pombredanne/django-smarter",
"id": "3b05510f67c57a7a39642a5c630ca3fbb7324662",
"size": "1425",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Experimental module defining classes used in Pikov animations."""
from .core import GuidNode
from .properties import StringProperty
from . import guids
class SemanticGraphNode(GuidNode):
    """Graph node carrying an optional ``name`` property.

    Defined after the property classes so that every node can have a name.
    Construction also records a CTOR edge in the graph pointing at the
    node's constructor.
    """

    # Optional human-readable name, stored under the NAME guid.
    name = StringProperty(GuidNode(None, guid=guids.NAME))

    def __init__(self, ctor, graph, guid=None):
        super().__init__(graph, guid=guid)
        # Record which constructor produced this node:
        # set_value(self, <CTOR key node>, <ctor's guid node>).
        ctor_key = GuidNode(graph, guid=guids.CTOR)
        ctor_node = GuidNode(graph, guid=_get_guid(ctor))
        graph.set_value(self, ctor_key, ctor_node)
def _get_guid(node_or_guid):
if isinstance(node_or_guid, str):
return node_or_guid
return node_or_guid.guid
|
{
"content_hash": "be4cf8c8b8375bd96518bb62c983a335",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 27.85185185185185,
"alnum_prop": 0.6582446808510638,
"repo_name": "google/pikov",
"id": "a85d863d5bd9033fca1aee35465ebd647e0fabb5",
"size": "1328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pikov/pikov.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "270"
},
{
"name": "JavaScript",
"bytes": "4937"
},
{
"name": "Jupyter Notebook",
"bytes": "134190"
},
{
"name": "Python",
"bytes": "77675"
},
{
"name": "Shell",
"bytes": "1751"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import authenticate, logout, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import Site, RequestSite
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext

from shoppy.accounts.forms import *
from shoppy.accounts.models import Customer
from shoppy.newsletter.models import Subscriper
def registerCustomer(request, refcode=None):
    """
    Register a new user by creating a new user and the user's profile,
    a customer object.  An optional referral code pre-fills the form.
    """
    # Logged-in users have no business registering again.
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse('shoppy_home'))
    if request.method == 'POST':
        form = RegisterForm(request.POST)
        if form.is_valid():
            form.save(request)
            return HttpResponseRedirect(reverse('shoppy_register_done'))
    else:
        # Unbound form, pre-filled with the referral code if given.
        form = RegisterForm(initial={'refcode': refcode})
    return render_to_response('accounts/register_form.html', {
        'form': form,
    }, context_instance=RequestContext(request))
def register_done(request, template_name='accounts/register_done.html'):
    """Static confirmation page shown after a successful registration."""
    ctx = RequestContext(request)
    return render_to_response(template_name, context_instance=ctx)
def confirm(request, confirm_key):
    """
    Activate an account from the signup confirmation link.

    Looks up the pending EmailConfirm record by its key, activates the
    associated user, deletes the record and redirects to the confirm-done
    page.  An unknown key raises Http404.
    TODO: multi-language!
    """
    try:
        ec = EmailConfirm.objects.get(confirmkey=confirm_key)
    except EmailConfirm.DoesNotExist:
        # Bug fix: the original caught Subscriper.DoesNotExist, which the
        # EmailConfirm query can never raise, so an invalid key crashed with
        # an unhandled exception instead of a 404.  Also replaced the
        # Python-2-only ``raise X, msg`` syntax.
        raise Http404("Der Bestätigungslink ist nicht korrekt.")
    ec.user.is_active = True
    ec.user.save()
    ec.delete()
    return HttpResponseRedirect(reverse('shoppy_account_confirm_done'))
def confirm_done(request, template_name='accounts/confirm_done.html'):
    """Static page confirming that account activation succeeded."""
    ctx = RequestContext(request)
    return render_to_response(template_name, context_instance=ctx)
def shop_login(request):
    """
    Custom login using e-mail instead of username.

    Shows the login form; on POST, resolves the e-mail to a User via
    ``userauth`` and re-authenticates through Django's auth backend so the
    session backend gets attached.  Staff users go to the manage view,
    regular users to ``next`` or their profile.
    TODO: rethink the exceptions!
    """
    if request.user.is_authenticated():
        # Already logged in -- nothing to do.
        return HttpResponseRedirect(reverse('shoppy_home'))
    errormsg = None
    # NOTE(review): request.REQUEST (merged GET+POST) is deprecated in newer
    # Django versions; ``next`` also shadows the builtin of the same name.
    next = request.REQUEST.get('next', '')
    form = LoginForm()
    if request.method == 'POST': # If the form has been submitted...
        # (Commented-out test-cookie and register-switch handling removed
        # for readability; see VCS history if it needs to be restored.)
        form = LoginForm(request.POST)
        if form.is_valid():
            # Map the submitted e-mail to a User (also checks the password).
            user = userauth(email=form.cleaned_data['email'], password=form.cleaned_data['password'])
            if user:
                # Authenticate through Django so the auth backend is set on
                # the user object before login().
                user = authenticate(username=user.username, password=form.cleaned_data['password'])
                if user:
                    # NOTE(review): login() runs before the is_active check,
                    # so an inactive user gets a session before seeing
                    # "Account gesperrt." -- confirm this is intended.
                    login(request, user)
                    if user.is_active:
                        if user.is_staff:
                            return HttpResponseRedirect(reverse('shoppy_manage'))
                        if next:
                            return HttpResponseRedirect(next)
                        else:
                            return HttpResponseRedirect(reverse('shoppy_account_profile'))
                    else:
                        errormsg = 'Account gesperrt.'
                else:
                    errormsg = 'E-Mail oder Passwort falsch.'
            else:
                errormsg = 'E-Mail oder Passwort falsch.'
        else:
            errormsg = 'E-Mail oder Passwort falsch.'
    #request.session.set_test_cookie()
    return render_to_response('accounts/login_form.html', {
        'form': form,'errormsg': errormsg, 'next':next,
    },context_instance=RequestContext(request))
"""Authenticate using email only"""
def userauth(email=None, password=None):
user = User.objects.filter(email=email)
if user.count() > 0:
user = user[0]
if user.check_password(password):
return user
return None
@login_required
def shop_logout(request):
    """Log the current user out and show the goodbye page."""
    logout(request)
    ctx = RequestContext(request)
    return render_to_response('accounts/logout.html', {}, context_instance=ctx)
@login_required
def profile(request):
    """Show the logged-in user's profile page."""
    ctx = RequestContext(request)
    return render_to_response('accounts/profile.html', {}, context_instance=ctx)
@login_required
def editCustomer(request):
    """
    A form for the user to change his contact information/profile.

    GET pre-fills the form from the active Customer record (including the
    newsletter subscription state); POST saves the changes back.
    """
    customer = Customer.objects.get(user=request.user, isActive=True)
    if request.method == 'POST':
        form = EditCustomerForm(request.POST)
        if form.is_valid():
            # Reuse the customer fetched above instead of re-running the
            # identical query (the original queried the same row twice).
            form.save(customer.id)
            return HttpResponseRedirect(reverse('shoppy_account_profile'))
    else:
        init_data = {
            'title': customer.title.id,
            'name': customer.name,
            'email': customer.user.email,
            'language': customer.language.id,
            'birthday': customer.birthday,
        }
        # Newsletter checkbox: checked only for an activated subscription.
        try:
            subs = Subscriper.objects.get(user=customer.user)
            init_data['newsletter'] = bool(subs.isActivated)
        except Subscriper.DoesNotExist:
            init_data['newsletter'] = False
        form = EditCustomerForm(initial=init_data)
    return render_to_response('accounts/account_edit_form.html', {
        'form': form,
        'customer': customer,
    }, context_instance=RequestContext(request))
def password_reset(request, is_admin_site=False, template_name='registration/password_reset_form.html',
        email_template_name='registration/password_reset_email.html',
        password_reset_form=PasswordResetForm, token_generator=default_token_generator,
        post_reset_redirect=None):
    """Show the password-reset form and, on success, e-mail a reset link."""
    if post_reset_redirect is None:
        post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done')
    if request.method == "POST":
        form = password_reset_form(request.POST)
        if form.is_valid():
            save_kwargs = {
                'use_https': request.is_secure(),
                'token_generator': token_generator,
            }
            if is_admin_site:
                # Admin resets always use the requesting host's domain.
                save_kwargs['domain_override'] = request.META['HTTP_HOST']
            else:
                save_kwargs['email_template_name'] = email_template_name
                if not Site._meta.installed:
                    save_kwargs['domain_override'] = RequestSite(request).domain
            form.save(**save_kwargs)
            return HttpResponseRedirect(post_reset_redirect)
    else:
        form = password_reset_form()
    return render_to_response(template_name, {
        'form': form,
    }, context_instance=RequestContext(request))
def get_invited(request, template_name='accounts/get_invited_form.html',
        email_template_name='accounts/get_invited_email.html',
        get_invited_form=GetInvitedForm,
        post_reset_redirect=None):
    """Show the invitation form and send an invitation mail on success."""
    if post_reset_redirect is None:
        post_reset_redirect = reverse('shoppy.accounts.views.get_invited_done')
    if request.method == "POST":
        form = get_invited_form(request.POST)
        if form.is_valid():
            form.save(use_https=request.is_secure(),
                      email_template_name=email_template_name)
            return HttpResponseRedirect(post_reset_redirect)
    else:
        form = get_invited_form()
    return render_to_response(template_name, {'form': form},
                              context_instance=RequestContext(request))
def get_invited_done(request, template_name='accounts/get_invited_done.html'):
    """Static page shown after an invitation mail has been sent."""
    ctx = RequestContext(request)
    return render_to_response(template_name, context_instance=ctx)
def request_invitation(request, template_name='accounts/request_invitation_form.html',
        email_template_name='accounts/request_invitation_mail.html',
        get_invited_form=RequestMailForm,
        post_reset_redirect=None):
    """Show the request-invitation form and mail the request on success."""
    if post_reset_redirect is None:
        post_reset_redirect = reverse('shoppy.accounts.views.request_invitation_done')
    if request.method == "POST":
        form = get_invited_form(request.POST)
        if form.is_valid():
            form.save(use_https=request.is_secure(),
                      email_template_name=email_template_name)
            return HttpResponseRedirect(post_reset_redirect)
    else:
        form = get_invited_form()
    return render_to_response(template_name, {'form': form},
                              context_instance=RequestContext(request))
def request_invitation_done(request, template_name='accounts/request_invitation_done.html'):
    """Static page shown after an invitation request has been submitted."""
    ctx = RequestContext(request)
    return render_to_response(template_name, context_instance=ctx)
def codeToReg(request):
    """Validate a submitted referral code and forward to registration.

    A valid POSTed code redirects to the register view with that code;
    everything else goes back to the get-invited page.
    """
    if request.method == "POST":
        form = RefcodeCheckForm(request.POST)
        if form.is_valid():
            code = form.cleaned_data.get('refcode')
            return HttpResponseRedirect(reverse('shoppy_register', args=[code]))
    return HttpResponseRedirect(reverse('shoppy_getinvited'))
|
{
"content_hash": "f35d5f3bfa1f0a16ac5e111333bfc98c",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 103,
"avg_line_length": 39.578947368421055,
"alnum_prop": 0.6246200607902735,
"repo_name": "pocketone/django-shoppy",
"id": "ef85764b76b0feb432fdbc64afad95fbfdee6015",
"size": "10576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shoppy/accounts/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from user_info import get_user_info
def check_status(self):
    """
    Refresh the bot's own account info until the fetch succeeds, then
    reset the per-session action counters.

    ``get_user_info`` is presumably responsible for setting
    ``self.is_checked`` to True once the data is retrieved -- it is the
    only thing that can terminate the loop.
    """
    self.is_self_checking = True
    self.is_checked = False
    # Idiomatic boolean test; the original compared ``!= True`` (PEP 8 E712).
    while not self.is_checked:
        get_user_info(self, self.user_login)
    self.like_counter = 0
    self.follow_counter = 0
    self.unfollow_counter = 0
{
"content_hash": "4e0a10e36485f7d3568d7177faa9687f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 44,
"avg_line_length": 26.09090909090909,
"alnum_prop": 0.6655052264808362,
"repo_name": "Cartman2490/InstaBot",
"id": "4ceb66f103bb2b135e072d947bb53843fa735b15",
"size": "333",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/check_status.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76517"
}
],
"symlink_target": ""
}
|
""" Defines the basic Container class """
from contextlib import contextmanager
from enable.base import empty_rectangle, intersect_bounds
from enable.events import MouseEvent
from kiva import affine
from traits.api import Instance, List, Property
from .component import Component
class Container(Component):
""" A Container that holds components and other containers.
This represents a general container class of a composite structure [GoF]_.
.. [GoF] Design Patterns: Elements of Reusable Object Oriented Software,
Gamma et al., Addison-Wesley, 1996.
"""
# The list of components within this frame
components = Property(List(Component))
# Shadow trait for self.components
_components = List(Component)
# Set of components that last handled a mouse event. This allows us to
# generate mouse_enter and mouse_leave events of our own.
_prev_event_handlers = Instance(set, ())
# -----------------------------------------------------------------------
# Public methods
# -----------------------------------------------------------------------
def add(self, *components):
""" Adds components to this container """
for component in components:
if component.container is not None:
component.container.remove(component)
component.container = self
self._components.extend(components)
def remove(self, *components):
for component in components:
if component is not None:
assert component.container == self
self._components.remove(component)
component.container == None
def components_at(self, x, y):
""" Returns components underneath the given point.
Input point is specified in parent container's coordinate space.
"""
result = []
if self.is_in(x, y):
for component in self._components[::-1]:
if component.is_in(x - self.x, y - self.y):
result.append(component)
return result
def cleanup(self, window):
""" Perform any necessary cleanup. """
if self._components:
for component in self._components:
component.cleanup(window)
def dispatch(self, event, suffix):
""" Dispatches mouse event to child components until it is handled.
Parameters
----------
event : BaseEvent
A mouse or key event.
suffix : string
The name of the mouse event as a suffix to the event state name,
e.g. "left_down" or "window_enter".
"""
if event.handled:
return
# Get components under event and then transform to local coordinates.
components = self.components_at(event.x, event.y)
with self._local_event_transform(event):
component_set = set(components)
components_left = self._prev_event_handlers - component_set
components_entered = component_set - self._prev_event_handlers
dispatch_mouse_event = self._dispatch_if_mouse_event
dispatch_mouse_event(components_left, event, 'mouse_leave')
if suffix != 'mouse_leave':
dispatch_mouse_event(components_entered, event, 'mouse_enter')
self._prev_event_handlers = set()
for component in components:
component.dispatch(event, suffix)
# Only add handler if it actually received the event.
self._prev_event_handlers.add(component)
if event.handled:
break
if not event.handled:
super(Container, self).dispatch(event, suffix)
# -------------------------------------------------------------------------
# Protected interface
# -------------------------------------------------------------------------
def _main_layers(self, view_rect):
return self._get_visible_components(view_rect)
# -----------------------------------------------------------------------
# Property setters & getters
# -----------------------------------------------------------------------
def _get_components(self):
return self._components
def _get_layout_needed(self):
# Override the parent implementation child components.
return (self._layout_needed
or any(c.layout_needed for c in self.components))
# -----------------------------------------------------------------------
# Event handlers
# -----------------------------------------------------------------------
def _component_origin_changed(self):
"""Called by contained objects when their origins change"""
self._origin_changed()
def _component_size_changed(self):
"""Called by contained objects when their size change"""
self._size_changed()
def _size_changed(self):
super(Container, self)._size_changed()
self._layout_needed = True
def __components_items_changed(self, event):
self._layout_needed = True
# -------------------------------------------------------------------------
# Private interface
# -------------------------------------------------------------------------
def _get_visible_components(self, bounds):
""" Returns a list of this plot's children that are in the bounds. """
if bounds is None:
return [c for c in self.components if c.visible]
return [c for c in self.components
if intersect_bounds(c.rect, bounds) != empty_rectangle]
    def _get_event_transform(self, event=None):
        """Return the affine transform mapping parent coordinates into this
        container's local coordinates (a translation by -origin)."""
        return affine.affine_from_translation(-self.x, -self.y)
def _dispatch_if_mouse_event(self, components, event, suffix):
if not isinstance(event, MouseEvent):
return
for component in components:
component.dispatch(event, suffix)
event.handled = False
@contextmanager
def _local_event_transform(self, event):
""" Translate event location to be relative to this container. """
try:
transform = self._get_event_transform(event)
event.push_transform(transform, caller=self)
yield
finally:
event.pop(caller=self)
|
{
"content_hash": "a1861497bf496abfba7713ae69fa3159",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 79,
"avg_line_length": 36.76300578034682,
"alnum_prop": 0.5424528301886793,
"repo_name": "tonysyu/deli",
"id": "91e79d5ee415ee1bd2c1fc14b51e8125b1b95c40",
"size": "6360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deli/core/container.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1233"
},
{
"name": "Python",
"bytes": "310799"
},
{
"name": "Shell",
"bytes": "5101"
}
],
"symlink_target": ""
}
|
from formencode.htmlrename import rename, add_prefix
def test_rename():
    """Exercise formencode.htmlrename: rename() callbacks and add_prefix()."""
    uppercase = lambda name: name.upper()
    result = rename('<input type="text" name="a_name">', uppercase)
    assert result == '<input type="text" name="A_NAME">'
    result = add_prefix('<input type="text" name="a_name"><input type="text" name="">', 'test', dotted=True)
    assert result == '<input type="text" name="test.a_name"><input type="text" name="test">'
    result = add_prefix('text<textarea name="a_name">value</textarea>text2', 'prefix.')
    assert result == 'text<textarea name="prefix.a_name">value</textarea>text2'
    result = add_prefix('<textarea name="" rows=2 style="width: 100%" id="field-0"></textarea>',
                        'street', dotted=True)
    assert result == '<textarea name="street" rows="2" style="width: 100%" id="field-0"></textarea>'
|
{
"content_hash": "2051e4132e08057c9c498280e1de6ff5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 107,
"avg_line_length": 61.92307692307692,
"alnum_prop": 0.5987577639751552,
"repo_name": "grepme/CMPUT410Lab01",
"id": "86a6f92dc917186d5a84736ee7a93c92f8c80605",
"size": "805",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "virt_env/virt1/lib/python2.7/site-packages/FormEncode-1.3.0a1-py2.7.egg/formencode/tests/test_htmlrename.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "43292"
},
{
"name": "JavaScript",
"bytes": "231360"
},
{
"name": "Makefile",
"bytes": "2195"
},
{
"name": "Python",
"bytes": "11792754"
},
{
"name": "Shell",
"bytes": "3955"
}
],
"symlink_target": ""
}
|
import pickle
from os import error
import numpy as np
import numbers
import warnings
from scipy import signal
import astropy.modeling.models
from stingray import utils
from stingray import Lightcurve
from stingray import AveragedPowerspectrum
__all__ = ['Simulator']
class Simulator(object):
    """
    Methods to simulate and visualize light curves.

    Parameters
    ----------
    dt : int, default 1
        time resolution of simulated light curve
    N : int, default 1024
        bins count of simulated light curve
    mean : float, default 0
        mean value of the simulated light curve
    rms : float, default 1
        fractional rms of the simulated light curve,
        actual rms is calculated by mean*rms
    err : float, default 0
        the errorbars on the final light curve
    red_noise : int, default 1
        multiple of real length of light curve, by
        which to simulate, to avoid red noise leakage
    random_state : int, default None
        seed value for random processes
    tstart : float, default 0.0
        start time of the simulated light curve
    poisson : bool, default False
        return Poisson-distributed light curves.
    """

    def __init__(self, dt, N, mean, rms, err=0., red_noise=1,
                 random_state=None, tstart=0.0, poisson=False):
        self.dt = dt

        if not isinstance(N, (int, np.integer)):
            raise ValueError("N must be integer!")
        self.N = N

        if mean == 0:
            # Fix: the two halves of this message previously ran together
            # without a separating space ("unphysical!This may ...").
            warnings.warn("Careful! A mean of zero is unphysical! "
                          "This may have unintended consequences!")
        self.mean = mean
        self.nphot = self.mean * self.N
        self.rms = rms
        self.red_noise = red_noise
        self.tstart = tstart
        self.time = dt * np.arange(N) + self.tstart
        self.nphot_factor = 1_000_000
        self.err = err
        self.poisson = poisson

        # List of (channel_name, light_curve) pairs.
        self.channels = []

        # Single RandomState shared by every stochastic step below, so that a
        # seed passed here makes the whole simulation reproducible.
        self.random_state = utils.get_random_state(random_state)

        # NOTE: asserts are stripped under `python -O`; kept as asserts so the
        # exception type seen by existing callers does not change.
        assert rms <= 1, 'Fractional rms must be less than 1.'
        assert dt > 0, 'Time resolution must be greater than 0'

    def simulate(self, *args):
        """
        Simulate light curve generation using power spectrum or
        impulse response.

        Examples
        --------
        * x = simulate(beta):
            For generating a light curve using power law spectrum.

            Parameters:
            * beta : float
                Defines the shape of spectrum

        * x = simulate(s):
            For generating a light curve from user-provided spectrum.
            **Note**: In this case, the `red_noise` parameter is provided.
            You can generate a longer light curve by providing a higher
            frequency resolution on the input power spectrum.

            Parameters:
            * s : array-like
                power spectrum

        * x = simulate(model):
            For generating a light curve from pre-defined model

            Parameters:
            * model : astropy.modeling.Model
                the pre-defined model

        * x = simulate('model', params):
            For generating a light curve from pre-defined model

            Parameters:
            * model : string
                the pre-defined model
            * params : list iterable or dict
                the parameters for the pre-defined model

        * x = simulate(s, h):
            For generating a light curve using impulse response.

            Parameters:
            * s : array-like
                Underlying variability signal
            * h : array-like
                Impulse response

        * x = simulate(s, h, 'same'):
            For generating a light curve of same length as input signal,
            using impulse response.

            Parameters:
            * s : array-like
                Underlying variability signal
            * h : array-like
                Impulse response
            * mode : str
                mode can be 'same', 'filtered', or 'full'.
                'same' indicates that the length of output light
                curve is same as that of input signal.
                'filtered' means that length of output light curve
                is len(s) - lag_delay
                'full' indicates that the length of output light
                curve is len(s) + len(h) - 1

        Parameters
        ----------
        args
            See examples above.

        Returns
        -------
        lightCurve : `LightCurve` object
        """
        # Dispatch on argument types/arity; order matters: the string and
        # model checks must precede the generic one-argument spectrum case.
        if isinstance(args[0], (numbers.Integral, float)) and len(args) == 1:
            return self._simulate_power_law(args[0])

        elif isinstance(args[0], astropy.modeling.Model) and len(args) == 1:
            return self._simulate_model(args[0])

        elif utils.is_string(args[0]) and len(args) == 2:
            return self._simulate_model_string(args[0], args[1])

        elif len(args) == 1:
            return self._simulate_power_spectrum(args[0])

        elif len(args) == 2:
            return self._simulate_impulse_response(args[0], args[1])

        elif len(args) == 3:
            return self._simulate_impulse_response(args[0], args[1], args[2])

        else:
            raise ValueError("Length of arguments must be 1, 2 or 3.")

    def simulate_channel(self, channel, *args):
        """
        Simulate a light curve and add it to the corresponding energy
        channel.

        Parameters
        ----------
        channel : str
            range of energy channel (e.g., 3.5-4.5)

        *args
            see description of simulate() for details

        Returns
        -------
        None
            The simulated light curve is stored in ``self.channels``
            (the docstring previously claimed a `LightCurve` was returned).

        Raises
        ------
        KeyError
            if a channel with this name already exists.
        """
        # Check that channel name does not already exist.
        if channel not in [lc[0] for lc in self.channels]:
            self.channels.append((channel, self.simulate(*args)))
        else:
            raise KeyError('A channel with this name already exists.')

    def get_channel(self, channel):
        """
        Get the light curve belonging to the energy channel.
        """
        return [lc[1] for lc in self.channels if lc[0] == channel][0]

    def get_channels(self, channels):
        """
        Get multiple light curves belonging to the energy channels.
        """
        return [lc[1] for lc in self.channels if lc[0] in channels]

    def get_all_channels(self):
        """
        Get light curves belonging to all channels.
        """
        return [lc[1] for lc in self.channels]

    def delete_channel(self, channel):
        """
        Delete an energy channel.

        Raises
        ------
        KeyError
            if no channel with this name exists.
        """
        channel = [lc for lc in self.channels if lc[0] == channel]
        if len(channel) == 0:
            raise KeyError('This channel does not exist or has already been '
                           'deleted.')
        else:
            index = self.channels.index(channel[0])
            del self.channels[index]

    def delete_channels(self, channels):
        """
        Delete multiple energy channels.

        Raises
        ------
        KeyError
            if any of the named channels does not exist.
        """
        n = len(channels)
        channels = [lc for lc in self.channels if lc[0] in channels]

        if len(channels) != n:
            # Fix: message typo ("One of more" -> "One or more").
            raise KeyError('One or more of the channels do not exist or have '
                           'already been deleted.')
        else:
            indices = [self.channels.index(channel) for channel in channels]
            # Delete from the highest index down so indices stay valid.
            for i in sorted(indices, reverse=True):
                del self.channels[i]

    def count_channels(self):
        """
        Return total number of energy channels.
        """
        return len(self.channels)

    def simple_ir(self, start=0, width=1000, intensity=1):
        """
        Construct a simple impulse response using start time,
        width and scaling intensity.
        To create a delta impulse response, set width to 1.

        Parameters
        ----------
        start : int
            start time of impulse response
        width : int
            width of impulse response
        intensity : float
            scaling parameter to set the intensity of delayed emission
            corresponding to direct emission.

        Returns
        -------
        h : numpy.ndarray
            Constructed impulse response
        """
        # Fill in 0 entries until the start time
        h_zeros = np.zeros(int(start / self.dt))

        # Define constant impulse response
        h_ones = np.ones(int(width / self.dt)) * intensity

        return np.append(h_zeros, h_ones)

    def relativistic_ir(self, t1=3, t2=4, t3=10, p1=1, p2=1.4, rise=0.6,
                        decay=0.1):
        """
        Construct a realistic impulse response considering the relativistic
        effects.

        Parameters
        ----------
        t1 : int
            primary peak time
        t2 : int
            secondary peak time
        t3 : int
            end time
        p1 : float
            value of primary peak
        p2 : float
            value of secondary peak
        rise : float
            slope of rising exponential from primary peak to secondary peak
        decay : float
            slope of decaying exponential from secondary peak to end time

        Returns
        -------
        h : numpy.ndarray
            Constructed impulse response
        """
        dt = self.dt

        assert t2 > t1, 'Secondary peak must be after primary peak.'
        assert t3 > t2, 'End time must be after secondary peak.'
        assert p2 > p1, 'Secondary peak must be greater than primary peak.'

        # Append zeros before start time
        h_primary = np.append(np.zeros(int(t1 / dt)), p1)

        # Create a rising exponential of user-provided slope
        x = np.linspace(t1 / dt, t2 / dt, int((t2 - t1) / dt))
        h_rise = np.exp(rise * x)

        # Evaluate a factor for scaling exponential
        factor = np.max(h_rise) / (p2 - p1)
        h_secondary = (h_rise / factor) + p1

        # Create a decaying exponential until the end time.
        # Fix: the decay previously anchored at the hard-coded value 4/dt
        # (the *default* of t2) instead of t2/dt, so non-default secondary
        # peak times produced a discontinuous response. With the default
        # t2=4 the behavior is unchanged.
        x = np.linspace(t2 / dt, t3 / dt, int((t3 - t2) / dt))
        h_decay = np.exp(-decay * (x - t2 / dt))

        # Add the three responses
        h = np.append(h_primary, h_secondary)
        h = np.append(h, h_decay)

        return h

    def _find_inverse(self, real, imaginary):
        """
        Forms complex numbers corresponding to real and imaginary
        parts and finds inverse series.

        Parameters
        ----------
        real : numpy.ndarray
            Coefficients corresponding to real parts of complex numbers
        imaginary : numpy.ndarray
            Coefficients corresponding to imaginary parts of complex
            numbers

        Returns
        -------
        ifft : numpy.ndarray
            Real inverse fourier transform of complex numbers
        """
        # Form complex numbers corresponding to each frequency
        f = [complex(r, i) for r, i in zip(real, imaginary)]

        # Prepend the zero-frequency (DC) term: total counts of the long curve.
        f = np.hstack([self.mean * self.N * self.red_noise, f])

        # Obtain time series
        return np.fft.irfft(f, n=self.N * self.red_noise)

    def _timmerkoenig(self, pds_shape):
        """Straight application of the Timmer & Koenig method to a PDS shape.
        """
        pds_size = pds_shape.size

        # Fix: draw from self.random_state rather than the global np.random,
        # so the `random_state` seed passed at construction actually controls
        # the simulated Fourier amplitudes (it was previously honored only in
        # _extract_and_scale).
        real = self.random_state.normal(size=pds_size) * np.sqrt(0.5 * pds_shape)
        imaginary = self.random_state.normal(size=pds_size) * np.sqrt(0.5 * pds_shape)
        # The Nyquist component of a real signal must be real.
        imaginary[-1] = 0

        counts = self._find_inverse(real, imaginary)
        self.std = counts.std()

        rescaled_counts = self._extract_and_scale(counts)
        err = np.zeros_like(rescaled_counts)

        if self.poisson:
            bad = rescaled_counts < 0
            if np.any(bad):
                warnings.warn("Some bins of the light curve have counts < 0. Setting to 0")
                rescaled_counts[bad] = 0

            # Same fix as above: seeded Poisson deviates.
            lc = Lightcurve(self.time, self.random_state.poisson(rescaled_counts),
                            err_dist='poisson', dt=self.dt, skip_checks=True)
            lc.smooth_counts = rescaled_counts
        else:
            lc = Lightcurve(self.time, rescaled_counts,
                            err=err,
                            err_dist='gauss', dt=self.dt, skip_checks=True)

        return lc

    def _simulate_power_law(self, B):
        """
        Generate LightCurve from a power law spectrum.

        Parameters
        ----------
        B : int
            Defines the shape of power law spectrum.

        Returns
        -------
        lightCurve : array-like
        """
        # Define frequencies at which to compute PSD
        w = np.fft.rfftfreq(self.red_noise * self.N, d=self.dt)[1:]

        pds_shape = np.power((1 / w), B)

        return self._timmerkoenig(pds_shape)

    def _simulate_power_spectrum(self, s):
        """
        Generate a light curve from user-provided spectrum.

        Parameters
        ----------
        s : array-like
            power spectrum

        Returns
        -------
        lightCurve : `LightCurve` object
        """
        # Cast spectrum as numpy array; zero-pad up to red_noise times the
        # input size so the simulated curve can be oversampled.
        pds_shape = np.zeros(s.size * self.red_noise)
        pds_shape[:s.size] = s

        return self._timmerkoenig(pds_shape)

    def _simulate_model(self, model):
        """
        For generating a light curve from a pre-defined model

        Parameters
        ----------
        model : astropy.modeling.Model derived function
            the pre-defined model
            (library-based, available in astropy.modeling.models or
            custom-defined)

        Returns
        -------
        lightCurve : :class:`stingray.lightcurve.LightCurve` object
        """
        # Frequencies at which the PSD is to be computed
        # (only positive frequencies, since the signal is real)
        nbins = self.red_noise * self.N
        simfreq = np.fft.rfftfreq(nbins, d=self.dt)[1:]

        # Compute PSD from model
        simpsd = model(simfreq)

        return self._timmerkoenig(simpsd)

    def _simulate_model_string(self, model_str, params):
        """
        For generating a light curve from a pre-defined model

        Parameters
        ----------
        model_str : string
            name of the pre-defined model
        params : list or dictionary
            parameters of the pre-defined model

        Returns
        -------
        lightCurve : :class:`stingray.lightcurve.LightCurve` object
        """
        from . import models

        # Frequencies at which the PSD is to be computed
        # (only positive frequencies, since the signal is real)
        nbins = self.red_noise * self.N
        simfreq = np.fft.rfftfreq(nbins, d=self.dt)[1:]

        if model_str not in dir(models):
            raise ValueError('Model is not defined!')

        # SECURITY NOTE(review): eval on model_str — the name is validated
        # against dir(models) above, which limits it to known attributes, but
        # getattr(models, model_str) would avoid eval entirely; flagged, not
        # changed, to keep behavior identical.
        if isinstance(params, dict):
            model = eval('models.' + model_str + '(**params)')
            # Compute PSD from model
            simpsd = model(simfreq)
        elif isinstance(params, list):
            simpsd = eval('models.' + model_str + '(simfreq, params)')
        else:
            raise ValueError('Params should be list or dictionary!')

        return self._timmerkoenig(simpsd)

    def _simulate_impulse_response(self, s, h, mode='same'):
        """
        Generate LightCurve from impulse response. To get
        accurate results, binning intervals (dt) of variability
        signal 's' and impulse response 'h' must be equal.

        Parameters
        ----------
        s : array-like
            Underlying variability signal
        h : array-like
            Impulse response
        mode : str
            mode can be 'same', 'filtered', or 'full'.
            'same' indicates that the length of output light
            curve is same as that of input signal.
            'filtered' means that length of output light curve
            is len(s) - lag_delay
            'full' indicates that the length of output light
            curve is len(s) + len(h) - 1

        Returns
        -------
        lightCurve : :class:`stingray.lightcurve.LightCurve` object
        """
        lc = signal.fftconvolve(s, h)

        if mode == 'same':
            lc = lc[:-(len(h) - 1)]
        elif mode == 'filtered':
            lc = lc[(len(h) - 1):-(len(h) - 1)]
        # mode == 'full': keep the complete convolution, len(s) + len(h) - 1.

        time = self.dt * np.arange(0.5, len(lc)) + self.tstart
        err = np.zeros_like(time)
        return Lightcurve(time, lc, err_dist='gauss', dt=self.dt, err=err,
                          skip_checks=True)

    def _extract_and_scale(self, long_lc):
        """
        i) Make a random cut and extract a light curve of required
        length.

        ii) Rescale light curve i) with zero mean and unit standard
        deviation, and ii) user provided mean and rms (fractional
        rms * mean)

        Parameters
        ----------
        long_lc : numpy.ndarray
            Simulated lightcurve of length 'N' times 'red_noise'

        Returns
        -------
        lc : numpy.ndarray
            Normalized and extracted lightcurve of length 'N'
        """
        if self.red_noise == 1:
            lc = long_lc
        else:
            # Make random cut and extract light curve of length 'N'
            extract = \
                self.random_state.randint(self.N - 1,
                                          self.red_noise * self.N - self.N + 1)
            lc = np.take(long_lc, range(extract, extract + self.N))

        mean_lc = np.mean(lc)

        if self.mean == 0:
            return (lc - mean_lc) / self.std * self.rms
        else:
            return (lc - mean_lc) / self.std * self.mean * self.rms + self.mean

    def powerspectrum(self, lc, seg_size=None):
        """
        Make a powerspectrum of the simulated light curve.

        Parameters
        ----------
        lc : lightcurve.Lightcurve object OR
            iterable of lightcurve.Lightcurve objects
            The light curve data to be Fourier-transformed.
        seg_size : float, default None
            segment size for the averaged power spectrum; defaults
            to the full light curve duration (``lc.tseg``).

        Returns
        -------
        power : numpy.ndarray
            The array of normalized squared absolute values of Fourier
            amplitudes
        """
        if seg_size is None:
            seg_size = lc.tseg

        return AveragedPowerspectrum(lc, seg_size).power

    @staticmethod
    def read(filename, fmt='pickle', format_=None):
        """
        Read a transfer function from a 'pickle' file.

        Parameters
        ----------
        filename : str
            path of the file to read
        fmt : str
            the format of the file to be retrieved - accepts 'pickle'.
        format_ : str, default None
            deprecated alias for `fmt`; overrides it when given.

        Returns
        -------
        data : class instance
            `TransferFunction` object

        Raises
        ------
        KeyError
            if the format is not understood.
        """
        if format_ is not None:
            fmt = format_
        if fmt == 'pickle':
            # SECURITY NOTE(review): pickle.load executes arbitrary code from
            # the file; only read files from trusted sources.
            with open(filename, "rb") as fobj:
                return pickle.load(fobj)
        else:
            raise KeyError("Format not understood.")

    def write(self, filename, fmt="pickle", format_=None):
        """
        Write a transfer function to a 'pickle' file.

        Parameters
        ----------
        filename : str
            path of the file to write
        fmt : str
            the format of the file to be saved - accepts 'pickle'
        format_ : str, default None
            deprecated alias for `fmt`; overrides it when given.

        Raises
        ------
        KeyError
            if the format is not understood.
        """
        if format_ is not None:
            fmt = format_
        if fmt == 'pickle':
            with open(filename, "wb") as fobj:
                pickle.dump(self, fobj)
        else:
            raise KeyError("Format not understood.")
|
{
"content_hash": "e95232bbbca7bd738f497d78a7f5b3b2",
"timestamp": "",
"source": "github",
"line_count": 632,
"max_line_length": 91,
"avg_line_length": 30.72151898734177,
"alnum_prop": 0.5488257107540173,
"repo_name": "StingraySoftware/stingray",
"id": "3f98719e04eedcff490877ccc148949374f30981",
"size": "19416",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "stingray/simulator/simulator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1200124"
},
{
"name": "Python",
"bytes": "1465633"
},
{
"name": "TeX",
"bytes": "8716"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the required unique `name` field to
    # the S3Endpoint model.
    dependencies = [
        ('d4s2_api', '0022_auto_20180417_1951'),
    ]
    operations = [
        migrations.AddField(
            model_name='s3endpoint',
            name='name',
            field=models.CharField(default='default', help_text='Unique name of the s3 service', max_length=255, unique=True),
            # 'default' only back-fills existing rows during this migration;
            # it is not kept as a default on the model itself.
            preserve_default=False,
        ),
    ]
|
{
"content_hash": "1e2a6680677f12fb1db747b6232a50ef",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 126,
"avg_line_length": 25.842105263157894,
"alnum_prop": 0.6109979633401222,
"repo_name": "Duke-GCB/DukeDSHandoverService",
"id": "1440e946db123e29123af0579d617963c539f178",
"size": "563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "d4s2_api/migrations/0023_s3endpoint_name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "330"
},
{
"name": "HTML",
"bytes": "5167"
},
{
"name": "Python",
"bytes": "113326"
},
{
"name": "Shell",
"bytes": "473"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.forms.widgets import TextInput
from django_google_maps.widgets import GoogleMapsAddressWidget
from django_google_maps.fields import AddressField, GeoLocationField
from sample import models
class SampleModelAdmin(admin.ModelAdmin):
    """Admin configuration for SampleModel.

    Address fields get the Google-Maps-backed widget; geolocation fields are
    rendered read-only because the map widget populates them.
    """
    formfield_overrides = {
        AddressField: {'widget': GoogleMapsAddressWidget},
        GeoLocationField: {
            # Users must not edit coordinates by hand; the map fills them in.
            'widget': TextInput(attrs={'readonly': 'readonly'}),
        },
    }
admin.site.register(models.SampleModel, SampleModelAdmin)
|
{
"content_hash": "710d8a9000b5e5e9d772388f2281b168",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 25.565217391304348,
"alnum_prop": 0.6802721088435374,
"repo_name": "madisona/django-google-maps",
"id": "d6278220faff3b8786952a3b86fc3282c5cd93b5",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample/admin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "327"
},
{
"name": "HTML",
"bytes": "112"
},
{
"name": "JavaScript",
"bytes": "5784"
},
{
"name": "Python",
"bytes": "20050"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
from django.conf import settings
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# NOTE(review): this uses the pre-1.6 Django URL API — `patterns()` with
# string view paths was deprecated in 1.8 and removed in 1.10, and
# `django.conf.urls.defaults` was removed in 1.6. Left unchanged because
# this project evidently targets that old Django; confirm before upgrading.
urlpatterns = patterns('',
    (r'^$', 'upload.views.index'),
    (r'^convert', 'upload.views.convert'),
    (r'^upload', 'upload.views.upload'),
    # Serves uploaded media straight from Django — development-only pattern;
    # presumably a real web server fronts this in production (confirm).
    (r'^media/(?P<path>.*)$', 'django.views.static.serve',{'document_root': settings.STATIC_DOC_ROOT, 'show_indexes': True}),
)
|
{
"content_hash": "7e855e0fc3163d476f16e1d68111dc27",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 121,
"avg_line_length": 34.07692307692308,
"alnum_prop": 0.7042889390519187,
"repo_name": "jvanveen/ppt2os",
"id": "b7398d06fd370c8a94594536cf556a08c34861ef",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/urls.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "10804"
},
{
"name": "Java",
"bytes": "7270"
},
{
"name": "JavaScript",
"bytes": "323032"
},
{
"name": "Python",
"bytes": "9205"
}
],
"symlink_target": ""
}
|
import collections
class Solution(object):
    def replaceWords(self, dictionary, sentence):
        """
        Replace every word in `sentence` that starts with a root from
        `dictionary` by its *shortest* such root.

        :type dictionary: List[str]
        :type sentence: str
        :rtype: str
        """
        # Fix: `reduce` was used as a builtin, which only works on Python 2.
        # Importing it from functools works on Python 2.6+ and Python 3.
        from functools import reduce
        # Autovivifying trie: accessing a missing child creates a new node.
        _trie = lambda: collections.defaultdict(_trie)
        trie = _trie()
        for word in dictionary:
            # Walk (and create) the node path for `word`, then mark its end.
            reduce(dict.__getitem__, word, trie).setdefault("_end")

        def replace(word):
            # Return the shortest root prefixing `word`, or `word` itself.
            curr = trie
            for i, c in enumerate(word):
                if c not in curr:
                    break
                curr = curr[c]
                if "_end" in curr:
                    # Checked after each character, so the first hit is the
                    # shortest root.
                    return word[:i + 1]
            return word

        return " ".join(map(replace, sentence.split()))
|
{
"content_hash": "661952dadcc57f7a61188b79150c9152",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 67,
"avg_line_length": 27.653846153846153,
"alnum_prop": 0.48956884561891517,
"repo_name": "tudennis/LeetCode---kamyu104-11-24-2015",
"id": "662f8ec0fd7983947a60540a1e97ef3dcab163e7",
"size": "1657",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/replace-words.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "980817"
},
{
"name": "Go",
"bytes": "1907"
},
{
"name": "Java",
"bytes": "8367"
},
{
"name": "Python",
"bytes": "1365305"
},
{
"name": "SQLPL",
"bytes": "822"
},
{
"name": "Shell",
"bytes": "3218"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.