text stringlengths 0 1.05M | meta dict |
|---|---|
__all__ = 'run',
from . import coroutines
from . import events
from . import tasks
def run(main, *, debug=False):
    """Execute the coroutine and return the result.
    This function runs the passed coroutine, taking care of
    managing the asyncio event loop and finalizing asynchronous
    generators.
    This function cannot be called when another asyncio event loop is
    running in the same thread.
    If debug is True, the event loop will be run in debug mode.
    This function always creates a new event loop and closes it at the end.
    It should be used as a main entry point for asyncio programs, and should
    ideally only be called once.
    Example:
        async def main():
            await asyncio.sleep(1)
            print('hello')
        asyncio.run(main())
    """
    # Refuse to nest: run() insists on owning the loop lifecycle entirely.
    if events._get_running_loop() is not None:
        raise RuntimeError(
            "asyncio.run() cannot be called from a running event loop")
    # Reject awaitables that are not genuine coroutine objects.
    if not coroutines.iscoroutine(main):
        raise ValueError("a coroutine was expected, got {!r}".format(main))
    loop = events.new_event_loop()
    try:
        events.set_event_loop(loop)
        loop.set_debug(debug)
        return loop.run_until_complete(main)
    finally:
        # Shutdown order matters: cancel leftover tasks first, then let
        # async generators finalize, and only then unset and close the loop.
        try:
            _cancel_all_tasks(loop)
            loop.run_until_complete(loop.shutdown_asyncgens())
        finally:
            events.set_event_loop(None)
            loop.close()
def _cancel_all_tasks(loop):
    """Cancel every task still pending on *loop* and wait for them to finish.

    Tasks that raise something other than CancelledError while shutting
    down are reported through the loop's exception handler.
    """
    pending = tasks.all_tasks(loop)
    if not pending:
        return

    for pending_task in pending:
        pending_task.cancel()

    # Let every cancelled task run to completion so it can process the
    # CancelledError; swallow results/exceptions here and inspect below.
    loop.run_until_complete(
        tasks.gather(*pending, loop=loop, return_exceptions=True))

    # Anything that finished with a real exception (not a cancellation)
    # would otherwise be lost -- hand it to the loop's exception handler.
    for pending_task in pending:
        if pending_task.cancelled():
            continue
        if pending_task.exception() is not None:
            loop.call_exception_handler({
                'message': 'unhandled exception during asyncio.run() shutdown',
                'exception': pending_task.exception(),
                'task': pending_task,
            })
| {
"repo_name": "xyuanmu/XX-Net",
"path": "python3.8.2/Lib/asyncio/runners.py",
"copies": "10",
"size": "2021",
"license": "bsd-2-clause",
"hash": 7605121880532272000,
"line_mean": 27.0694444444,
"line_max": 79,
"alpha_frac": 0.6150420584,
"autogenerated": false,
"ratio": 4.219206680584551,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9834248738984551,
"avg_score": null,
"num_lines": null
} |
__all__ = ["runsimple"]
import sys
import webapi as web
def runsimple(func, server_address=("0.0.0.0", 8080)):
    """
    Runs a simple HTTP server hosting WSGI app `func`. The directory `static/`
    is hosted statically.
    Based on [WsgiServer][ws] from [Colin Stewart][cs].
    [ws]: http://www.owlfish.com/software/wsgiutils/documentation/wsgi-server-api.html
    [cs]: http://www.owlfish.com/
    """
    # Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
    # Modified somewhat for simplicity
    # Used under the modified BSD license:
    # http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
    # NOTE(review): Python 2 only -- uses `except E, e` syntax, `print >>`
    # statements and the pre-Py3 SimpleHTTPServer/SocketServer module names.
    import SimpleHTTPServer, SocketServer, BaseHTTPServer, urlparse
    import socket, errno
    import traceback
    class WSGIHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        def run_wsgi_app(self):
            # Build a WSGI environ from this HTTP request and invoke the
            # application, streaming its iterable response to the client.
            protocol, host, path, parameters, query, fragment = \
                urlparse.urlparse('http://dummyhost%s' % self.path)
            # we only use path, query
            env = {'wsgi.version': (1, 0)
                   ,'wsgi.url_scheme': 'http'
                   ,'wsgi.input': self.rfile
                   ,'wsgi.errors': sys.stderr
                   ,'wsgi.multithread': 1
                   ,'wsgi.multiprocess': 0
                   ,'wsgi.run_once': 0
                   ,'REQUEST_METHOD': self.command
                   ,'REQUEST_URI': self.path
                   ,'PATH_INFO': path
                   ,'QUERY_STRING': query
                   ,'CONTENT_TYPE': self.headers.get('Content-Type', '')
                   ,'CONTENT_LENGTH': self.headers.get('Content-Length', '')
                   ,'REMOTE_ADDR': self.client_address[0]
                   ,'SERVER_NAME': self.server.server_address[0]
                   ,'SERVER_PORT': str(self.server.server_address[1])
                   ,'SERVER_PROTOCOL': self.request_version
                   }
            # Copy request headers into the environ as HTTP_* keys.
            for http_header, http_value in self.headers.items():
                env ['HTTP_%s' % http_header.replace('-', '_').upper()] = \
                    http_value
            # Setup the state
            self.wsgi_sent_headers = 0
            self.wsgi_headers = []
            try:
                # We have there environment, now invoke the application
                result = self.server.app(env, self.wsgi_start_response)
                try:
                    try:
                        for data in result:
                            if data:
                                self.wsgi_write_data(data)
                    finally:
                        # PEP 333: call close() on the result if it has one.
                        if hasattr(result, 'close'):
                            result.close()
                except socket.error, socket_err:
                    # Catch common network errors and suppress them
                    if (socket_err.args[0] in \
                        (errno.ECONNABORTED, errno.EPIPE)):
                        return
                except socket.timeout, socket_timeout:
                    # Client timed out; nothing sensible to send back.
                    return
            except:
                print >> web.debug, traceback.format_exc(),
            # The response must have a body; emit a blank one if the app
            # produced no output at all.
            if (not self.wsgi_sent_headers):
                # We must write out something!
                self.wsgi_write_data(" ")
            return
        # All non-GET verbs are handed straight to the WSGI app.
        do_POST = run_wsgi_app
        do_PUT = run_wsgi_app
        do_DELETE = run_wsgi_app
        def do_GET(self):
            # /static/ is served from disk; everything else goes to WSGI.
            if self.path.startswith('/static/'):
                SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
            else:
                self.run_wsgi_app()
        def wsgi_start_response(self, response_status, response_headers,
                                exc_info=None):
            # WSGI start_response callable; headers are buffered and only
            # written out on the first wsgi_write_data() call.
            if (self.wsgi_sent_headers):
                raise Exception \
                    ("Headers already sent and start_response called again!")
            # Should really take a copy to avoid changes in the application....
            self.wsgi_headers = (response_status, response_headers)
            return self.wsgi_write_data
        def wsgi_write_data(self, data):
            # WSGI write callable: flush status line + headers once, then
            # stream body bytes.
            if (not self.wsgi_sent_headers):
                status, headers = self.wsgi_headers
                # Need to send header prior to data
                status_code = status[:status.find(' ')]
                status_msg = status[status.find(' ') + 1:]
                self.send_response(int(status_code), status_msg)
                for header, value in headers:
                    self.send_header(header, value)
                self.end_headers()
                self.wsgi_sent_headers = 1
            # Send the data
            self.wfile.write(data)
    # One thread per request; `app` is the WSGI callable to dispatch to.
    class WSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
        def __init__(self, func, server_address):
            BaseHTTPServer.HTTPServer.__init__(self,
                                               server_address,
                                               WSGIHandler)
            self.app = func
            self.serverShuttingDown = 0
    print "http://%s:%d/" % server_address
    WSGIServer(func, server_address).serve_forever()
| {
"repo_name": "CymaticLabs/Unity3D.Amqp",
"path": "lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/web/httpserver.py",
"copies": "1",
"size": "5071",
"license": "mit",
"hash": 1546180734391509800,
"line_mean": 39.568,
"line_max": 84,
"alpha_frac": 0.5103529876,
"autogenerated": false,
"ratio": 4.543906810035843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009661640193108447,
"num_lines": 125
} |
__all__ = ["runsimple"]
import sys, os
from SimpleHTTPServer import SimpleHTTPRequestHandler
import urllib
import posixpath
import webapi as web
import net
import utils
def runbasic(func, server_address=("0.0.0.0", 8080)):
    """
    Runs a simple HTTP server hosting WSGI app `func`. The directory `static/`
    is hosted statically.
    Based on [WsgiServer][ws] from [Colin Stewart][cs].
    [ws]: http://www.owlfish.com/software/wsgiutils/documentation/wsgi-server-api.html
    [cs]: http://www.owlfish.com/
    """
    # Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
    # Modified somewhat for simplicity
    # Used under the modified BSD license:
    # http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
    # NOTE(review): Python 2 only -- uses `except E, e` syntax, `print >>`
    # statements and the pre-Py3 SimpleHTTPServer/SocketServer module names.
    import SimpleHTTPServer, SocketServer, BaseHTTPServer, urlparse
    import socket, errno
    import traceback
    class WSGIHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        def run_wsgi_app(self):
            # Build a WSGI environ from this HTTP request and invoke the
            # application, streaming its iterable response to the client.
            protocol, host, path, parameters, query, fragment = \
                urlparse.urlparse('http://dummyhost%s' % self.path)
            # we only use path, query
            env = {'wsgi.version': (1, 0)
                   ,'wsgi.url_scheme': 'http'
                   ,'wsgi.input': self.rfile
                   ,'wsgi.errors': sys.stderr
                   ,'wsgi.multithread': 1
                   ,'wsgi.multiprocess': 0
                   ,'wsgi.run_once': 0
                   ,'REQUEST_METHOD': self.command
                   ,'REQUEST_URI': self.path
                   ,'PATH_INFO': path
                   ,'QUERY_STRING': query
                   ,'CONTENT_TYPE': self.headers.get('Content-Type', '')
                   ,'CONTENT_LENGTH': self.headers.get('Content-Length', '')
                   ,'REMOTE_ADDR': self.client_address[0]
                   ,'SERVER_NAME': self.server.server_address[0]
                   ,'SERVER_PORT': str(self.server.server_address[1])
                   ,'SERVER_PROTOCOL': self.request_version
                   }
            # Copy request headers into the environ as HTTP_* keys.
            for http_header, http_value in self.headers.items():
                env ['HTTP_%s' % http_header.replace('-', '_').upper()] = \
                    http_value
            # Setup the state
            self.wsgi_sent_headers = 0
            self.wsgi_headers = []
            try:
                # We have there environment, now invoke the application
                result = self.server.app(env, self.wsgi_start_response)
                try:
                    try:
                        for data in result:
                            if data:
                                self.wsgi_write_data(data)
                    finally:
                        # PEP 333: call close() on the result if it has one.
                        if hasattr(result, 'close'):
                            result.close()
                except socket.error, socket_err:
                    # Catch common network errors and suppress them
                    if (socket_err.args[0] in \
                        (errno.ECONNABORTED, errno.EPIPE)):
                        return
                except socket.timeout, socket_timeout:
                    # Client timed out; nothing sensible to send back.
                    return
            except:
                print >> web.debug, traceback.format_exc(),
            # The response must have a body; emit a blank one if the app
            # produced no output at all.
            if (not self.wsgi_sent_headers):
                # We must write out something!
                self.wsgi_write_data(" ")
            return
        # All non-GET verbs are handed straight to the WSGI app.
        do_POST = run_wsgi_app
        do_PUT = run_wsgi_app
        do_DELETE = run_wsgi_app
        def do_GET(self):
            # /static/ is served from disk; everything else goes to WSGI.
            if self.path.startswith('/static/'):
                SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
            else:
                self.run_wsgi_app()
        def wsgi_start_response(self, response_status, response_headers,
                                exc_info=None):
            # WSGI start_response callable; headers are buffered and only
            # written out on the first wsgi_write_data() call.
            if (self.wsgi_sent_headers):
                raise Exception \
                    ("Headers already sent and start_response called again!")
            # Should really take a copy to avoid changes in the application....
            self.wsgi_headers = (response_status, response_headers)
            return self.wsgi_write_data
        def wsgi_write_data(self, data):
            # WSGI write callable: flush status line + headers once, then
            # stream body bytes.
            if (not self.wsgi_sent_headers):
                status, headers = self.wsgi_headers
                # Need to send header prior to data
                status_code = status[:status.find(' ')]
                status_msg = status[status.find(' ') + 1:]
                self.send_response(int(status_code), status_msg)
                for header, value in headers:
                    self.send_header(header, value)
                self.end_headers()
                self.wsgi_sent_headers = 1
            # Send the data
            self.wfile.write(data)
    # One thread per request; `app` is the WSGI callable to dispatch to.
    class WSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
        def __init__(self, func, server_address):
            BaseHTTPServer.HTTPServer.__init__(self,
                                               server_address,
                                               WSGIHandler)
            self.app = func
            self.serverShuttingDown = 0
    print "http://%s:%d/" % server_address
    WSGIServer(func, server_address).serve_forever()
# The WSGIServer instance.
# Made global so that it can be stopped in embedded mode.
# (runsimple() assigns it on startup and resets it to None on shutdown.)
server = None
def runsimple(func, server_address=("0.0.0.0", 8080)):
    """
    Runs [CherryPy][cp] WSGI server hosting WSGI app `func`.
    The directory `static/` is hosted statically.
    [cp]: http://www.cherrypy.org
    """
    global server
    # Wrap the app: static-file routing first, then access logging.
    func = StaticMiddleware(func)
    func = LogMiddleware(func)
    server = WSGIServer(server_address, func)
    # Scheme in the banner depends on whether an SSL adapter is configured.
    if server.ssl_adapter:
        print "https://%s:%d/" % server_address
    else:
        print "http://%s:%d/" % server_address
    try:
        server.start()
    except (KeyboardInterrupt, SystemExit):
        server.stop()
        # Clear the module-level handle so embedded callers can restart.
        server = None
def WSGIServer(server_address, wsgi_app):
    """Creates CherryPy WSGI server listening at `server_address` to serve `wsgi_app`.
    This function can be overwritten to customize the webserver or use a different webserver.
    """
    import wsgiserver
    # Default values of wsgiserver.ssl_adapters uses cherrypy.wsgiserver
    # prefix. Overwriting it make it work with web.wsgiserver.
    wsgiserver.ssl_adapters = {
        'builtin': 'web.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
        'pyopenssl': 'web.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
    }
    server = wsgiserver.CherryPyWSGIServer(server_address, wsgi_app, server_name="localhost")
    def create_ssl_adapter(cert, key):
        # wsgiserver tries to import submodules as cherrypy.wsgiserver.foo.
        # That doesn't work as not it is web.wsgiserver.
        # Patching sys.modules temporarily to make it work.
        import types
        cherrypy = types.ModuleType('cherrypy')
        cherrypy.wsgiserver = wsgiserver
        sys.modules['cherrypy'] = cherrypy
        sys.modules['cherrypy.wsgiserver'] = wsgiserver
        from wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
        adapter = pyOpenSSLAdapter(cert, key)
        # We are done with our work. Cleanup the patches.
        del sys.modules['cherrypy']
        del sys.modules['cherrypy.wsgiserver']
        return adapter
    # SSL backward compatibility
    # (older callers set ssl_certificate/ssl_private_key attributes directly;
    # build an adapter for them if no ssl_adapter was configured)
    if (server.ssl_adapter is None and
        getattr(server, 'ssl_certificate', None) and
        getattr(server, 'ssl_private_key', None)):
        server.ssl_adapter = create_ssl_adapter(server.ssl_certificate, server.ssl_private_key)
    server.nodelay = not sys.platform.startswith('java') # TCP_NODELAY isn't supported on the JVM
    return server
class StaticApp(SimpleHTTPRequestHandler):
    """WSGI application for serving static files.

    Reuses SimpleHTTPRequestHandler's path/file machinery while overriding
    the socket-writing methods so output is captured for WSGI instead.
    (Note: SimpleHTTPRequestHandler.__init__ is intentionally not called.)
    """
    def __init__(self, environ, start_response):
        self.headers = []
        self.environ = environ
        self.start_response = start_response
    def send_response(self, status, msg=""):
        # Capture the status line instead of writing it to a socket.
        self.status = str(status) + " " + msg
    def send_header(self, name, value):
        self.headers.append((name, value))
    def end_headers(self):
        pass
    def log_message(*a): pass
    def __iter__(self):
        # Serve the file as a WSGI response iterable.
        environ = self.environ
        self.path = environ.get('PATH_INFO', '')
        self.client_address = environ.get('REMOTE_ADDR','-'), \
                              environ.get('REMOTE_PORT','-')
        self.command = environ.get('REQUEST_METHOD', '-')
        from cStringIO import StringIO
        self.wfile = StringIO() # for capturing error
        try:
            path = self.translate_path(self.path)
            # Cheap ETag derived from the file mtime, used for
            # If-None-Match conditional requests.
            etag = '"%s"' % os.path.getmtime(path)
            client_etag = environ.get('HTTP_IF_NONE_MATCH')
            self.send_header('ETag', etag)
            if etag == client_etag:
                self.send_response(304, "Not Modified")
                self.start_response(self.status, self.headers)
                # Python 2 idiom to end the generator early; illegal inside
                # a generator under PEP 479 (Python 3.7+).
                raise StopIteration
        except OSError:
            pass # Probably a 404
        f = self.send_head()
        self.start_response(self.status, self.headers)
        if f:
            # Stream the file in 16 KiB chunks.
            block_size = 16 * 1024
            while True:
                buf = f.read(block_size)
                if not buf:
                    break
                yield buf
            f.close()
        else:
            # send_head() wrote an error page into the captured wfile.
            value = self.wfile.getvalue()
            yield value
class StaticMiddleware:
    """WSGI middleware for serving static files.

    Requests whose (normalized) path starts with `prefix` are answered by
    StaticApp; everything else is passed to the wrapped application.
    """
    def __init__(self, app, prefix='/static/'):
        self.app = app          # downstream WSGI application
        self.prefix = prefix    # URL prefix served from disk
    def __call__(self, environ, start_response):
        normalized = self.normpath(environ.get('PATH_INFO', ''))
        if not normalized.startswith(self.prefix):
            return self.app(environ, start_response)
        return StaticApp(environ, start_response)
    def normpath(self, path):
        """Return the URL-decoded, dot-segment-free form of *path*."""
        cleaned = posixpath.normpath(urllib.unquote(path))
        # posixpath.normpath drops a trailing slash; restore it so
        # directory URLs keep their shape.
        if path.endswith("/"):
            cleaned += "/"
        return cleaned
class LogMiddleware:
    """WSGI middleware for logging the status."""
    def __init__(self, app):
        self.app = app
        # Apache-style access-log line: host, time, protocol/method/path, status.
        self.format = '%s - - [%s] "%s %s %s" - %s'
        from BaseHTTPServer import BaseHTTPRequestHandler
        import StringIO
        f = StringIO.StringIO()
        # BaseHTTPRequestHandler's constructor needs a socket-like object
        # with makefile(); fake one so we can borrow a bound method from it.
        class FakeSocket:
            def makefile(self, *a):
                return f
        # take log_date_time_string method from BaseHTTPRequestHandler
        self.log_date_time_string = BaseHTTPRequestHandler(FakeSocket(), None, None).log_date_time_string
    def __call__(self, environ, start_response):
        # Wrap start_response so the status is logged once the app responds.
        def xstart_response(status, response_headers, *args):
            out = start_response(status, response_headers, *args)
            self.log(status, environ)
            return out
        return self.app(environ, xstart_response)
    def log(self, status, environ):
        # Write one access-log line to the WSGI error stream
        # (falls back to web.debug if the environ has none).
        outfile = environ.get('wsgi.errors', web.debug)
        req = environ.get('PATH_INFO', '_')
        protocol = environ.get('ACTUAL_SERVER_PROTOCOL', '-')
        method = environ.get('REQUEST_METHOD', '-')
        host = "%s:%s" % (environ.get('REMOTE_ADDR','-'),
                          environ.get('REMOTE_PORT','-'))
        time = self.log_date_time_string()
        msg = self.format % (host, time, protocol, method, req, status)
        print >> outfile, utils.safestr(msg)
| {
"repo_name": "mttpla/elbaBridge",
"path": "elbaBridgeServer/web/httpserver.py",
"copies": "67",
"size": "11487",
"license": "apache-2.0",
"hash": -8785179657475508000,
"line_mean": 35.0094043887,
"line_max": 105,
"alpha_frac": 0.5598502655,
"autogenerated": false,
"ratio": 4.366020524515394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ["runsimple"]
import sys, os
from SimpleHTTPServer import SimpleHTTPRequestHandler
import webapi as web
import net
import utils
def runbasic(func, server_address=("0.0.0.0", 8080)):
    """
    Runs a simple HTTP server hosting WSGI app `func`. The directory `static/`
    is hosted statically.
    Based on [WsgiServer][ws] from [Colin Stewart][cs].
    [ws]: http://www.owlfish.com/software/wsgiutils/documentation/wsgi-server-api.html
    [cs]: http://www.owlfish.com/
    """
    # Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
    # Modified somewhat for simplicity
    # Used under the modified BSD license:
    # http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
    # NOTE(review): Python 2 only -- uses `except E, e` syntax, `print >>`
    # statements and the pre-Py3 SimpleHTTPServer/SocketServer module names.
    import SimpleHTTPServer, SocketServer, BaseHTTPServer, urlparse
    import socket, errno
    import traceback
    class WSGIHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        def run_wsgi_app(self):
            # Build a WSGI environ from this HTTP request and invoke the
            # application, streaming its iterable response to the client.
            protocol, host, path, parameters, query, fragment = \
                urlparse.urlparse('http://dummyhost%s' % self.path)
            # we only use path, query
            env = {'wsgi.version': (1, 0)
                   ,'wsgi.url_scheme': 'http'
                   ,'wsgi.input': self.rfile
                   ,'wsgi.errors': sys.stderr
                   ,'wsgi.multithread': 1
                   ,'wsgi.multiprocess': 0
                   ,'wsgi.run_once': 0
                   ,'REQUEST_METHOD': self.command
                   ,'REQUEST_URI': self.path
                   ,'PATH_INFO': path
                   ,'QUERY_STRING': query
                   ,'CONTENT_TYPE': self.headers.get('Content-Type', '')
                   ,'CONTENT_LENGTH': self.headers.get('Content-Length', '')
                   ,'REMOTE_ADDR': self.client_address[0]
                   ,'SERVER_NAME': self.server.server_address[0]
                   ,'SERVER_PORT': str(self.server.server_address[1])
                   ,'SERVER_PROTOCOL': self.request_version
                   }
            # Copy request headers into the environ as HTTP_* keys.
            for http_header, http_value in self.headers.items():
                env ['HTTP_%s' % http_header.replace('-', '_').upper()] = \
                    http_value
            # Setup the state
            self.wsgi_sent_headers = 0
            self.wsgi_headers = []
            try:
                # We have there environment, now invoke the application
                result = self.server.app(env, self.wsgi_start_response)
                try:
                    try:
                        for data in result:
                            if data:
                                self.wsgi_write_data(data)
                    finally:
                        # PEP 333: call close() on the result if it has one.
                        if hasattr(result, 'close'):
                            result.close()
                except socket.error, socket_err:
                    # Catch common network errors and suppress them
                    if (socket_err.args[0] in \
                        (errno.ECONNABORTED, errno.EPIPE)):
                        return
                except socket.timeout, socket_timeout:
                    # Client timed out; nothing sensible to send back.
                    return
            except:
                print >> web.debug, traceback.format_exc(),
            # The response must have a body; emit a blank one if the app
            # produced no output at all.
            if (not self.wsgi_sent_headers):
                # We must write out something!
                self.wsgi_write_data(" ")
            return
        # All non-GET verbs are handed straight to the WSGI app.
        do_POST = run_wsgi_app
        do_PUT = run_wsgi_app
        do_DELETE = run_wsgi_app
        def do_GET(self):
            # /static/ is served from disk; everything else goes to WSGI.
            if self.path.startswith('/static/'):
                SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
            else:
                self.run_wsgi_app()
        def wsgi_start_response(self, response_status, response_headers,
                                exc_info=None):
            # WSGI start_response callable; headers are buffered and only
            # written out on the first wsgi_write_data() call.
            if (self.wsgi_sent_headers):
                raise Exception \
                    ("Headers already sent and start_response called again!")
            # Should really take a copy to avoid changes in the application....
            self.wsgi_headers = (response_status, response_headers)
            return self.wsgi_write_data
        def wsgi_write_data(self, data):
            # WSGI write callable: flush status line + headers once, then
            # stream body bytes.
            if (not self.wsgi_sent_headers):
                status, headers = self.wsgi_headers
                # Need to send header prior to data
                status_code = status[:status.find(' ')]
                status_msg = status[status.find(' ') + 1:]
                self.send_response(int(status_code), status_msg)
                for header, value in headers:
                    self.send_header(header, value)
                self.end_headers()
                self.wsgi_sent_headers = 1
            # Send the data
            self.wfile.write(data)
    # One thread per request; `app` is the WSGI callable to dispatch to.
    class WSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
        def __init__(self, func, server_address):
            BaseHTTPServer.HTTPServer.__init__(self,
                                               server_address,
                                               WSGIHandler)
            self.app = func
            self.serverShuttingDown = 0
    print "http://%s:%d/" % server_address
    WSGIServer(func, server_address).serve_forever()
def runsimple(func, server_address=("0.0.0.0", 8080)):
    """
    Runs [CherryPy][cp] WSGI server hosting WSGI app `func`.
    The directory `static/` is hosted statically.
    [cp]: http://www.cherrypy.org
    """
    # Wrap the app: static-file routing first, then access logging.
    func = StaticMiddleware(func)
    func = LogMiddleware(func)
    server = WSGIServer(server_address, func)
    print "http://%s:%d/" % server_address
    try:
        server.start()
    except KeyboardInterrupt:
        # Ctrl-C shuts the server down cleanly.
        server.stop()
def WSGIServer(server_address, wsgi_app):
    """Creates CherryPy WSGI server listening at `server_address` to serve `wsgi_app`.
    This function can be overwritten to customize the webserver or use a different webserver.
    """
    # Imported lazily so the dependency is only required when serving.
    from wsgiserver import CherryPyWSGIServer
    created = CherryPyWSGIServer(server_address, wsgi_app, server_name="localhost")
    return created
class StaticApp(SimpleHTTPRequestHandler):
    """WSGI application for serving static files.

    Reuses SimpleHTTPRequestHandler's file machinery while overriding the
    socket-writing methods so output is captured for WSGI instead.
    (Note: SimpleHTTPRequestHandler.__init__ is intentionally not called.)
    """
    def __init__(self, environ, start_response):
        self.headers = []
        self.environ = environ
        self.start_response = start_response
    def send_response(self, status, msg=""):
        # Capture the status line instead of writing it to a socket.
        self.status = str(status) + " " + msg
    def send_header(self, name, value):
        self.headers.append((name, value))
    def end_headers(self):
        pass
    def log_message(*a): pass
    def __iter__(self):
        # Serve the file as a WSGI response iterable.
        environ = self.environ
        self.path = environ.get('PATH_INFO', '')
        self.client_address = environ.get('REMOTE_ADDR','-'), \
                              environ.get('REMOTE_PORT','-')
        self.command = environ.get('REQUEST_METHOD', '-')
        from cStringIO import StringIO
        self.wfile = StringIO() # for capturing error
        f = self.send_head()
        self.start_response(self.status, self.headers)
        if f:
            # Stream the file in 16 KiB chunks.
            block_size = 16 * 1024
            while True:
                buf = f.read(block_size)
                if not buf:
                    break
                yield buf
            f.close()
        else:
            # send_head() wrote an error page into the captured wfile.
            value = self.wfile.getvalue()
            yield value
class StaticMiddleware:
    """WSGI middleware for serving static files.

    Requests whose path starts with `prefix` are answered by StaticApp;
    everything else is passed through to the wrapped application.
    """
    def __init__(self, app, prefix='/static/'):
        self.app = app          # downstream WSGI application
        self.prefix = prefix    # URL prefix served from disk
    def __call__(self, environ, start_response):
        request_path = environ.get('PATH_INFO', '')
        if not request_path.startswith(self.prefix):
            return self.app(environ, start_response)
        return StaticApp(environ, start_response)
class LogMiddleware:
    """WSGI middleware for logging the status."""
    def __init__(self, app):
        self.app = app
        # Apache-style access-log line: host, time, protocol/method/path, status.
        self.format = '%s - - [%s] "%s %s %s" - %s'
        from BaseHTTPServer import BaseHTTPRequestHandler
        import StringIO
        f = StringIO.StringIO()
        # BaseHTTPRequestHandler's constructor needs a socket-like object
        # with makefile(); fake one so we can borrow a bound method from it.
        class FakeSocket:
            def makefile(self, *a):
                return f
        # take log_date_time_string method from BaseHTTPRequestHandler
        self.log_date_time_string = BaseHTTPRequestHandler(FakeSocket(), None, None).log_date_time_string
    def __call__(self, environ, start_response):
        # Wrap start_response so the status is logged once the app responds.
        def xstart_response(status, response_headers, *args):
            out = start_response(status, response_headers, *args)
            self.log(status, environ)
            return out
        return self.app(environ, xstart_response)
    def log(self, status, environ):
        # Write one access-log line to the WSGI error stream
        # (falls back to web.debug if the environ has none).
        outfile = environ.get('wsgi.errors', web.debug)
        req = environ.get('PATH_INFO', '_')
        protocol = environ.get('ACTUAL_SERVER_PROTOCOL', '-')
        method = environ.get('REQUEST_METHOD', '-')
        host = "%s:%s" % (environ.get('REMOTE_ADDR','-'),
                          environ.get('REMOTE_PORT','-'))
        time = self.log_date_time_string()
        msg = self.format % (host, time, protocol, method, req, status)
        print >> outfile, utils.safestr(msg)
| {
"repo_name": "thunderace/newtifry",
"path": "appengine/web.0.34/httpserver.py",
"copies": "18",
"size": "9100",
"license": "apache-2.0",
"hash": 676182498289640100,
"line_mean": 35.4,
"line_max": 105,
"alpha_frac": 0.5435164835,
"autogenerated": false,
"ratio": 4.398260028999517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010892902358741836,
"num_lines": 250
} |
__all__ = ["runsimple"]
import sys, os
import webapi as web
import net
import utils
def runbasic(func, server_address=("0.0.0.0", 8080)):
    """
    Runs a simple HTTP server hosting WSGI app `func`. The directory `static/`
    is hosted statically.
    Based on [WsgiServer][ws] from [Colin Stewart][cs].
    [ws]: http://www.owlfish.com/software/wsgiutils/documentation/wsgi-server-api.html
    [cs]: http://www.owlfish.com/
    """
    # Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
    # Modified somewhat for simplicity
    # Used under the modified BSD license:
    # http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
    # NOTE(review): Python 2 only -- uses `except E, e` syntax, `print >>`
    # statements and the pre-Py3 SimpleHTTPServer/SocketServer module names.
    import SimpleHTTPServer, SocketServer, BaseHTTPServer, urlparse
    import socket, errno
    import traceback
    class WSGIHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        def run_wsgi_app(self):
            # Build a WSGI environ from this HTTP request and invoke the
            # application, streaming its iterable response to the client.
            protocol, host, path, parameters, query, fragment = \
                urlparse.urlparse('http://dummyhost%s' % self.path)
            # we only use path, query
            env = {'wsgi.version': (1, 0)
                   ,'wsgi.url_scheme': 'http'
                   ,'wsgi.input': self.rfile
                   ,'wsgi.errors': sys.stderr
                   ,'wsgi.multithread': 1
                   ,'wsgi.multiprocess': 0
                   ,'wsgi.run_once': 0
                   ,'REQUEST_METHOD': self.command
                   ,'REQUEST_URI': self.path
                   ,'PATH_INFO': path
                   ,'QUERY_STRING': query
                   ,'CONTENT_TYPE': self.headers.get('Content-Type', '')
                   ,'CONTENT_LENGTH': self.headers.get('Content-Length', '')
                   ,'REMOTE_ADDR': self.client_address[0]
                   ,'SERVER_NAME': self.server.server_address[0]
                   ,'SERVER_PORT': str(self.server.server_address[1])
                   ,'SERVER_PROTOCOL': self.request_version
                   }
            # Copy request headers into the environ as HTTP_* keys.
            for http_header, http_value in self.headers.items():
                env ['HTTP_%s' % http_header.replace('-', '_').upper()] = \
                    http_value
            # Setup the state
            self.wsgi_sent_headers = 0
            self.wsgi_headers = []
            try:
                # We have there environment, now invoke the application
                result = self.server.app(env, self.wsgi_start_response)
                try:
                    try:
                        for data in result:
                            if data:
                                self.wsgi_write_data(data)
                    finally:
                        # PEP 333: call close() on the result if it has one.
                        if hasattr(result, 'close'):
                            result.close()
                except socket.error, socket_err:
                    # Catch common network errors and suppress them
                    if (socket_err.args[0] in \
                        (errno.ECONNABORTED, errno.EPIPE)):
                        return
                except socket.timeout, socket_timeout:
                    # Client timed out; nothing sensible to send back.
                    return
            except:
                print >> web.debug, traceback.format_exc(),
            # The response must have a body; emit a blank one if the app
            # produced no output at all.
            if (not self.wsgi_sent_headers):
                # We must write out something!
                self.wsgi_write_data(" ")
            return
        # All non-GET verbs are handed straight to the WSGI app.
        do_POST = run_wsgi_app
        do_PUT = run_wsgi_app
        do_DELETE = run_wsgi_app
        def do_GET(self):
            # /Tests/, /Extras/ and /Examples/ are served from disk;
            # everything else goes to the WSGI app.
            if self.path.startswith('/Tests/') or \
               self.path.startswith('/Extras/') or \
               self.path.startswith('/Examples/'):
                SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
            else:
                self.run_wsgi_app()
        def wsgi_start_response(self, response_status, response_headers,
                                exc_info=None):
            # WSGI start_response callable; headers are buffered and only
            # written out on the first wsgi_write_data() call.
            if (self.wsgi_sent_headers):
                raise Exception \
                    ("Headers already sent and start_response called again!")
            # Should really take a copy to avoid changes in the application....
            self.wsgi_headers = (response_status, response_headers)
            return self.wsgi_write_data
        def wsgi_write_data(self, data):
            # WSGI write callable: flush status line + headers once, then
            # stream body bytes.
            if (not self.wsgi_sent_headers):
                status, headers = self.wsgi_headers
                # Need to send header prior to data
                status_code = status[:status.find(' ')]
                status_msg = status[status.find(' ') + 1:]
                self.send_response(int(status_code), status_msg)
                for header, value in headers:
                    self.send_header(header, value)
                self.end_headers()
                self.wsgi_sent_headers = 1
            # Send the data
            self.wfile.write(data)
    # One thread per request; `app` is the WSGI callable to dispatch to.
    class WSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
        def __init__(self, func, server_address):
            BaseHTTPServer.HTTPServer.__init__(self,
                                               server_address,
                                               WSGIHandler)
            self.app = func
            self.serverShuttingDown = 0
    print "http://%s:%d/" % server_address
    WSGIServer(func, server_address).serve_forever()
def runsimple(func, server_address=("0.0.0.0", 8080)):
    """
    Runs [CherryPy][cp] WSGI server hosting WSGI app `func`.
    The directory `static/` is hosted statically.
    [cp]: http://www.cherrypy.org
    """
    # NOTE(review): Python 2 only -- `print` statements and the pre-Py3
    # SimpleHTTPServer/BaseHTTPServer/cStringIO module names.
    from wsgiserver import CherryPyWSGIServer
    from SimpleHTTPServer import SimpleHTTPRequestHandler
    from BaseHTTPServer import BaseHTTPRequestHandler
    class StaticApp(SimpleHTTPRequestHandler):
        """WSGI application for serving static files."""
        def __init__(self, environ, start_response):
            self.headers = []
            self.environ = environ
            self.start_response = start_response
        def send_response(self, status, msg=""):
            # Capture the status line instead of writing it to a socket.
            self.status = str(status) + " " + msg
        def send_header(self, name, value):
            self.headers.append((name, value))
        def end_headers(self):
            pass
        def log_message(*a): pass
        def __iter__(self):
            # Serve the file as a WSGI response iterable.
            environ = self.environ
            self.path = environ.get('PATH_INFO', '')
            self.client_address = environ.get('REMOTE_ADDR','-'), \
                                  environ.get('REMOTE_PORT','-')
            self.command = environ.get('REQUEST_METHOD', '-')
            from cStringIO import StringIO
            self.wfile = StringIO() # for capturing error
            f = self.send_head()
            self.start_response(self.status, self.headers)
            if f:
                # Stream the file in 16 KiB chunks.
                block_size = 16 * 1024
                while True:
                    buf = f.read(block_size)
                    if not buf:
                        break
                    yield buf
                f.close()
            else:
                # send_head() wrote an error page into the captured wfile.
                value = self.wfile.getvalue()
                yield value
    class WSGIWrapper(BaseHTTPRequestHandler):
        """WSGI wrapper for logging the status and serving static files."""
        def __init__(self, app):
            self.app = app
            # Apache-style access-log line format.
            self.format = '%s - - [%s] "%s %s %s" - %s'
        def __call__(self, environ, start_response):
            # Wrap start_response so each response gets logged.
            def xstart_response(status, response_headers, *args):
                write = start_response(status, response_headers, *args)
                self.log(status, environ)
                return write
            path = environ.get('PATH_INFO', '')
            # /Tests/, /Extras/ and /Examples/ are served from disk;
            # everything else goes to the wrapped WSGI app.
            if path.startswith('/Tests/') or \
               path.startswith('/Extras/') or \
               path.startswith('/Examples/'):
                return StaticApp(environ, xstart_response)
            else:
                return self.app(environ, xstart_response)
        def log(self, status, environ):
            # Write one access-log line to the WSGI error stream.
            outfile = environ.get('wsgi.errors', web.debug)
            req = environ.get('PATH_INFO', '_')
            protocol = environ.get('ACTUAL_SERVER_PROTOCOL', '-')
            method = environ.get('REQUEST_METHOD', '-')
            host = "%s:%s" % (environ.get('REMOTE_ADDR','-'),
                              environ.get('REMOTE_PORT','-'))
            #@@ It is really bad to extend from
            #@@ BaseHTTPRequestHandler just for this method
            time = self.log_date_time_string()
            msg = self.format % (host, time, protocol, method, req, status)
            print >> outfile, utils.safestr(msg)
    func = WSGIWrapper(func)
    server = CherryPyWSGIServer(server_address, func, server_name="localhost")
    print "http://%s:%d/" % server_address
    try:
        server.start()
    except KeyboardInterrupt:
        # Ctrl-C shuts the server down cleanly.
        server.stop()
| {
"repo_name": "bsmith3541/rapcollab",
"path": "webpy/web/httpserver.py",
"copies": "13",
"size": "8744",
"license": "mit",
"hash": 179747703592387330,
"line_mean": 37.1834061135,
"line_max": 84,
"alpha_frac": 0.519441903,
"autogenerated": false,
"ratio": 4.547061882475299,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ["runsimple"]
import sys, os
from SimpleHTTPServer import SimpleHTTPRequestHandler
import webapi as web
import net
import utils
def runbasic(func, server_address=("0.0.0.0", 8080)):
"""
Runs a simple HTTP server hosting WSGI app `func`. The directory `static/`
is hosted statically.
Based on [WsgiServer][ws] from [Colin Stewart][cs].
[ws]: http://www.owlfish.com/software/wsgiutils/documentation/wsgi-server-api.html
[cs]: http://www.owlfish.com/
"""
# Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
# Modified somewhat for simplicity
# Used under the modified BSD license:
# http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
import SimpleHTTPServer, SocketServer, BaseHTTPServer, urlparse
import socket, errno
import traceback
class WSGIHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def run_wsgi_app(self):
protocol, host, path, parameters, query, fragment = \
urlparse.urlparse('http://dummyhost%s' % self.path)
# we only use path, query
env = {'wsgi.version': (1, 0)
,'wsgi.url_scheme': 'http'
,'wsgi.input': self.rfile
,'wsgi.errors': sys.stderr
,'wsgi.multithread': 1
,'wsgi.multiprocess': 0
,'wsgi.run_once': 0
,'REQUEST_METHOD': self.command
,'REQUEST_URI': self.path
,'PATH_INFO': path
,'QUERY_STRING': query
,'CONTENT_TYPE': self.headers.get('Content-Type', '')
,'CONTENT_LENGTH': self.headers.get('Content-Length', '')
,'REMOTE_ADDR': self.client_address[0]
,'SERVER_NAME': self.server.server_address[0]
,'SERVER_PORT': str(self.server.server_address[1])
,'SERVER_PROTOCOL': self.request_version
}
for http_header, http_value in self.headers.items():
env ['HTTP_%s' % http_header.replace('-', '_').upper()] = \
http_value
# Setup the state
self.wsgi_sent_headers = 0
self.wsgi_headers = []
try:
# We have there environment, now invoke the application
result = self.server.app(env, self.wsgi_start_response)
try:
try:
for data in result:
if data:
self.wsgi_write_data(data)
finally:
if hasattr(result, 'close'):
result.close()
except socket.error, socket_err:
# Catch common network errors and suppress them
if (socket_err.args[0] in \
(errno.ECONNABORTED, errno.EPIPE)):
return
except socket.timeout, socket_timeout:
return
except:
print >> web.debug, traceback.format_exc(),
if (not self.wsgi_sent_headers):
# We must write out something!
self.wsgi_write_data(" ")
return
do_POST = run_wsgi_app
do_PUT = run_wsgi_app
do_DELETE = run_wsgi_app
def do_GET(self):
if self.path.startswith('/static/'):
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
else:
self.run_wsgi_app()
def wsgi_start_response(self, response_status, response_headers,
exc_info=None):
if (self.wsgi_sent_headers):
raise Exception \
("Headers already sent and start_response called again!")
# Should really take a copy to avoid changes in the application....
self.wsgi_headers = (response_status, response_headers)
return self.wsgi_write_data
def wsgi_write_data(self, data):
if (not self.wsgi_sent_headers):
status, headers = self.wsgi_headers
# Need to send header prior to data
status_code = status[:status.find(' ')]
status_msg = status[status.find(' ') + 1:]
self.send_response(int(status_code), status_msg)
for header, value in headers:
self.send_header(header, value)
self.end_headers()
self.wsgi_sent_headers = 1
# Send the data
self.wfile.write(data)
class WSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
def __init__(self, func, server_address):
BaseHTTPServer.HTTPServer.__init__(self,
server_address,
WSGIHandler)
self.app = func
self.serverShuttingDown = 0
    # Announce the listening URL, then block serving requests forever.
    # NOTE(review): `func` and `server_address` come from the enclosing
    # function's scope, whose `def` line lies above this excerpt — confirm.
    print "http://%s:%d/" % server_address
    WSGIServer(func, server_address).serve_forever()
def runsimple(func, server_address=("0.0.0.0", 8080)):
"""
Runs [CherryPy][cp] WSGI server hosting WSGI app `func`.
The directory `static/` is hosted statically.
[cp]: http://www.cherrypy.org
"""
func = StaticMiddleware(func)
func = LogMiddleware(func)
server = WSGIServer(server_address, func)
print "http://%s:%d/" % server_address
try:
server.start()
except KeyboardInterrupt:
server.stop()
def WSGIServer(server_address, wsgi_app):
    """Creates CherryPy WSGI server listening at `server_address` to serve `wsgi_app`.

    This function can be overwritten to customize the webserver or use a different webserver.
    """
    import wsgiserver
    # server_name is pinned to "localhost"; override this factory to change it.
    return wsgiserver.CherryPyWSGIServer(server_address, wsgi_app, server_name="localhost")
class StaticApp(SimpleHTTPRequestHandler):
    """WSGI application for serving static files.

    Reuses SimpleHTTPRequestHandler's path handling by faking the handler
    protocol: header/response writes are captured instead of hitting a socket.
    """
    def __init__(self, environ, start_response):
        self.headers = []
        self.environ = environ
        self.start_response = start_response
    def send_response(self, status, msg=""):
        # Record the status line instead of writing it to a socket.
        self.status = str(status) + " " + msg
    def send_header(self, name, value):
        self.headers.append((name, value))
    def end_headers(self):
        pass
    def log_message(*a):
        pass
    def __iter__(self):
        environ = self.environ
        # Populate the attributes SimpleHTTPRequestHandler.send_head() expects.
        self.path = environ.get('PATH_INFO', '')
        self.client_address = (environ.get('REMOTE_ADDR', '-'),
                               environ.get('REMOTE_PORT', '-'))
        self.command = environ.get('REQUEST_METHOD', '-')
        from cStringIO import StringIO
        self.wfile = StringIO()  # for capturing error
        payload = self.send_head()
        self.start_response(self.status, self.headers)
        if not payload:
            # No body file: whatever was written (an error page) is the body.
            yield self.wfile.getvalue()
        else:
            # Stream the file in 16 KB chunks.
            while True:
                block = payload.read(16 * 1024)
                if not block:
                    break
                yield block
            payload.close()
class StaticMiddleware:
"""WSGI middleware for serving static files."""
def __init__(self, app, prefix='/static/'):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
path = environ.get('PATH_INFO', '')
if path.startswith(self.prefix):
return StaticApp(environ, start_response)
else:
return self.app(environ, start_response)
class LogMiddleware:
    """WSGI middleware for logging the status.

    Wraps start_response so each request is logged (Apache common-log style)
    at the moment the application sets its status line.
    """
    def __init__(self, app):
        self.app = app
        # host - - [time] "protocol method path" - status
        self.format = '%s - - [%s] "%s %s %s" - %s'

        from BaseHTTPServer import BaseHTTPRequestHandler
        import StringIO
        f = StringIO.StringIO()

        # BaseHTTPRequestHandler's constructor immediately calls
        # makefile()/handle() on its socket; a fake socket backed by a
        # StringIO lets us build an instance just to borrow its
        # log_date_time_string() formatter.
        class FakeSocket:
            def makefile(self, *a):
                return f

        # take log_date_time_string method from BaseHTTPRequestHandler
        self.log_date_time_string = BaseHTTPRequestHandler(FakeSocket(), None, None).log_date_time_string

    def __call__(self, environ, start_response):
        # Intercept start_response so the status is known when we log.
        def xstart_response(status, response_headers, *args):
            out = start_response(status, response_headers, *args)
            self.log(status, environ)
            return out

        return self.app(environ, xstart_response)

    def log(self, status, environ):
        # Falls back to web.debug when the server supplies no error stream.
        outfile = environ.get('wsgi.errors', web.debug)
        req = environ.get('PATH_INFO', '_')
        protocol = environ.get('ACTUAL_SERVER_PROTOCOL', '-')
        method = environ.get('REQUEST_METHOD', '-')
        host = "%s:%s" % (environ.get('REMOTE_ADDR','-'),
                          environ.get('REMOTE_PORT','-'))

        time = self.log_date_time_string()

        msg = self.format % (host, time, protocol, method, req, status)
        print >> outfile, utils.safestr(msg)
| {
"repo_name": "ProfessionalIT/customers",
"path": "persianas_paludo/src/webapps/web/httpserver.py",
"copies": "1",
"size": "9350",
"license": "mit",
"hash": -1077894138232056600,
"line_mean": 35.4,
"line_max": 105,
"alpha_frac": 0.5289839572,
"autogenerated": false,
"ratio": 4.4566253574833175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010024571154153733,
"num_lines": 250
} |
__all__ = ['runtime', 'pluginBundle', 'registerPlugin']
class Runtime:
    """
    Backward compatibility interface.

    This class provides (partial) support for the interface of
    older versions of PyObjC.
    """

    def __getattr__(self, name):
        # Every attribute access on this shim is deprecated; warn first so
        # even the special pseudo-attributes below trigger the notice.
        import warnings
        warnings.warn("Deprecated: use objc.lookUpClass",
            DeprecationWarning)
        import objc
        if name == '__objc_classes__':
            return objc.getClassList()
        if name == '__kind__':
            return 'python'

        try:
            return objc.lookUpClass(name)
        except objc.nosuchclass_error:
            raise AttributeError(name)

    def __eq__(self, other):
        # There is exactly one runtime object: equality is identity.
        return self is other

    def __repr__(self):
        return "objc.runtime"
# The singleton compatibility object, and the registry mapping py2app
# plugin names to their bundle paths (filled in by registerPlugin).
runtime = Runtime()

_PLUGINS = {}
def registerPlugin(pluginName):
    """
    Deprecated: use currentBundle()

    Register the current py2app plugin by name and return its bundle
    """
    import os
    import sys

    # The bundle root sits two directories above the Resources directory.
    resources = os.environ['RESOURCEPATH']
    bundle_path = os.path.dirname(os.path.dirname(resources))
    if not isinstance(bundle_path, unicode):
        bundle_path = unicode(bundle_path, sys.getfilesystemencoding())
    _PLUGINS[pluginName] = bundle_path
    return pluginBundle(pluginName)
def pluginBundle(pluginName):
    """
    Deprecated: use currentBundle()

    Return the main bundle for the named plugin. This should be used
    only after it has been registered with registerPlugin
    """
    import warnings
    warnings.warn("Deprecated: use currentBundle()", DeprecationWarning)
    from Foundation import NSBundle
    bundle_path = _PLUGINS[pluginName]
    return NSBundle.bundleWithPath_(bundle_path)
| {
"repo_name": "rays/ipodderx-core",
"path": "objc/_compat.py",
"copies": "1",
"size": "1627",
"license": "mit",
"hash": -7746436694042057000,
"line_mean": 27.0517241379,
"line_max": 72,
"alpha_frac": 0.6441303012,
"autogenerated": false,
"ratio": 4.421195652173913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5565325953373913,
"avg_score": null,
"num_lines": null
} |
__all__ = ['rvm']
import numpy as np
from scipy.linalg import cholesky
import pdb
from pyandres import cholInvert
class rvm(object):
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Constructor
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def __init__(self, basis, targets, noise=None):
"""
Class that implements a Relevance Vector Machine (RVM) as described in Tipping, M. E. and A. C. Faul (2003)
The RVM does regression using a linear function with a potentially very large N functions using
a sparsity constraint on the vector w_i
y(x) = w_1*phi_1(x)+w_2*phi_2(x)+...+w_N*phi_N(x)
Instantiate the class using
p = rvm(Basis, Outputs, noise=0.018)
- Outputs: value of the function at nPoints locations x_i
- Basis: matrix of size [nPoints x nFunctions] of the nFunctions evaluated at the x_i locations of the observations
- noise: optional parameter to give an estimation of the noise standard deviation. It can be absent and the noise variance
will be estimated
The RVM is fitted using
p.iterateUntilConvergence()
or individual iterations can be carried out using
p.iteration()
The value of the weights w_i is returned in p.wInferred, so that the function evaluated at the points is
given by
np.dot(Basis, p.wInferred)
You can also evaluate the Basis functions in a finer x_i grid to produce a smoother regressor
"""
self.basis = 1.0*basis
self.targets = np.reshape(targets, (targets.size,1))
self.noise = noise
# A "reasonable" initial value for the noise in the Gaussian case
self.gaussianSNRInit = 0.1
# "Reasonable" initial alpha bounds
self.initAlphaMax = 1.0e3
self.initAlphaMin = 1.0e-3
# If noise standard deviation is given, compute beta from the noise standard deviation
if (noise is not None):
self.beta = 1.0 / noise**2
# Initialize basis (phi), mu and alpha
# First compute linearised output for use in heuristic initialization
self.TargetsPseudoLinear = self.targets
# BASIS PREPROCESSING:
# Scale basis vectors to unit norm. This eases some calculations and
# will improve numerical robustness later.
self.preprocessBasis()
self.initialization()
# Cache some quantities
self.basisPHI = np.dot(self.basis.T, self.PHI)
self.basisTargets = np.dot(self.basis.T, self.targets)
# Full computation
#
# Initialise with a full explicit computation of the statistics
#
# NOTE: The AISTATS paper uses "S/Q" (upper case) to denote the key
# "sparsity/quality" Factors for "included" basis functions, and "s/q"
# (lower case) for the factors calculated when the relevant basis
# functions are "excluded".
#
# Here, for greater clarity:
#
# All S/Q are denoted by vectors S_in, Q_in
# All s/q are denoted by vectors S_out, Q_out
self.fullStatistics()
self.N, self.MFull = self.basis.shape
self.M = self.PHI.shape[1]
self.addCount = 0
self.deleteCount = 0
self.updateCount = 0
self.maxLogLike = -1.e10
self.controls = {'BetaUpdateStart': 10, 'BetaUpdateFrequency': 5, 'ZeroFactor': 1.e-12, 'PriorityAddition': 0,
'PriorityDeletion': 1, 'AlignmentMax': 0.999e0, 'MinDeltaLogAlpha': 1.e-3}
self.logMarginalLog = np.asarray([self.logML])
self.countLoop = 0
self.alignDeferCount = 0
self.loop = 0
self.lastIteration = False
self.actionReestimate = 0
self.actionAdd = 1
self.actionDelete = -1
self.actionTerminate = 10
self.actionNoiseOnly = 11
self.actionAlignmentSkip = 12
self.selectedAction = 0
self.alignedOut = []
self.alignedIn = []
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Initialization
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    def initialization(self):
        """Pick the starting basis function and its initial alpha."""
        # 1) the starting basis, PHI
        # Take account of "free basis": it needs to be included from the outset
        # Set initial basis to be the largest projection with the targets
        proj = np.dot(self.basis.T, self.TargetsPseudoLinear)
        self.Used = np.atleast_1d(np.argmax(abs(proj)))
        foo = proj[self.Used]
        self.PHI = np.atleast_2d(self.basis[:,self.Used])
        M = self.Used.shape
        # 2) the most probable weights
        # mu will be calculated analytically later in the Gaussian case
        # mu = []
        # 3) the hyperparameters
        # Exact for single basis function case (diag irrelevant),
        # heuristic in the multiple case
        p = np.diag(np.dot(self.PHI.T, self.PHI)) * self.beta
        q = np.dot(self.PHI.T, self.targets) * self.beta
        # NOTE(review): with targets of shape (n,1), q**2-p broadcasts; this
        # is only exercised with a single starting basis function — confirm.
        self.Alpha = p**2 /(q**2-p)
        # Its possible that one of the basis functions could already be irrelevant (alpha<0), so trap that
        ind = np.where(self.Alpha < 0.0)[0]
        if (ind.shape[0] != 0):
            self.Alpha[ind] = self.initAlphaMax
        print "Initial alpha = {0}".format(self.Alpha)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Pre-process basis
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def preprocessBasis(self):
N, M = self.basis.shape
self.scales = np.sqrt(np.sum(self.basis**2,0))
self.scales[self.scales == 0] = 0.0
for i in range(M):
self.basis[:,i] = self.basis[:,i] / self.scales[i]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Compute the full statistics
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    def fullStatistics(self):
        """Recompute posterior, marginal likelihood and S/Q factors from scratch."""
        N, MFull = self.basis.shape
        M = self.PHI.shape[1]
        # Cholesky decomposition to carry out the inverse and compute Sigma matrix.
        # lower=False gives the upper-triangular U with H = U^T U, so
        # inv(H) = Ui Ui^T for Ui = inv(U).
        U = cholesky(np.dot(self.PHI.T,self.PHI) * self.beta + np.diag(np.squeeze(self.Alpha, axis=1)), lower=False)
        Ui = np.linalg.inv(U)
        self.Sigma = np.dot(Ui, Ui.T)
        # Posterior mean, kept as a column vector
        self.Mu = np.dot(self.Sigma, np.dot(self.PHI.T, self.targets)) * self.beta
        self.Mu = np.reshape(self.Mu,(self.Mu.size,1))
        # Data error and likelihood
        y = np.dot(self.PHI, self.Mu)
        e = (self.targets - y)
        ED = np.dot(e.T, e)
        dataLikelihood = 0.5 * (N * np.log(self.beta) - self.beta * ED)
        # Compute the log-marginal posterior; sum(log diag U) = 0.5*log det H
        logDetHOver2 = np.sum(np.log(np.diag(U)))
        self.logML = dataLikelihood - 0.5 * np.dot(self.Mu.T**2, self.Alpha) + 0.5 * np.sum(np.log(self.Alpha)) - logDetHOver2
        # Well-determinedness factors (gamma_i = 1 - alpha_i * Sigma_ii)
        diagC = np.sum(Ui**2, 1)
        self.Gamm = 1.0 - self.Alpha * diagC
        # Compute Q & S values
        # Q: "quality" factor - related to how well the basis function contributes
        # to reducing the error
        # S: "sparsity factor" - related to how orthogonal a given basis function
        # is to the currently used set of basis functions
        self.betaBasisPHI = np.dot(self.basis.T, self.PHI * self.beta * np.ones((1,M)))
        self.SIn = self.beta - np.sum(np.dot(self.betaBasisPHI,Ui)**2,1,keepdims=True)
        self.QIn = self.beta * (self.basisTargets - np.dot(self.basisPHI, self.Mu))
        self.SOut = 1*self.SIn
        self.QOut = 1*self.QIn
        # For functions already in the model, convert S/Q to the "excluded" s/q
        self.SOut[self.Used] = (self.Alpha * self.SIn[self.Used]) / (self.Alpha - self.SIn[self.Used])
        self.QOut[self.Used] = (self.Alpha * self.QIn[self.Used]) / (self.Alpha - self.SIn[self.Used])
        self.factor = (self.QOut**2 - self.SOut)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# One iteration
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def oneIteration(self):
#*****************
# Decision phase
#*****************
# Compute change in likelihood
deltaML = np.zeros((self.MFull, 1))
action = self.actionReestimate * np.ones((self.MFull,1))
usedFactor = self.factor[self.Used]
M = self.PHI.shape[1]
# Reestimation
iu = np.where(usedFactor > self.controls['ZeroFactor'])[0]
index = self.Used[iu]
newAlpha = self.SOut[index]**2 / self.factor[index]
delta = 1.0 / newAlpha - 1.0 / self.Alpha[iu]
# Quick computation of change in log-likelihood given all re-estimations
deltaML[index] = 0.5 * (delta * self.QIn[index]**2 / (delta * self.SIn[index]+1.0) - np.log(1.0+self.SIn[index]*delta))
# Deletion
iu = np.where(usedFactor <= self.controls['ZeroFactor'])[0]
index = self.Used[iu]
anyToDelete = (not index.size == 0) and M > 1
if (anyToDelete):
deltaML[index] = -0.5 * (self.QOut[index]**2 / (self.SOut[index] + self.Alpha[iu]) - np.log(1.0+self.SOut[index] / self.Alpha[iu]))
action[index] = self.actionDelete
# Addition
GoodFactor = self.factor > self.controls['ZeroFactor']
GoodFactor[self.Used] = 0
GoodFactor[self.alignedOut] = 0
index = np.where(GoodFactor)[0]
anyToAdd = (not index.size == 0)
if (anyToAdd):
# Quick computation of change in log-likelihood
quot = self.QIn[index]**2 / self.SIn[index]
deltaML[index] = 0.5 * (quot - 1.0 - np.log(quot))
action[index] = self.actionAdd
if (anyToAdd and self.controls['PriorityAddition']) or (anyToDelete and self.controls['PriorityDeletion']):
# We won't perform re-estimation this iteration
deltaML[action == self.actionReestimate] = 0
# We should enforce add if preferred and delete
if (anyToAdd and self.controls['PriorityAddition'] and (not self.controls['PriorityDeletion'])):
deltaML[action == self.actionDelete] = 0
if (anyToDelete and self.controls['PriorityDeletion'] and (not self.controls['PriorityAddition'])):
deltaML[action == self.actionAdd] = 0
# Finally, choose the action that results in the greatest change in likelihood
nu = np.atleast_1d(np.argmax(deltaML))
self.deltaLogMarginal = deltaML[nu]
self.selectedAction = action[nu]
anyWorthWhileAction = self.deltaLogMarginal > 0
# If basis nu is already in the model, find its index
j = []
if (self.selectedAction == self.actionReestimate) or (self.selectedAction == self.actionDelete):
j = np.where(self.Used == nu)[0]
self.Phi = np.atleast_2d(self.basis[:,nu])
newAlpha = self.SOut[nu]**2 / self.factor[nu]
change = np.abs(np.log(newAlpha) - np.log(self.Alpha[j]))
if (not anyWorthWhileAction) or ((self.selectedAction == self.actionReestimate) and (change < self.controls['MinDeltaLogAlpha']) and (not anyToDelete)):
self.selectedAction = self.actionTerminate
# Alignment checks for addition
if (self.selectedAction == self.actionAdd):
p = np.dot(self.Phi.T, self.PHI)
findAligned = np.where(p > self.controls['AlignmentMax'])[0]
numAligned = findAligned.size
if (numAligned > 0):
# The added basis function is effectively indistinguishable from one present already
self.selectedAction = self.actionAlignmentSkip
alignDeferCount += 1
# Take note not to try this again
self.alignedOut = np.append(self.alignedOut, nu*np.ones((numAligned,1)))
self.alignedIn = np.append(self.alignedIn, self.Used[findAligned])
# Alignment checks for deletion
if (self.selectedAction == self.actionDelete):
findAligned = np.where(self.alignedIn == nu)[0]
numAligned = findAligned.size
if (numAligned > 0):
reinstated = self.alignedOut[findAligned]
self.alignedIn = np.delete(self.alignedIn, findAligned, 0)
self.alignedOut = np.delete(self.alignedOut, findAligned, 0)
#*****************
# Action phase
#*****************
updateRequired = False
if (self.selectedAction == self.actionReestimate):
# Basis function nu is already in the model and we're reeestimatig its alpha
oldAlpha = self.Alpha[j]
self.Alpha[j] = newAlpha
S_j = self.Sigma[:,j]
deltaInv = 1.0 / (newAlpha - oldAlpha)
kappa = 1.0 / (self.Sigma[j,j] + deltaInv)
tmp = kappa * S_j
newSigma = self.Sigma - np.dot(tmp, S_j.T)
deltaMu = -self.Mu[j] * tmp
self.Mu += deltaMu
self.SIn += kappa * np.dot(self.betaBasisPHI, S_j)**2
self.QIn -= np.dot(self.betaBasisPHI, deltaMu)
self.updateCount += 1
updateRequired = True
elif (self.selectedAction == self.actionAdd):
self.basisPhi = np.dot(self.basis.T, self.Phi)
self.basisPHI = np.hstack((self.basisPHI, self.basisPhi))
self.BPhi = self.beta * self.Phi
self.BASISBPhi = self.beta * self.basisPhi
tmp = np.dot(np.dot(self.BPhi.T, self.PHI), self.Sigma).T
self.Alpha = np.vstack((self.Alpha,newAlpha))
self.PHI = np.hstack((self.PHI, self.Phi))
s_ii = 1.0 / (newAlpha + self.SIn[nu])
s_i = -s_ii * tmp
TAU = -np.dot(s_i, tmp.T)
t1 = np.hstack((self.Sigma+TAU, s_i))
t2 = np.hstack((s_i.T, s_ii))
newSigma = np.vstack((t1,t2))
mu_i = s_ii * self.QIn[nu]
deltaMu = np.vstack((-mu_i*tmp, mu_i))
self.Mu = np.vstack((self.Mu, 0)) + deltaMu
mCi = self.BASISBPhi - np.dot(self.betaBasisPHI, tmp)
self.SIn -= s_ii * mCi**2
self.QIn -= mu_i * mCi
self.Used = np.hstack((self.Used, nu))
self.addCount += 1
updateRequired = True
elif (self.selectedAction == self.actionDelete):
self.basisPHI = np.delete(self.basisPHI, j, 1)
self.PHI = np.delete(self.PHI, j, 1)
self.Alpha = np.delete(self.Alpha, j, 0)
s_jj = self.Sigma[j,j]
s_j = self.Sigma[:,j]
tmp = s_j / s_jj
newSigma = self.Sigma - np.dot(tmp, s_j.T)
newSigma = np.delete(newSigma, j, 0)
newSigma = np.delete(newSigma, j, 1)
deltaMu = -self.Mu[j] * tmp
mu_j = self.Mu[j]
self.Mu += deltaMu
self.Mu = np.delete(self.Mu, j, 0)
jPm = np.dot(self.betaBasisPHI, s_j)
self.SIn += jPm**2 / s_jj
self.QIn += jPm * mu_j / s_jj
self.Used = np.delete(self.Used, j, 0)
self.deleteCount += 1
updateRequired = True
M = len(self.Used)
#if (updateRequired):
#self.SOut[:] = self.SIn
#self.QOut[:] = self.QIn
#tmp = self.Alpha / (self.Alpha - self.SIn[self.Used])
#self.SOut[self.Used] = tmp * self.SIn[self.Used]
#self.QOut[self.Used] = tmp * self.QIn[self.Used]
#self.factor = (self.QOut * self.QOut - self.SOut)
#self.Sigma = newSigma
#self.Gamm = 1.0 - self.Alpha * np.atleast_2d(np.diag(self.Sigma)).T
#self.betaBasisPHI = self.beta * self.basisPHI
self.fullStatistics()
self.countLoop += 1
self.logMarginalLog = np.append(self.logMarginalLog, self.logML)
# Something went wrong. Recompute statistics
if (np.sum(self.Gamm) < 0):
self.fullStatistics()
# Recompute noise if not given
if (self.noise is None) and ((self.selectedAction == self.actionTerminate) or (self.loop <= self.controls['BetaUpdateStart']) or
(self.loop % self.controls['BetaUpdateFrequency'] == 0)):
# Gaussian noise estimate
betaZ1 = beta
y = np.dot(self.PHI, self.Mu)
e = self.targets - y
beta = (N - np.sum(self.Gamm)) / np.dot(e.T, e)
# Work-around zero-noise issue
beta = np.amin([beta, 1.e6 / np.variance(self.targets)])
deltaLogBeta = np.log(beta) - np.log(betaZ1)
# Full re-computation of statistics after beta update
if (np.abs(deltaLogBeta) > 1.e-6):
self.fullStatistics()
self.countLoop += 1
self.logMarginalLog = np.append(self.logMarginalLog, self.logML)
if (self.selectedAction == self.actionTerminate):
self.selectedAction = self.actionNoiseOnly
print "Noise update. Termination deferred"
#self.AInv = np.diag(1.0/self.Alpha[:,0])
#Sigma = 1.0/self.beta * np.identity(self.N) + np.dot(np.dot(self.PHI, self.AInv), self.PHI.T)
#CInv, logD = cholInvert(Sigma)
#logL = -0.5*logD - 0.5*np.dot(np.dot(self.targets.T,CInv),self.targets)
print "{0:4d} - L={1:10.7f} - Gamma={2:10.7f} (M={3:4d}) - s={4:6.4f}".format(self.loop,self.logML[0][0]/N, np.sum(self.Gamm), M, np.sqrt(1.0/self.beta))
#self.fullStatistics()
if (self.selectedAction == self.actionTerminate):
print "Stopping at iteration {0} - max_delta_ml={1}".format(self.loop, self.deltaLogMarginal[0,0])
print "L={0} - Gamma={1} (M={2}) - s={3}".format(self.logML[0]/N, np.sum(self.Gamm), M, np.sqrt(1.0/self.beta))
iterationLimit = self.loop == 200
self.lastIteration = iterationLimit
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Iterate until convergence
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def iterateUntilConvergence(self):
while (not self.lastIteration and self.selectedAction != self.actionTerminate):
self.loop += 1
self.oneIteration()
p.postProcess()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Post-process
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    def postProcess(self):
        """Undo the basis normalization and expose the weights in wInferred."""
        #self.fullStatistics()
        # Order the retained basis functions by their original column index
        self.index = np.argsort(self.Used)
        self.relevant = self.Used[self.index]
        # Rescale weights (and alphas) back to the unnormalized basis
        self.value = self.Mu[self.index] / np.atleast_2d(self.scales[self.Used[self.index]]).T
        self.Alpha = self.Alpha[self.index] / (np.atleast_2d(self.scales[self.Used[self.index]]).T)**2
        # Full-length weight vector: zero everywhere except the relevant columns
        self.wInferred = np.zeros((self.MFull,1))
        self.wInferred[self.relevant] = self.value
#***************
# Test
#***************
import matplotlib.pyplot as pl
# Smoke test: fit an RVM to 10 noisy samples using 10 Gaussian basis
# functions centred on the data points plus a constant offset column (M=11).
N = 10
M = 11
XStenflo = np.asarray([-2.83000,-1.18000,0.870000,1.90000,3.96000,5.01000,6.25000,8.10000,9.98000,12.1200]).T
Outputs = np.asarray([0.0211433,0.0167467,0.00938793,0.0183543,-0.00285475,-0.000381000,0.00374350,0.000126900,0.0121750,0.0268133]).T
# Define the basis functions. Gaussians of width 3.4 evaluated at the observed points
basisWidth = 3.4
C = XStenflo[:,np.newaxis]
Basis = np.exp(-(XStenflo-C)**2 / basisWidth**2)
Basis = np.hstack([Basis, np.ones((1,N)).T])
# Instantiate the RVM object and train it
p = rvm(Basis, Outputs, noise=0.018)
p.iterateUntilConvergence()
# Plot the data and the fitted regressor
f = pl.figure(num=0)
ax = f.add_subplot(1,1,1)
ax.plot(XStenflo, Outputs, 'ro')
ax.plot(XStenflo, np.dot(Basis, p.wInferred))
| {
"repo_name": "aasensio/rvmMilne",
"path": "rvmOrig.py",
"copies": "1",
"size": "17318",
"license": "mit",
"hash": -4705856686013648000,
"line_mean": 33.2930693069,
"line_max": 155,
"alpha_frac": 0.6395657697,
"autogenerated": false,
"ratio": 2.727244094488189,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38668098641881893,
"avg_score": null,
"num_lines": null
} |
__all__ = ['rvm']
import numpy as np
#import pdb
#from pyandres import cholInvert
class rvm(object):
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Constructor
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def __init__(self, basis, targets, noise=None):
"""
Class that implements a Relevance Vector Machine (RVM) as described in Tipping, M. E. and A. C. Faul (2003)
The RVM does regression using a linear function with a potentially very large N functions using
a sparsity constraint on the vector w_i
y(x) = w_1*phi_1(x)+w_2*phi_2(x)+...+w_N*phi_N(x)
Instantiate the class using
p = rvm(Basis, Outputs, noise=0.018)
- Outputs: value of the function at nPoints locations x_i
- Basis: matrix of size [nPoints x nFunctions] of the nFunctions evaluated at the x_i locations of the observations
- noise: optional parameter to give an estimation of the noise standard deviation. It can be absent and the noise variance
will be estimated
The RVM is fitted using
p.iterateUntilConvergence()
or individual iterations can be carried out using
p.iteration()
The value of the weights w_i is returned in p.wInferred, so that the function evaluated at the points is
given by
np.dot(Basis, p.wInferred)
You can also evaluate the Basis functions in a finer x_i grid to produce a smoother regressor
"""
self.basis = 1.0*basis
self.targets = targets
self.noise = noise
# A "reasonable" initial value for the noise in the Gaussian case
self.gaussianSNRInit = 0.1
# "Reasonable" initial alpha bounds
self.initAlphaMax = 1.0e3
self.initAlphaMin = 1.0e-3
# If noise standard deviation is given, compute beta from the noise standard deviation
if (noise is not None):
self.beta = 1.0 / noise**2
# Initialize basis (phi), mu and alpha
# First compute linearised output for use in heuristic initialization
self.TargetsPseudoLinear = self.targets
# BASIS PREPROCESSING:
# Scale basis vectors to unit norm. This eases some calculations and
# will improve numerical robustness later.
self.preprocessBasis()
self.initialization()
# Cache some quantities
self.basisPHI = np.dot(self.basis.T, self.PHI)
self.basisTargets = np.dot(self.basis.T, self.targets)
# Full computation
#
# Initialise with a full explicit computation of the statistics
#
# NOTE: The AISTATS paper uses "S/Q" (upper case) to denote the key
# "sparsity/quality" Factors for "included" basis functions, and "s/q"
# (lower case) for the factors calculated when the relevant basis
# functions are "excluded".
#
# Here, for greater clarity:
#
# All S/Q are denoted by vectors S_in, Q_in
# All s/q are denoted by vectors S_out, Q_out
self.fullStatistics()
self.N, self.MFull = self.basis.shape
self.M = self.PHI.shape[1]
self.addCount = 0
self.deleteCount = 0
self.updateCount = 0
self.maxLogLike = -1.e10
self.controls = {'BetaUpdateStart': 10, 'BetaUpdateFrequency': 5, 'ZeroFactor': 1.e-12, 'PriorityAddition': 0,
'PriorityDeletion': 1, 'AlignmentMax': 0.999e0, 'MinDeltaLogAlpha': 1.e-3}
self.logMarginalLog = np.asarray([self.logML])
self.countLoop = 0
self.alignDeferCount = 0
self.loop = 0
self.lastIteration = False
self.actionReestimate = 0
self.actionAdd = 1
self.actionDelete = -1
self.actionTerminate = 10
self.actionNoiseOnly = 11
self.actionAlignmentSkip = 12
self.selectedAction = 0
self.alignedOut = []
self.alignedIn = []
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Initialization
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    def initialization(self):
        """Pick the starting basis function and its initial alpha."""
        # 1) the starting basis, PHI
        # Take account of "free basis": it needs to be included from the outset
        # Set initial basis to be the largest projection with the targets
        proj = np.dot(self.basis.T, self.TargetsPseudoLinear)
        self.Used = np.atleast_1d(np.argmax(abs(proj)))
        foo = proj[self.Used]
        self.PHI = np.atleast_2d(self.basis[:,self.Used])
        M = self.Used.shape
        # 2) the most probable weights
        # mu will be calculated analytically later in the Gaussian case
        # mu = []
        # 3) the hyperparameters
        # Exact for single basis function case (diag irrelevant),
        # heuristic in the multiple case
        p = np.diag(np.dot(self.PHI.T, self.PHI)) * self.beta
        q = np.dot(self.PHI.T, self.targets) * self.beta
        # NOTE(review): wrapped in an extra list, so Alpha starts with shape
        # (1, k) — downstream code appears to rely on this; confirm.
        self.Alpha = np.asarray([p**2 /(q**2-p)])
        # Its possible that one of the basis functions could already be irrelevant (alpha<0), so trap that
        ind = np.where(self.Alpha < 0.0)[0]
        if (ind.shape[0] != 0):
            self.Alpha[ind] = self.initAlphaMax
        print "Initial alpha = {0}".format(self.Alpha)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Pre-process basis
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def preprocessBasis(self):
N, M = self.basis.shape
self.scales = np.sqrt(np.sum(self.basis**2,0))
self.scales[self.scales == 0] = 0.0
for i in range(M):
self.basis[:,i] = self.basis[:,i] / self.scales[i]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Compute the full statistics
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def fullStatistics(self):
N, MFull = self.basis.shape
M = self.PHI.shape[1]
# Cholesky decomposition to carry out the inverse and compute Sigma matrix
U = np.linalg.cholesky(np.dot(self.PHI.T,self.PHI) * self.beta + np.diag(self.Alpha))
Ui = np.linalg.inv(U)
#pdb.set_trace()
self.Sigma = np.dot(Ui, Ui.T)
# Posterior mean
self.Mu = np.dot(self.Sigma, np.dot(self.PHI.T, self.targets)) * self.beta
# Data error and likelihood
y = np.dot(self.PHI, self.Mu)
e = (self.targets - y)
ED = np.dot(e.T, e)
dataLikelihood = 0.5 * (N * np.log(self.beta) - self.beta * ED)
# Compute the log-marginal posterior
logDetHOver2 = np.sum(np.log(np.diag(U)))
self.logML = dataLikelihood - 0.5 * np.dot(self.Mu.T**2, self.Alpha) + 0.5 * np.sum(np.log(self.Alpha)) - logDetHOver2
# Well-determinedness factors
diagC = np.sum(Ui**2, 1)
self.Gamm = 1.0 - self.Alpha * diagC
#pdb.set_trace()
# Compute Q & S values
# Q: "quality" factor - related to how well the basis function contributes
# to reducing the error
# S: "sparsity factor" - related to how orthogonal a given basis function
# is to the currently used set of basis functions
self.betaBasisPHI = np.dot(self.basis.T, self.PHI * self.beta * np.ones((1,M)))
self.SIn = self.beta - np.sum(np.dot(self.betaBasisPHI,Ui)**2,1,keepdims=True)
self.QIn = np.atleast_2d(self.beta * (self.basisTargets - np.dot(self.basisPHI, self.Mu))).T
self.SOut = 1*self.SIn
self.QOut = 1*self.QIn
self.SOut[self.Used] = (self.Alpha * self.SIn[self.Used]) / (self.Alpha - self.SIn[self.Used])
self.QOut[self.Used] = (self.Alpha * self.QIn[self.Used]) / (self.Alpha - self.SIn[self.Used])
self.factor = (self.QOut**2 - self.SOut)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# One iteration
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def oneIteration(self):
#*****************
# Decision phase
#*****************
# Compute change in likelihood
deltaML = np.zeros((self.MFull, 1))
action = self.actionReestimate * np.ones((self.MFull,1))
usedFactor = self.factor[self.Used]
N, M = self.PHI.shape
# Reestimation
iu = np.where(usedFactor > self.controls['ZeroFactor'])[0]
index = self.Used[iu]
newAlpha = self.SOut[index]**2 / self.factor[index]
delta = 1.0 / newAlpha - 1.0 / self.Alpha[iu]
# Quick computation of change in log-likelihood given all re-estimations
deltaML[index] = 0.5 * (delta * self.QIn[index]**2 / (delta * self.SIn[index]+1.0) - np.log(1.0+self.SIn[index]*delta))
# Deletion
iu = np.where(usedFactor <= self.controls['ZeroFactor'])[0]
index = self.Used[iu]
anyToDelete = (not index.size == 0) and M > 1
if (anyToDelete):
deltaML[index] = -0.5 * (self.QOut[index]**2 / (self.SOut[index] + self.Alpha[iu]) - np.log(1.0+self.SOut[index] / self.Alpha[iu]))
action[index] = self.actionDelete
# Addition
GoodFactor = self.factor > self.controls['ZeroFactor']
GoodFactor[self.Used] = 0
GoodFactor[self.alignedOut] = 0
index = np.where(GoodFactor)[0]
anyToAdd = (not index.size == 0)
if (anyToAdd):
# Quick computation of change in log-likelihood
quot = self.QIn[index]**2 / self.SIn[index]
deltaML[index] = 0.5 * (quot - 1.0 - np.log(quot))
action[index] = self.actionAdd
if (anyToAdd and self.controls['PriorityAddition']) or (anyToDelete and self.controls['PriorityDeletion']):
# We won't perform re-estimation this iteration
deltaML[action == self.actionReestimate] = 0
# We should enforce add if preferred and delete
if (anyToAdd and self.controls['PriorityAddition'] and (not self.controls['PriorityDeletion'])):
deltaML[action == self.actionDelete] = 0
if (anyToDelete and self.controls['PriorityDeletion'] and (not self.controls['PriorityAddition'])):
deltaML[action == self.actionAdd] = 0
# Finally, choose the action that results in the greatest change in likelihood
nu = np.atleast_1d(np.argmax(deltaML))
self.deltaLogMarginal = deltaML[nu]
self.selectedAction = action[nu]
anyWorthWhileAction = self.deltaLogMarginal > 0
# If basis nu is already in the model, find its index
j = []
if (self.selectedAction == self.actionReestimate) or (self.selectedAction == self.actionDelete):
j = np.where(self.Used == nu)[0]
self.Phi = np.atleast_2d(self.basis[:,nu])
newAlpha = self.SOut[nu]**2 / self.factor[nu]
change = np.abs(np.log(newAlpha) - np.log(self.Alpha[j]))
if (not anyWorthWhileAction) or ((self.selectedAction == self.actionReestimate) and (change < self.controls['MinDeltaLogAlpha']) and (not anyToDelete)):
self.selectedAction = self.actionTerminate
# Alignment checks for addition
if (self.selectedAction == self.actionAdd):
p = np.dot(self.Phi.T, self.PHI)
findAligned = np.where(p > self.controls['AlignmentMax'])[0]
numAligned = findAligned.size
if (numAligned > 0):
# The added basis function is effectively indistinguishable from one present already
self.selectedAction = self.actionAlignmentSkip
self.alignDeferCount += 1
# Take note not to try this again
self.alignedOut = np.append(self.alignedOut, nu*np.ones((numAligned,1))).astype(int)
self.alignedIn = np.append(self.alignedIn, self.Used[findAligned])
# Alignment checks for deletion
if (self.selectedAction == self.actionDelete):
findAligned = np.where(self.alignedIn == nu)[0]
numAligned = findAligned.size
if (numAligned > 0):
reinstated = self.alignedOut[findAligned]
self.alignedIn = np.delete(self.alignedIn, findAligned, 0)
self.alignedOut = np.delete(self.alignedOut, findAligned, 0)
#*****************
# Action phase
#*****************
updateRequired = False
if (self.selectedAction == self.actionReestimate):
# Basis function nu is already in the model and we're reeestimatig its alpha
oldAlpha = self.Alpha[j]
self.Alpha[j] = newAlpha
S_j = self.Sigma[:,j]
deltaInv = 1.0 / (newAlpha - oldAlpha)
kappa = 1.0 / (self.Sigma[j,j] + deltaInv)
tmp = kappa * S_j
newSigma = self.Sigma - np.dot(tmp, S_j.T)
deltaMu = -self.Mu[j] * tmp
self.Mu += deltaMu
self.SIn += kappa * np.dot(self.betaBasisPHI, S_j)**2
self.QIn -= np.dot(self.betaBasisPHI, deltaMu)
self.updateCount += 1
updateRequired = True
elif (self.selectedAction == self.actionAdd):
self.basisPhi = np.dot(self.basis.T, self.Phi)
self.basisPHI = np.hstack((self.basisPHI, self.basisPhi))
self.BPhi = self.beta * self.Phi
self.BASISBPhi = self.beta * self.basisPhi
tmp = np.dot(np.dot(self.BPhi.T, self.PHI), self.Sigma).T
self.Alpha = np.vstack((self.Alpha,newAlpha))
self.PHI = np.hstack((self.PHI, self.Phi))
s_ii = 1.0 / (newAlpha + self.SIn[nu])
s_i = -s_ii * tmp
TAU = -np.dot(s_i, tmp.T)
t1 = np.hstack((self.Sigma+TAU, s_i))
t2 = np.hstack((s_i.T, s_ii))
newSigma = np.vstack((t1,t2))
mu_i = s_ii * self.QIn[nu]
deltaMu = np.vstack((-mu_i*tmp, mu_i))
self.Mu = np.vstack((self.Mu, 0)) + deltaMu
mCi = self.BASISBPhi - np.dot(self.betaBasisPHI, tmp)
self.SIn -= s_ii * mCi**2
self.QIn -= mu_i * mCi
self.Used = np.hstack((self.Used, nu))
self.addCount += 1
updateRequired = True
elif (self.selectedAction == self.actionDelete):
self.basisPHI = np.delete(self.basisPHI, j, 1)
self.PHI = np.delete(self.PHI, j, 1)
self.Alpha = np.delete(self.Alpha, j, 0)
s_jj = self.Sigma[j,j]
s_j = self.Sigma[:,j]
tmp = s_j / s_jj
newSigma = self.Sigma - np.dot(tmp, s_j.T)
newSigma = np.delete(newSigma, j, 0)
newSigma = np.delete(newSigma, j, 1)
deltaMu = -self.Mu[j] * tmp
mu_j = self.Mu[j]
self.Mu += deltaMu
self.Mu = np.delete(self.Mu, j, 0)
jPm = np.dot(self.betaBasisPHI, s_j)
self.SIn += jPm**2 / s_jj
self.QIn += jPm * mu_j / s_jj
self.Used = np.delete(self.Used, j, 0)
self.deleteCount += 1
updateRequired = True
M = len(self.Used)
if (updateRequired):
self.SOut[:] = self.SIn
self.QOut[:] = self.QIn
tmp = self.Alpha / (self.Alpha - self.SIn[self.Used])
self.SOut[self.Used] = tmp * self.SIn[self.Used]
self.QOut[self.Used] = tmp * self.QIn[self.Used]
self.factor = (self.QOut * self.QOut - self.SOut)
self.Sigma = newSigma
self.Gamm = 1.0 - self.Alpha * np.atleast_2d(np.diag(self.Sigma)).T
self.betaBasisPHI = self.beta * self.basisPHI
self.logML += self.deltaLogMarginal[0,0]
self.countLoop += 1
self.logMarginalLog = np.append(self.logMarginalLog, self.logML)
# Something went wrong. Recompute statistics
if (np.sum(self.Gamm) < 0):
self.fullStatistics()
# Recompute noise if not given
if (self.noise is None) and ((self.selectedAction == self.actionTerminate) or (self.loop <= self.controls['BetaUpdateStart']) or
(self.loop % self.controls['BetaUpdateFrequency'] == 0)):
# Gaussian noise estimate
betaZ1 = beta
y = np.dot(self.PHI, self.Mu)
e = self.targets - y
beta = (N - np.sum(self.Gamm)) / np.dot(e.T, e)
# Work-around zero-noise issue
beta = np.amin([beta, 1.e6 / np.variance(self.targets)])
deltaLogBeta = np.log(beta) - np.log(betaZ1)
# Full re-computation of statistics after beta update
if (np.abs(deltaLogBeta) > 1.e-6):
self.fullStatistics()
self.countLoop += 1
self.logMarginalLog = np.append(self.logMarginalLog, self.logML)
if (self.selectedAction == self.actionTerminate):
self.selectedAction = self.actionNoiseOnly
print "Noise update. Termination deferred"
#self.AInv = np.diag(1.0/self.Alpha[:,0])
#Sigma = 1.0/self.beta * np.identity(self.N) + np.dot(np.dot(self.PHI, self.AInv), self.PHI.T)
#CInv, logD = cholInvert(Sigma)
#logL = -0.5*logD - 0.5*np.dot(np.dot(self.targets.T,CInv),self.targets)
print "{0:4d} - L={1:10.7f} - Gamma={2:10.7f} (M={3:4d}) - s={4:6.4f}".format(self.loop,self.logML[0]/N, np.sum(self.Gamm), M, np.sqrt(1.0/self.beta))
if (self.selectedAction == self.actionTerminate):
print "Stopping at iteration {0} - max_delta_ml={1}".format(self.loop, self.deltaLogMarginal[0,0])
print "L={0} - Gamma={1} (M={2}) - s={3}".format(self.logML[0]/N, np.sum(self.Gamm), M, np.sqrt(1.0/self.beta))
iterationLimit = self.loop == 200
self.lastIteration = iterationLimit
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Iterate until convergence
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def iterateUntilConvergence(self):
while (not self.lastIteration and self.selectedAction != self.actionTerminate):
self.loop += 1
self.oneIteration()
self.postProcess()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Post-process
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def postProcess(self):
#self.fullStatistics()
self.index = np.argsort(self.Used)
self.relevant = self.Used[self.index]
self.value = self.Mu[self.index] / np.atleast_2d(self.scales[self.Used[self.index]]).T
self.Alpha = self.Alpha[self.index] / (np.atleast_2d(self.scales[self.Used[self.index]]).T)**2
self.wInferred = np.zeros((self.MFull,1))
self.wInferred[self.relevant] = self.value
#***************
# Test
#***************
#import matplotlib.pyplot as pl
#N = 10
#M = 11
#XStenflo = np.asarray([-2.83000,-1.18000,0.870000,1.90000,3.96000,5.01000,6.25000,8.10000,9.98000,12.1200]).T
#Outputs = np.asarray([0.0211433,0.0167467,0.00938793,0.0183543,-0.00285475,-0.000381000,0.00374350,0.000126900,0.0121750,0.0268133]).T
## Define the basis functions. Gaussians of width 3.4 evaluated at the observed points
#basisWidth = 3.4
#C = XStenflo[:,np.newaxis]
#Basis = np.exp(-(XStenflo-C)**2 / basisWidth**2)
#Basis = np.hstack([Basis, np.ones((1,N)).T])
## Instantitate the RVM object and train it
#p = rvm(Basis, Outputs, noise=0.018)
#p.iterateUntilConvergence()
## Do some plots
#f = pl.figure(num=0)
#ax = f.add_subplot(1,1,1)
#ax.plot(XStenflo, Outputs, 'ro')
#ax.plot(XStenflo, np.dot(Basis, p.wInferred))
| {
"repo_name": "aasensio/pyAndres",
"path": "rvm.py",
"copies": "1",
"size": "17249",
"license": "mit",
"hash": 2086654084291598000,
"line_mean": 33.3605577689,
"line_max": 154,
"alpha_frac": 0.6395153342,
"autogenerated": false,
"ratio": 2.7223800505050506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.386189538470505,
"avg_score": null,
"num_lines": null
} |
__all__ = ["save_model", "load_model"]
import os
from .models import Lm, Lm2, Lmer
from .utils import _df_meta_to_arr
from rpy2.robjects.packages import importr
import deepdish as dd
import pandas as pd
import warnings
from tables import NaturalNameWarning
base = importr("base")
def save_model(model, filepath, compression="zlib", **kwargs):
    """
    Function for saving pymer4 models. All models are saved in .h5 or .hdf5 files so filepath extensions should include this. For Lmer models an additional filepath .rds file will be created to retain all R objects.

    Args:
        model (pymer4.models): an instance of a pymer4 model
        filepath (str): full filepath string ending with .h5 or .hdf5
        compression (string): what kind of compression to use; zlib is the default which should be universally accessible, but for example 'blosc' will be faster and produce smaller files. See more here: https://bit.ly/33x9JD7
        kwargs: optional keyword arguments to deepdish.io.save

    Raises:
        IOError: if filepath does not end with .h5 or .hdf5
        TypeError: if a list-valued model attribute contains non-DataFrames
    """
    if not (filepath.endswith(".h5") or filepath.endswith(".hdf5")):
        raise IOError("filepath must end with .h5 or .hdf5")
    # BUG FIX: use splitext so a '.' in a directory name doesn't truncate the
    # path (previously filepath.split(".")[0])
    filename = os.path.splitext(filepath)[0]
    # Separate out model attributes that are not pandas dataframes (or lists
    # containing dataframes) or R model objects
    simple_atts, data_atts = {}, {}
    for k, v in vars(model).items():
        skip = False
        if k == "model_obj":
            # rpy2 objects can't go into h5; handled separately below
            skip = True
        elif isinstance(v, pd.DataFrame):
            skip = True
        elif isinstance(v, list):
            if any([isinstance(elem, pd.DataFrame) for elem in v]):
                skip = True
        if not skip:
            simple_atts[k] = v
        else:
            data_atts[k] = v
    # Stored so load_model() knows which class to reconstruct
    simple_atts["model_class"] = model.__class__.__name__
    # Deconstruct dataframes into (values, columns, index, dtypes) arrays so
    # they round-trip through h5
    data_atts_separated = {}
    for k, v in data_atts.items():
        if k != "model_obj":
            if isinstance(v, pd.DataFrame):
                cols, idx = _df_meta_to_arr(v)
                vals = v.values
                dtypes = v.dtypes.to_dict()
                data_atts_separated[f"df_cols__{k}"] = cols
                data_atts_separated[f"df_idx__{k}"] = idx
                data_atts_separated[f"df_vals__{k}"] = vals
                data_atts_separated[f"df_dtypes__{k}"] = dtypes
            elif isinstance(v, list):
                for i, elem in enumerate(v):
                    if isinstance(elem, pd.DataFrame):
                        cols, idx = _df_meta_to_arr(elem)
                        vals = elem.values
                        dtypes = elem.dtypes.to_dict()
                        data_atts_separated[f"list_{i}_cols__{k}"] = cols
                        data_atts_separated[f"list_{i}_idx__{k}"] = idx
                        data_atts_separated[f"list_{i}_vals__{k}"] = vals
                        data_atts_separated[f"list_{i}_dtypes__{k}"] = dtypes
                    else:
                        raise TypeError(
                            f"Value is list but list item is {type(elem)} not pd.DataFrame"
                        )
    # Combine all attributes into a single dict and save with dd
    model_atts = {"simple_atts": simple_atts, "data_atts": data_atts_separated}
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=FutureWarning)
        warnings.simplefilter("ignore", category=NaturalNameWarning)
        dd.io.save(filepath, model_atts, compression=compression, **kwargs)
    assert os.path.exists(filepath)
    # Now deal with model object in R if needed
    if model.model_obj is not None:
        # BUG FIX: was the literal path "(unknown).rds"; the computed
        # `filename` was never used
        rds_path = f"{filename}.rds"
        base.saveRDS(model.model_obj, rds_path)
        assert os.path.exists(rds_path)
def _reconstruct_df(data_atts, vals_name, cols_name, idx_name, dtype_name):
    """Re-assemble one DataFrame from its deconstructed h5 arrays,
    decoding any byte-string column/index labels back to str."""
    def _decode(e):
        return e.decode("utf-8") if isinstance(e, bytes) else e

    return pd.DataFrame(
        data_atts[vals_name],
        columns=[_decode(e) for e in data_atts[cols_name]],
        index=[_decode(e) for e in data_atts[idx_name]],
    ).astype(data_atts[dtype_name])


def load_model(filepath):
    """
    Function for loading pymer4 models. A file path ending in .h5 or .hdf5 should be provided. For Lmer models an additional filepath .rds file should be located in the same directory.

    Args:
        filepath (str): full filepath string ending with .h5 or .hdf5

    Returns:
        pymer4.models: the reconstructed model instance

    Raises:
        IOError: if filepath has the wrong extension or does not exist
        ValueError: if the saved model class is not recognized
    """
    if not (filepath.endswith(".h5") or filepath.endswith(".hdf5")):
        raise IOError("filepath must end with .h5 or .hdf5")
    if not os.path.exists(filepath):
        raise IOError("File not found!")
    # Load h5 first
    model_atts = dd.io.load(filepath)
    # Figure out what kind of model we're dealing with
    model_class = model_atts["simple_atts"]["model_class"]
    if model_class == "Lmer":
        model = Lmer("", [])
    elif model_class == "Lm2":
        model = Lm2("", [], "")
    elif model_class == "Lm":
        model = Lm("", [])
    else:
        # BUG FIX: previously an unknown class fell through and crashed later
        # with UnboundLocalError on `model`
        raise ValueError(f"Unsupported model class '{model_class}'")
    # Set top level attributes
    for k, v in model_atts["simple_atts"].items():
        if k != "model_class":
            setattr(model, k, v)
    # Make sure the model formula is a python string so that rpy2 doesn't complain
    model.formula = str(model.formula)
    # Set data attributes; `completed` tracks keys already consumed while
    # re-assembling a dataframe from its four companion keys
    completed = []
    for k in model_atts["data_atts"]:
        # Re-assemble dataframes
        if k.startswith("df_"):
            if k not in completed:
                # Get the id of this deconstructed df
                item_name = k.split("__")[-1]
                vals_name = f"df_vals__{item_name}"
                cols_name = f"df_cols__{item_name}"
                idx_name = f"df_idx__{item_name}"
                dtype_name = f"df_dtypes__{item_name}"
                df = _reconstruct_df(
                    model_atts["data_atts"], vals_name, cols_name, idx_name, dtype_name
                )
                setattr(model, item_name, df)
                # BUG FIX: cols_name was previously left out of `completed`
                completed.extend([item_name, vals_name, cols_name, idx_name, dtype_name])
        # Same idea for list items
        elif k.startswith("list_"):
            if k not in completed:
                # Get the id of the deconstructed list
                item_name = k.split("__")[-1]
                # BUG FIX: previously only the first digit character was kept,
                # which broke lists with 10 or more elements
                item_idx = k.split("__")[0].split("_")[1]
                vals_name = f"list_{item_idx}_vals__{item_name}"
                cols_name = f"list_{item_idx}_cols__{item_name}"
                idx_name = f"list_{item_idx}_idx__{item_name}"
                dtype_name = f"list_{item_idx}_dtypes__{item_name}"
                df = _reconstruct_df(
                    model_atts["data_atts"], vals_name, cols_name, idx_name, dtype_name
                )
                # Append to the list if it already exists, otherwise create it
                if getattr(model, item_name, None) is not None:
                    current_items = getattr(model, item_name)
                    current_items += [df]
                    setattr(model, item_name, current_items)
                else:
                    setattr(model, item_name, [df])
                completed.extend([item_name, vals_name, cols_name, idx_name, dtype_name])
    # Now deal with model object in R if needed
    if isinstance(model, Lmer):
        # BUG FIX: was the literal path "(unknown).rds"; also use splitext so
        # dots in directory names don't truncate the path
        filename = os.path.splitext(filepath)[0]
        model.model_obj = base.readRDS(f"{filename}.rds")
    return model
| {
"repo_name": "ejolly/pymer4",
"path": "pymer4/io.py",
"copies": "1",
"size": "8966",
"license": "mit",
"hash": -5985140483151603000,
"line_mean": 45.4559585492,
"line_max": 226,
"alpha_frac": 0.5089225965,
"autogenerated": false,
"ratio": 4.329309512312892,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5338232108812893,
"avg_score": null,
"num_lines": null
} |
__all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import sys
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
from _compiled_base import packbits, unpackbits
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
    """Use this factory to produce the class so that we can do a lazy
    import on gzip.

    Attaches forward-only `seek` and `tell` methods to a GzipFile instance
    (opening `f` first when it is a filename), since plain gzip streams
    cannot seek natively.
    """
    import gzip

    def seek(self, offset, whence=0):
        # figure out new position (we can only seek forwards)
        if whence == 1:
            offset = self.offset + offset

        if whence not in [0, 1]:
            # BUG FIX: was py2-only `raise IOError, "..."` syntax, even though
            # this file explicitly supports Python 3 below
            raise IOError("Illegal argument")

        if offset < self.offset:
            # for negative seek, rewind and do positive seek
            self.rewind()
            count = offset - self.offset
            for i in range(count // 1024):
                self.read(1024)
            self.read(count % 1024)

    def tell(self):
        return self.offset

    if isinstance(f, str):
        f = gzip.GzipFile(f)

    if sys.version_info[0] >= 3:
        import types
        f.seek = types.MethodType(seek, f)
        f.tell = types.MethodType(tell, f)
    else:
        import new
        f.seek = new.instancemethod(seek, f)
        f.tell = new.instancemethod(tell, f)
    return f
class BagObj(object):
    """
    BagObj(obj)

    Convert attribute look-ups to getitems on the object passed in.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.

    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo(object):
    ...     def __getitem__(self, key): # An instance of BagObj(BagDemo)
    ...                                 # will call this method when any
    ...                                 # attribute look-up is required
    ...         result = "Doesn't matter what you want, "
    ...         return result + "you're gonna get this"
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = BO(demo_obj)
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bagobj.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"

    """
    def __init__(self, obj):
        # Stored under a private name so arbitrary attribute names
        # looked up on the bag cannot collide with it
        self._obj = obj
    def __getattribute__(self, key):
        try:
            return object.__getattribute__(self, '_obj')[key]
        except KeyError:
            # BUG FIX: was py2-only `raise AttributeError, key` syntax,
            # inconsistent with this file's Python 3 support branches
            raise AttributeError(key)
class NpzFile(object):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ".npy" extension,
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ".npy" extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ".npy" extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute can be performed as an alternative
        to getitem access on the `NpzFile` instance itself.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)

    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.io.NpzFile)
    True
    >>> npz.files
    ['y', 'x']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    def __init__(self, fid):
        # Import is postponed to here since zipfile depends on gzip, an optional
        # component of the so-called standard library.
        import zipfile
        _zip = zipfile.ZipFile(fid)
        self._files = _zip.namelist()
        # `files` holds the user-visible names: ".npy" suffixes stripped,
        # all other archive members kept verbatim
        self.files = []
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)

    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        #   more than is strictly necessary.  The zipfile
        #   will read the string and then
        #   the format.read_array will copy the string
        #   to another place in memory.
        #   It would be better if the zipfile could read
        #   (or at least uncompress) the data
        #   directly into the array memory.
        member = False
        if key in self._files:
            member = True
        elif key in self.files:
            member = True
            key += '.npy'
        if member:
            # BUG FIX: local was named `bytes`, shadowing the `bytes` name
            # imported from numpy.compat at module scope
            data = self.zip.read(key)
            if data.startswith(format.MAGIC_PREFIX):
                value = BytesIO(data)
                return format.read_array(value)
            else:
                # non-.npy member: return the raw file contents
                return data
        else:
            # BUG FIX: was py2-only `raise KeyError, "..."` syntax
            raise KeyError("%s is not a file in the archive" % key)

    def __iter__(self):
        return iter(self.files)

    def items(self):
        """
        Return a list of tuples, with each tuple (filename, array in file).

        """
        return [(f, self[f]) for f in self.files]

    def iteritems(self):
        """Generator that returns tuples (filename, array in file)."""
        for f in self.files:
            yield (f, self[f])

    def keys(self):
        """Return files in the archive with a ".npy" extension."""
        return self.files

    def iterkeys(self):
        """Return an iterator over the files in the archive."""
        return self.__iter__()

    def __contains__(self, key):
        return self.files.__contains__(key)
def load(file, mmap_mode=None):
    """
    Load a pickled, ``.npy``, or ``.npz`` binary file.

    Parameters
    ----------
    file : file-like object or string
        The file to read.  It must support ``seek()`` and ``read()`` methods.
        If the filename extension is ``.gz``, the file is first decompressed.
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode
        (see `numpy.memmap`).  The mode has no effect for pickled or
        zipped files.
        A memory-mapped array is stored on disk, and not directly loaded
        into memory.  However, it can be accessed and sliced like any
        ndarray.  Memory mapping is especially useful for accessing
        small fragments of large files without reading the entire file
        into memory.

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file.

    Raises
    ------
    IOError
        If the input file does not exist or cannot be read.

    See Also
    --------
    save, savez, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.

    Notes
    -----
    - If the file contains pickle data, then whatever is stored in the
      pickle is returned.
    - If the file is a ``.npy`` file, then an array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.

    Examples
    --------
    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])

    """
    import gzip

    # NOTE(review): `basestring` is py2-only; this transitional module still
    # assumes a py2 runtime here
    if isinstance(file, basestring):
        fid = open(file, "rb")
    elif isinstance(file, gzip.GzipFile):
        fid = seek_gzip_factory(file)
    else:
        fid = file

    # Code to distinguish from NumPy binary files and pickles.
    _ZIP_PREFIX = asbytes('PK\x03\x04')
    N = len(format.MAGIC_PREFIX)
    magic = fid.read(N)
    fid.seek(-N, 1) # back-up
    if magic.startswith(_ZIP_PREFIX):  # zip-file (assume .npz)
        return NpzFile(fid)
    elif magic == format.MAGIC_PREFIX: # .npy file
        if mmap_mode:
            return format.open_memmap(file, mode=mmap_mode)
        else:
            return format.read_array(fid)
    else:  # Try a pickle
        try:
            return _cload(fid)
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; also py2-only raise syntax
        except Exception:
            raise IOError(
                "Failed to interpret file %s as a pickle" % repr(file))
def save(file, arr):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file or str
        File or filename to which the data is saved.  If file is a file-object,
        then the filename is unchanged.  If file is a string, a ``.npy``
        extension will be appended to the file name if it does not already
        have one.
    arr : array_like
        Array data to be saved.

    See Also
    --------
    savez : Save several arrays into a ``.npz`` compressed archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see `format`.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()

    >>> x = np.arange(10)
    >>> np.save(outfile, x)

    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Track whether we opened the file ourselves so we can close it again
    own_fid = False
    if isinstance(file, basestring):
        if not file.endswith('.npy'):
            file = file + '.npy'
        fid = open(file, "wb")
        own_fid = True
    else:
        fid = file

    try:
        arr = np.asanyarray(arr)
        format.write_array(fid, arr)
    finally:
        # BUG FIX: the file handle was previously never closed (resource leak)
        if own_fid:
            fid.close()
def savez(file, *args, **kwds):
    """
    Save several arrays into a single, compressed file in ``.npz`` format.

    If arguments are passed in with no keywords, the corresponding variable
    names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
    are given, the corresponding variable names, in the ``.npz`` file will
    match the keyword names.

    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string, the ``.npz``
        extension will be appended to the file name if it is not already there.
    \\*args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know the names of the arrays outside `savez`, the arrays will be saved
        with names "arr_0", "arr_1", and so on. These arguments can be any
        expression.
    \\*\\*kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.

    Returns
    -------
    None

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain.  Each file contains one variable in ``.npy``
    format. For a description of the ``.npy`` format, see `format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_1', 'arr_0']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\*\\*kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['y', 'x']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile
    # Import deferred for startup time improvement
    import tempfile

    if isinstance(file, basestring):
        if not file.endswith('.npz'):
            file = file + '.npz'

    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            # BUG FIX: was py2-only `raise ValueError, "..."` syntax
            raise ValueError(
                "Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val

    # BUG FIX: local was named `zip`, shadowing the builtin
    zipf = zipfile.ZipFile(file, mode="w")

    # Stage arrays in a temporary file on disk, before writing to zip.
    fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
    os.close(fd)
    try:
        for key, val in namedict.iteritems():
            fname = key + '.npy'
            fid = open(tmpfile, 'wb')
            try:
                format.write_array(fid, np.asanyarray(val))
                fid.close()
                fid = None
                zipf.write(tmpfile, arcname=fname)
            finally:
                if fid:
                    fid.close()
    finally:
        os.remove(tmpfile)
        # BUG FIX: close moved into finally so the archive handle is not
        # leaked when writing raises
        zipf.close()
# Adapted from matplotlib
def _getconv(dtype):
    """Return a callable that converts a text token to `dtype`'s scalar kind.

    Integers go through float() first so tokens like "3.0" parse; bools are
    parsed via int so "0"/"1" work.
    """
    typ = dtype.type
    if issubclass(typ, np.bool_):
        return lambda x: bool(int(x))
    if issubclass(typ, np.integer):
        return lambda x: int(float(x))
    elif issubclass(typ, np.floating):
        return float
    elif issubclass(typ, complex):
        # BUG FIX: was np.complex, a deprecated (now removed) alias of the
        # builtin complex — behavior is identical
        return complex
    elif issubclass(typ, np.bytes_):
        return bytes
    else:
        return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is ``.gz`` or
``.bz2``, the file is first decompressed.
dtype : dtype, optional
Data type of the resulting array. If this is a record data-type,
the resulting array will be 1-dimensional, and each row will be
interpreted as an element of the array. In this case, the number
of columns used must match the number of fields in the data-type.
comments : str, optional
The character used to indicate the start of a comment.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data:
``converters = {3: lambda s: float(s or 0)}``.
skiprows : int, optional
Skip the first `skiprows` lines.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. Default is False.
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads Matlab(R) data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
if delimiter is not None:
delimiter = asbytes(delimiter)
user_converters = converters
if usecols is not None:
usecols = list(usecols)
isstring = False
if _is_string_like(fname):
isstring = True
if fname.endswith('.gz'):
import gzip
fh = seek_gzip_factory(fname)
elif fname.endswith('.bz2'):
import bz2
fh = bz2.BZ2File(fname)
else:
fh = open(fname, 'U')
elif hasattr(fname, 'readline'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
return [dt.base] * int(np.prod(dt.shape))
else:
types = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt = flatten_dtype(tp)
types.extend(flat_dt)
return types
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip()
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.readline()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
while not first_vals:
first_line = fh.readline()
if not first_line: # EOF reached
raise IOError('End-of-file reached before encountering data.')
first_vals = split_line(first_line)
N = len(usecols or first_vals)
dtype_types = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
X.append(tuple([conv(val) for (conv, val) in zip(converters, vals)]))
finally:
if isstring:
fh.close()
if len(dtype_types) > 1:
# We're dealing with a structured array, with a dtype such as
# [('x', int), ('y', [('s', int), ('t', float)])]
#
# First, create the array using a flattened dtype:
# [('x', int), ('s', int), ('t', float)]
#
# Then, view the array using the specified dtype.
try:
X = np.array(X, dtype=np.dtype([('', t) for t in dtype_types]))
X = X.view(dtype)
except TypeError:
# In the case we have an object dtype
X = np.array(X, dtype=dtype)
else:
X = np.array(X, dtype)
X = np.squeeze(X)
if unpack:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'):
    """
    Save an array to a text file.

    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format. `loadtxt` understands gzipped files
        transparently.
    X : array_like
        Data to be saved to a text file.
    fmt : str or sequence of strs
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored.
    delimiter : str
        Character separating columns.
    newline : str
        .. versionadded:: 2.0

        Character separating lines.

    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into a ``.npz`` compressed archive

    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):

    flags:
        ``-`` : left justify

        ``+`` : Forces to preceed result with + or -.

        ``0`` : Left pad the number with zeros instead of space (see width).

    width:
        Minimum number of characters to be printed. The value is not truncated
        if it has more characters.

    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.

    specifiers:
        ``c`` : character

        ``d`` or ``i`` : signed decimal integer

        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.

        ``f`` : decimal floating point

        ``g,G`` : use the shorter of ``e,E`` or ``f``

        ``o`` : signed octal

        ``s`` : string of characters

        ``u`` : unsigned decimal integer

        ``x,X`` : unsigned hexadecimal integer

    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.

    References
    ----------
    .. [1] `Format Specification Mini-Language
           <http://docs.python.org/library/string.html#
           format-specification-mini-language>`_, Python Documentation.

    Examples
    --------
    >>> x = y = z = np.arange(0.0,5.0,1.0)
    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation

    """
    # Py3 conversions first
    if isinstance(fmt, bytes):
        fmt = asstr(fmt)
    delimiter = asstr(delimiter)

    # BUG FIX: a handle opened by this function was previously never closed,
    # leaking the descriptor and potentially leaving gzip output truncated.
    # Track ownership so we close only what we opened.
    own_fh = False
    if _is_string_like(fname):
        own_fh = True
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname, 'wb')
        else:
            if sys.version_info[0] >= 3:
                fh = open(fname, 'wb')
            else:
                fh = open(fname, 'w')
    elif hasattr(fname, 'seek'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')

    try:
        X = np.asarray(X)

        # Handle 1-dimensional arrays
        if X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1
            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.descr)
        else:
            ncol = X.shape[1]

        # `fmt` can be a string with multiple insertion points or a list of
        # formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '$10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            format = asstr(delimiter).join(map(asstr, fmt))
        elif type(fmt) is str:
            if fmt.count('%') == 1:
                # One format given: replicate it for every column.
                fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif fmt.count('%') != ncol:
                raise AttributeError('fmt has wrong number of %% formats. %s'
                                     % fmt)
            else:
                format = fmt

        # Write one formatted line per row.
        for row in X:
            fh.write(asbytes(format % tuple(row) + newline))
    finally:
        # Close only handles we opened ourselves; caller-supplied handles
        # remain open for further use.
        if own_fh:
            fh.close()
import re
def fromregex(file, regexp, dtype):
    """
    Construct an array from a text file, using regular expression parsing.

    The returned array is always a structured array, and is constructed from
    all matches of the regular expression in the file. Groups in the regular
    expression are converted to fields of the structured array.

    Parameters
    ----------
    file : str or file
        File name or file object to read.
    regexp : str or regexp
        Regular expression used to parse the file.
        Groups in the regular expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array.

    Returns
    -------
    output : ndarray
        The output array, containing the part of the content of `file` that
        was matched by `regexp`. `output` is always a structured array.

    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.

    See Also
    --------
    fromstring, loadtxt

    Notes
    -----
    Dtypes for structured arrays can be specified in several forms, but all
    forms specify at least the data type and field name. For details see
    `doc.structured_arrays`.

    Examples
    --------
    >>> f = open('test.dat', 'w')
    >>> f.write("1312 foo\\n1534  bar\\n444   qux")
    >>> f.close()

    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
    >>> output = np.fromregex('test.dat', regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
    >>> output
    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
          dtype=[('num', '<i8'), ('key', '|S3')])
    >>> output['num']
    array([1312, 1534,  444], dtype=int64)

    """
    # BUG FIX: a file opened by this function was previously never closed.
    # Track ownership so we close only what we opened.
    own_fh = False
    if not hasattr(file, "read"):
        file = open(file, 'rb')
        own_fh = True
    try:
        if not hasattr(regexp, 'match'):
            regexp = re.compile(asbytes(regexp))
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)

        seq = regexp.findall(file.read())
        if seq and not isinstance(seq[0], tuple):
            # Only one group is in the regexp.
            # Create the new array as a single data-type and then
            # re-interpret as a single-field structured array.
            newdtype = np.dtype(dtype[dtype.names[0]])
            output = np.array(seq, dtype=newdtype)
            output.dtype = dtype
        else:
            output = np.array(seq, dtype=dtype)

        return output
    finally:
        # Close only handles we opened ourselves.
        if own_fh:
            file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
               skiprows=0, skip_header=0, skip_footer=0, converters=None,
               missing='', missing_values=None, filling_values=None,
               usecols=None, names=None,
               excludelist=None, deletechars=None, replace_space='_',
               autostrip=False, case_sensitive=True, defaultfmt="f%i",
               unpack=None, usemask=False, loose=True, invalid_raise=True):
    """
    Load data from a text file, with missing values handled as specified.

    Each line past the first `skiprows` lines is split at the `delimiter`
    character, and characters following the `comments` character are discarded.

    Parameters
    ----------
    fname : file or str
        File or filename to read.  If the filename extension is `.gz` or
        `.bz2`, the file is first decompressed.
    dtype : dtype, optional
        Data type of the resulting array.
        If None, the dtypes will be determined by the contents of each
        column, individually.
    comments : str, optional
        The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded
    delimiter : str, int, or sequence, optional
        The string used to separate values.  By default, any consecutive
        whitespaces act as delimiter.  An integer or sequence of integers
        can also be provided as width(s) of each field.
    skip_header : int, optional
        The numbers of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The numbers of lines to skip at the end of the file
    converters : variable or None, optional
        The set of functions that convert the data of a column to a value.
        The converters can also be used to provide a default value
        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
    missing_values : variable or None, optional
        The set of strings corresponding to missing data.
    filling_values : variable or None, optional
        The set of values to be used as default when the data are missing.
    usecols : sequence or None, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
    names : {None, True, str, sequence}, optional
        If `names` is True, the field names are read from the first valid line
        after the first `skiprows` lines.
        If `names` is a sequence or a single-string of comma-separated names,
        the names will be used to define the field names in a structured dtype.
        If `names` is None, the names of the dtype fields will be used, if any.
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names are appended an underscore:
        for example, `file` would become `file_`.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    defaultfmt : str, optional
        A format used to define default field names, such as "f%i" or "f_%02i".
    autostrip : bool, optional
        Whether to automatically strip white spaces from the variables.
    replace_space : char, optional
        Character(s) used in replacement of white spaces in the variables names.
        By default, use a '_'.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        If True, field names are case sensitive.
        If False or 'upper', field names are converted to upper case.
        If 'lower', field names are converted to lower case.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``
    usemask : bool, optional
        If True, return a masked array.
        If False, return a regular array.
    invalid_raise : bool, optional
        If True, an exception is raised if an inconsistency is detected in the
        number of columns.
        If False, a warning is emitted and the offending lines are skipped.

    Returns
    -------
    out : ndarray
        Data read from the text file. If `usemask` is True, this is a
        masked array.

    See Also
    --------
    numpy.loadtxt : equivalent function when no data is missing.

    Notes
    -----
    * When spaces are used as delimiters, or when no delimiter has been given
      as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`,
      there must not be any header in the file (else a ValueError
      exception is raised).
    * Individual values are not stripped of spaces by default.
      When using a custom converter, make sure the function does remove spaces.

    Examples
    ---------
    >>> from StringIO import StringIO
    >>> import numpy as np

    Comma delimited file with mixed dtype

    >>> s = StringIO("1,1.3,abcde")
    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
    ... ('mystring','S5')], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    Using dtype = None

    >>> s.seek(0) # needed for StringIO example only
    >>> data = np.genfromtxt(s, dtype=None,
    ... names = ['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    Specifying dtype and names

    >>> s.seek(0)
    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
    ... names=['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    An example with fixed-width columns

    >>> s = StringIO("11.3abcde")
    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
    ...     delimiter=[1,3,5])
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])

    """
    # Py3 data conversions to bytes, for convenience
    comments = asbytes(comments)
    if isinstance(delimiter, unicode):
        delimiter = asbytes(delimiter)
    if isinstance(missing, unicode):
        missing = asbytes(missing)
    if isinstance(missing_values, (unicode, list, tuple)):
        missing_values = asbytes_nested(missing_values)
    #
    if usemask:
        from numpy.ma import MaskedArray, make_mask_descr
    # Check the input dictionary of converters
    user_converters = converters or {}
    if not isinstance(user_converters, dict):
        errmsg = "The input argument 'converter' should be a valid dictionary "\
            "(got '%s' instead)"
        raise TypeError(errmsg % type(user_converters))
    # Initialize the filehandle, the LineSplitter and the NameValidator
    if isinstance(fname, basestring):
        fhd = np.lib._datasource.open(fname, 'U')
    elif not hasattr(fname, 'read'):
        raise TypeError("The input should be a string or a filehandle. "\
            "(got %s instead)" % type(fname))
    else:
        fhd = fname
    split_line = LineSplitter(delimiter=delimiter, comments=comments,
                              autostrip=autostrip)._handyman
    validate_names = NameValidator(excludelist=excludelist,
                                   deletechars=deletechars,
                                   case_sensitive=case_sensitive,
                                   replace_space=replace_space)
    # Get the first valid lines after the first skiprows ones ..
    if skiprows:
        # `skiprows` is a deprecated alias of `skip_header`.
        warnings.warn("The use of `skiprows` is deprecated.\n"\
                      "Please use `skip_header` instead.",
                      DeprecationWarning)
        skip_header = skiprows
    # Skip the first `skip_header` rows
    for i in xrange(skip_header):
        fhd.readline()
    # Keep on until we find the first valid values
    first_values = None
    while not first_values:
        first_line = fhd.readline()
        if not first_line:
            raise IOError('End-of-file reached before encountering data.')
        if names is True:
            # When reading names from the file, a commented header line is
            # still usable: drop the comment marker and keep the rest.
            if comments in first_line:
                first_line = asbytes('').join(first_line.split(comments)[1:])
        first_values = split_line(first_line)
    # Should we take the first values as names ?
    if names is True:
        fval = first_values[0].strip()
        if fval in comments:
            del first_values[0]
    # Check the columns to use
    if usecols is not None:
        # Accept a comma-separated string, any iterable, or a single index.
        try:
            usecols = [_.strip() for _ in usecols.split(",")]
        except AttributeError:
            try:
                usecols = list(usecols)
            except TypeError:
                usecols = [usecols, ]
    nbcols = len(usecols or first_values)
    # Check the names and overwrite the dtype.names if needed
    if names is True:
        names = validate_names([_bytes_to_name(_.strip())
                                for _ in first_values])
        # The header line has been consumed as names -- blank it out so the
        # parsing loop below skips it.
        first_line = asbytes('')
    elif _is_string_like(names):
        names = validate_names([_.strip() for _ in names.split(',')])
    elif names:
        names = validate_names(names)
    # Get the dtype
    if dtype is not None:
        dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
        names = dtype.names
    # Make sure the names is a list (for 2.5)
    if names is not None:
        names = list(names)
    if usecols:
        for (i, current) in enumerate(usecols):
            # if usecols is a list of names, convert to a list of indices
            if _is_string_like(current):
                usecols[i] = names.index(current)
            elif current < 0:
                usecols[i] = current + len(first_values)
        # If the dtype is not None, make sure we update it
        if (dtype is not None) and (len(dtype) > nbcols):
            descr = dtype.descr
            dtype = np.dtype([descr[_] for _ in usecols])
            names = list(dtype.names)
        # If `names` is not None, update the names
        elif (names is not None) and (len(names) > nbcols):
            names = [names[_] for _ in usecols]
    # Process the missing values ...............................
    # Rename missing_values for convenience
    user_missing_values = missing_values or ()
    # Define the list of missing_values (one column: one list)
    missing_values = [list([asbytes('')]) for _ in range(nbcols)]
    # We have a dictionary: process it field by field
    if isinstance(user_missing_values, dict):
        # Loop on the items
        for (key, val) in user_missing_values.items():
            # Is the key a string ?
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped, then
                    continue
            # Redefine the key as needed if it's a column number
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Transform the value as a list of string
            if isinstance(val, (list, tuple)):
                val = [str(_) for _ in val]
            else:
                val = [str(val), ]
            # Add the value(s) to the current list of missing
            if key is None:
                # None acts as default
                for miss in missing_values:
                    miss.extend(val)
            else:
                missing_values[key].extend(val)
    # We have a sequence : each item matches a column
    elif isinstance(user_missing_values, (list, tuple)):
        for (value, entry) in zip(user_missing_values, missing_values):
            value = str(value)
            if value not in entry:
                entry.append(value)
    # We have a string : apply it to all entries
    elif isinstance(user_missing_values, bytes):
        user_value = user_missing_values.split(asbytes(","))
        for entry in missing_values:
            entry.extend(user_value)
    # We have something else: apply it to all entries
    else:
        for entry in missing_values:
            entry.extend([str(user_missing_values)])
    # Process the deprecated `missing`
    if missing != asbytes(''):
        warnings.warn("The use of `missing` is deprecated.\n"\
                      "Please use `missing_values` instead.",
                      DeprecationWarning)
        values = [str(_) for _ in missing.split(asbytes(","))]
        for entry in missing_values:
            entry.extend(values)
    # Process the filling_values ...............................
    # Rename the input for convenience
    user_filling_values = filling_values or []
    # Define the default
    filling_values = [None] * nbcols
    # We have a dictionary : update each entry individually
    if isinstance(user_filling_values, dict):
        for (key, val) in user_filling_values.items():
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped, then
                    continue
            # Redefine the key if it's a column number and usecols is defined
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Add the value to the list
            filling_values[key] = val
    # We have a sequence : update on a one-to-one basis
    elif isinstance(user_filling_values, (list, tuple)):
        n = len(user_filling_values)
        if (n <= nbcols):
            filling_values[:n] = user_filling_values
        else:
            filling_values = user_filling_values[:nbcols]
    # We have something else : use it for all entries
    else:
        filling_values = [user_filling_values] * nbcols
    # Initialize the converters ................................
    if dtype is None:
        # Note: we can't use a [...]*nbcols, as we would have 3 times the same
        # ... converter, instead of 3 different converters.
        converters = [StringConverter(None, missing_values=miss, default=fill)
                      for (miss, fill) in zip(missing_values, filling_values)]
    else:
        dtype_flat = flatten_dtype(dtype, flatten_base=True)
        # Initialize the converters
        if len(dtype_flat) > 1:
            # Flexible type : get a converter from each dtype
            zipit = zip(dtype_flat, missing_values, filling_values)
            converters = [StringConverter(dt, locked=True,
                                          missing_values=miss, default=fill)
                          for (dt, miss, fill) in zipit]
        else:
            # Set to a default converter (but w/ different missing values)
            zipit = zip(missing_values, filling_values)
            converters = [StringConverter(dtype, locked=True,
                                          missing_values=miss, default=fill)
                          for (miss, fill) in zipit]
    # Update the converters to use the user-defined ones
    uc_update = []
    for (i, conv) in user_converters.items():
        # If the converter is specified by column names, use the index instead
        if _is_string_like(i):
            try:
                i = names.index(i)
            except ValueError:
                continue
        elif usecols:
            try:
                i = usecols.index(i)
            except ValueError:
                # Unused converter specified
                continue
        converters[i].update(conv, locked=True,
                             default=filling_values[i],
                             missing_values=missing_values[i],)
        uc_update.append((i, conv))
    # Make sure we have the corrected keys in user_converters...
    user_converters.update(uc_update)
    miss_chars = [_.missing_values for _ in converters]
    # Initialize the output lists ...
    # ... rows
    rows = []
    append_to_rows = rows.append
    # ... masks
    if usemask:
        masks = []
        append_to_masks = masks.append
    # ... invalid
    invalid = []
    append_to_invalid = invalid.append
    # Parse each line.  The first data line was consumed above while sniffing
    # the column count, so chain it back in front of the remaining lines.
    for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
        values = split_line(line)
        nbvalues = len(values)
        # Skip an empty line
        if nbvalues == 0:
            continue
        # Select only the columns we need
        if usecols:
            try:
                values = [values[_] for _ in usecols]
            except IndexError:
                append_to_invalid((i, nbvalues))
                continue
        elif nbvalues != nbcols:
            append_to_invalid((i, nbvalues))
            continue
        # Store the values
        append_to_rows(tuple(values))
        if usemask:
            append_to_masks(tuple([v.strip() in m
                                   for (v, m) in zip(values, missing_values)]))
    # Strip the last skip_footer data
    if skip_footer > 0:
        rows = rows[:-skip_footer]
        if usemask:
            masks = masks[:-skip_footer]
    # Upgrade the converters (if needed)
    if dtype is None:
        for (i, converter) in enumerate(converters):
            current_column = map(itemgetter(i), rows)
            try:
                converter.iterupgrade(current_column)
            except ConverterLockError:
                errmsg = "Converter #%i is locked and cannot be upgraded: " % i
                current_column = itertools.imap(itemgetter(i), rows)
                for (j, value) in enumerate(current_column):
                    try:
                        converter.upgrade(value)
                    except (ConverterError, ValueError):
                        errmsg += "(occurred line #%i for value '%s')"
                        errmsg %= (j + 1 + skip_header, value)
                        raise ConverterError(errmsg)
    # Check that we don't have invalid values
    if len(invalid) > 0:
        nbrows = len(rows)
        # Construct the error message
        template = " Line #%%i (got %%i columns instead of %i)" % nbcols
        if skip_footer > 0:
            nbrows -= skip_footer
            errmsg = [template % (i + skip_header + 1, nb)
                      for (i, nb) in invalid if i < nbrows]
        else:
            errmsg = [template % (i + skip_header + 1, nb)
                      for (i, nb) in invalid]
        if len(errmsg):
            errmsg.insert(0, "Some errors were detected !")
            errmsg = "\n".join(errmsg)
            # Raise an exception ?
            if invalid_raise:
                raise ValueError(errmsg)
            # Issue a warning ?
            else:
                warnings.warn(errmsg, ConversionWarning)
    # Convert each value according to the converter:
    # We want to modify the list in place to avoid creating a new one...
    #    if loose:
    #        conversionfuncs = [conv._loose_call for conv in converters]
    #    else:
    #        conversionfuncs = [conv._strict_call for conv in converters]
    #    for (i, vals) in enumerate(rows):
    #        rows[i] = tuple([convert(val)
    #                         for (convert, val) in zip(conversionfuncs, vals)])
    # NOTE(review): the expressions below rely on Python 2 semantics where
    # map() and zip() return lists, not iterators -- verify before porting.
    if loose:
        rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
                     for (i, converter) in enumerate(converters)])
    else:
        rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
                     for (i, converter) in enumerate(converters)])
    # Reset the dtype
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters
        column_types = [conv.type for conv in converters]
        # Find the columns with strings...
        strcolidx = [i for (i, v) in enumerate(column_types)
                     if v in (type('S'), np.string_)]
        # ... and take the largest number of chars.
        for i in strcolidx:
            column_types[i] = "|S%i" % max(len(row[i]) for row in data)
        #
        if names is None:
            # If the dtype is uniform, don't define names, else use ''
            base = set([c.type for c in converters if c._checked])
            if len(base) == 1:
                (ddtype, mdtype) = (list(base)[0], np.bool)
            else:
                ddtype = [(defaultfmt % i, dt)
                          for (i, dt) in enumerate(column_types)]
                if usemask:
                    mdtype = [(defaultfmt % i, np.bool)
                              for (i, dt) in enumerate(column_types)]
        else:
            ddtype = zip(names, column_types)
            mdtype = zip(names, [np.bool] * len(column_types))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed
        if names and dtype.names:
            dtype.names = names
        # Case 1. We have a structured type
        if len(dtype_flat) > 1:
            # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
            # First, create the array using a flattened dtype:
            # [('a', int), ('b1', int), ('b2', float)]
            # Then, view the array using the specified dtype.
            if 'O' in (_.char for _ in dtype_flat):
                if has_nested_fields(dtype):
                    errmsg = "Nested fields involving objects "\
                        "are not supported..."
                    raise NotImplementedError(errmsg)
                else:
                    output = np.array(data, dtype=dtype)
            else:
                rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
                output = rows.view(dtype)
            # Now, process the rowmasks the same way
            if usemask:
                rowmasks = np.array(masks,
                                    dtype=np.dtype([('', np.bool)
                                                    for t in dtype_flat]))
                # Construct the new dtype
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case #2. We have a basic dtype
        else:
            # We used some user-defined converters
            if user_converters:
                ishomogeneous = True
                descr = []
                for (i, ttype) in enumerate([conv.type for conv in converters]):
                    # Keep the dtype of the current converter
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if ttype == np.string_:
                            ttype = "|S%i" % max(len(row[i]) for row in data)
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype ?
                if not ishomogeneous:
                    # We have more than one field
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            #
            output = np.array(data, dtype)
            if usemask:
                if dtype.names:
                    mdtype = [(_, np.bool) for _ in dtype.names]
                else:
                    mdtype = np.bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed
    names = output.dtype.names
    if usemask and names:
        for (name, conv) in zip(names or (), converters):
            missing_values = [conv(_) for _ in conv.missing_values
                              if _ != asbytes('')]
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask
    if unpack:
        return output.squeeze().T
    return output.squeeze()
def ndfromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a file and return it as a single array.

    Thin wrapper around `genfromtxt` that forces ``usemask=False`` so that
    a plain ndarray is always returned; every other keyword argument is
    forwarded unchanged.

    See Also
    --------
    numpy.genfromtxt : generic function.

    """
    kwargs.update(usemask=False)
    return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a text file and return a masked array.

    Thin wrapper around `genfromtxt` that forces ``usemask=True``; every
    other keyword argument is forwarded unchanged.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    """
    kwargs.update(usemask=True)
    return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
    """
    Load ASCII data from a file and return it in a record array.

    If ``usemask=False`` a standard `recarray` is returned,
    if ``usemask=True`` a MaskedRecords array is returned.

    Unless explicitly supplied, `dtype` defaults to None so that the
    data-type of the output is inferred from the file contents.  All
    keyword arguments are otherwise forwarded to `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function

    """
    # Default to dtype=None (auto-detection) without clobbering an
    # explicitly supplied dtype.
    kwargs.setdefault('dtype', None)
    usemask = kwargs.get('usemask', False)
    result = genfromtxt(fname, **kwargs)
    if usemask:
        from numpy.ma.mrecords import MaskedRecords
        return result.view(MaskedRecords)
    return result.view(np.recarray)
def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``,
    see `ma.mrecords.MaskedRecords`).

    For a complete description of all the input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    """
    # Historical behaviour: field names are lower-cased by default.
    case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
    names = kwargs.get('names', True)
    if names is None:
        names = True
    # BUG FIX: the dtype was previously looked up under the nonexistent
    # 'update' key, silently discarding any user-supplied `dtype`.
    kwargs.update(dtype=kwargs.get('dtype', None),
                  delimiter=kwargs.get('delimiter', ",") or ",",
                  names=names,
                  case_sensitive=case_sensitive)
    usemask = kwargs.get("usemask", False)
    output = genfromtxt(fname, **kwargs)
    if usemask:
        from numpy.ma.mrecords import MaskedRecords
        output = output.view(MaskedRecords)
    else:
        output = output.view(np.recarray)
    return output
| {
"repo_name": "dagss/numpy_svn",
"path": "numpy/lib/npyio.py",
"copies": "4",
"size": "56059",
"license": "bsd-3-clause",
"hash": -7707728112892135000,
"line_mean": 33.7760545906,
"line_max": 82,
"alpha_frac": 0.5646550955,
"autogenerated": false,
"ratio": 4.141780568895456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6706435664395456,
"avg_score": null,
"num_lines": null
} |
# Public API of this module: text and binary array I/O routines.
__all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
           'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
           'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import re
import sys
import itertools
import warnings
import weakref
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
from _compiled_base import packbits, unpackbits
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
# NOTE(review): backwards-compatibility alias for the private helper --
# presumably kept because external code imported the old name; confirm
# before removing.
_string_like = _is_string_like
def seek_gzip_factory(f):
    """Use this factory to produce the class so that we can do a lazy
    import on gzip.

    Wraps `f` (a filename or an already-open ``gzip.GzipFile``) in a
    GzipFile subclass whose `seek` supports forward positioning in the
    uncompressed stream.
    """
    import gzip

    class GzipFile(gzip.GzipFile):

        def seek(self, offset, whence=0):
            # figure out new position (we can only seek forwards)
            if whence == 1:
                # Relative seek: translate to an absolute offset.
                offset = self.offset + offset
            if whence not in [0, 1]:
                raise IOError("Illegal argument")
            if offset < self.offset:
                # for negative seek, rewind and do positive seek
                self.rewind()
            # NOTE(review): 'self.offset' is an internal attribute of the
            # Python 2 gzip.GzipFile implementation (position in the
            # uncompressed stream) -- verify on the targeted Python version.
            count = offset - self.offset
            # Reach the target by decompressing and discarding data in
            # 1 KiB chunks.
            for i in range(count // 1024):
                self.read(1024)
            self.read(count % 1024)

        def tell(self):
            # Position in the uncompressed stream (see NOTE above).
            return self.offset

    if isinstance(f, str):
        # A filename was given: open it through our seekable subclass.
        f = GzipFile(f)
    elif isinstance(f, gzip.GzipFile):
        # cast to our GzipFile if its already a gzip.GzipFile
        try:
            name = f.name
        except AttributeError:
            # Backward compatibility for <= 2.5
            name = f.filename
        mode = f.mode
        f = GzipFile(fileobj=f.fileobj, filename=name)
        f.mode = mode
    return f
class BagObj(object):
    """
    BagObj(obj)

    Expose the items of a mapping-like object as attributes.

    Every attribute looked up on a `BagObj` instance is translated into an
    item lookup (``obj[name]``) on the wrapped object; a missing key
    surfaces as an `AttributeError`, matching ordinary attribute semantics.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.

    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo(object):
    ...     def __getitem__(self, key):
    ...         # Called for every attribute access on BagObj(BagDemo())
    ...         return "Doesn't matter what you want, you're gonna get this"
    ...
    >>> bagobj = BO(BagDemo())
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"

    """

    def __init__(self, obj):
        # Keep only a weak proxy so that wrapping an object (e.g. an
        # NpzFile) does not extend its lifetime.
        self._obj = weakref.proxy(obj)

    def __getattribute__(self, key):
        wrapped = object.__getattribute__(self, '_obj')
        try:
            return wrapped[key]
        except KeyError:
            # Re-raise as AttributeError so normal attribute protocols
            # (hasattr, getattr defaults) behave as expected.
            raise AttributeError(key)
def zipfile_factory(*args, **kwargs):
    """Create a ``zipfile.ZipFile``, forwarding all arguments.

    The zipfile import is deferred so importers of this module do not
    pay for it up front.  On Python >= 2.5 the ZIP64 extension is
    enabled so archives may grow beyond 2 GiB.
    """
    import zipfile
    if not sys.version_info < (2, 5):
        kwargs['allowZip64'] = True
    return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` loads files in the NumPy ``.npz`` data archive format.
    Members whose names end in ".npy" are deserialized to arrays on
    access; any other member is returned as its raw contents.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``.  A list of all files (without ".npy" extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        Member names, with any ".npy" extension stripped.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        Gives attribute-style access to the same data as getitem.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.
    """
    def __init__(self, fid, own_fid=False):
        # zipfile is imported lazily (inside zipfile_factory) since it
        # pulls in gzip, an optional component of the standard library.
        archive = zipfile_factory(fid)
        self._files = archive.namelist()
        # Expose members without their ".npy" suffix.
        self.files = []
        for member in self._files:
            if member.endswith('.npy'):
                self.files.append(member[:-4])
            else:
                self.files.append(member)
        self.zip = archive
        self.f = BagObj(self)
        if own_fid:
            self.fid = fid
        else:
            self.fid = None
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def close(self):
        """
        Close the file.
        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # break reference cycle
    def __del__(self):
        self.close()
    def __getitem__(self, key):
        # FIXME: the member bytes are copied more than strictly
        # necessary (once by zipfile, once by format.read_array);
        # decompressing directly into the destination array memory
        # would avoid that.
        found = key in self._files
        if not found and key in self.files:
            # Bare name given for an ".npy" member: restore the suffix.
            found = True
            key += '.npy'
        if not found:
            raise KeyError("%s is not a file in the archive" % key)
        data = self.zip.read(key)
        if data.startswith(format.MAGIC_PREFIX):
            # An ``.npy`` member: deserialize it into an ndarray.
            return format.read_array(BytesIO(data))
        else:
            # Anything else is returned as the raw member contents.
            return data
    def __iter__(self):
        return iter(self.files)
    def items(self):
        """
        Return a list of tuples, with each tuple (filename, array in file).
        """
        return [(name, self[name]) for name in self.files]
    def iteritems(self):
        """Generator that returns tuples (filename, array in file)."""
        for name in self.files:
            yield (name, self[name])
    def keys(self):
        """Return files in the archive with a ".npy" extension."""
        return self.files
    def iterkeys(self):
        """Return an iterator over the files in the archive."""
        return self.__iter__()
    def __contains__(self, key):
        return key in self.files
def load(file, mmap_mode=None):
    """
    Load an array(s) or pickled objects from .npy, .npz, or pickled files.
    Parameters
    ----------
    file : file-like object or string
        The file to read. It must support ``seek()`` and ``read()`` methods.
        If the filename extension is ``.gz``, the file is first decompressed.
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode
        (see `numpy.memmap` for a detailed description of the modes).
        A memory-mapped array is kept on disk. However, it can be accessed
        and sliced like any ndarray. Memory mapping is especially useful for
        accessing small fragments of large files without reading the entire
        file into memory.
    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file. For '.npz' files, the returned instance of
        NpzFile class must be closed to avoid leaking file descriptors.
    Raises
    ------
    IOError
        If the input file does not exist or cannot be read.
    See Also
    --------
    save, savez, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.
    Notes
    -----
    - If the file contains pickle data, then whatever object is stored
      in the pickle is returned.
    - If the file is a ``.npy`` file, then a single array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.
    - If the file is a ``.npz`` file, the returned value supports the context
      manager protocol in a similar fashion to the open function::
        with load('foo.npz') as data:
            a = data['a']
      The underlyling file descriptor is closed when exiting the 'with' block.
    Examples
    --------
    Store data to disk, and load it again:
    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])
    Store compressed data to disk, and load it again:
    >>> a=np.array([[1, 2, 3], [4, 5, 6]])
    >>> b=np.array([1, 2])
    >>> np.savez('/tmp/123.npz', a=a, b=b)
    >>> data = np.load('/tmp/123.npz')
    >>> data['a']
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> data['b']
    array([1, 2])
    >>> data.close()
    Mem-map the stored array, and then access the second row
    directly from disk:
    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])
    """
    import gzip
    own_fid = False
    if isinstance(file, basestring):
        fid = open(file, "rb")
        own_fid = True
    elif isinstance(file, gzip.GzipFile):
        fid = seek_gzip_factory(file)
    else:
        fid = file
    try:
        # Code to distinguish from NumPy binary files and pickles.
        _ZIP_PREFIX = asbytes('PK\x03\x04')
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        fid.seek(-N, 1)  # back-up
        if magic.startswith(_ZIP_PREFIX):  # zip-file (assume .npz)
            # Transfer ownership of `fid` to the NpzFile so the
            # descriptor is closed when the NpzFile is closed.  The
            # local flag is cleared first so the `finally` clause does
            # not close the handle underneath the returned object.
            # (Previously own_fid was simply forced to False, so a
            # handle opened from a filename was never closed.)
            tmp = own_fid
            own_fid = False
            return NpzFile(fid, own_fid=tmp)
        elif magic == format.MAGIC_PREFIX:  # .npy file
            if mmap_mode:
                return format.open_memmap(file, mode=mmap_mode)
            else:
                return format.read_array(fid)
        else:  # Try a pickle
            try:
                return _cload(fid)
            # Narrowed from a bare ``except`` so that e.g.
            # KeyboardInterrupt is not converted into an IOError.
            except Exception:
                raise IOError(
                    "Failed to interpret file %s as a pickle" % repr(file))
    finally:
        if own_fid:
            fid.close()
def save(file, arr):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file or str
        Destination.  A string is treated as a file name and a ``.npy``
        extension is appended when missing; a file object is written to
        directly and left open for the caller.
    arr : array_like
        Array data to be saved (converted via `np.asanyarray` first).

    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see `format`.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> np.save(outfile, x)
    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    own_fid = False
    if isinstance(file, basestring):
        # A path was given: enforce the conventional extension and open
        # the file ourselves, so we are responsible for closing it.
        if not file.endswith('.npy'):
            file = file + '.npy'
        fid = open(file, "wb")
        own_fid = True
    else:
        fid = file
    try:
        format.write_array(fid, np.asanyarray(arr))
    finally:
        if own_fid:
            fid.close()
def savez(file, *args, **kwds):
    """
    Save several arrays into a single file in uncompressed ``.npz`` format.
    If arguments are passed in with no keywords, the corresponding variable
    names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
    are given, the corresponding variable names, in the ``.npz`` file will
    match the keyword names.
    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string, the ``.npz``
        extension will be appended to the file name if it is not already there.
    args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know the names of the arrays outside `savez`, the arrays will be saved
        with names "arr_0", "arr_1", and so on. These arguments can be any
        expression.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.
    Returns
    -------
    None
    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed .npz file format
    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain. The archive is not compressed and each file
    in the archive contains one variable in ``.npy`` format. For a
    description of the ``.npy`` format, see `format`.
    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.
    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    Using `savez` with \\*args, the arrays are saved with default names.
    >>> np.savez(outfile, x, y)
    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_1', 'arr_0']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    Using `savez` with \\**kwds, the arrays are saved with the keyword names.
    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['y', 'x']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    # Delegate to the shared implementation with compression disabled
    # (members are merely ZIP_STORED).
    _savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.
    If keyword arguments are given, then filenames are taken from the keywords.
    If arguments are passed in with no keywords, then stored file names are
    arr_0, arr_1, etc.
    Parameters
    ----------
    file : str
        File name of .npz file.
    args : Arguments
        Arrays to save to the file. They will be stored under the names
        "arr_0", "arr_1", and so on.
    kwds : Keyword arguments
        Arrays to save to the file. They will be stored under the
        keyword names.
    See Also
    --------
    numpy.savez : Save several arrays into an uncompressed .npz file format
    """
    # Delegate to the shared implementation with zipfile.ZIP_DEFLATED
    # compression enabled for each archive member.
    _savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
    """Implementation shared by `savez` and `savez_compressed`.

    Parameters
    ----------
    file : str or file
        Target archive; a ``.npz`` suffix is appended to string file
        names that lack one.
    args : tuple
        Positional arrays, stored under the names arr_0, arr_1, ...
    kwds : dict
        Keyword arrays, stored under their keyword names.
    compress : bool
        If True, archive members are deflated; otherwise merely stored.

    Raises
    ------
    ValueError
        If a generated positional name collides with a keyword name.
    """
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile
    # Import deferred for startup time improvement
    import tempfile
    if isinstance(file, basestring):
        if not file.endswith('.npz'):
            file = file + '.npz'
    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        # Membership test on the dict itself (not ``.keys()``) avoids
        # building an intermediate key list on Python 2.
        if key in namedict:
            raise ValueError("Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val
    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED
    # Named ``zipf`` rather than ``zip`` so the builtin is not shadowed.
    zipf = zipfile_factory(file, mode="w", compression=compression)
    # Stage arrays in a temporary file on disk, before writing to zip.
    fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
    os.close(fd)
    try:
        for key, val in namedict.iteritems():
            fname = key + '.npy'
            fid = open(tmpfile, 'wb')
            try:
                format.write_array(fid, np.asanyarray(val))
                fid.close()
                fid = None
                zipf.write(tmpfile, arcname=fname)
            finally:
                # fid is None once the member was written successfully;
                # otherwise close it here.
                if fid:
                    fid.close()
    finally:
        os.remove(tmpfile)
    zipf.close()
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0):
    """
    Load data from a text file.
    Each row in the text file must have the same number of values.
    Parameters
    ----------
    fname : file or str
        File, filename, or generator to read. If the filename extension is
        ``.gz`` or ``.bz2``, the file is first decompressed. Note that
        generators should return byte strings for Python 3k.
    dtype : data-type, optional
        Data-type of the resulting array; default: float. If this is a
        record data-type, the resulting array will be 1-dimensional, and
        each row will be interpreted as an element of the array. In this
        case, the number of columns used must match the number of fields in
        the data-type.
    comments : str, optional
        The character used to indicate the start of a comment;
        default: '#'.
    delimiter : str, optional
        The string used to separate values. By default, this is any
        whitespace.
    converters : dict, optional
        A dictionary mapping column number to a function that will convert
        that column to a float. E.g., if column 0 is a date string:
        ``converters = {0: datestr2num}``. Converters can also be used to
        provide a default value for missing data (but see also `genfromtxt`):
        ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
    skiprows : int, optional
        Skip the first `skiprows` lines; default: 0.
    usecols : sequence, optional
        Which columns to read, with 0 being the first. For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``. When used with a record
        data-type, arrays are returned for each field. Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.
        .. versionadded:: 1.6.0
    Returns
    -------
    out : ndarray
        Data read from the text file.
    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files
    Notes
    -----
    This function aims to be a fast reader for simply formatted files. The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.
    Examples
    --------
    >>> from StringIO import StringIO # StringIO behaves like a file object
    >>> c = StringIO("0 1\\n2 3")
    >>> np.loadtxt(c)
    array([[ 0., 1.],
           [ 2., 3.]])
    >>> d = StringIO("M 21 72\\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ... 'formats': ('S1', 'i4', 'f4')})
    array([('M', 21, 72.0), ('F', 35, 58.0)],
          dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
    >>> c = StringIO("1,0,2\\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([ 1., 3.])
    >>> y
    array([ 2., 4.])
    """
    # Type conversions for Py3 convenience
    comments = asbytes(comments)
    user_converters = converters
    if delimiter is not None:
        delimiter = asbytes(delimiter)
    if usecols is not None:
        usecols = list(usecols)
    fown = False
    try:
        if _is_string_like(fname):
            # We open the file ourselves, so we own it and must close it.
            fown = True
            if fname.endswith('.gz'):
                fh = iter(seek_gzip_factory(fname))
            elif fname.endswith('.bz2'):
                import bz2
                fh = iter(bz2.BZ2File(fname))
            else:
                fh = iter(open(fname, 'U'))
        else:
            # Assume an open file object or a generator of lines.
            fh = iter(fname)
    except TypeError:
        raise ValueError('fname must be a string, file handle, or generator')
    X = []
    def flatten_dtype(dt):
        """Unpack a structured data-type, and produce re-packing info."""
        if dt.names is None:
            # If the dtype is flattened, return.
            # If the dtype has a shape, the dtype occurs
            # in the list more than once.
            shape = dt.shape
            if len(shape) == 0:
                return ([dt.base], None)
            else:
                # Subarray field: emit one base dtype per element and
                # packing instructions that rebuild the nested lists,
                # innermost dimension first.
                packing = [(shape[-1], list)]
                if len(shape) > 1:
                    for dim in dt.shape[-2::-1]:
                        packing = [(dim*packing[0][0], packing*dim)]
                return ([dt.base] * int(np.prod(dt.shape)), packing)
        else:
            # Structured dtype: recurse into every field and
            # concatenate their flat types / packing recipes.
            types = []
            packing = []
            for field in dt.names:
                tp, bytes = dt.fields[field]
                flat_dt, flat_packing = flatten_dtype(tp)
                types.extend(flat_dt)
                # Avoid extra nesting for subarrays
                if len(tp.shape) > 0:
                    packing.extend(flat_packing)
                else:
                    packing.append((len(flat_dt), flat_packing))
            return (types, packing)
    def pack_items(items, packing):
        """Pack items into nested lists based on re-packing info."""
        if packing == None:
            # Scalar field: a single converted value.
            return items[0]
        elif packing is tuple:
            return tuple(items)
        elif packing is list:
            return list(items)
        else:
            # Sequence of (length, sub-packing) pairs: recurse on the
            # corresponding slices of `items`.
            start = 0
            ret = []
            for length, subpacking in packing:
                ret.append(pack_items(items[start:start+length], subpacking))
                start += length
            return tuple(ret)
    def split_line(line):
        """Chop off comments, strip, and split at delimiter."""
        line = asbytes(line).split(comments)[0].strip(asbytes('\r\n'))
        if line:
            return line.split(delimiter)
        else:
            return []
    try:
        # Make sure we're dealing with a proper dtype
        dtype = np.dtype(dtype)
        defconv = _getconv(dtype)
        # Skip the first `skiprows` lines
        for i in xrange(skiprows):
            fh.next()
        # Read until we find a line with some values, and use
        # it to estimate the number of columns, N.
        first_vals = None
        try:
            while not first_vals:
                first_line = fh.next()
                first_vals = split_line(first_line)
        except StopIteration:
            # End of lines reached
            first_line = ''
            first_vals = []
            warnings.warn('loadtxt: Empty input file: "%s"' % fname)
        N = len(usecols or first_vals)
        dtype_types, packing = flatten_dtype(dtype)
        if len(dtype_types) > 1:
            # We're dealing with a structured array, each field of
            # the dtype matches a column
            converters = [_getconv(dt) for dt in dtype_types]
        else:
            # All fields have the same dtype
            converters = [defconv for i in xrange(N)]
            if N > 1:
                packing = [(N, tuple)]
        # By preference, use the converters specified by the user
        for i, conv in (user_converters or {}).iteritems():
            if usecols:
                try:
                    # Map the user's column index onto the position it
                    # occupies in the selected columns.
                    i = usecols.index(i)
                except ValueError:
                    # Unused converter specified
                    continue
            converters[i] = conv
        # Parse each line, including the first
        for i, line in enumerate(itertools.chain([first_line], fh)):
            vals = split_line(line)
            if len(vals) == 0:
                continue
            if usecols:
                vals = [vals[i] for i in usecols]
            # Convert each value according to its column and store
            items = [conv(val) for (conv, val) in zip(converters, vals)]
            # Then pack it according to the dtype's nesting
            items = pack_items(items, packing)
            X.append(items)
    finally:
        if fown:
            fh.close()
    X = np.array(X, dtype)
    # Multicolumn data are returned with shape (1, N, M), i.e.
    # (1, 1, M) for a single row - remove the singleton dimension there
    if X.ndim == 3 and X.shape[:2] == (1, 1):
        X.shape = (1, -1)
    # Verify that the array has at least dimensions `ndmin`.
    # Check correctness of the values of `ndmin`
    if not ndmin in [0, 1, 2]:
        raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
    # Tweak the size and shape of the arrays - remove extraneous dimensions
    if X.ndim > ndmin:
        X = np.squeeze(X)
    # and ensure we have the minimum number of dimensions asked for
    # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
    if X.ndim < ndmin:
        if ndmin == 1:
            X = np.atleast_1d(X)
        elif ndmin == 2:
            X = np.atleast_2d(X).T
    if unpack:
        if len(dtype_types) > 1:
            # For structured arrays, return an array for each field.
            return [X[field] for field in dtype.names]
        else:
            return X.T
    else:
        return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
            footer='', comments='# '):
    """
    Save an array to a text file.
    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format. `loadtxt` understands gzipped files
        transparently.
    X : array_like
        Data to be saved to a text file.
    fmt : str or sequence of strs, optional
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored. For complex `X`, the legal options
        for `fmt` are:
            a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
               like `' (%s+%sj)' % (fmt, fmt)`
            b) a full string specifying every real and imaginary part, e.g.
               `' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
            c) a list of specifiers, one per column - in this case, the real
               and imaginary part must have separate specifiers,
               e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
    delimiter : str, optional
        Character separating columns.
    newline : str, optional
        Character separating lines.
        .. versionadded:: 1.5.0
    header : str, optional
        String that will be written at the beginning of the file.
        .. versionadded:: 1.7.0
    footer : str, optional
        String that will be written at the end of the file.
        .. versionadded:: 1.7.0
    comments : str, optional
        String that will be prepended to the ``header`` and ``footer`` strings,
        to mark them as comments. Default: '# ', as expected by e.g.
        ``numpy.loadtxt``.
        .. versionadded:: 1.7.0
    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into a ``.npz`` compressed archive
    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):
    flags:
        ``-`` : left justify
        ``+`` : Forces to preceed result with + or -.
        ``0`` : Left pad the number with zeros instead of space (see width).
    width:
        Minimum number of characters to be printed. The value is not truncated
        if it has more characters.
    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.
    specifiers:
        ``c`` : character
        ``d`` or ``i`` : signed decimal integer
        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
        ``f`` : decimal floating point
        ``g,G`` : use the shorter of ``e,E`` or ``f``
        ``o`` : signed octal
        ``s`` : string of characters
        ``u`` : unsigned decimal integer
        ``x,X`` : unsigned hexadecimal integer
    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.
    References
    ----------
    .. [1] `Format Specification Mini-Language
           <http://docs.python.org/library/string.html#
           format-specification-mini-language>`_, Python Documentation.
    Examples
    --------
    >>> x = y = z = np.arange(0.0,5.0,1.0)
    >>> np.savetxt('test.out', x, delimiter=',') # X is an array
    >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
    """
    # Py3 conversions first
    if isinstance(fmt, bytes):
        fmt = asstr(fmt)
    delimiter = asstr(delimiter)
    own_fh = False
    if _is_string_like(fname):
        # We open the file ourselves, so we own it and must close it.
        own_fh = True
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname, 'wb')
        else:
            if sys.version_info[0] >= 3:
                # Text is encoded explicitly via asbytes below, so the
                # file is opened in binary mode on Python 3.
                fh = open(fname, 'wb')
            else:
                fh = open(fname, 'w')
    elif hasattr(fname, 'seek'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    try:
        X = np.asarray(X)
        # Handle 1-dimensional arrays
        if X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1
            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.descr)
        else:
            ncol = X.shape[1]
        iscomplex_X = np.iscomplexobj(X)
        # `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            format = asstr(delimiter).join(map(asstr, fmt))
        elif type(fmt) is str:
            # The number of '%' signs decides whether `fmt` is a single
            # per-column specifier or a full multi-column row format.
            n_fmt_chars = fmt.count('%')
            error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
            if n_fmt_chars == 1:
                if iscomplex_X:
                    # Reuse the one specifier for both the real and the
                    # imaginary part of every column.
                    fmt = [' (%s+%sj)' % (fmt, fmt),] * ncol
                else:
                    fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif iscomplex_X and n_fmt_chars != (2 * ncol):
                raise error
            elif ((not iscomplex_X) and n_fmt_chars != ncol):
                raise error
            else:
                format = fmt
        if len(header) > 0:
            header = header.replace('\n', '\n' + comments)
            fh.write(asbytes(comments + header + newline))
        if iscomplex_X:
            for row in X:
                # Interleave real and imaginary parts so that they line
                # up with the doubled format specifiers.
                row2 = []
                for number in row:
                    row2.append(number.real)
                    row2.append(number.imag)
                fh.write(asbytes(format % tuple(row2) + newline))
        else:
            for row in X:
                fh.write(asbytes(format % tuple(row) + newline))
        if len(footer) > 0:
            footer = footer.replace('\n', '\n' + comments)
            fh.write(asbytes(comments + footer + newline))
    finally:
        if own_fh:
            fh.close()
def fromregex(file, regexp, dtype):
    """
    Construct an array from a text file, using regular expression parsing.

    Every match of `regexp` in the file contributes one element of the
    returned array, which is always a structured array; the pattern's
    groups become the fields.

    Parameters
    ----------
    file : str or file
        File name or file object to read.
    regexp : str or regexp
        Regular expression used to parse the file.
        Groups in the regular expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array.

    Returns
    -------
    output : ndarray
        The content of `file` matched by `regexp`, as a structured array.

    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.

    See Also
    --------
    fromstring, loadtxt

    Examples
    --------
    >>> f = open('test.dat', 'w')
    >>> f.write("1312 foo\\n1534 bar\\n444 qux")
    >>> f.close()
    >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
    >>> output = np.fromregex('test.dat', regexp,
    ... [('num', np.int64), ('key', 'S3')])
    >>> output
    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
          dtype=[('num', '<i8'), ('key', '|S3')])
    >>> output['num']
    array([1312, 1534, 444], dtype=int64)
    """
    own_fh = False
    if not hasattr(file, "read"):
        # Got a path rather than a file object: open it ourselves and
        # remember to close it on the way out.
        file = open(file, 'rb')
        own_fh = True
    try:
        if not hasattr(regexp, 'match'):
            regexp = re.compile(asbytes(regexp))
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)
        seq = regexp.findall(file.read())
        if seq and not isinstance(seq[0], tuple):
            # Only one group in the pattern, so findall returned
            # scalars.  Build a plain array of the single field's type,
            # then reinterpret it as a one-field structured array.
            newdtype = np.dtype(dtype[dtype.names[0]])
            output = np.array(seq, dtype=newdtype)
            output.dtype = dtype
            return output
        return np.array(seq, dtype=dtype)
    finally:
        if own_fh:
            file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skip_header : int, optional
The numbers of lines to skip at the beginning of the file.
skip_footer : int, optional
The numbers of lines to skip at the end of the file
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
* When the variables are named (either by a flexible dtype or with `names`,
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
---------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError("fname mustbe a string, filehandle, or generator. "\
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn(\
"The use of `skiprows` is deprecated, it will be removed in numpy 2.0.\n" \
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.next()
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = fhd.next()
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn(\
"The use of `missing` is deprecated, it will be removed in Numpy 2.0.\n" \
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a file and return it as a single array.

    Thin wrapper around `genfromtxt` that forces ``usemask=False`` so the
    result is always a plain ndarray, never a masked array.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function.

    """
    # Override any user-supplied `usemask`: this entry point is the
    # unmasked variant by definition.
    kwargs.update(usemask=False)
    return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a text file and return a masked array.

    Thin wrapper around `genfromtxt` that forces ``usemask=True`` so the
    result is always a masked array.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    """
    # Override any user-supplied `usemask`: this entry point is the
    # masked variant by definition.
    kwargs.update(usemask=True)
    return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
    """
    Load ASCII data from a file and return it in a record array.

    If ``usemask=False`` a standard `recarray` is returned,
    if ``usemask=True`` a MaskedRecords array is returned.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the output
    array will be determined from the data.

    """
    # Make the dtype default explicit: None tells genfromtxt to infer the
    # data-type from the file contents.
    kwargs.setdefault('dtype', None)
    usemask = kwargs.get('usemask', False)
    result = genfromtxt(fname, **kwargs)
    # Re-expose the structured result through the requested record interface.
    if usemask:
        from numpy.ma.mrecords import MaskedRecords
        return result.view(MaskedRecords)
    return result.view(np.recarray)
def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``,
    see `ma.mrecords.MaskedRecords`).

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    """
    # Set the defaults appropriate for CSV input: case-normalized names
    # taken from the first row, comma delimiter, inferred dtype.
    case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
    names = kwargs.get('names', True)
    if names is None:
        names = True
    # BUG FIX: the dtype default was previously read from kwargs['update'],
    # a key that never exists, so any user-supplied `dtype` was silently
    # replaced by None.  Read the correct 'dtype' key instead.
    kwargs.update(dtype=kwargs.get('dtype', None),
                  delimiter=kwargs.get('delimiter', ",") or ",",
                  names=names,
                  case_sensitive=case_sensitive)
    usemask = kwargs.get("usemask", False)
    output = genfromtxt(fname, **kwargs)
    if usemask:
        from numpy.ma.mrecords import MaskedRecords
        output = output.view(MaskedRecords)
    else:
        output = output.view(np.recarray)
    return output
| {
"repo_name": "pelson/numpy",
"path": "numpy/lib/npyio.py",
"copies": "9",
"size": "65323",
"license": "bsd-3-clause",
"hash": -4798908887681938000,
"line_mean": 33.9507758159,
"line_max": 87,
"alpha_frac": 0.5584709827,
"autogenerated": false,
"ratio": 4.1288793375892805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006620367064836386,
"num_lines": 1869
} |
__all__ = ['savetxt', 'loadtxt',
'genfromtxt', 'ndfromtxt', 'mafromtxt', 'recfromtxt', 'recfromcsv',
'load', 'loads',
'save', 'savez',
'packbits', 'unpackbits',
'fromregex',
'DataSource']
import numpy as np
import format
import cStringIO
import os
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
from _compiled_base import packbits, unpackbits
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype
_file = file
_string_like = _is_string_like
def seek_gzip_factory(f):
    """Use this factory to produce the class so that we can do a lazy
    import on gzip.

    Returns a ``gzip.GzipFile`` (opening `f` first if it is a filename)
    monkey-patched with ``seek`` and ``tell`` methods, since the plain
    GzipFile of this Python vintage lacks usable ones.
    """
    # `new` is the Python 2 module used below to bind the helper
    # functions as bound methods on the GzipFile instance.
    import gzip, new

    def seek(self, offset, whence=0):
        # figure out new position (we can only seek forwards)
        if whence == 1:
            # Relative seek: convert to an absolute offset.
            offset = self.offset + offset

        if whence not in [0, 1]:
            # SEEK_END (whence == 2) is not supported on a gzip stream.
            raise IOError, "Illegal argument"

        if offset < self.offset:
            # for negative seek, rewind and do positive seek
            self.rewind()
        count = offset - self.offset
        # Decompress forward in 1 KiB chunks until the target offset;
        # the data read here is discarded.
        for i in range(count // 1024):
            self.read(1024)
        self.read(count % 1024)

    def tell(self):
        # `offset` is maintained by GzipFile as the current uncompressed
        # position.
        return self.offset

    if isinstance(f, str):
        f = gzip.GzipFile(f)

    # Bind the helpers as instance methods (Python 2 idiom).
    f.seek = new.instancemethod(seek, f)
    f.tell = new.instancemethod(tell, f)

    return f
class BagObj(object):
    """
    BagObj(obj)

    Convert attribute lookups to getitems on the object passed in.

    Parameters
    ----------
    obj : class instance
        Object on which attribute lookup is performed.

    Examples
    --------
    >>> class BagDemo(object):
    ...     def __getitem__(self, key):
    ...         return key
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = np.lib.io.BagObj(demo_obj)
    >>> bagobj.some_item
    'some_item'

    """
    def __init__(self, obj):
        # Stash the wrapped object.  It is retrieved below through
        # object.__getattribute__ to avoid recursing through our own
        # __getattribute__ override.
        self._obj = obj

    def __getattribute__(self, key):
        # Route every attribute access to an item lookup on the wrapped
        # object; a missing key surfaces as the conventional AttributeError.
        try:
            return object.__getattribute__(self, '_obj')[key]
        except KeyError:
            raise AttributeError(key)
class NpzFile(object):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ".npy" extension,
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ".npy" extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ".npy" extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute can be performed as an alternative
        to getitem access on the `NpzFile` instance itself.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)

    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.io.NpzFile)
    True
    >>> npz.files
    ['y', 'x']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    def __init__(self, fid):
        # Import is postponed to here since zipfile depends on gzip, an optional
        # component of the so-called standard library.
        import zipfile
        _zip = zipfile.ZipFile(fid)
        # _files keeps the raw archive member names; files holds the same
        # names with any ".npy" suffix stripped (the user-facing keys).
        self._files = _zip.namelist()
        self.files = []
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        # Allow attribute-style access (npz.f.name) via BagObj.
        self.f = BagObj(self)

    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        #   more than is strictly necessary.  The zipfile
        #   will read the string and then
        #   the format.read_array will copy the string
        #   to another place in memory.
        #   It would be better if the zipfile could read
        #   (or at least uncompress) the data
        #   directly into the array memory.
        member = 0
        # Accept either the raw member name or the stripped key; in the
        # latter case re-append ".npy" to address the archive member.
        if key in self._files:
            member = 1
        elif key in self.files:
            member = 1
            key += '.npy'
        if member:
            bytes = self.zip.read(key)
            # Members that start with the .npy magic are deserialized to
            # arrays; anything else is returned as raw bytes.
            if bytes.startswith(format.MAGIC_PREFIX):
                value = cStringIO.StringIO(bytes)
                return format.read_array(value)
            else:
                return bytes
        else:
            raise KeyError, "%s is not a file in the archive" % key

    def __iter__(self):
        return iter(self.files)

    def items(self):
        """
        Return a list of tuples, with each tuple (filename, array in file).

        """
        return [(f, self[f]) for f in self.files]

    def iteritems(self):
        """Generator that returns tuples (filename, array in file)."""
        for f in self.files:
            yield (f, self[f])

    def keys(self):
        """Return files in the archive with a ".npy" extension."""
        return self.files

    def iterkeys(self):
        """Return an iterator over the files in the archive."""
        return self.__iter__()

    def __contains__(self, key):
        return self.files.__contains__(key)
def load(file, mmap_mode=None):
    """
    Load a pickled, ``.npy``, or ``.npz`` binary file.

    Parameters
    ----------
    file : file-like object or string
        The file to read.  It must support ``seek()`` and ``read()`` methods.
        If the filename extension is ``.gz``, the file is first decompressed.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode
        (see `numpy.memmap`).  The mode has no effect for pickled or
        zipped files.
        A memory-mapped array is stored on disk, and not directly loaded
        into memory.  However, it can be accessed and sliced like any
        ndarray.  Memory mapping is especially useful for accessing
        small fragments of large files without reading the entire file
        into memory.

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file.

    Raises
    ------
    IOError
        If the input file does not exist or cannot be read.

    See Also
    --------
    save, savez, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.

    Notes
    -----
    - If the file contains pickle data, then whatever is stored in the
      pickle is returned.
    - If the file is a ``.npy`` file, then an array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.

    Examples
    --------
    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])

    """
    import gzip

    # Normalize the input to an open binary file object.  `_file` is the
    # saved reference to the builtin `file` type (the parameter shadows it).
    if isinstance(file, basestring):
        fid = _file(file, "rb")
    elif isinstance(file, gzip.GzipFile):
        # Wrap GzipFile so it supports the seek() used below.
        fid = seek_gzip_factory(file)
    else:
        fid = file

    # Code to distinguish from NumPy binary files and pickles.
    _ZIP_PREFIX = 'PK\x03\x04'
    N = len(format.MAGIC_PREFIX)
    # Peek at the leading bytes, then rewind so the real reader sees them.
    magic = fid.read(N)
    fid.seek(-N, 1) # back-up
    if magic.startswith(_ZIP_PREFIX):  # zip-file (assume .npz)
        return NpzFile(fid)
    elif magic == format.MAGIC_PREFIX: # .npy file
        if mmap_mode:
            # Memory-map from the original path/handle, not `fid`.
            return format.open_memmap(file, mode=mmap_mode)
        else:
            return format.read_array(fid)
    else:  # Try a pickle
        try:
            return _cload(fid)
        except:
            raise IOError, \
                "Failed to interpret file %s as a pickle" % repr(file)
def save(file, arr):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file or string
        File or filename to which the data is saved.  If the filename
        does not already have a ``.npy`` extension, it is added.
    arr : array_like
        Array data to be saved.

    See Also
    --------
    savez : Save several arrays into a ``.npz`` compressed archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see `format`.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()

    >>> x = np.arange(10)
    >>> np.save(outfile, x)

    >>> outfile.seek(0) # only necessary in this example (with tempfile)
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # A string argument is a path: ensure the .npy suffix and open it
    # ourselves; anything else is assumed to be a writable file object.
    if isinstance(file, basestring):
        if not file.endswith('.npy'):
            file = file + '.npy'
        fid = open(file, "wb")
    else:
        fid = file
    # Coerce to an array (preserving subclasses) and serialize.
    format.write_array(fid, np.asanyarray(arr))
def savez(file, *args, **kwds):
    """
    Save several arrays into a single, compressed file in ``.npz`` format.

    If keyword arguments are given, the names for variables assigned to the
    keywords are the keyword names (not the variable names in the caller).
    If arguments are passed in with no keywords, the corresponding variable
    names are arr_0, arr_1, etc.

    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        If file is a string, it names the output file.  ".npz" will be appended
        to the file name if it is not already there.
    args : Arguments
        Any function arguments other than the file name are variables to save.
        Since it is not possible for Python to know their names outside
        `savez`, they will be saved with names "arr_0", "arr_1", and so on.
        These arguments can be any expression.
    kwds : Keyword arguments
        All keyword=value pairs cause the value to be saved with the name of
        the keyword.

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain.  Each file contains one variable in ``.npy``
    format.  For a description of the ``.npy`` format, see `format`.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> outfile.seek(0) # only necessary in this example (with tempfile)
    >>> npz = np.load(outfile)
    >>> npz.files
    ['arr_1', 'arr_0']
    >>> npz['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\*\\*kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)
    >>> npz = np.load(outfile)
    >>> npz.files
    ['y', 'x']
    >>> npz['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile
    # Import deferred for startup time improvement
    import tempfile

    if isinstance(file, basestring):
        if not file.endswith('.npz'):
            file = file + '.npz'

    # Merge positional arrays into the keyword dict under generated names,
    # rejecting any collision between a generated and an explicit name.
    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            raise ValueError, "Cannot use un-named variables and keyword %s" % key
        namedict[key] = val

    zip = zipfile.ZipFile(file, mode="w")

    # Stage arrays in a temporary file on disk, before writing to zip.
    fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
    os.close(fd)
    try:
        for key, val in namedict.iteritems():
            fname = key + '.npy'
            fid = open(tmpfile, 'wb')
            try:
                format.write_array(fid, np.asanyarray(val))
                # Close before zip.write so the staged bytes are flushed;
                # clear `fid` so the finally-block does not close twice.
                fid.close()
                fid = None
                zip.write(tmpfile, arcname=fname)
            finally:
                if fid:
                    fid.close()
    finally:
        os.remove(tmpfile)

    zip.close()
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None,
            skiprows=0, usecols=None, unpack=False):
    """
    Load data from a text file.

    Each row in the text file must have the same number of values.

    Parameters
    ----------
    fname : file or str
        File or filename to read.  If the filename extension is ``.gz`` or
        ``.bz2``, the file is first decompressed.
    dtype : dtype, optional
        Data type of the resulting array.  If this is a record data-type,
        the resulting array will be 1-dimensional, and each row will be
        interpreted as an element of the array.  In this case, the number
        of columns used must match the number of fields in the data-type.
    comments : str, optional
        The character used to indicate the start of a comment.
    delimiter : str, optional
        The string used to separate values.  By default, this is any
        whitespace.
    converters : dict, optional
        A dictionary mapping column number to a function that will convert
        that column to a float.  E.g., if column 0 is a date string:
        ``converters = {0: datestr2num}``.  Converters can also be used to
        provide a default value for missing data:
        ``converters = {3: lambda s: float(s or 0)}``.
    skiprows : int, optional
        Skip the first `skiprows` lines.
    usecols : sequence, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``.  Default is False.

    Returns
    -------
    out : ndarray
        Data read from the text file.

    See Also
    --------
    load, fromstring, fromregex
    scipy.io.loadmat : reads Matlab(R) data files

    Examples
    --------
    >>> from StringIO import StringIO   # StringIO behaves like a file object
    >>> c = StringIO("0 1\\n2 3")
    >>> np.loadtxt(c)
    array([[ 0.,  1.],
           [ 2.,  3.]])

    >>> d = StringIO("M 21 72\\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ...                      'formats': ('S1', 'i4', 'f4')})
    array([('M', 21, 72.0), ('F', 35, 58.0)],
          dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])

    >>> c = StringIO("1,0,2\\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([ 1.,  3.])
    >>> y
    array([ 2.,  4.])

    """
    # Keep the user's converters separate; `converters` itself is reused
    # below for the per-column converter list.
    user_converters = converters

    if usecols is not None:
        usecols = list(usecols)

    # Track whether we opened the file ourselves (so we know to close it).
    isstring = False
    if _is_string_like(fname):
        isstring = True
        if fname.endswith('.gz'):
            import gzip
            fh = seek_gzip_factory(fname)
        elif fname.endswith('.bz2'):
            import bz2
            fh = bz2.BZ2File(fname)
        else:
            fh = file(fname)
    elif hasattr(fname, 'readline'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    X = []

    def flatten_dtype(dt):
        """Unpack a structured data-type."""
        if dt.names is None:
            # If the dtype is flattened, return.
            # If the dtype has a shape, the dtype occurs
            # in the list more than once.
            return [dt.base] * int(np.prod(dt.shape))
        else:
            # Recurse into each field of the structured dtype.
            types = []
            for field in dt.names:
                tp, bytes = dt.fields[field]
                flat_dt = flatten_dtype(tp)
                types.extend(flat_dt)
            return types

    def split_line(line):
        """Chop off comments, strip, and split at delimiter."""
        line = line.split(comments)[0].strip()
        if line:
            return line.split(delimiter)
        else:
            return []

    try:
        # Make sure we're dealing with a proper dtype
        dtype = np.dtype(dtype)
        defconv = _getconv(dtype)

        # Skip the first `skiprows` lines
        for i in xrange(skiprows):
            fh.readline()

        # Read until we find a line with some values, and use
        # it to estimate the number of columns, N.
        first_vals = None
        while not first_vals:
            first_line = fh.readline()
            if first_line == '': # EOF reached
                raise IOError('End-of-file reached before encountering data.')
            first_vals = split_line(first_line)
        N = len(usecols or first_vals)

        dtype_types = flatten_dtype(dtype)
        if len(dtype_types) > 1:
            # We're dealing with a structured array, each field of
            # the dtype matches a column
            converters = [_getconv(dt) for dt in dtype_types]
        else:
            # All fields have the same dtype
            converters = [defconv for i in xrange(N)]

        # By preference, use the converters specified by the user
        for i, conv in (user_converters or {}).iteritems():
            if usecols:
                try:
                    # Remap the user's absolute column index to its position
                    # within the selected columns.
                    i = usecols.index(i)
                except ValueError:
                    # Unused converter specified
                    continue
            converters[i] = conv

        # Parse each line, including the first
        for i, line in enumerate(itertools.chain([first_line], fh)):
            vals = split_line(line)
            if len(vals) == 0:
                continue

            if usecols:
                vals = [vals[i] for i in usecols]

            # Convert each value according to its column and store
            X.append(tuple([conv(val) for (conv, val) in zip(converters, vals)]))
    finally:
        # Only close handles we opened ourselves.
        if isstring:
            fh.close()

    if len(dtype_types) > 1:
        # We're dealing with a structured array, with a dtype such as
        # [('x', int), ('y', [('s', int), ('t', float)])]
        #
        # First, create the array using a flattened dtype:
        # [('x', int), ('s', int), ('t', float)]
        #
        # Then, view the array using the specified dtype.
        try:
            X = np.array(X, dtype=np.dtype([('', t) for t in dtype_types]))
            X = X.view(dtype)
        except TypeError:
            # In the case we have an object dtype
            X = np.array(X, dtype=dtype)
    else:
        X = np.array(X, dtype)

    # Drop singleton dimensions (e.g. a single-row file yields a 1-D array).
    X = np.squeeze(X)
    if unpack:
        return X.T
    else:
        return X
def savetxt(fname, X, fmt='%.18e', delimiter=' '):
    """
    Save an array to a text file.

    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format.  `loadtxt` understands gzipped files
        transparently.
    X : array_like
        Data to be saved to a text file.
    fmt : str or sequence of strs
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored.
    delimiter : str
        Character separating columns.

    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into a ``.npz`` compressed archive

    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):

    flags:
        ``-`` : left justify
        ``+`` : Forces to preceed result with + or -.
        ``0`` : Left pad the number with zeros instead of space (see width).
    width:
        Minimum number of characters to be printed. The value is not truncated
        if it has more characters.
    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.
    specifiers:
        ``c`` : character
        ``d`` or ``i`` : signed decimal integer
        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
        ``f`` : decimal floating point
        ``g,G`` : use the shorter of ``e,E`` or ``f``
        ``o`` : signed octal
        ``s`` : string of characters
        ``u`` : unsigned decimal integer
        ``x,X`` : unsigned hexadecimal integer

    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.

    References
    ----------
    .. [1] `Format Specification Mini-Language
           <http://docs.python.org/library/string.html#
           format-specification-mini-language>`_, Python Documentation.

    Examples
    --------
    >>> savetxt('test.out', x, delimiter=',')   # X is an array
    >>> savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
    >>> savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
    """
    # Track whether we opened the handle ourselves: we must close what we
    # open (previously the handle was leaked), but we must never close a
    # handle the caller passed in.
    own_fh = False
    if _is_string_like(fname):
        own_fh = True
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname, 'wb')
        else:
            # open() instead of the Python-2-only builtin file()
            fh = open(fname, 'w')
    elif hasattr(fname, 'seek'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    try:
        X = np.asarray(X)
        # Handle 1-dimensional arrays
        if X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1
            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.descr)
        else:
            ncol = X.shape[1]
        # `fmt` can be a string with multiple insertion points or a list of
        # formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            format = delimiter.join(fmt)
        elif type(fmt) is str:
            if fmt.count('%') == 1:
                fmt = [fmt, ]*ncol
                format = delimiter.join(fmt)
            elif fmt.count('%') != ncol:
                raise AttributeError('fmt has wrong number of %% formats. %s'
                                     % fmt)
            else:
                format = fmt
        for row in X:
            fh.write(format % tuple(row) + '\n')
    finally:
        if own_fh:
            fh.close()
import re
def fromregex(file, regexp, dtype):
    """
    Construct an array from a text file, using regular expression parsing.

    The returned array is always a structured array, and is constructed from
    all matches of the regular expression in the file. Groups in the regular
    expression are converted to fields of the structured array.

    Parameters
    ----------
    file : str or file
        File name or file object to read.
    regexp : str or regexp
        Regular expression used to parse the file.
        Groups in the regular expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array.

    Returns
    -------
    output : ndarray
        The output array, containing the part of the content of `file` that
        was matched by `regexp`. `output` is always a structured array.

    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.

    See Also
    --------
    fromstring, loadtxt

    Notes
    -----
    Dtypes for structured arrays can be specified in several forms, but all
    forms specify at least the data type and field name. For details see
    `doc.structured_arrays`.

    Examples
    --------
    >>> f = open('test.dat', 'w')
    >>> f.write("1312 foo\\n1534  bar\\n444   qux")
    >>> f.close()
    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
    >>> output = np.fromregex('test.dat', regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
    >>> output
    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
          dtype=[('num', '<i8'), ('key', '|S3')])
    >>> output['num']
    array([1312, 1534,  444], dtype=int64)
    """
    # Close the file only if we opened it here; a caller-supplied file
    # object stays open (previously a file opened here was leaked).
    own_fh = False
    if not hasattr(file, "read"):
        file = open(file, 'r')
        own_fh = True
    try:
        if not hasattr(regexp, 'match'):
            regexp = re.compile(regexp)
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)
        seq = regexp.findall(file.read())
        if seq and not isinstance(seq[0], tuple):
            # Only one group is in the regexp.
            # Create the new array as a single data-type and then
            # re-interpret as a single-field structured array.
            newdtype = np.dtype(dtype[dtype.names[0]])
            output = np.array(seq, dtype=newdtype)
            output.dtype = dtype
        else:
            output = np.array(seq, dtype=dtype)
        return output
    finally:
        if own_fh:
            file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
               skiprows=0, skip_header=0, skip_footer=0, converters=None,
               missing='', missing_values=None, filling_values=None,
               usecols=None, names=None, excludelist=None, deletechars=None,
               autostrip=False, case_sensitive=True, defaultfmt="f%i",
               unpack=None, usemask=False, loose=True, invalid_raise=True):
    """
    Load data from a text file, with missing values handled as specified.
    Each line past the first `skiprows` lines is split at the `delimiter`
    character, and characters following the `comments` character are discarded.
    Parameters
    ----------
    fname : file or str
        File or filename to read.  If the filename extension is `.gz` or
        `.bz2`, the file is first decompressed.
    dtype : dtype, optional
        Data type of the resulting array.
        If None, the dtypes will be determined by the contents of each
        column, individually.
    comments : str, optional
        The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded
    delimiter : str, int, or sequence, optional
        The string used to separate values.  By default, any consecutive
        whitespaces act as delimiter.  An integer or sequence of integers
        can also be provided as width(s) of each field.
    skip_header : int, optional
        The numbers of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The numbers of lines to skip at the end of the file
    converters : variable or None, optional
        The set of functions that convert the data of a column to a value.
        The converters can also be used to provide a default value
        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
    missing_values : variable or None, optional
        The set of strings corresponding to missing data.
    filling_values : variable or None, optional
        The set of values to be used as default when the data are missing.
    usecols : sequence or None, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
    names : {None, True, str, sequence}, optional
        If `names` is True, the field names are read from the first valid line
        after the first `skiprows` lines.
        If `names` is a sequence or a single-string of comma-separated names,
        the names will be used to define the field names in a structured dtype.
        If `names` is None, the names of the dtype fields will be used, if any.
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names are appended an underscore:
        for example, `file` would become `file_`.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    defaultfmt : str, optional
        A format used to define default field names, such as "f%i" or "f_%02i".
    autostrip : bool, optional
        Whether to automatically strip white spaces from the variables.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        If True, field names are case sensitive.
        If False or 'upper', field names are converted to upper case.
        If 'lower', field names are converted to lower case.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``
    usemask : bool, optional
        If True, return a masked array.
        If False, return a regular array.
    invalid_raise : bool, optional
        If True, an exception is raised if an inconsistency is detected in the
        number of columns.
        If False, a warning is emitted and the offending lines are skipped.
    Returns
    -------
    out : ndarray
        Data read from the text file. If `usemask` is True, this is a
        masked array.
    See Also
    --------
    numpy.loadtxt : equivalent function when no data is missing.
    Notes
    -----
    * When spaces are used as delimiters, or when no delimiter has been given
      as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`,
      there must not be any header in the file (else a ValueError
      exception is raised).
    * Individual values are not stripped of spaces by default.
      When using a custom converter, make sure the function does remove spaces.
    Examples
    ---------
    >>> from StringIO import StringIO
    >>> import numpy as np
    Comma delimited file with mixed dtype
    >>> s = StringIO("1,1.3,abcde")
    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
    ...                                ('mystring','S5')], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
    Using dtype = None
    >>> s.seek(0) # needed for StringIO example only
    >>> data = np.genfromtxt(s, dtype=None,
    ...                      names = ['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
    Specifying dtype and names
    >>> s.seek(0)
    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
    ...                      names=['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
    An example with fixed-width columns
    >>> s = StringIO("11.3abcde")
    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
    ...                      delimiter=[1,3,5])
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
    """
    # NOTE(review): Python 2 only implementation -- relies on `xrange`,
    # `basestring`, `itertools.imap`, and list-returning `map`/`zip`.
    # Porting to Python 3 would need those call sites revisited.
    #
    if usemask:
        from numpy.ma import MaskedArray, make_mask_descr
    # Check the input dictionary of converters
    user_converters = converters or {}
    if not isinstance(user_converters, dict):
        errmsg = "The input argument 'converter' should be a valid dictionary "\
            "(got '%s' instead)"
        raise TypeError(errmsg % type(user_converters))
    # Initialize the filehandle, the LineSplitter and the NameValidator
    if isinstance(fname, basestring):
        fhd = np.lib._datasource.open(fname)
    elif not hasattr(fname, 'read'):
        raise TypeError("The input should be a string or a filehandle. "\
                        "(got %s instead)" % type(fname))
    else:
        fhd = fname
    split_line = LineSplitter(delimiter=delimiter, comments=comments,
                              autostrip=autostrip)._handyman
    validate_names = NameValidator(excludelist=excludelist,
                                   deletechars=deletechars,
                                   case_sensitive=case_sensitive)
    # Get the first valid lines after the first skiprows ones ..
    if skiprows:
        warnings.warn("The use of `skiprows` is deprecated.\n"\
                      "Please use `skip_header` instead.",
                      DeprecationWarning)
        skip_header = skiprows
    # Skip the first `skip_header` rows
    for i in xrange(skip_header):
        fhd.readline()
    # Keep on until we find the first valid values
    first_values = None
    while not first_values:
        first_line = fhd.readline()
        if first_line == '':
            raise IOError('End-of-file reached before encountering data.')
        if names is True:
            # A header line may itself be commented out; keep the text
            # after the comment character so the names can be read.
            if comments in first_line:
                first_line = ''.join(first_line.split(comments)[1])
        first_values = split_line(first_line)
    # Should we take the first values as names ?
    if names is True:
        fval = first_values[0].strip()
        if fval in comments:
            del first_values[0]
    # Check the columns to use
    if usecols is not None:
        try:
            usecols = [_.strip() for _ in usecols.split(",")]
        except AttributeError:
            try:
                usecols = list(usecols)
            except TypeError:
                usecols = [usecols, ]
    nbcols = len(usecols or first_values)
    # Check the names and overwrite the dtype.names if needed
    if names is True:
        names = validate_names([_.strip() for _ in first_values])
        # The header line was consumed as names: blank it out so the main
        # parsing loop below does not treat it as a data row.
        first_line = ''
    elif _is_string_like(names):
        names = validate_names([_.strip() for _ in names.split(',')])
    elif names:
        names = validate_names(names)
    # Get the dtype
    if dtype is not None:
        dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
        names = dtype.names
    # Make sure the names is a list (for 2.5)
    if names is not None:
        names = list(names)
    if usecols:
        for (i, current) in enumerate(usecols):
            # if usecols is a list of names, convert to a list of indices
            if _is_string_like(current):
                usecols[i] = names.index(current)
            elif current < 0:
                usecols[i] = current + len(first_values)
        # If the dtype is not None, make sure we update it
        if (dtype is not None) and (len(dtype) > nbcols):
            descr = dtype.descr
            dtype = np.dtype([descr[_] for _ in usecols])
            names = list(dtype.names)
        # If `names` is not None, update the names
        elif (names is not None) and (len(names) > nbcols):
            names = [names[_] for _ in usecols]
    # Process the missing values ...............................
    # Rename missing_values for convenience
    user_missing_values = missing_values or ()
    # Define the list of missing_values (one column: one list)
    missing_values = [list(['']) for _ in range(nbcols)]
    # We have a dictionary: process it field by field
    if isinstance(user_missing_values, dict):
        # Loop on the items
        for (key, val) in user_missing_values.items():
            # Is the key a string ?
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped, then
                    continue
            # Redefine the key as needed if it's a column number
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Transform the value as a list of string
            if isinstance(val, (list, tuple)):
                val = [str(_) for _ in val]
            else:
                val = [str(val), ]
            # Add the value(s) to the current list of missing
            if key is None:
                # None acts as default
                for miss in missing_values:
                    miss.extend(val)
            else:
                missing_values[key].extend(val)
    # We have a sequence : each item matches a column
    elif isinstance(user_missing_values, (list, tuple)):
        for (value, entry) in zip(user_missing_values, missing_values):
            value = str(value)
            if value not in entry:
                entry.append(value)
    # We have a string : apply it to all entries
    elif isinstance(user_missing_values, basestring):
        user_value = user_missing_values.split(",")
        for entry in missing_values:
            entry.extend(user_value)
    # We have something else: apply it to all entries
    else:
        for entry in missing_values:
            entry.extend([str(user_missing_values)])
    # Process the deprecated `missing`
    if missing != '':
        warnings.warn("The use of `missing` is deprecated.\n"\
                      "Please use `missing_values` instead.",
                      DeprecationWarning)
        values = [str(_) for _ in missing.split(",")]
        for entry in missing_values:
            entry.extend(values)
    # Process the filling_values ...............................
    # Rename the input for convenience
    user_filling_values = filling_values or []
    # Define the default
    filling_values = [None] * nbcols
    # We have a dictionary : update each entry individually
    if isinstance(user_filling_values, dict):
        for (key, val) in user_filling_values.items():
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped, then
                    continue
            # Redefine the key if it's a column number and usecols is defined
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Add the value to the list
            filling_values[key] = val
    # We have a sequence : update on a one-to-one basis
    elif isinstance(user_filling_values, (list, tuple)):
        n = len(user_filling_values)
        if (n <= nbcols):
            filling_values[:n] = user_filling_values
        else:
            filling_values = user_filling_values[:nbcols]
    # We have something else : use it for all entries
    else:
        filling_values = [user_filling_values] * nbcols
    # Initialize the converters ................................
    if dtype is None:
        # Note: we can't use a [...]*nbcols, as we would have 3 times the same
        # ... converter, instead of 3 different converters.
        converters = [StringConverter(None, missing_values=miss, default=fill)
                      for (miss, fill) in zip(missing_values, filling_values)]
    else:
        dtype_flat = flatten_dtype(dtype, flatten_base=True)
        # Initialize the converters
        if len(dtype_flat) > 1:
            # Flexible type : get a converter from each dtype
            zipit = zip(dtype_flat, missing_values, filling_values)
            converters = [StringConverter(dt, locked=True,
                                          missing_values=miss, default=fill)
                          for (dt, miss, fill) in zipit]
        else:
            # Set to a default converter (but w/ different missing values)
            zipit = zip(missing_values, filling_values)
            converters = [StringConverter(dtype, locked=True,
                                          missing_values=miss, default=fill)
                          for (miss, fill) in zipit]
    # Update the converters to use the user-defined ones
    uc_update = []
    for (i, conv) in user_converters.items():
        # If the converter is specified by column names, use the index instead
        if _is_string_like(i):
            try:
                i = names.index(i)
            except ValueError:
                continue
        elif usecols:
            try:
                i = usecols.index(i)
            except ValueError:
                # Unused converter specified
                continue
        converters[i].update(conv, locked=True,
                             default=filling_values[i],
                             missing_values=missing_values[i],)
        uc_update.append((i, conv))
    # Make sure we have the corrected keys in user_converters...
    user_converters.update(uc_update)
    miss_chars = [_.missing_values for _ in converters]
    # Initialize the output lists ...
    # ... rows
    rows = []
    append_to_rows = rows.append
    # ... masks
    if usemask:
        masks = []
        append_to_masks = masks.append
    # ... invalid
    invalid = []
    append_to_invalid = invalid.append
    # Parse each line
    # (first_line may be '' here if it was consumed as a header above;
    # split_line('') returns [] and the row is skipped as empty.)
    for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
        values = split_line(line)
        nbvalues = len(values)
        # Skip an empty line
        if nbvalues == 0:
            continue
        # Select only the columns we need
        if usecols:
            try:
                values = [values[_] for _ in usecols]
            except IndexError:
                append_to_invalid((i, nbvalues))
                continue
        elif nbvalues != nbcols:
            append_to_invalid((i, nbvalues))
            continue
        # Store the values
        append_to_rows(tuple(values))
        if usemask:
            append_to_masks(tuple([v.strip() in m
                                   for (v, m) in zip(values, missing_values)]))
    # Strip the last skip_footer data
    if skip_footer > 0:
        rows = rows[:-skip_footer]
        if usemask:
            masks = masks[:-skip_footer]
    # Upgrade the converters (if needed)
    if dtype is None:
        for (i, converter) in enumerate(converters):
            current_column = map(itemgetter(i), rows)
            try:
                converter.iterupgrade(current_column)
            except ConverterLockError:
                errmsg = "Converter #%i is locked and cannot be upgraded: " % i
                current_column = itertools.imap(itemgetter(i), rows)
                for (j, value) in enumerate(current_column):
                    try:
                        converter.upgrade(value)
                    except (ConverterError, ValueError):
                        errmsg += "(occurred line #%i for value '%s')"
                        errmsg %= (j + 1 + skip_header, value)
                        raise ConverterError(errmsg)
    # Check that we don't have invalid values
    if len(invalid) > 0:
        nbrows = len(rows)
        # Construct the error message
        template = "    Line #%%i (got %%i columns instead of %i)" % nbcols
        if skip_footer > 0:
            nbrows -= skip_footer
            errmsg = [template % (i + skip_header + 1, nb)
                      for (i, nb) in invalid if i < nbrows]
        else:
            errmsg = [template % (i + skip_header + 1, nb)
                      for (i, nb) in invalid]
        if len(errmsg):
            errmsg.insert(0, "Some errors were detected !")
            errmsg = "\n".join(errmsg)
            # Raise an exception ?
            if invalid_raise:
                raise ValueError(errmsg)
            # Issue a warning ?
            else:
                warnings.warn(errmsg, ConversionWarning)
    # Convert each value according to the converter:
    # We want to modify the list in place to avoid creating a new one...
    #    if loose:
    #        conversionfuncs = [conv._loose_call for conv in converters]
    #    else:
    #        conversionfuncs = [conv._strict_call for conv in converters]
    #    for (i, vals) in enumerate(rows):
    #        rows[i] = tuple([convert(val)
    #                         for (convert, val) in zip(conversionfuncs, vals)])
    # NOTE(review): the zip(*(map(...))) below transposes column-wise
    # converted data back to rows; it depends on Python 2's list-returning
    # map/zip semantics.
    if loose:
        rows = zip(*(map(converter._loose_call, map(itemgetter(i), rows))
                     for (i, converter) in enumerate(converters)))
    else:
        rows = zip(*(map(converter._strict_call, map(itemgetter(i), rows))
                     for (i, converter) in enumerate(converters)))
    # Reset the dtype
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters
        column_types = [conv.type for conv in converters]
        # Find the columns with strings...
        strcolidx = [i for (i, v) in enumerate(column_types)
                     if v in (type('S'), np.string_)]
        # ... and take the largest number of chars.
        for i in strcolidx:
            column_types[i] = "|S%i" % max(len(row[i]) for row in data)
        #
        # NOTE(review): np.bool / np.string_ are aliases removed in modern
        # NumPy (1.24+/2.0); this code predates those removals.
        if names is None:
            # If the dtype is uniform, don't define names, else use ''
            base = set([c.type for c in converters if c._checked])
            if len(base) == 1:
                (ddtype, mdtype) = (list(base)[0], np.bool)
            else:
                ddtype = [(defaultfmt % i, dt)
                          for (i, dt) in enumerate(column_types)]
                if usemask:
                    mdtype = [(defaultfmt % i, np.bool)
                              for (i, dt) in enumerate(column_types)]
        else:
            ddtype = zip(names, column_types)
            mdtype = zip(names, [np.bool] * len(column_types))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed
        if names and dtype.names:
            dtype.names = names
        # Case 1. We have a structured type
        if len(dtype_flat) > 1:
            # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
            # First, create the array using a flattened dtype:
            # [('a', int), ('b1', int), ('b2', float)]
            # Then, view the array using the specified dtype.
            if 'O' in (_.char for _ in dtype_flat):
                if has_nested_fields(dtype):
                    errmsg = "Nested fields involving objects "\
                             "are not supported..."
                    raise NotImplementedError(errmsg)
                else:
                    output = np.array(data, dtype=dtype)
            else:
                rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
                output = rows.view(dtype)
            # Now, process the rowmasks the same way
            if usemask:
                rowmasks = np.array(masks,
                                    dtype=np.dtype([('', np.bool)
                                                    for t in dtype_flat]))
                # Construct the new dtype
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case #2. We have a basic dtype
        else:
            # We used some user-defined converters
            if user_converters:
                ishomogeneous = True
                descr = []
                for (i, ttype) in enumerate([conv.type for conv in converters]):
                    # Keep the dtype of the current converter
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if ttype == np.string_:
                            ttype = "|S%i" % max(len(row[i]) for row in data)
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype ?
                if not ishomogeneous:
                    # We have more than one field
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            #
            output = np.array(data, dtype)
            if usemask:
                if dtype.names:
                    mdtype = [(_, np.bool) for _ in dtype.names]
                else:
                    mdtype = np.bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed
    if usemask and output.dtype.names:
        for (name, conv) in zip(names or (), converters):
            missing_values = [conv(_) for _ in conv.missing_values if _ != '']
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask
    if unpack:
        return output.squeeze().T
    return output.squeeze()
def ndfromtxt(fname, **kwargs):
    """
    Read ASCII data from a file into a plain (unmasked) ndarray.

    All keyword arguments are forwarded to `genfromtxt`, except that
    `usemask` is forced to False.  See the `genfromtxt` docstring for the
    complete list of options.

    See Also
    --------
    numpy.genfromtxt : generic function.
    """
    kwargs.update(usemask=False)
    return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
    """
    Read ASCII data from a text file into a masked array.

    All keyword arguments are forwarded to `genfromtxt`, except that
    `usemask` is forced to True.  See the `genfromtxt` docstring for the
    complete list of options.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.
    """
    kwargs.update(usemask=True)
    return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
    """
    Load ASCII data from a file and return it in a record array.

    If ``usemask=False`` a standard `recarray` is returned,
    if ``usemask=True`` a MaskedRecords array is returned.
    All other keyword arguments are handed through to `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the output
    array will be determined from the data.
    """
    # Default to dtype=None (auto-detection) unless the caller chose one.
    kwargs.setdefault('dtype', None)
    wants_mask = kwargs.get('usemask', False)
    data = genfromtxt(fname, **kwargs)
    if wants_mask:
        from numpy.ma.mrecords import MaskedRecords
        return data.view(MaskedRecords)
    return data.view(np.recarray)
def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``,
    see `ma.mrecords.MaskedRecords`).

    For a complete description of all the input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.
    """
    case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
    names = kwargs.get('names', True)
    if names is None:
        names = True
    # BUG FIX: the dtype default was previously read from the nonexistent
    # 'update' kwarg instead of 'dtype', so an explicit ``dtype=...``
    # argument was silently discarded and auto-detection always ran.
    kwargs.update(dtype=kwargs.get('dtype', None),
                  delimiter=kwargs.get('delimiter', ",") or ",",
                  names=names,
                  case_sensitive=case_sensitive)
    usemask = kwargs.get("usemask", False)
    output = genfromtxt(fname, **kwargs)
    if usemask:
        from numpy.ma.mrecords import MaskedRecords
        output = output.view(MaskedRecords)
    else:
        output = output.view(np.recarray)
    return output
| {
"repo_name": "NirBenTalLab/proorigami-cde-package",
"path": "cde-root/usr/lib64/python2.4/site-packages/numpy/lib/io.py",
"copies": "2",
"size": "53226",
"license": "mit",
"hash": -2120336389264801800,
"line_mean": 33.6072821847,
"line_max": 82,
"alpha_frac": 0.5619997745,
"autogenerated": false,
"ratio": 4.148881440486398,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5710881214986399,
"avg_score": null,
"num_lines": null
} |
__all__ = ["scale_samples", "read_param_file"]
from collections import OrderedDict
import csv
from warnings import warn
import numpy as np
def scale_samples(params, bounds):
    """Rescale samples from the unit interval to arbitrary bounds, in place.

    Arguments:
        params - numpy array of dimensions num_params-by-N,
                 where N is the number of samples; modified in place
        bounds - list of lists of dimensions num_params-by-2
    """
    # Reject any parameter whose upper bound does not exceed its lower bound.
    bounds_arr = np.asarray(bounds)
    lower = bounds_arr[:, 0]
    upper = bounds_arr[:, 1]
    if np.any(lower >= upper):
        raise ValueError("Bounds are not legal")
    # In-place equivalent of: params = params * (upper - lower) + lower,
    # using the `out=` argument of the ufuncs to avoid a new allocation.
    np.multiply(params, upper - lower, out=params)
    np.add(params, lower, out=params)
def unscale_samples(params, bounds):
    """Rescale samples from arbitrary bounds back to the [0, 1] range, in place.

    Arguments:
        params - numpy array of dimensions num_params-by-N,
                 where N is the number of samples; modified in place
        bounds - list of lists of dimensions num_params-by-2
    """
    # Reject any parameter whose upper bound does not exceed its lower bound.
    bounds_arr = np.asarray(bounds)
    lower = bounds_arr[:, 0]
    upper = bounds_arr[:, 1]
    if np.any(lower >= upper):
        raise ValueError("Bounds are not legal")
    # In-place equivalent of: params = (params - lower) / (upper - lower),
    # using the `out=` argument of the ufuncs to avoid a new allocation.
    np.subtract(params, lower, out=params)
    np.divide(params, upper - lower, out=params)
def read_param_file(filename, delimiter=None):
    '''
    Reads a parameter file of format:
        Param1,0,1,Group1
        Param2,0,1,Group2
        Param3,0,1,Group3
    And returns a dictionary containing:
        - names - the names of the parameters
        - bounds - a list of lists of lower and upper bounds
        - num_vars - a scalar indicating the number of variables
                     (the length of names)
        - groups - a tuple containing i) a group matrix assigning parameters
                   to groups and ii) a list of unique group names, or None
                   when every parameter forms its own group
    Lines whose name column starts with '#' are treated as comments and
    skipped.  If the fourth column is absent, the parameter name is used
    as its group name.
    '''
    names = []
    bounds = []
    group_list = []
    num_vars = 0
    fieldnames = ['name', 'lower_bound', 'upper_bound', 'group']
    # BUG FIX: mode 'rU' was deprecated since Python 3.4 and removed in
    # 3.11 (open() raises ValueError); plain 'r' already performs
    # universal-newline translation on Python 3.
    with open(filename, 'r') as csvfile:
        # Sniff the dialect from a sample so both comma- and
        # whitespace-delimited files are accepted.
        dialect = csv.Sniffer().sniff(csvfile.read(1024), delimiters=delimiter)
        csvfile.seek(0)
        reader = csv.DictReader(
            csvfile, fieldnames=fieldnames, dialect=dialect)
        for row in reader:
            if row['name'].strip().startswith('#'):
                pass
            else:
                num_vars += 1
                names.append(row['name'])
                bounds.append(
                    [float(row['lower_bound']), float(row['upper_bound'])])
                # If the fourth column does not contain a group name, use
                # the parameter name
                if row['group'] is None:
                    group_list.append(row['name'])
                else:
                    group_list.append(row['group'])
    group_matrix, group_names = compute_groups_from_parameter_file(
        group_list, num_vars)
    # An identity group matrix means every parameter is its own group,
    # which callers treat the same as "no groups".
    if np.all(group_matrix == np.eye(num_vars)):
        group_tuple = None
    else:
        group_tuple = (group_matrix, group_names)
    return {'names': names, 'bounds': bounds, 'num_vars': num_vars,
            'groups': group_tuple}
def compute_groups_from_parameter_file(group_list, num_vars):
    '''
    Computes a k-by-g matrix which notes factor membership of groups
    where:
        k is the number of variables (factors)
        g is the number of groups
    Also returns a g-length list of unique group_names whose positions
    correspond to the order of groups in the k-by-g matrix

    Arguments:
        group_list - sequence of k group names, one per variable
        num_vars   - the number of variables, k
    '''
    # Get a unique set of the group names, preserving first-seen order
    unique_group_names = list(OrderedDict.fromkeys(group_list))
    number_of_groups = len(unique_group_names)
    # Map each group name to its column index in the membership matrix
    indices = dict([(x, i) for (i, x) in enumerate(unique_group_names)])
    # BUG FIX: dtype was np.int, an alias of the builtin removed in
    # NumPy 1.24; use the builtin int directly.
    output = np.zeros((num_vars, number_of_groups), dtype=int)
    for parameter_row, group_membership in enumerate(group_list):
        group_index = indices[group_membership]
        output[parameter_row, group_index] = 1
    # NOTE(review): np.matrix is kept for backward compatibility with
    # callers that compare against np.eye via matrix semantics.
    return np.matrix(output), unique_group_names
def requires_gurobipy(_has_gurobi):
    '''
    Decorator factory which takes a boolean _has_gurobi as an argument.
    Use to decorate any functions which require gurobi.
    If gurobi is not present (``_has_gurobi`` is False), the decorated
    function emits an ImportWarning and returns None instead of running.
    Note that all runtime errors should be avoided in the working code,
    using brute force options as preference.
    '''
    # Local import keeps this module importable without touching the
    # file-level import block.
    from functools import wraps

    def _outer_wrapper(wrapped_function):
        # functools.wraps preserves the wrapped function's name/docstring,
        # which the original decorator clobbered.
        @wraps(wrapped_function)
        def _wrapper(*args, **kwargs):
            if _has_gurobi:
                result = wrapped_function(*args, **kwargs)
            else:
                warn("Gurobi not available", ImportWarning)
                result = None
            return result
        return _wrapper
    return _outer_wrapper
| {
"repo_name": "t2abdulg/SALib",
"path": "SALib/util/__init__.py",
"copies": "2",
"size": "5466",
"license": "mit",
"hash": -1327670954280167700,
"line_mean": 34.0384615385,
"line_max": 79,
"alpha_frac": 0.6042810099,
"autogenerated": false,
"ratio": 4.112866817155756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015916898895622302,
"num_lines": 156
} |
__all__ = ["ScanDirectoryNode"]
from panda3d.core import VirtualFileSystem, VirtualFileMountSystem, Filename, TiXmlDocument
vfs = VirtualFileSystem.getGlobalPtr()
class ScanDirectoryNode:
    """ This class is used to scan a list of files on disk. """

    def __init__(self, pathname, ignoreUsageXml = False):
        # Directory this node represents.
        self.pathname = pathname
        # Regular (and "wacky") files found directly in this directory.
        self.filenames = []
        # Total size in bytes of the regular files in self.filenames.
        self.fileSize = 0
        # Child ScanDirectoryNode objects, one per subdirectory.
        self.nested = []
        # Combined total size of all nested subdirectories.
        self.nestedSize = 0

        xusage = None
        if not ignoreUsageXml:
            # Look for a usage.xml file in this directory. If we find
            # one, we read it for the file size and then stop here, as
            # an optimization.
            usageFilename = Filename(pathname, 'usage.xml')
            doc = TiXmlDocument(usageFilename.toOsSpecific())
            if doc.LoadFile():
                xusage = doc.FirstChildElement('usage')
                if xusage:
                    diskSpace = xusage.Attribute('disk_space')
                    try:
                        diskSpace = int(diskSpace or '')
                    except ValueError:
                        # Attribute missing or malformed; fall through to
                        # a real directory scan below.
                        diskSpace = None
                    if diskSpace is not None:
                        self.fileSize = diskSpace
                        return

        # No usable cached size: scan the directory for real.
        files = vfs.scanDirectory(self.pathname)
        if files is None:
            files = []
        for vfile in files:
            if hasattr(vfile, 'getMount'):
                if not isinstance(vfile.getMount(), VirtualFileMountSystem):
                    # Not a real file; ignore it.
                    continue
            if vfile.isDirectory():
                # A nested directory.
                subdir = ScanDirectoryNode(vfile.getFilename(), ignoreUsageXml = ignoreUsageXml)
                self.nested.append(subdir)
                self.nestedSize += subdir.getTotalSize()
            elif vfile.isRegularFile():
                # A nested file.
                self.filenames.append(vfile.getFilename())
                self.fileSize += vfile.getFileSize()
            else:
                # Some other wacky file thing.
                self.filenames.append(vfile.getFilename())

        if xusage:
            # Now update the usage.xml file with the newly-determined
            # disk space.  Write to a temporary file first so the rename
            # is atomic-ish; only replace usage.xml if the save succeeded.
            xusage.SetAttribute('disk_space', str(self.getTotalSize()))
            tfile = Filename.temporary(str(pathname), '.xml')
            if doc.SaveFile(tfile.toOsSpecific()):
                tfile.renameTo(usageFilename)

    def getTotalSize(self):
        """ Returns the size in bytes of this directory and everything below it. """
        return self.nestedSize + self.fileSize

    def extractSubdir(self, pathname):
        """ Finds the ScanDirectoryNode within this node that
        corresponds to the indicated full pathname.  If it is found,
        removes it from its parent, and returns it.  If it is not
        found, returns None. """

        # We could be a little smarter here, but why bother.  Just
        # recursively search all children.
        for subdir in self.nested:
            if subdir.pathname == pathname:
                self.nested.remove(subdir)
                self.nestedSize -= subdir.getTotalSize()
                return subdir
            result = subdir.extractSubdir(pathname)
            if result:
                # Found it deeper down; our accumulated size shrinks too.
                self.nestedSize -= result.getTotalSize()
                if subdir.getTotalSize() == 0:
                    # No other files in the subdirectory that contains
                    # this package; remove it too.
                    self.nested.remove(subdir)
                return result
        return None
| {
"repo_name": "tobspr/panda3d",
"path": "direct/src/p3d/ScanDirectoryNode.py",
"copies": "19",
"size": "3592",
"license": "bsd-3-clause",
"hash": 3610360276469740000,
"line_mean": 36.8105263158,
"line_max": 96,
"alpha_frac": 0.5534521158,
"autogenerated": false,
"ratio": 4.757615894039735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Schema']
import os
import sys
import yaml
from collections import OrderedDict
from .column import Column
class Schema(object):
    """
    Table schema persisted as a YAML file (schema.yaml) inside the
    table's directory.

    On first use the schema is built from the supplied ``type_fields``
    mapping and written to disk; on later uses (no ``type_fields``
    given) it is loaded back from disk.
    """

    def __init__(self, table, type_fields=None):
        # Owning table object; only used to locate the schema file path.
        self.table = table

        # schema path
        schema_path = self.get_path()

        # type_fields
        if type_fields and not os.path.exists(schema_path):
            # First use: normalize column definitions to plain dicts so
            # they serialize cleanly, then persist them to schema.yaml.
            _type_fields = {}

            for c, t in type_fields.items():
                if c == 'primary_key':
                    _type_fields[c] = t
                else:
                    _type_fields[c] = dict(t)

            # save schema
            with open(schema_path, 'wb') as f:
                s = yaml.dump(_type_fields)
                f.write(s)
        elif not type_fields and os.path.exists(schema_path):
            # load schema
            # NOTE(review): yaml.load without an explicit Loader can
            # construct arbitrary objects; safe only while schema.yaml is
            # trusted local data -- consider yaml.safe_load.
            with open(schema_path, 'rb') as f:
                _type_fields = yaml.load(f)

            # sort type_fields alphabetically by column name, rebuilding
            # each entry as a Column instance
            type_fields = OrderedDict(
                (c, Column(**t))
                for c, t in sorted(_type_fields.items(), key=lambda n: n[0])
                if c != 'primary_key'
            )

            # add primary_key at the end of dict
            type_fields['primary_key'] = _type_fields['primary_key']
        elif type_fields and os.path.exists(schema_path):
            # FIXME: compare given type_fields with schema's type_fields
            pass
        else:
            # Neither in-memory definitions nor an on-disk schema exist.
            raise Exception('')

        self.type_fields = type_fields

    def __getitem__(self, key):
        # Dict-style access: schema['column']
        return self.type_fields[key]

    def __getattr__(self, attr):
        # Attribute-style access: schema.column
        return self.type_fields[attr]

    def __contains__(self, n):
        return n in self.type_fields

    def __iter__(self):
        # Iterate (name, Column) pairs, skipping the primary_key entry.
        for k, v in self.type_fields.items():
            if k == 'primary_key':
                continue
            yield k, v

    def get_path(self):
        """Return the full path of this table's schema.yaml file."""
        return os.path.join(self.table.get_path(), 'schema.yaml')
| {
"repo_name": "yadb/yadb",
"path": "backup/store/schema.py",
"copies": "1",
"size": "1998",
"license": "mit",
"hash": -2057294538741646000,
"line_mean": 27.1408450704,
"line_max": 76,
"alpha_frac": 0.5055055055,
"autogenerated": false,
"ratio": 4.044534412955466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0032885737111089225,
"num_lines": 71
} |
# All Schevo schema modules must have these lines.
from schevo.schema import *
schevo.schema.prep(locals())
class SchevoIcon(E.Entity):
    """Named icon image used internally by Schevo; hidden from the UI."""

    _hidden = True

    name = f.string()
    data = f.image()

    _key(name)
class Gender(E.Entity):
    """Gender of a person."""

    code = f.string()
    name = f.string()

    @f.integer(label=u'Person Count')
    def count(self):
        # Calculated field: number of Person entities referencing this gender.
        return self.s.count('Person', 'gender')

    # Both the short code and the display name must be unique.
    _key(code)
    _key(name)

    _initial = [
        (u'F', u'Female'),
        (u'M', u'Male'),
        (u'U', u'Unknown'),
    ]
class Item(E.Entity):
    """Something that must be done."""

    # New items start out not done; topic/person/notes are optional.
    done = f.boolean(default=False)
    name = f.string()
    topic = f.entity('Topic', required=False)
    priority = f.entity('Priority')
    person = f.entity('Person', required=False)
    notes = f.string(multiline=True, required=False)
class Person(E.Entity):
    """Individual human being."""

    _plural = u'People'

    name = f.string()
    gender = f.entity('Gender')

    _key(name)

    # Sample data: the gender tuples reference Gender key values ('F'/'M').
    _sample = [
        ('Jane Doe', ('F',)),
        ('John Doe', ('M',)),
    ]
class Priority(E.Entity):
    """Time-criticalness of a todo item."""

    _plural = u'Priorities'

    code = f.integer()
    name = f.string()

    @f.integer(label=u'# Open Items')
    def open(self):
        # Calculated field: count of not-yet-done items at this priority.
        return len([item for item in self.m.items() if not item.done])

    @f.integer(label=u'# Done Items')
    def done(self):
        # Calculated field: count of completed items at this priority.
        return len([item for item in self.m.items() if item.done])

    _key(code)
    _key(name)

    _sample = [
        (1, 'Top'),
        (2, 'Mid'),
        (3, 'Low'),
    ]

    def __str__(self):
        # Display as "<code> <name>", e.g. "1 Top".
        return '%s %s' % (self.code, self.name)
class Topic(E.Entity):
    """Subject area for todo items."""

    name = f.string()

    _sample = [
        ('Home', ),
        ('Work', ),
    ]
| {
"repo_name": "Schevo/schevo",
"path": "schevo/example/todo/schema/todo_001.py",
"copies": "1",
"size": "1861",
"license": "mit",
"hash": -217341552666518080,
"line_mean": 17.61,
"line_max": 70,
"alpha_frac": 0.5325094035,
"autogenerated": false,
"ratio": 3.2764084507042255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43089178542042256,
"avg_score": null,
"num_lines": null
} |
# All scorer.py files must contain the function score(), which takes as input the following items: an instance of one of
# the models of a modelclass.py file, a feature set, the corresponding correct labels of the feature set, and the set of
# "hyperparameters" of the scoring function. It then returns a dictionary containing the model's scores for various
# performance metrics e.g. accuracy, precision, recall, etc.
# It must also include the function scorer_name(), returning the name of the scoring method used.
from sklearn.cross_validation import KFold
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, precision_score, recall_score
import numpy as np
# NOTES ABOUT THIS PARTICULAR SCORER
# This scorer randomly chooses N-fold splits and returns the averaged performance metrics of all N folds
def score(model, feature_set, labels, N=3, shuffle=False, calc_acc=True, calc_prc=True, calc_rec=True, calc_f1=True, calc_cm=True):
    """
    Evaluate ``model`` with N-fold cross validation over ``feature_set``
    and ``labels`` and return a dict of performance metrics.

    The calc_* flags select which metrics are computed: accuracy,
    precision, recall and f1 are averaged over the folds; the confusion
    matrix is summed over the folds.
    """
    assert len(labels) >= N
    folds = KFold(len(labels), n_folds=N, shuffle=shuffle)

    per_fold = {"accuracy": [], "precision": [], "recall": [], "f1": []}
    confusion_matrices = []

    for train_idx, test_idx in folds:
        X_train = feature_set[train_idx]
        X_test = feature_set[test_idx]
        y_train = labels[train_idx]
        y_test = labels[test_idx]

        model.fit(X_train, y_train)
        predicted = model.predict(X_test)

        if calc_acc:
            per_fold["accuracy"].append(accuracy_score(y_test, predicted))
        if calc_prc:
            per_fold["precision"].append(precision_score(y_test, predicted, pos_label=None, average='weighted'))
        if calc_rec:
            per_fold["recall"].append(recall_score(y_test, predicted, pos_label=None, average='weighted'))
        if calc_f1:
            per_fold["f1"].append(f1_score(y_test, predicted, pos_label=None, average='weighted'))
        if calc_cm:
            confusion_matrices.append(confusion_matrix(y_test, predicted))

    metrics = {}
    for enabled, key in ((calc_acc, "accuracy"), (calc_prc, "precision"),
                         (calc_rec, "recall"), (calc_f1, "f1")):
        if enabled:
            metrics[key] = np.mean(per_fold[key])
    if calc_cm:
        # Element-wise sum of the per-fold confusion matrices.
        total = confusion_matrices[0]
        for cm in confusion_matrices[1:]:
            total = total + cm
        metrics["confusion_matrix"] = total
    return metrics
def scorer_name():
    """Return the display name of this scoring strategy."""
    name = "N-fold Cross Validation"
    return name
| {
"repo_name": "reiinakano/ensemble_helper",
"path": "src/enScorers/NFoldCV/scorer.py",
"copies": "1",
"size": "2392",
"license": "mit",
"hash": -3140590475851311000,
"line_mean": 45,
"line_max": 131,
"alpha_frac": 0.6755852843,
"autogenerated": false,
"ratio": 3.697063369397218,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4872648653697218,
"avg_score": null,
"num_lines": null
} |
__all__ = ["SCPI"]
import socket
import time
from utility.log import VLOG
from utility.status import *
class SCPI(object):
    """
    Minimal SCPI-over-TCP client for a network analyzer.

    NOTE(review): the trace/channel arguments (``n``) are concatenated
    directly into command strings, so callers appear to pass them as
    strings -- confirm against call sites before changing.
    """

    def __init__(self, host_ip):
        # Connects immediately; port 1024 is the instrument's SCPI port.
        self.host_ip = host_ip
        self.port = 1024
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client.connect((host_ip, self.port))

    def Wait(self):
        # *WAI: make the instrument finish pending operations first.
        self.client.send("*WAI\n")

    def RST(self):
        # *RST: reset the instrument to its default state.
        self.client.send("*RST\n")

    def CLS(self):
        # *CLS: clear the instrument's status/error registers.
        self.client.send("*CLS\n")

    # select trace n in window1
    def ActivateTrace(self, n):
        self.client.send("DISPlay:WINDow1:TRACe" + n + ":SElect\n")

    # delete trace n in window1
    def ClearTrace(self, n):
        self.client.send("DISPlay:WINDow1:TRACe" + n + ":DElete\n")

    def CreateMeasureVar(self, method, n):
        # e.g S11 ---> MeasureS11
        prefix_var = "Measure" + method;
        self.client.send("CALCulate" + n + ":PARameter:DEFine '" + \
                         prefix_var + "'," + method + "\n")

    def CreateTraceAssotiateWithVar(self, method, n):
        # Feed the trace from the measurement variable created above.
        prefix_var = "Measure" + method;
        self.client.send("DISPlay:WINDow1:TRACe" + n + ":FEED '" + \
                         prefix_var + "'\n")

    def SetRange(self, start, stop, n):
        # set frequency range
        self.client.send("SENSe" + n + ":FREQuency:START " + start + "\n")
        self.client.send("SENSe" + n + ":FREQuency:STOP " + stop + "\n")

    def CreateMark(self, n):
        # Enable marker n on channel n.
        self.client.send("CALC" + n + ":MARK" + n + ":STAT ON\n")

    def SetMarkFreq(self, n, Freq):
        # set start frequency
        self.client.send("CALC" + n + ":MARK" + n + ":X " + Freq + "\n")

    def MarkerFormat(self, n):
        #self.client.send("CALC" + n + ":MARK" + n + ":FORMat LOGPhase\n")
        self.client.send("CALC" + n + ":MARK" + n + ":FORMat MLOG\n")
        #self.client.send("CALC" + n + ":MARK" + n + ":FORMat \n")
        pass

    def GetMarkerX(self, n):
        # Query the marker's stimulus value (frequency); raw reply string.
        self.client.send("CALC" + n + ":MARK" + n + ":X?\n")
        return self.client.recv(100)

    def GetMarkerY(self, n):
        # Query the marker's response value; raw reply string.
        self.client.send("CALC" + n + ":MARK" + n + ":Y?\n")
        return self.client.recv(100)

    def Close(self):
        self.client.close()
"repo_name": "weibohit/tools",
"path": "driver/scpi.py",
"copies": "1",
"size": "2058",
"license": "mit",
"hash": -2955820927644370400,
"line_mean": 27.985915493,
"line_max": 70,
"alpha_frac": 0.5962099125,
"autogenerated": false,
"ratio": 2.8114754098360657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3907685322336065,
"avg_score": null,
"num_lines": null
} |
__all__ = ['scraper', 'local_scraper', 'pw_scraper', 'uflix_scraper', 'watchseries_scraper', 'movie25_scraper', 'merdb_scraper', '2movies_scraper', 'icefilms_scraper',
'movieshd_scraper', 'yifytv_scraper', 'viooz_scraper', 'filmstreaming_scraper', 'myvideolinks_scraper', 'filmikz_scraper', 'clickplay_scraper', 'nitertv_scraper',
'iwatch_scraper', 'ororotv_scraper', 'view47_scraper', 'vidics_scraper', 'oneclickwatch_scraper', 'istreamhd_scraper', 'losmovies_scraper', 'movie4k_scraper',
'noobroom_scraper', 'solar_scraper', 'vkbox_scraper', 'directdl_scraper', 'movietv_scraper', 'moviesonline7_scraper', 'streamallthis_scraper', 'afdah_scraper',
'streamtv_scraper', 'moviestorm_scraper', 'wmo_scraper', 'zumvo_scraper', 'wso_scraper', 'tvrelease_scraper', 'hdmz_scraper', 'ch131_scraper', 'watchfree_scraper',
'pftv_scraper', 'flixanity_scraper', 'cmz_scraper', 'movienight_scraper', 'gvcenter_scraper', 'alluc_scraper', 'afdahorg_scraper', 'xmovies8_scraper',
'yifystreaming_scraper', 'mintmovies_scraper', 'playbox_scraper', 'shush_proxy', 'mvsnap_scraper', 'pubfilm_scraper', 'pctf_scraper', 'rlssource_scraper',
'couchtunerv1_scraper', 'couchtunerv2_scraper', 'tunemovie_scraper', 'watch8now_scraper', 'megabox_scraper', 'dizilab_scraper', 'beinmovie_scraper',
'dizimag_scraper', 'ayyex_scraper']
import re
import os
import xbmcaddon
import xbmc
import datetime
import time
from salts_lib import log_utils
from salts_lib.constants import VIDEO_TYPES
from . import scraper # just to avoid editor warning
from . import *
class ScraperVideo:
    """
    Value object describing the video a scraper should search for
    (movie or episode, plus identifying metadata).
    """
    def __init__(self, video_type, title, year, trakt_id, season='', episode='', ep_title='', ep_airdate=''):
        # video_type must be one of the VIDEO_TYPES constants.
        assert(video_type in (VIDEO_TYPES.__dict__[k] for k in VIDEO_TYPES.__dict__ if not k.startswith('__')))
        self.video_type = video_type
        self.title = title
        self.year = year
        self.season = season
        self.episode = episode
        self.ep_title = ep_title
        self.trakt_id = trakt_id
        self.ep_airdate = None
        # Parse the airdate if provided.  The time.strptime fallback works
        # around datetime.strptime raising TypeError under Kodi's embedded
        # interpreter.  NOTE(review): catching ImportError here looks odd --
        # presumably it guards a lazy _strptime import failure; verify
        # before changing.
        if ep_airdate:
            try: self.ep_airdate = datetime.datetime.strptime(ep_airdate, "%Y-%m-%d").date()
            except (TypeError, ImportError): self.ep_airdate = datetime.date(*(time.strptime(ep_airdate, '%Y-%m-%d')[0:3]))

    def __str__(self):
        return '|%s|%s|%s|%s|%s|%s|%s|' % (self.video_type, self.title, self.year, self.season, self.episode, self.ep_title, self.ep_airdate)
def update_xml(xml, new_settings, cat_count):
    """
    Replace the "Scrapers <cat_count>" <category> block in the settings
    xml with the newly generated settings.

    xml: full settings.xml contents as a string
    new_settings: list of <setting> element strings for this category.
        NOTE: mutated in place -- the category open/close tags are added.
    cat_count: 1-based index of the "Scrapers N" category

    Returns the (possibly updated) xml string.
    """
    new_settings.insert(0, '<category label="Scrapers %s">' % (cat_count))
    new_settings.append('    </category>')
    new_str = '\n'.join(new_settings)
    match = re.search('(<category label="Scrapers %s">.*?</category>)' % (cat_count), xml, re.DOTALL | re.I)
    if match:
        old_settings = match.group(1)
        # Bug fix: the old code compared the matched *string* against the
        # *list* new_settings, which is never equal, so the xml was marked
        # changed (and settings.xml rewritten) on every run.  Compare the
        # joined string instead so an unchanged category is left alone.
        if old_settings != new_str:
            xml = xml.replace(old_settings, new_str)
    else:
        log_utils.log('Unable to match category: %s' % (cat_count), xbmc.LOGWARNING)
    return xml
def update_settings():
    """
    Regenerate the "Scrapers N" categories of the add-on's settings.xml
    from the settings declared by every imported Scraper subclass,
    rewriting the file only when something actually changed.
    """
    path = xbmcaddon.Addon().getAddonInfo('path')
    full_path = os.path.join(path, 'resources', 'settings.xml')

    try:
        with open(full_path, 'r') as f:
            xml = f.read()
    except:
        raise

    new_settings = []
    cat_count = 1
    old_xml = xml
    # NOTE(review): __class__.__subclasses__(scraper.Scraper) is an unusual
    # spelling of scraper.Scraper.__subclasses__(); it enumerates all
    # currently-imported Scraper subclasses.
    classes = scraper.Scraper.__class__.__subclasses__(scraper.Scraper)
    for cls in sorted(classes, key=lambda x: x.get_name().upper()):
        new_settings += cls.get_settings()
        # Split the settings into categories of at most ~90 entries.
        if len(new_settings) > 90:
            xml = update_xml(xml, new_settings, cat_count)
            new_settings = []
            cat_count += 1

    if new_settings:
        xml = update_xml(xml, new_settings, cat_count)

    # Only rewrite settings.xml if the generated content differs.
    if xml != old_xml:
        try:
            with open(full_path, 'w') as f:
                f.write(xml)
        except:
            raise
    else:
        log_utils.log('No Settings Update Needed', xbmc.LOGDEBUG)
# Rebuild the scraper settings categories as a side effect of importing
# this package.
update_settings()
| {
"repo_name": "aplicatii-romanesti/allinclusive-kodi-pi",
"path": ".kodi/addons/plugin.video.salts/scrapers/__init__.py",
"copies": "1",
"size": "4030",
"license": "apache-2.0",
"hash": 8293275888796567000,
"line_mean": 45.8604651163,
"line_max": 174,
"alpha_frac": 0.6285359801,
"autogenerated": false,
"ratio": 3.0833970925784238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4211933072678424,
"avg_score": null,
"num_lines": null
} |
__all__ = ["Scraper"]
from .parser import ChapterParser
from .story import Chapter, Story
from .util import get_choice_names
import re
CHYOA_URL_REGEX = re.compile(r"^https://chyoa.com/story/[^ ]+\.[0-9]+$")
class Scraper(object):
    """
    Crawls a CHYOA story starting from its root chapter URL and builds
    a Story object containing every reachable chapter.
    """

    def __init__(self):
        self.parser = ChapterParser()

    def _reset(self):
        # Per-scrape state: story under construction, already-fetched
        # URLs, and the frontier of (id, url) choices still to visit.
        self.story = None
        self.visited = set()
        self.to_visit = []

    def scrape(self, url, recursive=True):
        """
        Scrape the story rooted at ``url``.  When ``recursive`` is True,
        keep following choices until no unvisited chapters remain.
        Returns the populated Story.
        """
        self._reset()
        fields = self.parser.get_chapter_fields(url)
        self.visited.add(url)
        self.story = Story(**fields)
        print("Story \"%s\":\nRoot \"%s\":\n%s" % (
            self.story.title, self.story.name, get_choice_names(self.story.choices)))
        self._scrape_urls(list(self.story.choices))
        if recursive:
            while self.to_visit:
                self._scrape_urls(self.to_visit)
        return self.story

    def _scrape_urls(self, urls):
        # Consume ``urls`` (emptied as we go, iterating back-to-front so
        # deletion is safe) and queue newly discovered choices for the
        # next pass.
        new_to_visit = []
        for i in range(len(urls) - 1, -1, -1):
            id, url = urls[i]
            del urls[i]
            if url in self.visited:
                print("(already visited %s)" % url)
                continue
            self.visited.add(url)
            fields = self.parser.get_chapter_fields(url)
            if fields is None:
                # Chapter could not be fetched/parsed; skip it.
                continue
            chapter = Chapter(**fields)
            self.story.chapters[id] = chapter
            print("Chapter \"%s\":\n%s" % (chapter.name, get_choice_names(chapter.choices)))
            new_to_visit += list(chapter.choices)
        self.to_visit = new_to_visit

    @staticmethod
    def is_chyoa_url(url):
        """Return True if ``url`` looks like a CHYOA story URL."""
        return bool(CHYOA_URL_REGEX.fullmatch(url))
| {
"repo_name": "sexypants/chyoa-scraper",
"path": "chyoa/scraper.py",
"copies": "1",
"size": "1703",
"license": "mit",
"hash": 4660243847295012000,
"line_mean": 26.4677419355,
"line_max": 92,
"alpha_frac": 0.5513799178,
"autogenerated": false,
"ratio": 3.4897540983606556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45411340161606556,
"avg_score": null,
"num_lines": null
} |
__all__ = ('ScriptError', 'ImportScriptError', 'InstallDependencyError',
'DependencyBlacklistedError', 'ScriptExecutionError', 'BadBehavingScriptError',
'RequirementError')
class ScriptError(Exception):
    """Common base class for all script-related exceptions."""
class ImportScriptError(ScriptError):
    """
    Raised when a script module could not be imported.

    Arguments:
     - `name`: The name of the script.
    """
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return 'Could not import script "%s"' % (self.name,)
class InstallDependencyError(ScriptError):
    """
    Raised when a script dependency could not be installed automatically.

    Arguments:
     - `dependency`: The dependency name.
    """
    def __init__(self, dependency):
        self.dependency = dependency

    def __str__(self):
        return 'Dependency needs to be installed manually via pip "%s"' % (self.dependency,)
class DependencyBlacklistedError(ScriptError):
    """
    Raised when a script dependency is blacklisted and must not be
    installed.

    Arguments:
     - `dependency`: The dependency name.
    """
    def __init__(self, dependency):
        self.dependency = dependency

    def __str__(self):
        return 'Could not install dependency "%s"' % (self.dependency,)
class ScriptExecutionError(ScriptError):
    """
    Raised when an uncaught exception escaped a running script.

    Arguments:
     - `name`: The name of the script.
    """
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return 'Script "%s" returned with an exception' % (self.name,)
class BadBehavingScriptError(ScriptError):
    """Raised when a script fails to stop in time after being cancelled."""
class RequirementError(ScriptError):
    """
    Generic requirement failure.

    Arguments:
     - `requirement`: One or more requirements that are missing.
    """
    def __init__(self, requirement):
        self.requirement = requirement

    def __str__(self):
        return 'Missing: %s' % (self.requirement,)
| {
"repo_name": "lgrahl/scripthookvpy3k",
"path": "python/gta/exceptions.py",
"copies": "1",
"size": "2127",
"license": "mit",
"hash": -1783305598626295600,
"line_mean": 23.4482758621,
"line_max": 90,
"alpha_frac": 0.6276445698,
"autogenerated": false,
"ratio": 4.535181236673774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00012631047113805736,
"num_lines": 87
} |
__all__ = ["SelectReactor"]
from .abstractreactor import AbstractReactor
from select import select
from collections import defaultdict
# Concrete Reactor implementation based on select()
class SelectReactor(AbstractReactor):
    """Concrete Reactor implementation built on top of select.select()."""

    EV_NULL = 0x00
    EV_DISCONNECTED = 0x18
    EV_IN = 0x01
    EV_OUT = 0x04

    def __init__(self):
        # Sets of fds watched for read, write and exceptional conditions.
        self._r_list = set()
        self._w_list = set()
        self._x_list = set()

    def poll(self, timeout):
        """Wait up to ``timeout`` and return (fd, event_mask) pairs."""
        readable, writable, exceptional = select(
            self._r_list, self._w_list, self._x_list, timeout)
        events = defaultdict(lambda: self.EV_NULL)
        for fds, mask in ((readable, self.EV_IN),
                          (writable, self.EV_OUT),
                          (exceptional, self.EV_DISCONNECTED)):
            for fd in fds:
                events[fd] |= mask
        return events.items()

    def register(self, fd, mode):
        """Start watching ``fd`` for the events selected by ``mode``."""
        if mode & self.EV_IN:
            self._r_list.add(fd)
        if mode & self.EV_OUT:
            self._w_list.add(fd)
        # Always watch for exceptional conditions.
        self._x_list.add(fd)

    def unregister(self, fd):
        """Stop watching ``fd`` entirely."""
        self._r_list.discard(fd)
        self._w_list.discard(fd)
        self._x_list.discard(fd)

    def modify(self, fd, mode):
        """Re-register ``fd`` with a new event ``mode``."""
        self.unregister(fd)
        self.register(fd, mode)
| {
"repo_name": "Hevienz/greenev",
"path": "greenev/reactor/selectreactor.py",
"copies": "1",
"size": "1260",
"license": "apache-2.0",
"hash": 4848696320817896000,
"line_mean": 26.0434782609,
"line_max": 80,
"alpha_frac": 0.557073955,
"autogenerated": false,
"ratio": 3.1815856777493607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4238659632749361,
"avg_score": null,
"num_lines": null
} |
__all__ = ["send",
"track_send_item",
"send_shipment",
"receive",
"track_recv_item",
"recv_shipment",
"recv_sent_shipment",
"send_rec",
"send_get_id",
"send_get_ref",
"recv_rec",
"recv_get_id",
"dbcallback_getStockLevels",
]
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
#from selenium.webdriver.common.keys import Keys
from gluon import current
from s3 import s3_debug
import time
from tests import *
class InvTestFunctions(SeleniumUnitTest):
def send(self, user, data):
"""
@case: INV
@description: Functions which runs specific workflows for Inventory tes
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
print "\n"
"""
Helper method to add a inv_send record by the given user
"""
self.login(account=user, nexturl="inv/send/create")
table = "inv_send"
result = self.create(table, data)
s3_debug("WB reference: %s" % self.send_get_ref(result))
return result
# -------------------------------------------------------------------------
def track_send_item(self, user, send_id, data, removed=True):
"""
Helper method to add a track item to the inv_send with the
given send_id by the given user
"""
try:
add_btn = self.browser.find_element_by_id("show-add-btn")
if add_btn.is_displayed():
add_btn.click()
except:
pass
self.login(account=user, nexturl="inv/send/%s/track_item" % send_id)
table = "inv_track_item"
result = self.create(table, data, dbcallback = self.dbcallback_getStockLevels)
# Get the last record in the before & after
# this will give the stock record which has been added to the end by
# the getStockLevels callback
if removed:
qnty = 0
for line in data:
if line[0] == "quantity":
qnty = float(line[1])
break
stock_before = result["before"].records[len(result["before"])-1].quantity
stock_after = result["after"].records[len(result["after"])-1].quantity
stock_shipped = qnty
self.assertTrue( stock_before - stock_after == stock_shipped, "Warehouse stock not properly adjusted, was %s should be %s but is recorded as %s" % (stock_before, stock_after, stock_before - stock_shipped))
s3_debug ("Stock level before %s, stock level after %s" % (stock_before, stock_after))
return result
# -------------------------------------------------------------------------
def send_shipment(self, user, send_id):
"""
Helper method to send a shipment with id of send_id
"""
s3db = current.s3db
db = current.db
stable = s3db.inv_send
ititable = s3db.inv_track_item
# Get the current status
query = (stable.id == send_id)
record = db(query).select(stable.status,
limitby=(0, 1)).first()
send_status = record.status
query = (ititable.send_id == send_id)
item_records = db(query).select(ititable.status)
# check that the status is correct
self.assertTrue(send_status == 0, "Shipment is not status preparing")
s3_debug("Shipment status is: preparing")
for rec in item_records:
self.assertTrue(rec.status == 1, "Shipment item is not status preparing")
s3_debug("Shipment items are all of status: preparing")
# Now send the shipment on its way
self.login(account=user, nexturl="inv/send_process/%s" % send_id)
# Get the current status
query = (stable.id == send_id)
record = db(query).select(stable.status,
limitby=(0, 1)).first()
send_status = record.status
query = (ititable.send_id == send_id)
item_records = db(query).select(ititable.status)
# check that the status is correct
self.assertTrue(send_status == 2, "Shipment is not status sent")
s3_debug("Shipment status is: sent")
for rec in item_records:
self.assertTrue(rec.status == 2, "Shipment item is not status sent")
s3_debug("Shipment items are all of status: sent")
# -------------------------------------------------------------------------
def receive(self, user, data):
"""
Helper method to add a inv_send record by the given user
"""
self.login(account=user, nexturl="inv/recv/create")
table = "inv_recv"
result = self.create(table, data)
return result
# -------------------------------------------------------------------------
def track_recv_item(self, user, recv_id, data, removed=True):
"""
Helper method to add a track item to the inv_recv with the
given recv_id
"""
try:
add_btn = self.browser.find_element_by_id("show-add-btn")
if add_btn.is_displayed():
add_btn.click()
except:
pass
self.login(account=user, nexturl="inv/recv/%s/track_item" % recv_id)
table = "inv_track_item"
result = self.create(table, data)
return result
# -------------------------------------------------------------------------
def recv_shipment(self, user, recv_id, data):
"""
Helper method that will receive the shipment, adding the
totals that arrived
It will get the stock in the warehouse before and then after
and check that the stock levels have been properly increased
"""
s3db = current.s3db
db = current.db
rvtable = s3db.inv_recv
iitable = s3db.inv_inv_item
# First get the site_id
query = (rvtable.id == recv_id)
record = db(query).select(rvtable.site_id,
limitby=(0, 1)).first()
site_id = record.site_id
# Now get all the inventory items for the site
query = (iitable.site_id == site_id)
before = db(query).select(orderby=iitable.id)
self.login(account=user, nexturl="inv/recv_process/%s" % recv_id)
query = (iitable.site_id == site_id)
after = db(query).select(orderby=iitable.id)
# Find the differences between the before and the after
changes = []
for a_rec in after:
found = False
for b_rec in before:
if a_rec.id == b_rec.id:
if a_rec.quantity != b_rec.quantity:
changes.append(
(a_rec.item_id,
a_rec.item_pack_id,
a_rec.quantity - b_rec.quantity)
)
found = True
break
if not found:
changes.append(
(a_rec.item_id,
a_rec.item_pack_id,
a_rec.quantity)
)
# changes now contains the list of changed or new records
# these should match the records received
# first check are the lengths the same?
self.assertTrue(len(data) == len(changes),
"The number of changed inventory items (%s) doesn't match the number of items received (%s)." %
(len(changes), len(data))
)
for line in data:
rec = line["record"]
found = False
for change in changes:
if rec.inv_track_item.item_id == change[0] and \
rec.inv_track_item.item_pack_id == change[1] and \
rec.inv_track_item.quantity == change[2]:
found = True
break
if found:
s3_debug("%s accounted for." % line["text"])
else:
s3_debug("%s not accounted for." % line["text"])
# -------------------------------------------------------------------------
def recv_sent_shipment(self, method, user, WB_ref, item_list):
"""
Helper method that will receive the sent shipment.
This supports two methods:
method = "warehouse"
====================
This requires going to the receiving warehouse
Selecting the shipment (using the WB reference)
Opening each item and selecting the received totals
Then receive the shipment
method = "search"
====================
Search for all received shipments
Select the matching WB reference
Opening each item and selecting the received totals
Then receive the shipment
Finally:
It will get the stock in the warehouse before and then after
and check that the stock levels have been properly increased
"""
browser = self.browser
if method == "search":
self.login(account=user, nexturl="inv/recv/search")
# Find the WB reference in the dataTable (filter so only one is displayed)
el = browser.find_element_by_id("recv_search_simple")
el.send_keys(WB_ref)
# Submit the search
browser.find_element_by_css_selector("input[type='submit']").submit()
# Select the only row in the dataTable
if not self.dt_action():
fail("Unable to select the incoming shipment with reference %s" % WB_ref)
elif method == "warehouse":
return # not yet implemented
else:
fail("Unknown method of %s" % method)
return # invalid method
#####################################################
# We are now viewing the details of the receive item
#####################################################
# Now get the recv id from the url
url = browser.current_url
url_parts = url.split("/")
try:
recv_id = int(url_parts[-1])
except:
recv_id = int(url_parts[-2])
# Click on the items tab
self.login(account=user, nexturl="inv/recv/%s/track_item" % recv_id)
data = []
for item in item_list:
# Find the item in the dataTable
self.dt_filter(item[0])
self.dt_action()
el = browser.find_element_by_id("inv_track_item_recv_quantity")
el.send_keys(item[1])
text = "%s %s" % (item[1], item[0])
data.append({"text" : text,
"record" : item[2]})
# Save the form
browser.find_element_by_css_selector("input[type='submit']").submit()
# Now receive the shipment and check the totals
self.recv_shipment(user, recv_id, data)
# -------------------------------------------------------------------------
# Functions which extract data from the create results
#
def send_rec(self, result):
"""
Simple helper function to get the newly created inv_send row
"""
# The newly created inv_send will be the first record in the "after" list
if len(result["after"]) > 0:
new_inv_send = result["after"].records[0]
return new_inv_send.inv_send
return None
def send_get_id(self, result):
"""
Simple helper function to get the record id of the newly
created inv_send row so it can be used to open the record
"""
# The newly created inv_send will be the first record in the "after" list
if len(result["after"]) > 0:
new_inv_send = result["after"].records[0]
return new_inv_send.inv_send.id
return None
def send_get_ref(self, result):
"""
Simple helper function to get the waybill reference of the newly
created inv_send row so it can be used to filter dataTables
"""
# The newly created inv_send will be the first record in the "after" list
if len(result["after"]) > 0:
new_inv_send = result["after"].records[0]
return new_inv_send.inv_send.send_ref
return None
# -------------------------------------------------------------------------
def recv_rec(self, result):
"""
Simple helper function to get the newly created inv_recv row
"""
# The newly created inv_recv will be the first record in the "after" list
if len(result["after"]) > 0:
new_inv_recv = result["after"].records[0]
return new_inv_recv.inv_recv
return None
# -------------------------------------------------------------------------
def recv_get_id(self, result):
"""
Simple helper function to get the record id of the newly
created inv_recv row so it can be used to open the record
"""
# The newly created inv_recv will be the first record in the "after" list
if len(result["after"]) > 0:
new_inv_recv = result["after"].records[0]
return new_inv_recv.inv_recv.id
return None
# -------------------------------------------------------------------------
# Callback used to retrieve additional data to the create results
#
def dbcallback_getStockLevels(self, table, data, rows):
    """
    Callback to add the total in stock for the selected item.

    This can then be used to look at the value before and after
    to ensure that the totals have been removed from the warehouse.
    The stock row will be added to the *end* of the list of rows.
    """
    table = current.s3db["inv_inv_item"]
    # Fix: initialise inv_item_id so that a form without a
    # "send_inv_item_id" field no longer raises NameError below
    # (the other copy of this helper in the repo already guards this).
    inv_item_id = None
    for details in data:
        if details[0] == "send_inv_item_id":
            inv_item_id = details[1]
            break
    if inv_item_id:
        stock_row = table[inv_item_id]
        rows.records.append(stock_row)
    return rows
| {
"repo_name": "anubhav929/eden",
"path": "modules/tests/inv/helper.py",
"copies": "1",
"size": "14536",
"license": "mit",
"hash": -4250696132323733500,
"line_mean": 40.0621468927,
"line_max": 217,
"alpha_frac": 0.5163043478,
"autogenerated": false,
"ratio": 4.369101292455666,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017655483418332205,
"num_lines": 354
} |
__all__ = ["send",
"track_send_item",
"send_shipment",
"receive",
"track_recv_item",
"recv_shipment",
"recv_sent_shipment",
"send_rec",
"send_get_id",
"send_get_ref",
"recv_rec",
"recv_get_id",
"dbcallback_getStockLevels",
]
import time
from gluon import current
from s3 import s3_debug
from tests.web2unittest import SeleniumUnitTest
class InvTestFunctions(SeleniumUnitTest):
def send(self, user, data):
    """
    @case: INV
    @description: Functions which runs specific workflows for Inventory tes
    @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
    @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
    """
    # Python 2 print statement; emits a blank separator line in test output.
    print "\n"
    # NOTE(review): the string below is a no-op expression statement, not a
    # docstring (the docstring above it is already bound) — it only serves
    # as an inline description of the method.
    """
    Helper method to add a inv_send record by the given user
    """
    # Log in as the given user and open the send-shipment create form.
    self.login(account=user, nexturl="inv/send/create")
    table = "inv_send"
    result = self.create(table, data)
    # Log the waybill reference of the newly created shipment.
    s3_debug("WB reference: %s" % self.send_get_ref(result))
    return result
# -------------------------------------------------------------------------
def track_send_item(self, user, send_id, data, removed=True):
    """
    Helper method to add a track item to the inv_send with the
    given send_id by the given user.

    If removed is True, also assert that the warehouse stock level
    dropped by exactly the shipped quantity.
    """
    time.sleep(2) # give the browser time to execute all scripts
    self.login(account=user, nexturl="inv/send/%s/track_item" % send_id)
    # The add form may be hidden behind a "show add" button; click it if so.
    try:
        add_btn = self.browser.find_element_by_id("show-add-btn")
        if add_btn.is_displayed():
            add_btn.click()
    except:
        # Best-effort: if the button is absent the form is already open.
        pass
    table = "inv_track_item"
    result = self.create(table, data, dbcallback = self.dbcallback_getStockLevels)
    # Get the last record in the before & after
    # this will give the stock record which has been added to the end by
    # the getStockLevels callback
    if removed:
        # Pull the shipped quantity out of the submitted form data.
        qnty = 0
        for line in data:
            if line[0] == "quantity":
                qnty = float(line[1])
                break
        stock_before = result["before"].records[len(result["before"])-1].quantity
        stock_after = result["after"].records[len(result["after"])-1].quantity
        stock_shipped = qnty
        # The stock on hand must have decreased by exactly what was shipped.
        self.assertTrue( stock_before - stock_after == stock_shipped, "Warehouse stock not properly adjusted, was %s should be %s but is recorded as %s" % (stock_before, stock_after, stock_before - stock_shipped))
        s3_debug ("Stock level before %s, stock level after %s" % (stock_before, stock_after))
    return result
# -------------------------------------------------------------------------
def send_shipment(self, user, send_id):
    """
    Helper method to send a shipment with id of send_id.

    Verifies the shipment is in status "preparing" (0) with all items
    "preparing" (1), processes the send, then verifies shipment and
    items are in status "sent" (2).
    """
    db = current.db
    s3db = current.s3db
    stable = s3db.inv_send
    ititable = s3db.inv_track_item
    # Get the current status
    query = (stable.id == send_id)
    record = db(query).select(stable.status,
                              limitby=(0, 1)).first()
    send_status = record.status
    query = (ititable.send_id == send_id)
    item_records = db(query).select(ititable.status)
    # check that the status is correct
    self.assertTrue(send_status == 0, "Shipment is not status preparing")
    s3_debug("Shipment status is: preparing")
    for rec in item_records:
        self.assertTrue(rec.status == 1, "Shipment item is not status preparing")
    s3_debug("Shipment items are all of status: preparing")
    # Now send the shipment on its way
    self.login(account=user, nexturl="inv/send_process/%s" % send_id)
    db.commit() # Close transaction - otherwise we get a cached response
    # Re-read the statuses after processing.
    query = (stable.id == send_id)
    record = db(query).select(stable.status,
                              limitby=(0, 1)).first()
    send_status = record.status
    query = (ititable.send_id == send_id)
    item_records = db(query).select(ititable.status)
    # check that the status is correct
    self.assertTrue(send_status == 2, "Shipment is not status sent")
    s3_debug("Shipment status is: sent")
    for rec in item_records:
        self.assertTrue(rec.status == 2, "Shipment item is not status sent")
    s3_debug("Shipment items are all of status: sent")
# -------------------------------------------------------------------------
def confirm_received_shipment(self, user, send_id):
    """
    Helper method to confirm that a shipment has been received
    outside of the system. This means that the items in the
    shipment will not be recorded as being at a site but
    the status of the shipment will be modified.
    """
    db = current.db
    s3db = current.s3db
    stable = s3db.inv_send
    ititable = s3db.inv_track_item
    # Get the current status
    query = (stable.id == send_id)
    record = db(query).select(stable.status,
                              limitby=(0, 1)).first()
    send_status = record.status
    query = (ititable.send_id == send_id)
    item_records = db(query).select(ititable.status)
    # check that the status is correct: shipment "sent" (2), items "sent" (2)
    self.assertTrue(send_status == 2, "Shipment is not status sent")
    # NOTE(review): the debug text below says "preparing" although the
    # assertion above checks for "sent" — looks like a copy-paste slip in
    # the log message; confirm before relying on the test output.
    s3_debug("Shipment status is: preparing")
    for rec in item_records:
        self.assertTrue(rec.status == 2, "Shipment item is not status sent")
    s3_debug("Shipment items are all of status: sent")
    # Now send the shipment on its way
    self.login(account=user, nexturl="inv/send/%s?received=True" % send_id)
    db.commit() # Close transaction - otherwise we get a cached response
    # Re-read statuses after confirming receipt.
    query = (stable.id == send_id)
    record = db(query).select(stable.status,
                              limitby=(0, 1)).first()
    send_status = record.status
    query = (ititable.send_id == send_id)
    item_records = db(query).select(ititable.status)
    # check that the status is correct: shipment "received" (1), items "arrived" (4)
    self.assertTrue(send_status == 1, "Shipment is not status received")
    # NOTE(review): log message says "sent" but the assertion checked
    # "received" — same copy-paste slip as above.
    s3_debug("Shipment status is: sent")
    for rec in item_records:
        self.assertTrue(rec.status == 4, "Shipment item is not status arrived")
    s3_debug("Shipment items are all of status: arrived")
# -------------------------------------------------------------------------
def receive(self, user, data):
    """
    Create an inv_recv record as the given user and return the
    create() result.
    """
    # Open the receive-shipment create form, then submit the form data.
    self.login(account=user, nexturl="inv/recv/create")
    return self.create("inv_recv", data)
# -------------------------------------------------------------------------
def track_recv_item(self, user, recv_id, data, removed=True):
    """
    Helper method to add a track item to the inv_recv with the
    given recv_id.

    NOTE(review): the `removed` parameter is accepted but never used in
    this method — presumably kept for signature symmetry with
    track_send_item; confirm before removing.
    """
    self.login(account=user, nexturl="inv/recv/%s/track_item" % recv_id)
    # The add form may be hidden behind a "show add" button; click it if so.
    try:
        add_btn = self.browser.find_element_by_id("show-add-btn")
        if add_btn.is_displayed():
            add_btn.click()
    except:
        # Best-effort: if the button is absent the form is already open.
        pass
    table = "inv_track_item"
    result = self.create(table, data)
    return result
# -------------------------------------------------------------------------
def recv_shipment(self, user, recv_id, data):
    """
    Helper method that will receive the shipment, adding the
    totals that arrived.

    It will get the stock in the warehouse before and then after
    and check that the stock levels have been properly increased.

    `data` is a list of dicts with keys "text" (human-readable label)
    and "record" (the inv_track_item row that was received).
    """
    db = current.db
    s3db = current.s3db
    rvtable = s3db.inv_recv
    iitable = s3db.inv_inv_item
    # First get the site_id
    query = (rvtable.id == recv_id)
    record = db(query).select(rvtable.site_id,
                              limitby=(0, 1)).first()
    site_id = record.site_id
    # Now get all the inventory items for the site
    query = (iitable.site_id == site_id)
    before = db(query).select(orderby=iitable.id)
    # Process the receive, then re-read the site's inventory.
    self.login(account=user, nexturl="inv/recv_process/%s" % recv_id)
    db.commit() # Close transaction - otherwise we get a cached response
    query = (iitable.site_id == site_id)
    after = db(query).select(orderby=iitable.id)
    # Find the differences between the before and the after:
    # a change tuple is (item_id, item_pack_id, quantity delta).
    changes = []
    for a_rec in after:
        found = False
        for b_rec in before:
            if a_rec.id == b_rec.id:
                if a_rec.quantity != b_rec.quantity:
                    changes.append(
                        (a_rec.item_id,
                         a_rec.item_pack_id,
                         a_rec.quantity - b_rec.quantity)
                    )
                found = True
                break
        if not found:
            # Inventory row that did not exist before: whole quantity is new.
            changes.append(
                (a_rec.item_id,
                 a_rec.item_pack_id,
                 a_rec.quantity)
            )
    # changes now contains the list of changed or new records
    # these should match the records received
    # first check are the lengths the same?
    self.assertTrue(len(data) == len(changes),
        "The number of changed inventory items (%s) doesn't match the number of items received (%s)." %
        (len(changes), len(data))
        )
    # Match every received item to exactly one stock change and log it.
    for line in data:
        rec = line["record"]
        found = False
        for change in changes:
            if rec.inv_track_item.item_id == change[0] and \
               rec.inv_track_item.item_pack_id == change[1] and \
               rec.inv_track_item.quantity == change[2]:
                found = True
                break
        if found:
            s3_debug("%s accounted for." % line["text"])
        else:
            s3_debug("%s not accounted for." % line["text"])
# -------------------------------------------------------------------------
def recv_sent_shipment(self, method, user, WB_ref, item_list):
    """
    Helper method that will receive the sent shipment.

    This supports two methods:

    method = "warehouse"
    ====================
    Go to the receiving warehouse, select the shipment (using the WB
    reference), open each item and enter the received totals, then
    receive the shipment. (Not yet implemented.)

    method = "search"
    ====================
    Search for all received shipments, select the matching WB reference,
    open each item and enter the received totals, then receive the
    shipment.

    Finally the stock in the warehouse is compared before and after to
    check that the stock levels have been properly increased
    (via recv_shipment).
    """
    browser = self.browser
    if method == "search":
        self.login(account=user, nexturl="inv/recv/search")
        # Find the WB reference in the dataTable (filter so only one is displayed)
        el = browser.find_element_by_id("recv_search_simple")
        el.send_keys(WB_ref)
        # Submit the search
        browser.find_element_by_css_selector("input[type='submit']").submit()
        # Select the only row in the dataTable
        if not self.dt_action():
            # Fix: was a bare fail(...) call — no module-level `fail`
            # exists, so this raised NameError instead of failing the
            # test; use the unittest.TestCase fail method instead.
            self.fail("Unable to select the incoming shipment with reference %s" % WB_ref)
    elif method == "warehouse":
        return # not yet implemented
    else:
        # Fix: same undefined-name problem as above.
        self.fail("Unknown method of %s" % method)
        return # invalid method
    #####################################################
    # We are now viewing the details of the receive item
    #####################################################
    # Now get the recv id from the url
    url = browser.current_url
    url_parts = url.split("/")
    try:
        recv_id = int(url_parts[-1])
    except:
        recv_id = int(url_parts[-2])
    # Click on the items tab
    self.login(account=user, nexturl="inv/recv/%s/track_item" % recv_id)
    data = []
    for item in item_list:
        # Find the item in the dataTable and open it
        self.dt_filter(item[0])
        self.dt_action()
        el = browser.find_element_by_id("inv_track_item_recv_quantity")
        el.send_keys(item[1])
        text = "%s %s" % (item[1], item[0])
        data.append({"text" : text,
                     "record" : item[2]})
        # Save the form
        browser.find_element_by_css_selector("input[type='submit']").submit()
    # Now receive the shipment and check the totals
    self.recv_shipment(user, recv_id, data)
# -------------------------------------------------------------------------
# Functions which extract data from the create results
#
def send_rec(self, result):
    """
    Simple helper function to get the newly created inv_send row.
    Returns None when nothing was created.
    """
    # The newly created inv_send will be the first record in the "after" list
    if len(result["after"]) > 0:
        new_inv_send = result["after"].records[0]
        return new_inv_send.inv_send
    return None
def send_get_id(self, result):
    """
    Simple helper function to get the record id of the newly
    created inv_send row so it can be used to open the record.
    Returns None when nothing was created.
    """
    # The newly created inv_send will be the first record in the "after" list
    if len(result["after"]) > 0:
        new_inv_send = result["after"].records[0]
        return new_inv_send.inv_send.id
    return None
def send_get_ref(self, result):
    """
    Simple helper function to get the waybill reference of the newly
    created inv_send row so it can be used to filter dataTables.
    Returns None when nothing was created.
    """
    # The newly created inv_send will be the first record in the "after" list
    if len(result["after"]) > 0:
        new_inv_send = result["after"].records[0]
        return new_inv_send.inv_send.send_ref
    return None
# -------------------------------------------------------------------------
def recv_rec(self, result):
    """
    Simple helper function to get the newly created inv_recv row.
    Returns None when nothing was created.
    """
    # The newly created inv_recv will be the first record in the "after" list
    if len(result["after"]) > 0:
        new_inv_recv = result["after"].records[0]
        return new_inv_recv.inv_recv
    return None
# -------------------------------------------------------------------------
def recv_get_id(self, result):
    """
    Simple helper function to get the record id of the newly
    created inv_recv row so it can be used to open the record.
    Returns None when nothing was created.
    """
    # The newly created inv_recv will be the first record in the "after" list
    if len(result["after"]) > 0:
        new_inv_recv = result["after"].records[0]
        return new_inv_recv.inv_recv.id
    return None
# -------------------------------------------------------------------------
# Callback used to retrieve additional data to the create results
#
def dbcallback_getStockLevels(self, table, data, rows):
    """
    Callback to add the total in stock for the selected item.

    Lets the caller compare the value before and after to ensure
    that the totals have been removed from the warehouse. The stock
    row is appended to the *end* of the list of rows.
    """
    stock_table = current.s3db["inv_inv_item"]
    # Locate the inventory item the form refers to, if any.
    item_id = next((d[1] for d in data if d[0] == "send_inv_item_id"), None)
    if item_id:
        rows.records.append(stock_table[item_id])
    return rows
# END =========================================================================
| {
"repo_name": "tudorian/eden",
"path": "modules/tests/inv/helper.py",
"copies": "28",
"size": "16790",
"license": "mit",
"hash": -5690788515202574000,
"line_mean": 38.8812351544,
"line_max": 217,
"alpha_frac": 0.522334723,
"autogenerated": false,
"ratio": 4.332903225806452,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ('SensorNotFound', 'SensorListener', 'sensortypes')
from jnius import PythonJavaClass, java_method, autoclass, cast
PythonActivity = autoclass('org.renpy.android.PythonActivity')
Context = autoclass('android.content.Context')
Sensor = autoclass('android.hardware.Sensor')
SensorManager = autoclass('android.hardware.SensorManager')
# Map of friendly sensor names to android.hardware.Sensor type constants.
# Commented-out entries require a higher Android API level (noted inline).
sensortypes = {
    'accelerometer': Sensor.TYPE_ACCELEROMETER,
    'magnetic-field': Sensor.TYPE_MAGNETIC_FIELD,
    'gyroscope': Sensor.TYPE_GYROSCOPE,
    'light': Sensor.TYPE_LIGHT,
    'pressure': Sensor.TYPE_PRESSURE,
    'proximity': Sensor.TYPE_PROXIMITY,
    'linear-acceleration': Sensor.TYPE_LINEAR_ACCELERATION,
    #'rotation-vector': Sensor.TYPE_ROTATION_VECTOR, #API 9
    'orientation': Sensor.TYPE_ORIENTATION,
    #'humidity': Sensor.TYPE_RELATIVE_HUMDITY, #API 14
    'ambient-temperature': Sensor.TYPE_AMBIENT_TEMPERATURE }
class SensorNotFound(Exception):
    """Raised when the device has no default sensor of the requested type."""
class SensorListener(PythonJavaClass):
    """Bridge a Python callback onto Android's SensorEventListener.

    The callback is invoked as callback(listener, event_name, *args)
    with event_name being 'accuracy-changed' or 'sensor-changed'.
    """

    __javainterfaces__ = ['android/hardware/SensorEventListener']

    def __init__(self, sensortype, callback):
        super(SensorListener, self).__init__()
        self.callback = callback
        # sensortype must be one of the friendly names in `sensortypes`.
        assert(sensortype in sensortypes)
        self.manager = cast('android.hardware.SensorManager',
                PythonActivity.mActivity.getSystemService(Context.SENSOR_SERVICE))
        # do_stop suppresses callbacks delivered after stop() was called.
        self.do_stop = False
        java_sensortype = sensortypes[sensortype]
        self.sensortype = java_sensortype
        self.sensor = self.manager.getDefaultSensor(java_sensortype)
        if self.sensor is None:
            raise SensorNotFound()

    def start(self):
        # Begin receiving events at the default (normal) rate.
        self.do_stop = False
        self.manager.registerListener(self, self.sensor,
                SensorManager.SENSOR_DELAY_NORMAL)

    def stop(self):
        # Set the flag first so in-flight events are dropped, then unregister.
        self.do_stop = True
        self.manager.unregisterListener(self, self.sensor)

    @java_method('()I')
    def hashCode(self):
        # Java-side identity hash; id() is stable for the object's lifetime.
        return id(self)

    @java_method('(Landroid/hardware/Sensor;I)V')
    def onAccuracyChanged(self, sensor, accuracy):
        if self.do_stop:
            print 'sensor avoided accuracy-changed, stop has been called.'
            return
        self.callback(self, 'accuracy-changed', sensor, accuracy)

    @java_method('(Landroid/hardware/SensorEvent;)V')
    def onSensorChanged(self, event):
        if self.do_stop:
            print 'sensor avoided sensor-changed, stop has been called.'
            return
        self.callback(self, 'sensor-changed', event)
| {
"repo_name": "tshirtman/pyconfr-2013-kivy",
"path": "jnius_3.py",
"copies": "1",
"size": "2505",
"license": "mit",
"hash": -4945805223044634000,
"line_mean": 33.7916666667,
"line_max": 82,
"alpha_frac": 0.6786427146,
"autogenerated": false,
"ratio": 3.6676427525622253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9803189135093344,
"avg_score": 0.008619266413776179,
"num_lines": 72
} |
__all__ = ["SentryNotifier"]
from freight import http
from freight.models import App, TaskStatus
from .base import Notifier, NotifierEvent
class SentryNotifier(Notifier):
    """Notifier that POSTs deploy lifecycle events to a Sentry webhook."""

    def get_options(self):
        """Declare the notifier's config schema: a required webhook URL."""
        return {"webhook_url": {"required": True}}

    def should_send_deploy(self, deploy, task, config, event):
        """Send on every task start, but only on *successful* finishes."""
        if event == NotifierEvent.TASK_STARTED:
            return True
        if event == NotifierEvent.TASK_FINISHED and task.status == TaskStatus.finished:
            return True
        return False

    def send_deploy(self, deploy, task, config, event):
        """POST the deploy details as a JSON payload to the webhook URL."""
        webhook_url = config["webhook_url"]
        app = App.query.get(deploy.app_id)

        payload = {
            "number": deploy.number,
            "app_name": app.name,
            "params": dict(task.params or {}),
            "env": deploy.environment,
            "ref": task.ref,
            "sha": task.sha,
            "duration": task.duration,
            "event": "started" if event == NotifierEvent.TASK_STARTED else "finished",
            # Timestamps are ISO-8601 with an explicit "Z" suffix, or None
            # when the task has not reached that state yet.
            "dateStarted": task.date_started.isoformat() + "Z"
            if task.date_started
            else None,
            "dateReleased": task.date_finished.isoformat() + "Z"
            if task.date_finished
            else None,
            "link": http.absolute_uri(
                f"/deploys/{app.name}/{deploy.environment}/{deploy.number}/"
            ),
        }

        http.post(webhook_url, json=payload)
| {
"repo_name": "getsentry/freight",
"path": "freight/notifiers/sentry.py",
"copies": "1",
"size": "1459",
"license": "apache-2.0",
"hash": 4609676488881996300,
"line_mean": 30.0425531915,
"line_max": 87,
"alpha_frac": 0.5702535984,
"autogenerated": false,
"ratio": 4.030386740331492,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00048633806887658684,
"num_lines": 47
} |
__all__ = ["sentry_tower", "machine_gun_tower", "rocket_tower"]
from .tools import create_building
# Shared attributes for all sentry-tower levels; per-level dicts below add
# damage_per_shot and hit_points.
SENTRY_TOWER_BASIS = {
    'alias': 'sniper',
    'type': 'sentryGun',
    'firing_range': 12,
    'rate_of_fire': 0.7,
    'role': 'tower',
    'status': 'idle',
    'size': 3,
    'level': None,
    'tile_position': None,
    'player_id': None,
    'code': None,
}
SENTRY_TOWERS = {
    1: dict(damage_per_shot=56, hit_points=1000, **SENTRY_TOWER_BASIS),
    2: dict(damage_per_shot=65, hit_points=1200, **SENTRY_TOWER_BASIS),
    3: dict(damage_per_shot=75, hit_points=1400, **SENTRY_TOWER_BASIS),
    4: dict(damage_per_shot=87, hit_points=1700, **SENTRY_TOWER_BASIS),
    5: dict(damage_per_shot=100, hit_points=2100, **SENTRY_TOWER_BASIS),
}
# Shared attributes for all machine-gun-tower levels.
MACHINE_GUN_TOWER_BASIS = {
    'alias': 'machine_gun',
    'type': 'machineGun',
    'firing_range': 5,
    'rate_of_fire': 10,
    'role': 'tower',
    'status': 'idle',
    'size': 2,
    'level': None,
    'tile_position': None,
    'player_id': None,
    'code': None,
}
MACHINE_GUN_TOWERS = {
    1: dict(damage_per_shot=10, hit_points=1600, **MACHINE_GUN_TOWER_BASIS),
    2: dict(damage_per_shot=15, hit_points=1750, **MACHINE_GUN_TOWER_BASIS),
    3: dict(damage_per_shot=20, hit_points=2000, **MACHINE_GUN_TOWER_BASIS),
}
# Shared attributes for all rocket-tower levels.
# NOTE(review): 'alias' and 'type' here are identical to the machine-gun
# basis ('machine_gun'/'machineGun') — this looks like a copy-paste slip
# and should presumably be rocket-specific values; confirm against the
# consumers of these dicts before changing.
ROCKET_TOWER_BASIS = {
    'alias': 'machine_gun',
    'type': 'machineGun',
    'firing_range': 10,
    'rate_of_fire': 0.4,
    'role': 'tower',
    'status': 'idle',
    'size': 2,
    'level': None,
    'tile_position': None,
    'player_id': None,
    'code': None,
}
ROCKET_TOWERS = {
    1: dict(damage_per_shot=150, hit_points=2000, **ROCKET_TOWER_BASIS),
    2: dict(damage_per_shot=200, hit_points=2180, **ROCKET_TOWER_BASIS),
    3: dict(damage_per_shot=300, hit_points=2370, **ROCKET_TOWER_BASIS),
    4: dict(damage_per_shot=420, hit_points=2600, **ROCKET_TOWER_BASIS),
}
def sentry_tower(level: int, tile_position: [int, int], player_id: int, code_id: int) -> dict:
    """Build the description dict of a sentry tower at the given level and placement."""
    return create_building(SENTRY_TOWERS, level, tile_position, player_id, code_id)
def machine_gun_tower(level: int, tile_position: [int, int], player_id: int, code_id: int) -> dict:
    """Build the description dict of a machine-gun tower at the given level and placement."""
    return create_building(MACHINE_GUN_TOWERS, level, tile_position, player_id, code_id)
def rocket_tower(level: int, tile_position: [int, int], player_id: int, code_id: int) -> dict:
    """Build the description dict of a rocket tower at the given level and placement."""
    return create_building(ROCKET_TOWERS, level, tile_position, player_id, code_id)
| {
"repo_name": "CheckiO/EoC-battle-mocks",
"path": "battle_mocks/towers.py",
"copies": "1",
"size": "2424",
"license": "mit",
"hash": -5265394104865299000,
"line_mean": 30.0769230769,
"line_max": 99,
"alpha_frac": 0.6262376238,
"autogenerated": false,
"ratio": 2.6205405405405404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.374677816434054,
"avg_score": null,
"num_lines": null
} |
__all__ = ['seq_cpg_analysis']
def drop_small_region(items, length):
    """
    Keep only the regions (start, end) whose span end - start is at
    least `length`.

    >>> drop_small_region([(10,20),(100,300),(1000,2000)], 0)
    [(10, 20), (100, 300), (1000, 2000)]
    >>> drop_small_region([(10,20),(100,300),(1000,2000)], 200)
    [(100, 300), (1000, 2000)]
    >>> drop_small_region([(10,20),(100,300),(1000,2000)], 201)
    [(1000, 2000)]
    >>> drop_small_region([(10,20),(100,300),(1000,2000)], 2000)
    []
    """
    return [(start, end) for start, end in items if end - start >= length]
def region_growing(items, gap=0):
    """
    Merge sorted regions that touch or are separated by at most `gap`.
    Input regions must be ordered by start position.

    >>> region_growing([(10,20),(100,300),(1000,2000)])
    [(10, 20), (100, 300), (1000, 2000)]
    >>> region_growing([(10,20),(15,30),(30,50)])
    [(10, 50)]
    """
    if len(items) < 2:
        return items
    merged = []
    cur_start, cur_end = items[0]
    for nxt_start, nxt_end in items[1:]:
        # Input must be sorted by start position.
        assert(cur_start <= nxt_start)
        if nxt_start <= cur_end + gap:
            # Overlapping (within gap): extend the current region.
            cur_end = max(cur_end, nxt_end)
        else:
            merged.append((cur_start, cur_end))
            cur_start, cur_end = nxt_start, nxt_end
    merged.append((cur_start, cur_end))
    return merged
# http://genomewiki.ucsc.edu/index.php/CpG_Islands
# NOTE: this cpg island implementation is ad-hoc.
class cpgisland_searcher(object):
    """Accumulates per-base CpG-island criteria flags and merges the
    flagged stretches into island regions on finish()."""

    def __init__(self, length, window):
        # length: total sequence length; window: sliding-window size used
        # by the caller when computing the per-base criteria.
        self.length = length
        self.window = window
        self.met_criteria = False
        self.islands = []
        self.start = 0

    def p(self, i, met_criteria):
        """point each base"""
        if self.met_criteria and (not met_criteria):
            """region exit"""
            self.islands.append((self.start, i-1))
            self.met_criteria = False
        elif (not self.met_criteria) and met_criteria:
            """region enter"""
            self.start = i
            self.met_criteria = True

    def finish(self):
        # Flush a still-open region by signalling "criteria not met" one
        # past the end of the sequence.
        self.p(self.length, False)
        # NOTE(review): under Python 3 `self.window/2` is a float, making
        # the region bounds floats; under Python 2 it is integer division.
        # Confirm which interpreter this module targets.
        h = self.window/2
        # Expand each island by half a window on both sides, then merge.
        connected = region_growing([(p-h, q+h) for p,q in self.islands])
        # Two individual CpG islands were connected if they were separated by less than 100 bp
        gap_connected = region_growing(connected, 100)
        # Islands shorter than 500 bp are discarded.
        return drop_small_region(gap_connected, 500)
def seq_cpg_analysis(seq, window):
    """
    calculate gc percent for entire sequence.

    Returns (gc_per, obs, cpg_islands) where gc_per[i] and obs[i] are the
    GC fraction and CpG observed/expected ratio of the window centred on
    base i, and cpg_islands is the merged island list from
    cpgisland_searcher.finish().
    """
    seqstr = str(seq).upper()
    l = len(seq)
    h = int(window/2)
    gc_per = []
    obs = []
    sr = cpgisland_searcher(l,window)
    for i in range(0,l):
        # Window [p, q) centred on i, clamped to the sequence bounds.
        p = max(0,i-h)
        q = min(i+h,l)
        n = q-p
        c = seqstr.count('C',p,q)
        g = seqstr.count('G',p,q)
        cg = seqstr.count('CG',p,q)
        gcp = 1.*(c+g)/n
        # Observed/expected CpG ratio; 0 when there are no C or no G.
        oe = 1.*n*cg/(c*g) if (c*g)!=0 else 0
        gc_per.append(gcp)
        obs.append(oe)
        # Island criteria used here: GC% > 0.55 and Obs/Exp > 0.65.
        island_criteria = (gcp > 0.55) and (oe > 0.65)
        sr.p(i, island_criteria)
    cpg_islands = sr.finish()
    return gc_per, obs, cpg_islands
def cpg_obs_per_exp(seq):
    """
    CpG observed/expected ratio, after Gardiner-Garden 1987:

        Obs/Exp CpG = N * 'Number of CpG' / ('Number of C' * 'Number of G')

    where N is the total number of nucleotides in the sequence being
    analyzed. The result lies in [0, 2]; it is 0 when the sequence
    contains no C or no G.

    >>> cpg_obs_per_exp('GC')
    0.0
    >>> cpg_obs_per_exp('CG')
    2.0
    >>> cpg_obs_per_exp('CGCGCGCG')
    2.0
    >>> cpg_obs_per_exp('CGGCCGGCCGGC')
    1.0
    """
    s = str(seq)
    total = len(s)
    num_c = s.count('C')
    num_g = s.count('G')
    num_cpg = s.count('CG')
    if num_c == 0 or num_g == 0:
        return 0
    return 1. * total * num_cpg / (num_c * num_g)
| {
"repo_name": "mizuy/seqtool",
"path": "seqtool/nucleotide/cpgisland.py",
"copies": "1",
"size": "3601",
"license": "mit",
"hash": -4189080280865785300,
"line_mean": 26.0751879699,
"line_max": 94,
"alpha_frac": 0.5173562899,
"autogenerated": false,
"ratio": 3.0133891213389123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40307454112389124,
"avg_score": null,
"num_lines": null
} |
__all__ = ['seqToKV', 'kvToSeq', 'dictToKV', 'kvToDict']
from openid import oidutil
import types
class KVFormError(ValueError):
    """Raised (in strict mode) when key-value form data is malformed."""
def seqToKV(seq, strict=False):
    """Represent a sequence of pairs of strings as newline-terminated
    key:value pairs. The pairs are generated in the order given.

    @param seq: The pairs
    @type seq: [(str, (unicode|str))]

    @return: A string representation of the sequence
    @rtype: str

    Note: this is Python 2 code (types.StringType / str.decode).
    In strict mode recoverable problems raise KVFormError; otherwise
    they are logged via oidutil.log.
    """
    def err(msg):
        # Report a recoverable problem: raise in strict mode, log otherwise.
        formatted = 'seqToKV warning: %s: %r' % (msg, seq)
        if strict:
            raise KVFormError(formatted)
        else:
            oidutil.log(formatted)

    lines = []
    for k, v in seq:
        # Normalise the key to unicode, coercing non-strings with str().
        if isinstance(k, types.StringType):
            k = k.decode('UTF8')
        elif not isinstance(k, types.UnicodeType):
            err('Converting key to string: %r' % k)
            k = str(k)

        # Newlines and colons in keys would corrupt the format: hard errors.
        if '\n' in k:
            raise KVFormError(
                'Invalid input for seqToKV: key contains newline: %r' % (k,))
        if ':' in k:
            raise KVFormError(
                'Invalid input for seqToKV: key contains colon: %r' % (k,))
        if k.strip() != k:
            err('Key has whitespace at beginning or end: %r' % (k,))

        # Same normalisation for the value.
        if isinstance(v, types.StringType):
            v = v.decode('UTF8')
        elif not isinstance(v, types.UnicodeType):
            err('Converting value to string: %r' % (v,))
            v = str(v)

        if '\n' in v:
            raise KVFormError(
                'Invalid input for seqToKV: value contains newline: %r' % (v,))
        if v.strip() != v:
            err('Value has whitespace at beginning or end: %r' % (v,))
        lines.append(k + ':' + v + '\n')

    # Join and encode back to a UTF-8 byte string.
    return ''.join(lines).encode('UTF8')
def kvToSeq(data, strict=False):
    """
    Parse KV form data into a list of (unicode key, unicode value) pairs.

    After one parse, seqToKV and kvToSeq are inverses, with no warnings::

        seq = kvToSeq(s)
        seqToKV(kvToSeq(seq)) == seq

    In strict mode recoverable problems raise KVFormError; otherwise
    they are logged via oidutil.log. (Python 2 code: str.decode.)
    """
    def err(msg):
        formatted = 'kvToSeq warning: %s: %r' % (msg, data)
        if strict:
            raise KVFormError(formatted)
        else:
            oidutil.log(formatted)

    lines = data.split('\n')
    if lines[-1]:
        # Well-formed data ends in a newline, so the last split element
        # should be empty.
        err('Does not end in a newline')
    else:
        del lines[-1]

    pairs = []
    line_num = 0
    for line in lines:
        line_num += 1

        # Ignore blank lines
        if not line.strip():
            continue

        # Split on the first colon only; values may contain colons.
        pair = line.split(':', 1)
        if len(pair) == 2:
            k, v = pair
            k_s = k.strip()
            if k_s != k:
                fmt = ('In line %d, ignoring leading or trailing '
                       'whitespace in key %r')
                err(fmt % (line_num, k))
            if not k_s:
                err('In line %d, got empty key' % (line_num,))

            v_s = v.strip()
            if v_s != v:
                fmt = ('In line %d, ignoring leading or trailing '
                       'whitespace in value %r')
                err(fmt % (line_num, v))

            pairs.append((k_s.decode('UTF8'), v_s.decode('UTF8')))
        else:
            err('Line %d does not contain a colon' % line_num)

    return pairs
def dictToKV(d):
    """Serialize a dict to KV form, emitting keys in sorted order."""
    return seqToKV(sorted(d.items()))
def kvToDict(s):
    """Parse KV form data into a dict (later duplicate keys win)."""
    pairs = kvToSeq(s)
    return dict(pairs)
| {
"repo_name": "vbshah1992/microblog",
"path": "flask/lib/python2.7/site-packages/openid/kvform.py",
"copies": "146",
"size": "3294",
"license": "bsd-3-clause",
"hash": -7146062292637374000,
"line_mean": 25.7804878049,
"line_max": 79,
"alpha_frac": 0.5085003036,
"autogenerated": false,
"ratio": 3.588235294117647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ['seqToKV', 'kvToSeq', 'dictToKV', 'kvToDict']
import types
import logging
class KVFormError(ValueError):
    """Raised (in strict mode) when key-value form data is malformed."""
def seqToKV(seq, strict=False):
    """Represent a sequence of pairs of strings as newline-terminated
    key:value pairs. The pairs are generated in the order given.

    @param seq: The pairs
    @type seq: [(str, (unicode|str))]

    @return: A string representation of the sequence
    @rtype: str

    Note: this is Python 2 code (types.StringType / str.decode).
    In strict mode recoverable problems raise KVFormError; otherwise
    they are logged via logging.warn.
    """
    def err(msg):
        # Report a recoverable problem: raise in strict mode, log otherwise.
        formatted = 'seqToKV warning: %s: %r' % (msg, seq)
        if strict:
            raise KVFormError(formatted)
        else:
            logging.warn(formatted)

    lines = []
    for k, v in seq:
        # Normalise the key to unicode, coercing non-strings with str().
        if isinstance(k, types.StringType):
            k = k.decode('UTF8')
        elif not isinstance(k, types.UnicodeType):
            err('Converting key to string: %r' % k)
            k = str(k)

        # Newlines and colons in keys would corrupt the format: hard errors.
        if '\n' in k:
            raise KVFormError(
                'Invalid input for seqToKV: key contains newline: %r' % (k,))
        if ':' in k:
            raise KVFormError(
                'Invalid input for seqToKV: key contains colon: %r' % (k,))
        if k.strip() != k:
            err('Key has whitespace at beginning or end: %r' % (k,))

        # Same normalisation for the value.
        if isinstance(v, types.StringType):
            v = v.decode('UTF8')
        elif not isinstance(v, types.UnicodeType):
            err('Converting value to string: %r' % (v,))
            v = str(v)

        if '\n' in v:
            raise KVFormError(
                'Invalid input for seqToKV: value contains newline: %r' % (v,))
        if v.strip() != v:
            err('Value has whitespace at beginning or end: %r' % (v,))
        lines.append(k + ':' + v + '\n')

    # Join and encode back to a UTF-8 byte string.
    return ''.join(lines).encode('UTF8')
def kvToSeq(data, strict=False):
    """
    Parse KV form data into a list of (unicode key, unicode value) pairs.

    After one parse, seqToKV and kvToSeq are inverses, with no warnings::

        seq = kvToSeq(s)
        seqToKV(kvToSeq(seq)) == seq

    In strict mode recoverable problems raise KVFormError; otherwise
    they are logged via logging.warn. (Python 2 code: str.decode.)
    """
    def err(msg):
        formatted = 'kvToSeq warning: %s: %r' % (msg, data)
        if strict:
            raise KVFormError(formatted)
        else:
            logging.warn(formatted)

    lines = data.split('\n')
    if lines[-1]:
        # Well-formed data ends in a newline, so the last split element
        # should be empty.
        err('Does not end in a newline')
    else:
        del lines[-1]

    pairs = []
    line_num = 0
    for line in lines:
        line_num += 1

        # Ignore blank lines
        if not line.strip():
            continue

        # Split on the first colon only; values may contain colons.
        pair = line.split(':', 1)
        if len(pair) == 2:
            k, v = pair
            k_s = k.strip()
            if k_s != k:
                fmt = ('In line %d, ignoring leading or trailing '
                       'whitespace in key %r')
                err(fmt % (line_num, k))
            if not k_s:
                err('In line %d, got empty key' % (line_num,))

            v_s = v.strip()
            if v_s != v:
                fmt = ('In line %d, ignoring leading or trailing '
                       'whitespace in value %r')
                err(fmt % (line_num, v))

            pairs.append((k_s.decode('UTF8'), v_s.decode('UTF8')))
        else:
            err('Line %d does not contain a colon' % line_num)

    return pairs
def dictToKV(d):
    """Serialize a dict to KV form, emitting keys in sorted order."""
    return seqToKV(sorted(d.items()))
def kvToDict(s):
    """Parse KV form data into a dict (later duplicate keys win)."""
    pairs = kvToSeq(s)
    return dict(pairs)
| {
"repo_name": "jcnelson/syndicate",
"path": "old/ms/openid/kvform.py",
"copies": "13",
"size": "3283",
"license": "apache-2.0",
"hash": -1544924639849895000,
"line_mean": 25.9098360656,
"line_max": 79,
"alpha_frac": 0.507767286,
"autogenerated": false,
"ratio": 3.599780701754386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ["SeqValue"]
class SeqValue:
    """ This represents a sequence value read from a contents.xml
    file, either from the <contents> or the <package> section. It's
    represented as series of dotted integers in the xml file, and
    stored internally as a tuple of integers.

    It may be incremented, which increments only the last integer in
    the series; or it may be compared with another SeqValue, which
    compares all of the integers componentwise. """

    def __init__(self, value = None):
        # self.value is always a tuple of ints; empty tuple means "unset".
        self.value = ()
        if value is not None:
            self.set(value)

    def set(self, value):
        """ Sets the seq from the indicated value of unspecified
        type. """
        if isinstance(value, tuple):
            self.setFromTuple(value)
        elif isinstance(value, str):
            self.setFromString(value)
        else:
            raise TypeError('Invalid sequence type: %s' % (value,))

    def setFromTuple(self, value):
        """ Sets the seq from the indicated tuple of integers. """
        assert isinstance(value, tuple)
        self.value = value

    def setFromString(self, value):
        """ Sets the seq from the indicated string of dot-separated
        integers.  Raises ValueError on error. """
        assert isinstance(value, str)

        self.value = ()
        if value:
            value = value.split('.')
            # int() raises ValueError on non-numeric components.
            value = map(int, value)
            self.value = tuple(value)

    def loadXml(self, xelement, attribute = 'seq'):
        """ Reads the seq from the indicated XML element.  Returns
        true if loaded, false if not given or if there was an
        error. """
        self.value = ()
        value = xelement.Attribute(attribute)
        if value:
            try:
                self.setFromString(value)
            except ValueError:
                # Malformed attribute: leave the seq unset.
                return False
            return True
        return False

    def storeXml(self, xelement, attribute = 'seq'):
        """ Adds the seq to the indicated XML element. """
        # An unset seq writes no attribute at all.
        if self.value:
            value = '.'.join(map(str, self.value))
            xelement.SetAttribute(attribute, value)

    def __add__(self, inc):
        """ Increments the seq value, returning the new value. """
        if not self.value:
            # Incrementing an unset seq always yields (1,).
            value = (1,)
        else:
            value = self.value[:-1] + (self.value[-1] + inc,)
        return SeqValue(value)

    def __cmp__(self, other):
        """ Compares to another seq value. """
        # NOTE(review): __cmp__ and the builtin cmp() are Python 2 only,
        # while __bool__ below is Python 3 only — this class mixes both
        # dialects; under Python 3 comparisons would need rich-comparison
        # methods. Confirm the targeted interpreter.
        return cmp(self.value, other.value)

    def __bool__(self):
        # Truthiness: a seq is truthy once it has at least one component.
        return bool(self.value)

    def __str__(self):
        return 'SeqValue%s' % (repr(self.value))
| {
"repo_name": "brakhane/panda3d",
"path": "direct/src/p3d/SeqValue.py",
"copies": "1",
"size": "2660",
"license": "bsd-3-clause",
"hash": 1607253535921754400,
"line_mean": 30.2941176471,
"line_max": 68,
"alpha_frac": 0.5763157895,
"autogenerated": false,
"ratio": 4.367816091954023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014237610482280148,
"num_lines": 85
} |
__all__ = ["SeqValue"]
import types
class SeqValue:
    """ This represents a sequence value read from a contents.xml
    file, either from the <contents> or the <package> section. It's
    represented as series of dotted integers in the xml file, and
    stored internally as a tuple of integers.
    It may be incremented, which increments only the last integer in
    the series; or it may be compared with another SeqValue, which
    compares all of the integers componentwise. """

    # NOTE(review): this variant uses Python 2-only constructs
    # (types.TupleType, types.StringTypes, `raise E, msg`, __cmp__/cmp)
    # and will not import on Python 3.

    def __init__(self, value = None):
        # The sequence as a tuple of ints; () means "no value yet".
        self.value = ()
        if value is not None:
            self.set(value)

    def set(self, value):
        """ Sets the seq from the indicated value of unspecified
        type. """
        # Dispatch on the runtime type: tuple of ints or dotted string.
        if isinstance(value, types.TupleType):
            self.setFromTuple(value)
        elif isinstance(value, types.StringTypes):
            self.setFromString(value)
        else:
            raise TypeError, 'Invalid sequence type: %s' % (value,)

    def setFromTuple(self, value):
        """ Sets the seq from the indicated tuple of integers. """
        assert isinstance(value, types.TupleType)
        self.value = value

    def setFromString(self, value):
        """ Sets the seq from the indicated string of dot-separated
        integers. Raises ValueError on error. """
        assert isinstance(value, types.StringTypes)
        self.value = ()
        if value:
            value = value.split('.')
            # int() raises ValueError on any non-numeric component.
            value = map(int, value)
            self.value = tuple(value)

    def loadXml(self, xelement, attribute = 'seq'):
        """ Reads the seq from the indicated XML element. Returns
        true if loaded, false if not given or if there was an
        error. """
        self.value = ()
        value = xelement.Attribute(attribute)
        if value:
            try:
                self.setFromString(value)
            except ValueError:
                # Malformed attribute: leave the seq cleared.
                return False
            return True
        return False

    def storeXml(self, xelement, attribute = 'seq'):
        """ Adds the seq to the indicated XML element. """
        # An empty seq writes no attribute at all.
        if self.value:
            value = '.'.join(map(str, self.value))
            xelement.SetAttribute(attribute, value)

    def __add__(self, inc):
        """ Increments the seq value, returning the new value. """
        if not self.value:
            value = (1,)
        else:
            # Only the last component is incremented.
            value = self.value[:-1] + (self.value[-1] + inc,)
        return SeqValue(value)

    def __cmp__(self, other):
        """ Compares to another seq value. """
        # Componentwise tuple comparison (Python 2 cmp protocol).
        return cmp(self.value, other.value)

    def __bool__(self):
        return bool(self.value)

    def __str__(self):
        return 'SeqValue%s' % (repr(self.value))
| {
"repo_name": "mgracer48/panda3d",
"path": "direct/src/p3d/SeqValue.py",
"copies": "1",
"size": "2722",
"license": "bsd-3-clause",
"hash": 2419831020425167400,
"line_mean": 30.2873563218,
"line_max": 68,
"alpha_frac": 0.5833945628,
"autogenerated": false,
"ratio": 4.348242811501597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5431637374301598,
"avg_score": null,
"num_lines": null
} |
__all__ = ["SerialImpl"]
from utility.log import VLOG
from utility.status import *
import serial
import time
class SerialImpl(object):
def __init__(self):
# Serial Port RS232 basic configure parameters
self.ser_ = serial.Serial('/dev/ttyUSB0')
self.ser_.setBaudrate(9600)
self.ser_.setStopbits(serial.STOPBITS_ONE)
self.ser_.setByteSize(serial.EIGHTBITS)
self.ser_.setParity(serial.PARITY_NONE)
self.ser_.setTimeout(3)
self.ser_.setWriteTimeout(3)
self.ser_.flushInput()
self.ser_.flushOutput()
self.ShowParameters()
def Defaut(self):
# Serial Port RS232 basic configure parameters
self.ser_.setBaudrate(9600)
self.ser_.setStopbits(serial.STOPBITS_ONE)
self.ser_.setByteSize(serial.EIGHTBITS)
self.ser_.setParity(serial.PARITY_NONE)
self.ser_.setTimeout(3)
self.ser_.setWriteTimeout(3)
self.ser_.flushInput()
self.ser_.flushOutput()
return self.ShowParameters()
def IsOpen(self):
return self.ser_.isOpen()
def Close(self):
if self.IsOpen():
self.ser_.close()
def SetReadTimeout(self, timeout):
try:
self.ser_.setTimeout(timeout)
except serial.SerialException, e:
VLOG(3, e)
return Status(kUnknownError, e)
VLOG(0, "set read timeout %d" % self.ser_.timeout)
return Status(kOk)
def readtimeout(self):
return self.ser_.timeout
def SetWriteTimeout(self, timeout):
try:
self.ser_.setWriteTimeout(timeout)
except serial.SerialException, e:
VLOG(3, e)
return Status(kUnknownError, e)
VLOG(0, "set write timeout %d" % self.ser_.writeTimeout)
return Status(kOk)
def writetimeout(self):
return self.ser_.writeTimeout
def SetBaudRate(self, baudrate):
try:
self.ser_.setBaudrate(baudrate)
except serial.SerialException, e:
VLOG(3, e)
return Status(kUnknownError, e)
VLOG(0, "set baudrate %d" % self.ser_.stopbits)
return Status(kOk)
def baudrate(self):
return self.ser_.baudrate
def SetStopBits(self, stopbits):
try:
self.ser_.setStopbits(stopbits)
except serial.SerialException, e:
VLOG(3, e)
return Status(kUnknownError, e)
VLOG(0, "set stopbit %d" % self.ser_.stopbits)
return Status(kOk)
def stopbits(self):
return self.ser_.stopbits
def SetByteSize(self, bytesize):
try:
self.ser_.setByteSize(bytesize)
except serial.SerialException, e:
VLOG(3, e)
return Status(kUnknownError, e)
VLOG(0, "set bytesize %d" % self.ser_.bytesize)
return Status(kOk)
def bytesize(self):
return self.ser_.bytesize
def SetParity(self, parity):
try:
self.ser_.setParity(parity)
except serial.SerialException, e:
VLOG(3, e)
return Status(kUnknownError, e)
VLOG(0, "set parity %s" % self.ser_.parity)
return Status(kOk)
def parity(self):
return self.ser_.parity
def ShowParameters(self):
declaim = "\
-----------------------------------------\n\
| Basic Parameters of RS232 |\n\
-----------------------------------------\n\
| fileno: " + str(self.ser_.fileno()) + "\n\
| port: " + self.ser_.getPort() + "\n\
| baudrate: " + str(self.ser_.baudrate) + "\n\
| bytesize: " + str(self.ser_.bytesize) + "\n\
| stopbits: " + str(self.ser_.stopbits) + "\n\
| parity: " + self.ser_.parity + "\n\
| read-timeout: " + str(self.ser_.timeout) + "\n\
| write-timeout: " + str(self.ser_.writeTimeout) + "\n\
-----------------------------------------"
print declaim
return Status(kOk)
def ForTest(self):
VLOG(0, "Should move this blocks when RS232 passed.")
status = self.SetBaudRate(115200)
if status.IsError():
return status
self.SetStopBits(serial.STOPBITS_TWO)
if status.IsError():
return status
self.SetByteSize(serial.SEVENBITS)
if status.IsError():
return status
self.SetParity(serial.PARITY_EVEN)
if status.IsError():
return status
self.SetReadTimeout(1)
if status.IsError():
return status
self.SetWriteTimeout(1)
if status.IsError():
return status
return self.ShowParameters()
def LoopBackTest(self):
VLOG(1, "connect TX and Rx pin in 5 second.")
time.sleep(5)
tx_data = "\
-----------------------------------------\n\
| Loop Back Test RS232 |\n\
-----------------------------------------\n\
| fileno: " + str(self.ser_.fileno()) + "\n\
| port: " + self.ser_.getPort() + "\n\
| baudrate: " + str(self.ser_.baudrate) + "\n\
| bytesize: " + str(self.ser_.bytesize) + "\n\
| stopbits: " + str(self.ser_.stopbits) + "\n\
| parity: " + self.ser_.parity + "\n\
| read-timeout: " + str(self.ser_.timeout) + "\n\
| write-timeout: " + str(self.ser_.writeTimeout) + "\n\
-----------------------------------------"
self.Write(tx_data)
rx_data = self.Read()
if rx_data != tx_data:
error = "failed loopback test RS232."
VLOG(3, error)
return Status(kUnknownError, error)
VLOG(0, "success loopback test RS232:\n %s" % rx_data)
return Status(kOk)
# return void whatever
def Write(self, data):
try:
self.ser_.write(data)
except serial.SerialTimeoutException, e:
VLOG(3, e)
# return receive data
def Read(self):
return self.ser_.read(size=1024)
| {
"repo_name": "weibohit/tools",
"path": "driver/serial_impl.py",
"copies": "1",
"size": "5395",
"license": "mit",
"hash": -5845878013984578000,
"line_mean": 26.8092783505,
"line_max": 60,
"alpha_frac": 0.5975903614,
"autogenerated": false,
"ratio": 3.2896341463414633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.930543511360473,
"avg_score": 0.01635787882734667,
"num_lines": 194
} |
__all__ = ["SerializerManager"]
class SerializerManager(object):
    """Registry mapping model classes to serializer instances.

    Serializer classes are registered via ``add`` (directly or as a
    decorator) and applied to values via ``serialize``.
    """

    def __init__(self):
        # Maps a model class to one shared serializer instance.
        self.registry = {}

    def add(self, type, cls_or_none=None):
        """Register a serializer class for *type*.

        Usable directly (``add(T, Cls)``) or as a decorator
        (``@add(T)``); either way the decorated class is returned.
        """
        if cls_or_none is not None:
            # Direct form delegates to the decorator form.
            return self.add(type)(cls_or_none)

        def register(cls):
            # Instantiate once; serialize() reuses this instance.
            self.registry[type] = cls()
            return cls

        return register

    def get(self, type):
        """Return the serializer instance registered for *type*."""
        return self.registry[type]

    def serialize(self, value):
        """Serialize a value or a homogeneous list/tuple of values.

        Falsy values and values whose type has no registered serializer
        are returned unchanged.
        """
        if not value:
            return value
        if not isinstance(value, (list, tuple)):
            # Single object: serialize a one-element list and unwrap.
            return self.serialize([value])[0]
        try:
            impl = self.registry[type(value[0])]
        except KeyError:
            return value
        attrs = impl.get_attrs(item_list=value)
        serialized = []
        for item in value:
            serialized.append(impl(item, attrs=attrs.get(item, {})))
        return serialized
# Process-wide default registry plus module-level convenience aliases,
# so callers can use `add`/`serialize` without holding a manager.
default_manager = SerializerManager()

add = default_manager.add
serialize = default_manager.serialize
| {
"repo_name": "getsentry/freight",
"path": "freight/api/serializer/manager.py",
"copies": "1",
"size": "1073",
"license": "apache-2.0",
"hash": 1194482062909094700,
"line_mean": 25.825,
"line_max": 89,
"alpha_frac": 0.5852749301,
"autogenerated": false,
"ratio": 4.049056603773585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002777777777777778,
"num_lines": 40
} |
"""All serializers for the extension."""
from rest_framework import serializers
from geokey.users.serializers import UserSerializer
from geokey_sapelli.models import SapelliLogFile
class SapelliLogFileSerializer(serializers.ModelSerializer):
    """Serialiser for geokey_sapelli.models.SapelliLogFile instances."""

    creator = UserSerializer(fields=('id', 'display_name'))
    isowner = serializers.SerializerMethodField()
    url = serializers.SerializerMethodField()
    file_type = serializers.SerializerMethodField()

    class Meta:
        """Class meta information."""
        model = SapelliLogFile
        fields = (
            'id', 'name', 'created_at', 'uploaded_at', 'creator', 'isowner',
            'url', 'file_type')

    def get_file_type(self, obj):
        """Return the type name of the serialised instance.

        For these objects that is the string 'SapelliLogFile', taken
        from the model's ``type_name`` attribute.
        """
        return obj.type_name

    def get_isowner(self, obj):
        """Return True when the requesting user created this file.

        The user is taken from the serialiser context; anonymous users
        never own a file.
        """
        user = self.context.get('user')
        if user.is_anonymous():
            return False
        return user == obj.creator

    def get_url(self, obj):
        """Return the client-side URL for accessing the stored file."""
        return obj.file.url
| {
"repo_name": "ExCiteS/geokey-sapelli",
"path": "geokey_sapelli/serializers.py",
"copies": "1",
"size": "2001",
"license": "mit",
"hash": -703307550348095600,
"line_mean": 25.3289473684,
"line_max": 76,
"alpha_frac": 0.5807096452,
"autogenerated": false,
"ratio": 4.516930022573363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 76
} |
__all__ = ["serve_fs"]
import SimpleHTTPServer
import SocketServer
from fs.path import pathjoin, dirname
from fs.errors import FSError
from time import mktime
from cStringIO import StringIO
import cgi
import urllib
import posixpath
import time
import threading
import socket
def _datetime_to_epoch(d):
return mktime(d.timetuple())
class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """A hacked together version of SimpleHTTPRequestHandler"""

    # NOTE(review): Python 2 code (SimpleHTTPServer, `except E, e`,
    # cStringIO); serves a pyfilesystem FS object instead of the OS
    # filesystem.

    def __init__(self, fs, request, client_address, server):
        # The FS object being served; all path operations go through it.
        self._fs = fs
        SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, request, client_address, server)

    def do_GET(self):
        """Serve a GET request."""
        f = None
        try:
            f = self.send_head()
            if f:
                try:
                    self.copyfile(f, self.wfile)
                except socket.error:
                    # Client hung up mid-transfer; nothing useful to do.
                    pass
        finally:
            # Always release the FS file handle returned by send_head().
            if f is not None:
                f.close()

    def send_head(self):
        """Common code for GET and HEAD commands.
        This sends the response code and MIME headers.
        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.
        """
        path = self.translate_path(self.path)
        f = None
        if self._fs.isdir(path):
            if not self.path.endswith('/'):
                # redirect browser - doing basically what apache does
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                self.end_headers()
                return None
            # Prefer an index file when one exists in the directory.
            for index in ("index.html", "index.htm"):
                index = pathjoin(path, index)
                if self._fs.exists(index):
                    path = index
                    break
            else:
                # No index file found: generate a listing instead.
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            info = self._fs.getinfo(path)
            f = self._fs.open(path, 'r')
        except FSError, e:
            self.send_error(404, str(e))
            return None
        self.send_response(200)
        self.send_header("Content-type", ctype)
        self.send_header("Content-Length", str(info['size']))
        if 'modified_time' in info:
            self.send_header("Last-Modified", self.date_time_string(_datetime_to_epoch(info['modified_time'])))
        self.end_headers()
        return f

    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).
        Return value is either a file object, or None (indicating an
        error). In either case, the headers are sent, making the
        interface the same as for send_head().
        """
        try:
            dir_paths = self._fs.listdir(path, dirs_only=True)
            file_paths = self._fs.listdir(path, files_only=True)
        except FSError:
            self.send_error(404, "No permission to list directory")
            return None
        # Directories first (with a trailing slash appended), then files,
        # each group sorted case-insensitively.
        paths = [p+'/' for p in sorted(dir_paths, key=lambda p:p.lower())] + sorted(file_paths, key=lambda p:p.lower())
        #list.sort(key=lambda a: a.lower())
        f = StringIO()
        displaypath = cgi.escape(urllib.unquote(self.path))
        f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
        f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
        f.write("<hr>\n<ul>\n")
        parent = dirname(path)
        if path != parent:
            # Link back to the parent directory (not shown at the root).
            f.write('<li><a href="%s">../</a></li>' % urllib.quote(parent.rstrip('/') + '/'))
        for path in paths:
            f.write('<li><a href="%s">%s</a>\n'
                    % (urllib.quote(path), cgi.escape(path)))
        f.write("</ul>\n<hr>\n</body>\n</html>\n")
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f

    def translate_path(self, path):
        # abandon query parameters
        path = path.split('?',1)[0]
        path = path.split('#',1)[0]
        # Unquote and normalise; the result is used as an FS path.
        path = posixpath.normpath(urllib.unquote(path))
        return path
def serve_fs(fs, address='', port=8000):
    """Serve an FS instance over http

    Runs until KeyboardInterrupt/SystemExit, then shuts the server down.

    :param fs: an FS object
    :param address: IP address to serve on
    :param port: port number
    """
    # Factory closure: binds `fs` into each per-request handler instance.
    def Handler(request, client_address, server):
        return FSHTTPRequestHandler(fs, request, client_address, server)
    #class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    #    pass
    # bind_and_activate=False so allow_reuse_address can be set before bind.
    httpd = SocketServer.TCPServer((address, port), Handler, bind_and_activate=False)
    #httpd = ThreadedTCPServer((address, port), Handler, bind_and_activate=False)
    httpd.allow_reuse_address = True
    httpd.server_bind()
    httpd.server_activate()
    # Serve on a background thread; the main thread just waits for Ctrl-C.
    server_thread = threading.Thread(target=httpd.serve_forever)
    server_thread.start()
    try:
        while True:
            time.sleep(0.1)
    except (KeyboardInterrupt, SystemExit):
        httpd.shutdown()
if __name__ == "__main__":
    # Demo entry point: serve the user's home directory on port 8000.
    from fs.osfs import OSFS
    serve_fs(OSFS('~/'))
"repo_name": "atty303/pyfilesystem",
"path": "fs/expose/http.py",
"copies": "14",
"size": "5609",
"license": "bsd-3-clause",
"hash": 1333354941885863200,
"line_mean": 33.4171779141,
"line_max": 119,
"alpha_frac": 0.5683722589,
"autogenerated": false,
"ratio": 4.085214857975236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__all__ = ["ServerInfo"]
import psutil
import time
from execute_async import ExecuteAsync
class ServerInfo(object):
    """Collects basic host facts: architecture, working directory,
    distribution, kernel, plus disk/memory usage and an access time."""

    @staticmethod
    def Create():
        """Build and return a dict of host information.

        Shell-derived entries are only included when the command
        reports success; usage/time entries are always present.
        """
        server_info = {}
        # (shell command, dict key) pairs; output[:-1] strips the
        # trailing newline.  This replaces four copies of the same
        # run-and-collect boilerplate.
        for cmd, key in (("uname -m", "architecture"),
                         ("pwd", "work_dir"),
                         ("uname -v", "distribution"),
                         ("uname -sr", "kernel")):
            command = ExecuteAsync(cmd)
            command.Run()
            (status, output) = command.GetResponse()
            if status.IsOk():
                server_info[key] = output[:-1]
        server_info["disk_usage"] = ServerInfo.disk_usage()
        server_info["virtual_memory"] = ServerInfo.virtual_memory()
        server_info["access_time"] = time.asctime()
        return server_info

    @staticmethod
    def virtual_memory():
        """Return a one-line summary of RAM usage in MB."""
        # Snapshot once: the previous code called psutil.virtual_memory()
        # four times, which is slower and can mix values from different
        # instants.
        vm = psutil.virtual_memory()
        virtual_memory = "total=%dMB, used=%dMB, free=%dMB, percent=%s" % (
            vm.total >> 20, vm.used >> 20, vm.free >> 20,
            str(vm.percent) + '%')
        return virtual_memory

    @staticmethod
    def disk_usage():
        """Return a one-line summary of root-filesystem usage in GB."""
        du = psutil.disk_usage('/')
        disk_usage = "total=%dGB, used=%dGB, free=%dGB, percent=%s" % (
            du.total >> 30, du.used >> 30, du.free >> 30,
            str(du.percent) + '%')
        return disk_usage
if __name__ == "__main__":
    # Smoke test: dump every collected fact (Python 2: iteritems/print).
    for item in ServerInfo.Create().iteritems():
        print item
| {
"repo_name": "weibohit/tools",
"path": "utility/server_info.py",
"copies": "1",
"size": "1865",
"license": "mit",
"hash": -4352658893277456400,
"line_mean": 30.6101694915,
"line_max": 72,
"alpha_frac": 0.6209115282,
"autogenerated": false,
"ratio": 3.642578125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9652603223261071,
"avg_score": 0.022177285987785726,
"num_lines": 59
} |
__all__ = ["Server", "ServerStatistics", "ServerStatus", "ServerRefiller"]
from abc import ABC
from copy import copy
from sys import getsizeof
from threading import Lock, Thread
from time import time
from typing import Any, Dict, Optional, Type, Union
from weakref import ref as weak_ref
from nichtparasoup import VERSION
from nichtparasoup._internals import _log
from nichtparasoup.core import Crawler, NPCore
class Server(object):
    """
    This class is intended to be thread save.
    This class intended to be a stable interface.
    Its public methods return base types only.
    """

    def __init__(self, core: NPCore, crawler_upkeep: int = 30,
                 reset_timeout: int = 60 * 60) -> None:  # pragma: no cover
        self.core = core
        # Target number of images each crawler is refilled up to.
        self.keep = crawler_upkeep
        # Minimum seconds between accepted reset requests.
        self.reset_timeout = reset_timeout
        self._stats = ServerStatistics()
        self._refiller = None  # type: Optional[ServerRefiller]
        self._trigger_reset = False
        self._locks = _ServerLocks()
        self.__running = False

    def get_image(self) -> Optional[Dict[str, Any]]:
        """Pop a random image from a random crawler.

        Returns a plain dict describing the image (uri, source, crawler
        id/type, ...), or None when no crawler or no image is available.
        """
        crawler = self.core.crawlers.get_random()
        if not crawler:
            return None
        image = copy(crawler.pop_random_image())
        if not image:
            return None
        # Lock only the counter update; serving itself is lock-free.
        with self._locks.stats_get_image:
            self._stats.count_images_served += 1
        return dict(
            uri=image.uri,
            is_generic=image.is_generic,
            source=image.source,
            more=image.more,
            crawler=dict(
                id=id(crawler),
                type=type_module_name_str(type(crawler.imagecrawler)),
            ),
        )

    @staticmethod
    def _log_refill_crawler(crawler: Crawler, refilled: int) -> None:
        # must be compatible to nichtparasoup.core._OnFill
        if refilled > 0:
            _log('info', "refilled by {} via {!r}".format(refilled, crawler.imagecrawler))

    def refill(self) -> Dict[str, bool]:
        """Top every crawler up to ``self.keep`` images (serialized by a lock)."""
        with self._locks.refill:
            self.core.fill_up_to(self.keep, self._log_refill_crawler)
            return dict(refilled=True)

    def _reset(self) -> None:
        # Flush the core (crawlers/blacklist) and record the reset.
        with self._locks.reset:
            self._stats.cum_blacklist_on_flush += self.core.reset()
            self._stats.count_reset += 1
            self._stats.time_last_reset = int(time())

    def request_reset(self) -> Dict[str, Any]:
        """Perform a reset if the rate limit allows one.

        Returns ``requested`` (whether a reset was actually done) and
        ``timeout`` (seconds until the next reset would be accepted).
        """
        if not self.is_alive():
            # Not running: a reset is always allowed immediately.
            request_valid = True
            timeout = 0
        else:
            now = int(time())
            time_started = self._stats.time_started or now
            timeout_base = self.reset_timeout
            time_last_reset = self._stats.time_last_reset
            # Next permitted reset is timeout_base seconds after the
            # last reset (or server start, if never reset).
            reset_after = timeout_base + (time_last_reset or time_started)
            request_valid = now > reset_after
            timeout = timeout_base if request_valid else (reset_after - now)
        if request_valid:
            self._reset()
        return dict(
            requested=request_valid,
            timeout=timeout,
        )

    def start(self) -> None:
        """Start serving: do an initial refill and spawn the refiller thread.

        Raises RuntimeError when already running.
        """
        with self._locks.run:
            if self.__running:
                raise RuntimeError('already running')
            _log('info', " * starting {}".format(type(self).__name__))
            _log('info', ' * fill all crawlers up to {}'.format(self.keep))
            self.refill()  # initial fill
            if not self._refiller:
                self._refiller = ServerRefiller(self, 1)
                self._refiller.start()  # start threaded periodical refill
            self._stats.time_started = int(time())
            self.__running = True

    def is_alive(self) -> bool:
        """Return True while the server is between start() and stop()."""
        return self.__running

    def stop(self) -> None:
        """Stop serving and tear down the refiller thread.

        Raises RuntimeError when not running.
        """
        with self._locks.run:
            if not self.__running:
                raise RuntimeError('not running')
            _log('info', "\r\n * stopping {}".format(type(self).__name__))
            if self._refiller:
                self._refiller.stop()
                self._refiller = None
            self.__running = False
class ServerStatus(ABC):
    """
    This class intended to be a stable interface.
    All public methods are like this: Callable[[Server], Union[List[SomeBaseType], Dict[str, SomeBaseType]]]
    All public methods must be associated with stat(u)s!
    """

    @staticmethod
    def server(server: Server) -> Dict[str, Any]:
        """Uptime, reset and image counters for *server*."""
        stats = copy(server._stats)
        now = int(time())
        if server.is_alive() and stats.time_started:
            uptime = now - stats.time_started
        else:
            uptime = 0
        # "since" falls back to the uptime when no reset happened yet.
        since = (now - stats.time_last_reset) if stats.time_last_reset else uptime
        return {
            'version': VERSION,
            'uptime': uptime,
            'reset': {
                'count': stats.count_reset,
                'since': since,
            },
            'images': {
                'served': stats.count_images_served,
                'crawled': stats.cum_blacklist_on_flush + len(server.core.blacklist),
            },
        }

    @staticmethod
    def blacklist(server: Server) -> Dict[str, Any]:
        """Length and in-memory size of the server's blacklist."""
        snapshot = server.core.blacklist.copy()
        return {
            'len': len(snapshot),
            'size': getsizeof(snapshot),
        }

    @staticmethod
    def crawlers(server: Server) -> Dict[int, Dict[str, Any]]:
        """Per-crawler weight, type, config and image-queue stats,
        keyed by the crawler's id()."""
        status = {}
        for original in server.core.crawlers.copy():
            crawler_id = id(original)
            # Work on a shallow copy so the live crawler is untouched.
            snapshot = copy(original)
            images = snapshot.images.copy()
            status[crawler_id] = {
                'weight': snapshot.weight,
                'type': type_module_name_str(type(snapshot.imagecrawler)),
                'config': snapshot.imagecrawler.get_config(),  # just a dict
                'images': {
                    'len': len(images),
                    'size': getsizeof(images),
                },
            }
        return status
def type_module_name_str(t: Type[Any]) -> str:
    """Return a ``module:name`` identifier string for type *t*."""
    return f'{t.__module__}:{t.__name__}'
class ServerRefiller(Thread):
    """Daemon thread that periodically calls Server.refill().

    Holds only a weak reference to the server so the thread cannot keep
    it alive; once the server is gone the loop stops itself.
    """

    def __init__(self, server: Server, sleep: Union[int, float]) -> None:  # pragma: no cover
        from threading import Event
        super().__init__(daemon=True)
        self._server_wr = weak_ref(server)
        # Seconds to pause between refill rounds.
        self._sleep = sleep
        self._stop_event = Event()
        self._run_lock = Lock()

    def run(self) -> None:
        from time import sleep
        while not self._stop_event.is_set():
            server = self._server_wr()  # type: Optional[Server]
            if server:
                server.refill()
            else:
                # Server was garbage-collected: shut the loop down.
                _log('info', " * server gone. stopping {}".format(type(self).__name__))
                self._stop_event.set()
            # Skip the sleep when a stop was just requested.
            if not self._stop_event.is_set():
                sleep(self._sleep)

    def start(self) -> None:
        """Start the thread; raises RuntimeError if already running."""
        self._run_lock.acquire()
        try:
            if self.is_alive():
                raise RuntimeError('already running')
            _log('info', " * starting {}".format(type(self).__name__))
            self._stop_event.clear()
            super().start()
        finally:
            self._run_lock.release()

    def stop(self) -> None:
        """Signal the loop to exit; raises RuntimeError if not running."""
        self._run_lock.acquire()
        try:
            if not self.is_alive():
                raise RuntimeError('not running')
            _log('info', " * stopping {}".format(type(self).__name__))
            self._stop_event.set()
        finally:
            self._run_lock.release()
class ServerStatistics(object):
    """Mutable counters recording a Server's lifetime activity."""

    def __init__(self) -> None:  # pragma: no cover
        # Epoch seconds of the last start(); None until first started.
        self.time_started: Optional[int] = None
        # Total images handed out via get_image().
        self.count_images_served: int = 0
        # Number of resets performed, and when the last one happened.
        self.count_reset: int = 0
        self.time_last_reset: Optional[int] = None
        # Blacklist entries accumulated across resets.
        self.cum_blacklist_on_flush: int = 0
class _ServerLocks(object):
def __init__(self) -> None: # pragma: no cover
self.stats_get_image = Lock()
self.reset = Lock()
self.refill = Lock()
self.run = Lock()
| {
"repo_name": "k4cg/nichtparasoup",
"path": "nichtparasoup/core/server.py",
"copies": "1",
"size": "7977",
"license": "mit",
"hash": 9056316017208411000,
"line_mean": 33.5324675325,
"line_max": 108,
"alpha_frac": 0.5530901341,
"autogenerated": false,
"ratio": 3.998496240601504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5051586374701504,
"avg_score": null,
"num_lines": null
} |
__all__ = ["Session"]
import time
from basic_types import WebPoint
from capabilities import Capabilities
from geoposition import Geoposition
from browser.status import *
from browser.xwalk import Xwalk
from base.log import VLOG
class FrameInfo(object):
    """Identifiers for one entry on a session's frame stack: the parent
    frame's id, this frame's id, and xwalkdriver's internal frame id."""

    def __init__(self, parent_frame_id, frame_id, xwalkdriver_frame_id):
        self.parent_frame_id = parent_frame_id
        self.frame_id = frame_id
        self.xwalkdriver_frame_id = xwalkdriver_frame_id

    def Update(self, other):
        """Overwrite all three identifiers with those of *other*."""
        for field in ('parent_frame_id', 'frame_id', 'xwalkdriver_frame_id'):
            setattr(self, field, getattr(other, field))
class Session(object):
    """Per-client WebDriver session state: the attached Xwalk browser,
    the current window and frame stack, timeouts and settings."""

    # set default page load time out 5 minutes
    kDefaultPageLoadTimeout = 300

    def __init__(self, sid, xwalk=None):
        # rename id to sid avoid disturbing key word
        self.sid = sid
        # BUGFIX: the default used to be `xwalk=Xwalk()`, evaluated once
        # at class-definition time, so every session created without an
        # explicit xwalk shared the same mutable Xwalk instance.  Build
        # a fresh one per session instead.
        self.xwalk = xwalk if xwalk is not None else Xwalk()
        self.quit = False
        self.detach = False
        self.force_devtools_screenshot = False
        self.sticky_modifiers = 0
        self.mouse_position = WebPoint(0, 0)
        self.page_load_timeout = Session.kDefaultPageLoadTimeout
        self.window = ""
        # List of |FrameInfo|s for each frame to the current target frame from the
        # first frame element in the root document. If target frame is window.top,
        # this list will be empty.
        self.frames = []
        # implicit_wait mill seconds
        self.implicit_wait = None
        self.script_timeout = None
        self.prompt_text = ""
        self.overridden_geoposition = None
        # Logs that populate from DevTools events.
        self.devtools_logs = []
        self.driver_log = None
        # TODO:implement <base::ScopedTempDir>
        self.temp_dir = None
        self.capabilities = {}

    def Update(self, other):
        """Copy every field of *other* onto this session."""
        self.sid = other.sid
        self.xwalk = other.xwalk
        self.quit = other.quit
        self.detach = other.detach
        self.force_devtools_screenshot = other.force_devtools_screenshot
        self.sticky_modifiers = other.sticky_modifiers
        self.mouse_position = other.mouse_position
        self.page_load_timeout = other.page_load_timeout
        self.window = other.window
        self.frames = other.frames
        self.implicit_wait = other.implicit_wait
        self.script_timeout = other.script_timeout
        self.prompt_text = other.prompt_text
        self.overridden_geoposition = other.overridden_geoposition
        self.devtools_logs = other.devtools_logs
        self.driver_log = other.driver_log
        self.temp_dir = other.temp_dir
        self.capabilities = other.capabilities

    def GetTargetWindow(self, web_view):
        """Resolve the session's current window into *web_view*.

        Returns Status(kOk) on success or kNoSuchWindow when no browser
        is attached or the window is gone.
        """
        if self.xwalk is None:
            return Status(kNoSuchWindow, "no xwalk started in this session")
        status = self.xwalk.GetWebViewById(self.window, web_view)
        #VLOG(0, "after GetWebViewById: " + status.Message())
        if status.IsError():
            status = Status(kNoSuchWindow, "target window already closed")
        return status

    def SwitchToTopFrame(self):
        """Drop the whole frame stack, targeting window.top again."""
        self.frames = []
        return

    def SwitchToSubFrame(self, frame_id, xwalkdriver_frame_id):
        """Push a child frame of the current target onto the stack."""
        parent_frame_id = ""
        if self.frames:
            # BUGFIX: previously assigned the FrameInfo object itself as
            # the parent id; everywhere else (cf. GetCurrentFrameId and
            # the "" default above) parent_frame_id is an id string.
            parent_frame_id = self.frames[-1].frame_id
        self.frames.append(FrameInfo(parent_frame_id, frame_id, xwalkdriver_frame_id))
        return

    def GetCurrentFrameId(self):
        """Return the id of the innermost target frame ('' for top)."""
        if not self.frames:
            return ""
        return self.frames[-1].frame_id

    def GetAllLogs(self):
        # Not implemented yet.
        pass
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "misc/session.py",
"copies": "1",
"size": "3275",
"license": "bsd-3-clause",
"hash": 6817627009321732000,
"line_mean": 31.4257425743,
"line_max": 82,
"alpha_frac": 0.6974045802,
"autogenerated": false,
"ratio": 3.5714285714285716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47688331516285715,
"avg_score": null,
"num_lines": null
} |
__all__=['set_length_units','init_dirs']
import os
import sastool.libconfig as libconfig
from IPython.core.getipython import get_ipython
from sastool.classes2 import Loader
from sastool.io import credo_saxsctrl, credo_cct
def set_length_units(units: str) -> None:
    """Set the length units: either 'nm' or 'A'.

    Updates sastool's global ``libconfig.LENGTH_UNIT`` and echoes the
    new setting.
    """
    libconfig.LENGTH_UNIT = units
    print("Length units have been set to:", units,flush=True)
def init_dirs(rootdir_or_loader, outputpath, saveto_dir='data',
              auximages_dir='auximages', prefix='crd'):
    """Initialize the directories.

    Inputs:
        rootdir_or_loader: depends on the type:
            str:
                the root directory of the SAXSCtrl/CCT
                software, i.e. where the subfolders ``eval2d``, ``param``,
                ``images``, ``mask`` etc. reside.
            sastool.classes2.Loader instance:
                a fully initialized loader, which will be used to acquire
                headers and exposures.
            list:
                a list of sastool.classes2.Loader instances, which will
                be used to open headers and exposures. When opening something,
                always the first item will be tried first, and if it fails with
                FileNotFoundError, the second, third, etc. will be tried until
                either the file can be opened or the last one fails.
        outputpath: the directory where the produced files are
            written. This is usually the working directory of
            the IPython notebook.
        saveto_dir: the subdirectory where averaged, united,
            subtracted etc. datasets are written.
        auximages_dir: the subdirectory where automatically produced
            images reside.

    Remarks:
        If a single root directory is given, a list of four loaders will be
        constructed in this order: CCT (processed), SAXSCtrl (processed),
        CCT (raw), SAXSCtrl (raw) — processed loaders are tried before
        raw ones.
    """
    ip = get_ipython()
    if isinstance(rootdir_or_loader, str):
        print("Initializing loaders for SAXSCtrl and CCT.", flush=True)
        # Processed loaders come first so they are tried before raw ones.
        ip.user_ns['_loaders'] = [
            credo_cct.Loader(rootdir_or_loader, processed=True, exposureclass=prefix),
            credo_saxsctrl.Loader(rootdir_or_loader, processed=True, exposureclass=prefix),
            credo_cct.Loader(rootdir_or_loader, processed=False, exposureclass=prefix),
            credo_saxsctrl.Loader(rootdir_or_loader, processed=False, exposureclass=prefix),
        ]
        print("Loaders initialized.", flush=True)
    elif isinstance(rootdir_or_loader, Loader):
        ip.user_ns['_loaders'] = [rootdir_or_loader]
    elif isinstance(rootdir_or_loader, list) and all([isinstance(l, Loader) for l in rootdir_or_loader]):
        # Shallow copy so later mutation of the caller's list has no effect.
        ip.user_ns['_loaders'] = rootdir_or_loader[:]
    else:
        raise TypeError(rootdir_or_loader)
    if not os.path.isdir(outputpath):
        os.makedirs(outputpath)
    print("Output files will be written to:", outputpath)
    # The output path becomes the working directory for the session.
    os.chdir(outputpath)
    ip.user_ns['outputpath'] = outputpath
    # Create the data/auximages subdirectories if they do not exist yet.
    if not os.path.isdir(os.path.join(ip.user_ns['outputpath'], saveto_dir)):
        os.mkdir(os.path.join(ip.user_ns['outputpath'], saveto_dir))
    if not os.path.isdir(os.path.join(ip.user_ns['outputpath'], auximages_dir)):
        os.mkdir(os.path.join(ip.user_ns['outputpath'], auximages_dir))
    # Publish both absolute and relative forms in the IPython namespace.
    ip.user_ns['auximages_dir'] = os.path.join(outputpath, auximages_dir)
    ip.user_ns['saveto_dir'] = os.path.join(outputpath, saveto_dir)
    ip.user_ns['saveto_dir_rel'] = saveto_dir
    ip.user_ns['auximages_dir_rel'] = auximages_dir
    ip.user_ns['crd_prefix']=prefix
    # Default to nanometres; call set_length_units('A') to change later.
    set_length_units('nm')
"repo_name": "awacha/credolib",
"path": "credolib/initialization.py",
"copies": "1",
"size": "3709",
"license": "bsd-3-clause",
"hash": 305364757760909440,
"line_mean": 44.243902439,
"line_max": 105,
"alpha_frac": 0.6505796711,
"autogenerated": false,
"ratio": 3.683217477656405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4833797148756405,
"avg_score": null,
"num_lines": null
} |
__all__ = ['SetMode']
import time
import logging
import dronekit
import geopy
from ...configuration import Configuration
from ...state import State
from ...specification import Specification
from ...environment import Environment
from ...command import Parameter, Command
from ...specification import Specification, Idle
from ...valueRange import DiscreteValueRange
from ..connection import CommandLong
logger = logging.getLogger(__name__) # type: logging.Logger
logger.setLevel(logging.DEBUG)
def timeout_land(a, s, e, c) -> float:
    """Estimated timeout for a LAND command: descent time plus a fixed offset.

    `s` supplies the current altitude; `c` supplies the timing configuration.
    """
    descent_time = s.altitude * c.time_per_metre_travelled
    return descent_time + c.constant_timeout_offset
def timeout_rtl(a, s, e, c) -> float:
    """Estimated timeout for an RTL command: fly home, land, plus a fixed offset.

    `s` supplies the vehicle state (position, home, altitude); `c` supplies
    the timing configuration.
    """
    # compute distance from the current position to home
    from_loc = (s['latitude'], s['longitude'])
    to_loc = (s['home_latitude'], s['home_longitude'])
    dist = geopy.distance.great_circle(from_loc, to_loc).meters
    # compute time taken to travel from A to B, and time taken to land
    time_goto_phase = dist * c.time_per_metre_travelled
    time_land_phase = s['altitude'] * c.time_per_metre_travelled
    # BUG FIX: constant_timeout_offset lives on the Configuration `c`, not on
    # the state `s` (consistent with timeout_land above).
    timeout = \
        time_goto_phase + time_land_phase + c.constant_timeout_offset
    return timeout
SetModeLand = Specification(
'land',
"""
(and
(= $mode "LAND")
(> _altitude 0.3))
""",
"""
(and
(= __mode "LAND")
(= _longitude __longitude)
(= _latitude __latitude)
(= __altitude 0.0)
(= __armed false))
""",
timeout_land)
SetModeGuided = Specification(
'guided',
'(= $mode "GUIDED")',
'(= __mode "GUIDED")')
SetModeLoiter = Specification(
'loiter',
'(= $mode "LOITER")',
'(= __mode "LOITER")')
SetModeRTL = Specification(
'rtl',
'(= $mode "RTL")',
"""
(and
(= __mode "RTL")
(ite (< _altitude 0.3) (= _armed __armed)
(= _armed false))
(= __longitude _home_longitude)
(= __latitude _home_latitude)
(= __altitude 0.0))
""",
timeout_rtl)
class SetMode(Command):
    """Command that switches the copter's flight mode via dronekit."""
    uid = 'ardu:copter:set-mode'
    name = 'set-mode'
    # Single parameter: the target flight mode, restricted to a closed set.
    parameters = [
        Parameter('mode',
                  DiscreteValueRange(['GUIDED', 'LOITER', 'RTL', 'LAND']))
    ]
    specifications = [
        SetModeGuided,
        SetModeLoiter,
        SetModeRTL,
        SetModeLand,
        Idle
    ]
    def dispatch(self,
                 sandbox: 'Sandbox',
                 state: State,
                 environment: Environment,
                 config: Configuration
                 ) -> None:
        """Ask the vehicle (via the sandbox's dronekit connection) to switch mode."""
        sandbox.connection.mode = dronekit.VehicleMode(self.mode)
    def to_message(self) -> CommandLong:
        # No MAVLink COMMAND_LONG encoding is provided for mode changes.
        raise NotImplementedError
| {
"repo_name": "squaresLab/Houston",
"path": "houston/ardu/copter/setmode.py",
"copies": "1",
"size": "2718",
"license": "mit",
"hash": 2967443804641049000,
"line_mean": 22.8421052632,
"line_max": 74,
"alpha_frac": 0.5820456218,
"autogenerated": false,
"ratio": 3.682926829268293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4764972451068292,
"avg_score": null,
"num_lines": null
} |
__all__ = ('set_options', 'add_options', 'get_options',
'set_classpath', 'add_classpath', 'get_classpath',
'expand_classpath')
import platform
if platform.system() == 'Windows':
split_char = ';'
else:
split_char = ':'
vm_running = False
options = []
classpath = None
def set_options(*opts):
    "Sets the list of options to the JVM. Removes any previously set options."
    if vm_running:
        raise ValueError("VM is already running, can't set options")
    global options
    # BUG FIX: store a list rather than the raw *args tuple, so that a later
    # add_options() can still call options.extend() on it.
    options = list(opts)
def add_options(*opts):
    """Appends options to the list of VM options."""
    if vm_running:
        # options are frozen once the JVM has started
        raise ValueError("VM is already running, can't set options")
    # mutate the module-level list in place (no rebinding, so no `global` needed)
    options.extend(opts)
def get_options():
    """Retrieves the current list of VM options (as a copy)."""
    return list(options)
def set_classpath(*path):
    """
    Sets the classpath for the JVM to use. Replaces any existing classpath, overriding the CLASSPATH environment variable.
    """
    if vm_running:
        raise ValueError("VM is already running, can't set classpath")
    global classpath
    # BUG FIX: store a list rather than the raw *args tuple, so that a later
    # add_classpath() can call classpath.extend() and get_classpath() can
    # concatenate it with a list.
    classpath = list(path)
def add_classpath(*path):
    """
    Appends items to the classpath for the JVM to use.
    Replaces any existing classpath, overriding the CLASSPATH environment variable.
    """
    global classpath
    if vm_running:
        raise ValueError("VM is already running, can't set classpath")
    # lazily create the list on first use, then append in place
    if classpath is None:
        classpath = []
    classpath.extend(path)
def get_classpath():
    """Retrieves the classpath the JVM will use."""
    from os import environ
    from os.path import realpath
    from pkg_resources import resource_filename
    # the java classes packaged with jnius always come last on the path
    bundled = [realpath(resource_filename(__name__, 'jnius/src'))]
    if classpath is not None:
        # explicit classpath set via set_classpath()/add_classpath()
        return classpath + bundled
    if 'CLASSPATH' in environ:
        # fall back to the environment variable
        return environ['CLASSPATH'].split(split_char) + bundled
    # last resort: the current working directory
    return [realpath('.')] + bundled
def expand_classpath():
    """Returns the classpath as one split_char-joined string, expanding any
    trailing ``*`` wildcard entries to the matching ``.jar`` files."""
    from glob import glob
    expanded = []
    for entry in get_classpath():
        if entry.endswith('*'):
            # "dir/*" matches every jar in dir (case-insensitive extension)
            expanded.extend(glob(entry + '.[Jj][Aa][Rr]'))
        else:
            expanded.append(entry)
    return split_char.join(expanded)
| {
"repo_name": "physion/pyjnius",
"path": "jnius_config.py",
"copies": "1",
"size": "2462",
"license": "mit",
"hash": -9040899539550102000,
"line_mean": 27.6279069767,
"line_max": 122,
"alpha_frac": 0.654752234,
"autogenerated": false,
"ratio": 4.0627062706270625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0034361827222777015,
"num_lines": 86
} |
__all__ = ['Setting', 'BoolSetting', 'IntSetting', 'StrSetting']
class Setting:
    """Base class for a typed setting with an optional set of valid choices."""

    def __init__(self, default=None, choices=None):
        self.default = default
        self.choices = choices

    def clean(self, value):
        """Validate *value*; raise ValueError if it is not an allowed choice."""
        # idiom fix: `is not None` / `not in` instead of `not ... is` / `not ... in`
        if self.choices is not None and value not in self.choices:
            raise ValueError('value must be in {}'.format(self.choices))
        return value

    def format(self, value):
        """Return a display string for *value* ('<null>' for None)."""
        if value is None:
            return '<null>'
        return str(value)
class BoolSetting(Setting):
    """Setting holding a boolean; accepts the strings 'on'/'off' as input."""

    def clean(self, value):
        if isinstance(value, bool):
            return super().clean(value)
        if value == 'on':
            return super().clean(True)
        if value == 'off':
            return super().clean(False)
        raise ValueError('value must be a boolean')

    def format(self, value):
        if value is True:
            return 'on'
        if value is False:
            return 'off'
        return super().format(value)
class IntSetting(Setting):
    """Setting holding an integer; coerces non-int input via int()."""

    def clean(self, value):
        coerced = value if isinstance(value, int) else int(value)
        return super().clean(coerced)
class StrSetting(Setting):
    """Setting holding a string; rejects any non-str input."""

    def clean(self, value):
        if isinstance(value, str):
            return super().clean(value)
        raise ValueError('value must be a string')
class Settings:
    """A registry of Setting variables with attribute- and item-style access.

    `variables` maps names to Setting descriptors; `data` holds the values
    explicitly assigned so far (missing keys fall back to the descriptor's
    default).
    """

    def __init__(self):
        self.variables = {}
        self.data = {}

    def _variable(self, key):
        # Central lookup so every accessor raises the same KeyError message.
        try:
            return self.variables[key]
        except KeyError:
            raise KeyError('invalid setting "{}"'.format(key))

    def __getattr__(self, key):
        # Only called for attributes not found normally: treat as a setting.
        try:
            return self[key]
        except KeyError as e:
            raise AttributeError(e)

    def __setattr__(self, key, value):
        # The two bookkeeping dicts (and any unknown name) are real attributes;
        # everything registered in `variables` routes through __setitem__.
        if key in ('variables', 'data') or key not in self:
            super().__setattr__(key, value)
        else:
            self[key] = value

    def __getitem__(self, key):
        variable = self._variable(key)
        return self.data.get(key, variable.default)

    def __setitem__(self, key, value):
        # clean() may raise ValueError; in that case `data` stays unchanged.
        self.data[key] = self._variable(key).clean(value)

    def __len__(self):
        return len(self.variables)

    def __contains__(self, key):
        return key in self.variables

    def keys(self):
        return self.variables.keys()

    def get_display(self, key):
        """Return the formatted display string for the setting *key*."""
        variable = self._variable(key)
        value = self.data.get(key, variable.default)
        return variable.format(value)
| {
"repo_name": "natano/tiget",
"path": "tiget/plugins/settings.py",
"copies": "1",
"size": "2671",
"license": "isc",
"hash": -4539372739914918000,
"line_mean": 25.9797979798,
"line_max": 72,
"alpha_frac": 0.5567203295,
"autogenerated": false,
"ratio": 4.32200647249191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003191115131413639,
"num_lines": 99
} |
__all__ = ["ShellProvider"]
from .base import Provider
from freight.models import App, Deploy
class ShellProvider(Provider):
    """Provider that runs a user-configured shell command for a deploy."""

    def get_options(self):
        return {
            "command": {"required": True},
            "env": {"required": False, "type": dict},
        }

    def get_command(self, deploy, task, ssh_key):
        """Render the configured command template with deploy/task details."""
        app = App.query.get(task.app_id)
        previous_sha = app.get_previous_sha(deploy.environment, current_sha=task.sha)
        template = task.provider_config["command"]
        return template.format(
            environment=deploy.environment,
            sha=task.sha,
            prev_sha=previous_sha or "",
            ref=task.ref,
            ssh_key=ssh_key,
            params=task.params or {},
        )

    def execute(self, workspace, task):
        deploy = Deploy.query.filter(Deploy.task_id == task.id).first()
        return self.execute_deploy(workspace, deploy, task)

    def execute_deploy(self, workspace, deploy, task):
        # keep ssh_key in scope to ensure it doesnt get wiped until run() exits
        ssh_key = self.get_ssh_key()
        key_path = ssh_key.name if ssh_key else "~/.ssh/id_rsa"
        command = self.get_command(deploy=deploy, task=task, ssh_key=key_path)
        return workspace.run(command, env=task.provider_config.get("env"))
| {
"repo_name": "getsentry/freight",
"path": "freight/providers/shell.py",
"copies": "1",
"size": "1293",
"license": "apache-2.0",
"hash": 4145228857005067000,
"line_mean": 32.1538461538,
"line_max": 88,
"alpha_frac": 0.5993812838,
"autogenerated": false,
"ratio": 3.7369942196531793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9833371519221461,
"avg_score": 0.0006007968463435715,
"num_lines": 39
} |
# All shows: http://www.funimation.com/feeds/ps/shows?limit=100000
# http://www.funimation.com/feeds/ps/shows?sort=SortOptionLatestSubscription (limit no workie)
# Single show: http://www.funimation.com/feeds/ps/videos?ut=FunimationSubscriptionUser&show_id=7556914&limit=100000
from logging import debug, info, warning, error, exception
from datetime import datetime
import re
from .. import AbstractServiceHandler
from data.models import Episode
class ServiceHandler(AbstractServiceHandler):
    """Stream service handler for the new FUNimation feed."""
    _show_url = "http://funimation.com/shows/{id}"
    _episode_feed = "https://api-funimation.dadcdigital.com/xml/longlist/content/page/?id=shows&sort=&sort_direction=DESC&itemThemes=dateAddedShow&territory=US&offset=0&limit=30"
    _episode_url = "http://www.funimation.com/shows/{show_slug}/videos/official/{ep_slug}?watch=sub"
    _re_episode_num = re.compile("Episode ([0-9]+)", re.I)
    def __init__(self):
        super().__init__("funimation_new", "FUNimation", False)
    def get_all_episodes(self, stream, **kwargs):
        """Return Episode objects for all valid entries of the stream's show."""
        info("Getting live episodes for Funimation_new/{} ({})".format(stream.show_key, stream.show_id))
        if not stream.show_id:
            debug(" ID required and not given")
            return []
        episode_datas = self._get_feed_shows(stream.show_id, **kwargs)
        episodes = []
        for episode_data in episode_datas:
            if _is_valid_episode(episode_data, stream.show_id):
                try:
                    episodes.append(self._digest_episode(episode_data, stream))
                # BUG FIX: catch Exception rather than a bare except, so
                # KeyboardInterrupt/SystemExit still propagate.
                except Exception:
                    exception("Problem digesting episode for Funimation_new/{} ({})".format(stream.show_key, stream.show_id))
        if len(episode_datas) > 0:
            debug(" {} episodes found, {} valid".format(len(episode_datas), len(episodes)))
            if len(episode_datas) != len(episodes):
                warning(" Not all episodes processed")
        else:
            debug(" No episodes found")
        return episodes
    def get_stream_link(self, stream):
        # Just going to assume it's the correct service
        return self._show_url.format(id=stream.show_key)
    def _get_feed_shows(self, show_id, **kwargs):
        """
        Always returns a list.
        """
        # NOTE(review): on success this returns the parsed XML feed (which
        # callers iterate), not a plain list -- confirm against request().
        info("Getting episodes for Funimation/{}".format(show_id))
        # Send request
        response = self.request(self._episode_feed, xml=True, **kwargs)
        if response is None:
            # BUG FIX: dropped a useless .format(show_id) -- the message has
            # no placeholder; the text itself is unchanged.
            error("Cannot get latest shows feed")
            return list()
        # Parse response
        if not _verify_feed(response):
            warning("Parsed feed could not be verified, may have unexpected results")
        #print(rss)
        return response
    def _digest_episode(self, feed_episode, stream):
        """Convert one feed entry into an Episode.

        Raises ValueError when the episode number cannot be parsed; the
        caller (get_all_episodes) logs and skips such entries.
        """
        debug("Digesting episode")
        # Get data
        content = feed_episode.find("content").find("metadata")
        num_text = content.find("recentContentItem").text
        num_match = self._re_episode_num.match(num_text)
        if not num_match:
            error("recentContentItem episode has unknown format: \"{}\"".format(num_text))
            # BUG FIX: previously fell through and crashed on
            # num_match.group(1); raise instead so the entry is skipped.
            raise ValueError("unparsable episode number: {!r}".format(num_text))
        num = int(num_match.group(1))
        debug(" num={}".format(num))
        name = None #feed_episode["show_name"] #FIXME
        debug(" name={}".format(name))
        link = None #self._episode_url.format(show_slug=stream.show_key, ep_slug=feed_episode["url"]) #FIXME
        debug(" link={}".format(link))
        #FIXME: content-metadata contains "<recentlyAdded>added {1458071999} ago"; could use timestamp
        date = datetime.now() #datetime.strptime(feed_episode["releaseDate"], "%Y/%m/%d")
        debug(" date={}".format(date))
        return Episode(num, name, link, date)
    def get_stream_info(self, stream, **kwargs):
        # TODO important
        return None
    def get_seasonal_streams(self, **kwargs):
        return list()
    def extract_show_key(self, url):
        return None
# Helpers
def _verify_feed(feed):
    # Placeholder: no structural validation is performed yet; always passes.
    return True
def _is_valid_episode(feed_episode, show_id):
block = feed_episode.find("id")
if block is None or block.text != show_id:
return False
block = feed_episode.find("content")
if not block or not block.find("metadata"):
print("Content block not found")
return False
return True
| {
"repo_name": "TheEnigmaBlade/holo",
"path": "src/services/stream/funimation_new.py",
"copies": "2",
"size": "3889",
"license": "mit",
"hash": 1527680576630743600,
"line_mean": 33.4159292035,
"line_max": 175,
"alpha_frac": 0.6994085883,
"autogenerated": false,
"ratio": 3.138821630347054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4838230218647054,
"avg_score": null,
"num_lines": null
} |
__all__ = ['shuffleMockCatalog', 'generate_upid']
import warnings
from itertools import izip
import numpy as np
from numpy.lib.recfunctions import rename_fields
def _iter_plateau_in_sorted_array(a):
if len(a):
k = np.where(a[1:] != a[:-1])[0]
k += 1
i = 0
for j in k:
yield i, j
i = j
yield i, len(a)
def _iter_indices_in_bins(bins, a):
if len(a) and len(bins):
s = a.argsort()
k = np.searchsorted(a, bins, 'right', sorter=s)
i = 0
for j in k:
yield s[i:j]
i = j
yield s[i:]
def _apply_rotation(pos, box_size):
half_box_size = box_size * 0.5
pos[pos > half_box_size] -= box_size
pos[pos < -half_box_size] += box_size
return np.dot(pos, np.linalg.qr(np.random.randn(3,3))[0])
_axes = list('xyz')
def _get_xyz(a, ax_type=float):
return np.fromiter((a[ax] for ax in _axes), ax_type, 3)
def generate_upid(pid, id, recursive=True):
    """
    To generate (or to fix) the upid of a halo catalog.

    Parameters
    ----------
    pid : array_like
        An ndarray of integer that contains the parent IDs of each halo.
    id : array_like
        An ndarray of integer that contains the halo IDs.
    recursive : bool, optional
        Whether or not to run this function recursively. Default is True.

    Returns
    -------
    upid : array_like
        The ultimate parent IDs.

    Examples
    --------
    >>> halos['upid'] = generate_upid(halos['pid'], halos['id'])
    """
    pid = np.ravel(pid)
    id = np.ravel(id)
    if len(id) != len(pid):
        raise ValueError('`pid` and `id` must have the same length.')
    if not len(pid):
        raise ValueError('`pid` and `id` must not be empty.')
    # group identical parent IDs together via an indirect sort
    s = pid.argsort()
    idx = np.fromiter(_iter_plateau_in_sorted_array(pid[s]), \
            np.dtype([('start', int), ('stop', int)]))
    unique_pid = pid[s[idx['start']]]
    if unique_pid[0] == -1:
        # drop the "no parent" marker; hosts need no fixing
        unique_pid = unique_pid[1:]
        idx = idx[1:]
    host_flag = (pid == -1)
    # parent IDs that are not themselves top-level hosts
    not_found = np.where(np.in1d(unique_pid, id[host_flag], True, True))[0]
    if not len(not_found):
        return pid
    sub_flag = np.where(~host_flag)[0]
    found = sub_flag[np.in1d(id[sub_flag], unique_pid[not_found], True)]
    found = found[id[found].argsort()]
    assert (id[found] == unique_pid[not_found]).all()
    del host_flag, sub_flag, unique_pid
    pid_old = pid.copy()
    # point each affected subhalo one level up its parent chain.
    # BUG FIX (portability): `itertools.izip` is Python-2 only; the builtin
    # `zip` is equivalent here (both arrays have equal length, asserted above)
    # and works on both Python 2 and 3.
    for i, j in zip(found, not_found):
        pid[s[slice(*idx[j])]] = pid_old[i]
    del pid_old, idx, s, found, not_found
    # keep walking up the chain until every pid is a true host (or -1)
    return generate_upid(pid, id, True) if recursive else pid
def shuffleMockCatalog(mock_ids, halo_catalog, bin_width=None, bins=None,
        proxy='mvir', box_size=None, apply_rsd=False,
        shuffle_centrals=True, shuffle_satellites=True, rotate_satellites=False,
        return_structured_array=False):
    """
    Shuffle a mock catalog according to Zentner et al. (2014) [arXiv:1311.1818]

    Parameters
    ----------
    mock_ids : array_like
        Should be a 1-d array of int which contains the corresponding halo IDs
        for the galaxies in the mock catalog to be shuffled.
    halo_catalog : array_like
        Should be a 1-d structured array which has the following fields:
        id, upid, x, y, z, vz (if `apply_rsd` is True), and the proxy.
    bin_width : float or None, optional
        The width of the bin, in dex.
    bins : int, array_like, or None, optional
        If an integer is provided, it is interpreted as the number of bins.
        If an array is provided, it is interpreted as the edges of the bins.
        The parameter _overwrites_ `bin_width`.
    proxy : string, optional
        The proxy to bin on. Must be present in the fields of `halo_catalog`.
    box_size : float or None, optional
        The side length of the box. Should be in the same unit as x, y, z.
    apply_rsd : bool, optional
        Whether or not to apply redshift space distortions on the z-axis.
        (Default is False)
    shuffle_centrals : bool, optional
        Whether or not to shuffle central galaxies (Default is True)
    shuffle_satellites : bool, optional
        Whether or not to shuffle satellite galaxies (Default is True)
    rotate_satellites : bool, optional
        Whether or not to apply a random rotation to satellite galaxies
        (Default is False)
    return_structured_array : bool, optional
        Whether to return a structured array that contains x, y, z
        or just a n-by-3 float array.

    Returns
    -------
    pos : array_like
        A ndarray that contains x, y, z of the shuffled positions.
    """
    # check necessary fields in halo_catalog
    fields = ['id', 'upid', proxy] + _axes
    if apply_rsd:
        fields.append('vz')
    if not all((f in halo_catalog.dtype.names for f in fields)):
        raise ValueError('`halo_catalog` should have the following fields: '+ \
                ', '.join(fields))
    # check dtype
    ax_type = halo_catalog['x'].dtype.type
    if any((halo_catalog[ax].dtype.type != ax_type for ax in 'yz')):
        raise ValueError('The types of fields x, y, z in `halo_catalog` ' \
                'must all be the same.')
    # check all mock_ids are in halo_catalog
    s = halo_catalog['id'].argsort()
    idx = np.searchsorted(halo_catalog['id'], mock_ids, sorter=s)
    try:
        idx = s[idx]
    except IndexError:
        raise ValueError('`mock_ids` must all present in `halo_catalog`')
    if not (halo_catalog['id'][idx] == mock_ids).all():
        raise ValueError('`mock_ids` must all present in `halo_catalog`')
    # mock_idx[i] is the position of halo i's galaxy in mock_ids, or -1
    mock_idx = np.ones(len(halo_catalog), dtype=int)
    mock_idx *= -1
    mock_idx[idx] = np.arange(len(mock_ids))
    del idx
    # separate hosts and subs
    host_flag = (halo_catalog['upid'] == -1)
    subs = rename_fields(halo_catalog[~host_flag], {'id':'mock_idx'})
    subs['mock_idx'] = mock_idx[~host_flag]
    subs = subs[subs['mock_idx'] > -1] # only need subs that are mocks
    host_flag = s[host_flag[s]] # this sorts `hosts` by `id`
    hosts = rename_fields(halo_catalog[host_flag], {'upid':'mock_idx'})
    hosts['mock_idx'] = mock_idx[host_flag]
    del host_flag, mock_idx, s
    # group subhalos by their (sorted) parent ID
    subs.sort(order='upid')
    idx = np.fromiter(_iter_plateau_in_sorted_array(subs['upid']), \
            np.dtype([('start', int), ('stop', int)]))
    host_ids = subs['upid'][idx['start']]
    if not np.in1d(host_ids, hosts['id'], True).all():
        raise ValueError('Some subhalos associdated with the mock galaxies ' \
                'have no parent halos in `halo_catalog`. Consider using ' \
                '`generate_upid` to fix this.')
    # for the following to work, `hosts` need to be sorted by `id`
    # subs_idx[h] is the (start, stop) slice into `subs` for host h
    subs_idx = np.zeros(len(hosts), dtype=idx.dtype)
    subs_idx[np.in1d(hosts['id'], host_ids, True)] = idx
    del idx, host_ids
    # check bins
    try:
        bin_width = float(bin_width)
    except (ValueError, TypeError):
        bin_width = None
    else:
        if bin_width <= 0:
            bin_width = None
    if bin_width is None:
        bin_width = 0.1
    # log-space range of the proxy, used when edges must be generated
    mi = np.log10(hosts[proxy].min()*0.99999)
    ma = np.log10(hosts[proxy].max())
    if bins is None:
        bins = int(np.ceil((ma-mi)/bin_width))
        mi = ma - bin_width*bins
    try:
        bins = int(bins)
    except (ValueError, TypeError):
        # `bins` is an explicit array of edges; must be sorted, length >= 2
        bins = np.asarray(bins)
        if len(bins) < 2 or (bins[1:]<bins[:-1]).any():
            raise ValueError('Please specify a valid `bin` parameter.')
    else:
        bins = np.logspace(mi, ma, bins+1)
    # create the array for storing results
    pos = np.empty((len(mock_ids), 3), ax_type)
    pos.fill(np.nan)
    # loop of bins of proxy (e.g. mvir)
    for i, indices in enumerate(_iter_indices_in_bins(bins, hosts[proxy])):
        if not len(indices):
            continue
        # first/last chunks are outside the edges; warn if they hold mocks
        if i==0 or i==len(bins):
            if (hosts['mock_idx'][indices] > -1).any() or \
                    any((subs_idx['start'][j] < subs_idx['stop'][j] \
                    for j in indices)):
                warnings.warn('Some halos associdated with the mock catalog ' \
                        'are outside the bin range.', RuntimeWarning)
            continue
        # shuffle satellites
        if shuffle_satellites:
            choices = indices.tolist()
        for j in indices:
            subs_this = subs[slice(*subs_idx[j])]
            if not len(subs_this):
                continue
            mock_idx_this = subs_this['mock_idx']
            pos[mock_idx_this] = subs_this[_axes].view((ax_type,3))
            if shuffle_satellites:
                # move this host's satellites (as a group) to a randomly
                # drawn host in the same proxy bin
                k = choices.pop(np.random.randint(len(choices)))
                pos[mock_idx_this] -= _get_xyz(hosts[j], ax_type)
                if rotate_satellites:
                    pos[mock_idx_this] = \
                            _apply_rotation(pos[mock_idx_this], box_size)
                pos[mock_idx_this] += _get_xyz(hosts[k], ax_type)
                if apply_rsd:
                    pos[mock_idx_this,2] += (subs_this['vz'] \
                            + hosts['vz'][k] - hosts['vz'][j])/100.0
            else:
                if rotate_satellites:
                    host_pos = _get_xyz(hosts[j], ax_type)
                    pos[mock_idx_this] -= host_pos
                    pos[mock_idx_this] = \
                            _apply_rotation(pos[mock_idx_this], box_size)
                    pos[mock_idx_this] += host_pos
                if apply_rsd:
                    pos[mock_idx_this,2] += subs_this['vz']/100.0
        # shuffle hosts
        has_mock = indices[hosts['mock_idx'][indices] > -1]
        if not len(has_mock):
            continue
        mock_idx_this = hosts['mock_idx'][has_mock]
        if shuffle_centrals:
            # draw replacement hosts (without repetition) within the bin
            has_mock = np.random.choice(indices, len(has_mock), False)
        pos[mock_idx_this] = hosts[_axes][has_mock].view((ax_type,3))
        if apply_rsd:
            pos[mock_idx_this,2] += hosts['vz'][has_mock]/100.0
    # sanity check
    if np.isnan(pos).any():
        warnings.warn('Some galaxies in the mock catalog have not been ' \
                'assigned a new position. Maybe the corresponding halo is ' \
                'outside the bin range.', RuntimeWarning)
    # wrap box
    if box_size is not None:
        pos = np.remainder(pos, box_size, pos)
    if return_structured_array:
        # NOTE(review): np.dtype(zip(...)) requires a list -- Python 2 only;
        # under Python 3 this line would need list(zip(...)).
        pos = pos.view(np.dtype(zip(_axes, [ax_type]*3)))
    return pos
| {
"repo_name": "manodeep/yymao-helpers",
"path": "helpers/shuffleMockCatalog.py",
"copies": "1",
"size": "10527",
"license": "mit",
"hash": -8202520162064339000,
"line_mean": 35.9368421053,
"line_max": 80,
"alpha_frac": 0.5709128907,
"autogenerated": false,
"ratio": 3.4571428571428573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9482414060514801,
"avg_score": 0.009128337465611315,
"num_lines": 285
} |
__all__ = ('SideBar')
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.uix.boxlayout import BoxLayout
from pocketthrone.entities.event import *
from pocketthrone.entities.enum import WidgetAction, WidgetState, Enum
from pocketthrone.managers.eventmanager import EventManager
from pocketthrone.managers.pipe import L
class SideBarType(Enum):
	# Modes the SideBar can be opened in: recruit units or construct buildings.
	RECRUIT = "recruit"
	CONSTRUCT = "construct"
# kivy widget implementation of the SideBar, used for recruiting, build & menu buttons
# please create it first and then call a show_* method
class SideBar(BoxLayout):
	"""Vertical side panel for recruiting units or constructing buildings.

	Construct with a SideBarType; the widget registers itself and populates
	its own rows.
	"""
	_tag = "[SideBar] "
	# class-level defaults; real values are assigned per instance in __init__
	rows = []
	sidebar_type = None
	is_initialized = False

	# initialize with BoxLayout parameters
	def __init__(self, sidebar_type):
		# make vertical BoxLayout
		super(SideBar, self).__init__(orientation="vertical", padding=4, size_hint=(.3, .8), pos=(0, 100))
		# BUG FIX: give each instance its own row list -- the mutable class
		# attribute would otherwise be shared by every SideBar ever created.
		self.rows = []
		# register in EventManager and WidgetManager
		EventManager.register(self)
		L.WidgetManager.register("sidebar", self)
		# load unit or building list
		self.sidebar_type = sidebar_type
		# show recruitable units
		if sidebar_type == SideBarType.RECRUIT:
			self.show_recruitable()
		# show constructable buildings
		elif sidebar_type == SideBarType.CONSTRUCT:
			self.show_constructable()
		else:
			print(self._tag + "ABORT; type " + str(sidebar_type) + " isn't valid.")

	def show_recruitable(self):
		'''shows recruitable units'''
		selected_city = L.CityManager.get_selected_city()
		# abort when no city is selected
		if not selected_city:
			print(self._tag + "ABORT; no city is selected")
			return
		# load recruitable units (idiomatic emptiness check)
		blueprints = L.CityManager.get_recruitable_units(selected_city)
		if not blueprints:
			print(self._tag + "ABORT; no units received")
			return
		for blueprint in blueprints:
			unit_name = " " + blueprint.get_name()
			unit_basename = blueprint.get_basename()
			image_src = "img/" + blueprint.get_image_path() + ".png"
			# make outter layout
			outter = BoxLayout(orientation="horizontal", size_hint=(1, None), size=(300, 60))
			# make image
			image = Image(source=image_src, size=(60, 60), on_touch_down=self.option_selected)
			image.extra = unit_basename
			# make label
			label = Label(text=unit_name, font_size="20dp", halign="left", on_touch_down=self.option_selected, size=(200, 60), valign="middle", color=[0,0,0,1])
			label.text_size = label.size
			label.extra = unit_basename
			# add both to layout
			outter.add_widget(image)
			outter.add_widget(label)
			# add layout to SideBar
			self.add_widget(outter)
		self.is_initialized = True

	def show_constructable(self):
		# not implemented yet
		pass

	# TODO replace with add_row
	def add_widget(self, widget):
		'''# add a widget to SideBar and the buttons list'''
		super(SideBar, self).add_widget(widget)
		widget.bind(on_press=self.option_selected)
		self.rows.append(widget)

	def add_rows(self):
		pass

	def remove_self(self):
		'''method to destroy this SideBar widget'''
		L.WidgetManager.get_widget("root_layout").remove_widget(self)

	def option_selected(self, widget, touch):
		'''triggered when an list item was selected'''
		if widget.collide_point(*touch.pos):
			print(self._tag + "clicked " + widget.extra)
			if self.sidebar_type == SideBarType.RECRUIT:
				# load blueprint & selected city
				unit_type = widget.extra
				blueprint = L.UnitManager.get_blueprint(unit_type)
				selected_city = L.CityManager.get_selected_city()
				# finish recruitation
				L.CityManager.recruit_unit(selected_city, blueprint)
				# destroy sidebar
				self.remove_self()

	def on_event(self, event):
		# destroy SideBar on map click
		if isinstance(event, TileUnselectedEvent):
			self.remove_self()
| {
"repo_name": "herrschr/pocket-throne",
"path": "pocketthrone/widgets/sidebar.py",
"copies": "2",
"size": "3780",
"license": "bsd-2-clause",
"hash": -2649730090788869000,
"line_mean": 32.1578947368,
"line_max": 151,
"alpha_frac": 0.7177248677,
"autogenerated": false,
"ratio": 3.1738035264483626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9749966344236646,
"avg_score": 0.02831240998234356,
"num_lines": 114
} |
__all__ = ["side_by_side_diff", "context_diff"]
import itertools
import re
import diff_match_patch
line_split = re.compile(r'(?:\r?\n)')
def side_by_side_diff(old_text, new_text):
    """
    Calculates a side-by-side line-based difference view.

    Wraps insertions in <ins></ins> and deletions in <del></del>.
    Yields (left_line, right_line) pairs; either side may be None when a
    line exists only in the other text.
    """
    def yield_open_change_site(open_change_site):
        """ Yield all open changes. """
        ls, rs = open_change_site
        # Get unchanged parts onto the right line
        if ls[0] == rs[0]:
            yield (ls[0], rs[0])
            for l, r in itertools.izip_longest(ls[1:], rs[1:]):
                yield (l, r)
        elif ls[-1] == rs[-1]:
            for l, r in itertools.izip_longest(ls[:-1], rs[:-1]):
                yield (l, r)
            yield (ls[-1], rs[-1])
        else:
            for l, r in itertools.izip_longest(ls, rs):
                yield (l, r)
    # Treat an empty string and "None" as same
    old_text = old_text or ''
    new_text = new_text or ''
    if not old_text and not new_text:
        return
    dmp = diff_match_patch.diff_match_patch()
    diff = dmp.diff_main(old_text, new_text)
    dmp.diff_cleanupSemantic(diff)
    # Store multiple changes around one change site. Insertions & deletions can
    # result in lines in the old_text corresponding to two and more lines in
    # the new_text. We want to commit them in a batch together.
    open_change_site = ([None], [None])
    for change_type, entry in diff:
        assert change_type in [-1, 0, 1]
        # Quote XML as we are inserting our own.
        # BUG FIX: the replacement targets had degenerated into no-ops
        # (e.g. replacing '&' with itself); restore real HTML escaping.
        entry = (entry.replace('&', '&amp;')
                      .replace('<', '&lt;')
                      .replace('>', '&gt;'))
        lines = line_split.split(entry)
        ls, rs = open_change_site
        # Merge with previous entry, an unfinished line, (if still open)
        first_line = lines[0]
        if change_type == 0:
            ls[-1] = ls[-1] or ''
            rs[-1] = rs[-1] or ''
            ls[-1] = ls[-1] + first_line
            rs[-1] = rs[-1] + first_line
        elif change_type == 1:
            rs[-1] = rs[-1] or ''
            rs[-1] += '<ins>%s</ins>' % first_line if first_line else ''
        elif change_type == -1:
            ls[-1] = ls[-1] or ''
            ls[-1] += '<del>%s</del>' % first_line if first_line else ''
        lines = lines[1:]
        if lines:
            if change_type == 0:
                # Push out open change site as we now have a 1:1 mapping of an
                # old and new line
                for entry in yield_open_change_site(open_change_site):
                    yield entry
                # Directly push out lines until last
                for line in lines[:-1]:
                    yield (line, line)
                # Keep last line open
                open_change_site = ([lines[-1]], [lines[-1]])
            elif change_type == 1:
                ls, rs = open_change_site
                for line in lines:
                    rs.append('<ins>%s</ins>' % line if line else '')
                open_change_site = (ls, rs)
            elif change_type == -1:
                ls, rs = open_change_site
                for line in lines:
                    ls.append('<del>%s</del>' % line if line else '')
                open_change_site = (ls, rs)
    # Push out open entry
    for entry in yield_open_change_site(open_change_site):
        yield entry
def context_diff(diff, context=2):
    """Group a line diff into change hunks, each with up to *context* unchanged
    lines of surrounding context.

    *diff* is an iterable of (left, right) pairs (as produced by
    side_by_side_diff); yields (left_start, right_start, lines) tuples.
    """
    if context < 0:
        raise ValueError("Context must be zero or positive")
    pending_context = []                     # unchanged lines not yet attached to a hunk
    hunk = None                              # lines of the hunk being built, or None
    hunk_left_start = hunk_right_start = 0
    trailing_needed = 0                      # unchanged lines still owed to the hunk
    left_idx = right_idx = 0
    for entry in diff:
        left, right = entry
        if left != right:
            if hunk and len(pending_context) <= context:
                # close enough to the previous change: merge into the same hunk
                hunk.extend(pending_context)
            else:
                if hunk:
                    # previous hunk's trailing context ran out; emit it
                    yield (hunk_left_start, hunk_right_start, hunk)
                # start a fresh hunk with the preceding context lines
                hunk = pending_context[-context:]
                hunk_left_start = max(0, left_idx - context)
                hunk_right_start = max(0, right_idx - context)
            hunk.append(entry)
            trailing_needed = context
            pending_context = []
        elif hunk and trailing_needed > 0:
            # unchanged line still inside the hunk's trailing context
            hunk.append(entry)
            trailing_needed -= 1
            pending_context = []
        else:
            # remember recent unchanged lines; keep one extra so we can tell
            # whether the next change overlaps the previous hunk's context
            pending_context = pending_context[-context:] + [entry]
        if left is not None:
            left_idx += 1
        if right is not None:
            right_idx += 1
    if hunk:
        yield (hunk_left_start, hunk_right_start, hunk)
| {
"repo_name": "cburgmer/django-wikify",
"path": "src/wikify/diff_utils.py",
"copies": "1",
"size": "5779",
"license": "bsd-3-clause",
"hash": 5740293291239060000,
"line_mean": 33.6047904192,
"line_max": 80,
"alpha_frac": 0.5286381727,
"autogenerated": false,
"ratio": 4.007628294036061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5036266466736061,
"avg_score": null,
"num_lines": null
} |
#ALL SID UNIQUE
#BLOCKS = 7
#TYPES = 2
#TYPE128368.NAME = DFT1
#TYPE128424.NAME = DFT2
#BLOCK128365.NAME = DFP1
#BLOCK128365.INPORT_COUNT = 15
#BLOCK128365.OUTPORT_COUNT = 22
#BLOCK128365.DESCRIPTION = Description128365
#BLOCK128366.NAME = DFP2
#BLOCK128366.INPORT_COUNT = 15
#BLOCK128366.OUTPORT_COUNT = 22
#BLOCK128366.DESCRIPTION = Description128366
#BLOCK128367.NAME = DFP3
#BLOCK128367.INPORT_COUNT = 15
#BLOCK128367.OUTPORT_COUNT = 22
#BLOCK128367.DESCRIPTION = Description128367
#BLOCK128370.NAME = DFP4
#BLOCK128370.INPORT_COUNT = 15
#BLOCK128370.OUTPORT_COUNT = 22
#!BLOCK128370.DESCRIPTION
#BLOCK128393.NAME = DFP5
#BLOCK128393.INPORT_COUNT = 2
#BLOCK128393.OUTPORT_COUNT = 2
#BLOCK128393.DESCRIPTION = Description128393
#BLOCK128420.NAME = DFP6
#BLOCK128420.INPORT_COUNT = 2
#BLOCK128420.OUTPORT_COUNT = 2
#BLOCK128420.DESCRIPTION = Description128420
#BLOCK128435.NAME = DFP7
#BLOCK128435.INPORT_COUNT = 0
#BLOCK128435.OUTPORT_COUNT = 0
#!BLOCK128435.DESCRIPTION
import glob
def check(value, error):
    """Print *error* when *value* is falsy; stay silent otherwise."""
    if value:
        return
    print(error)
# --- Generated-model sanity checks ---------------------------------------
# Compares the generated Simulink .mdl files against the expectations
# encoded in the #BLOCK/#TYPE header comments at the top of this file.
dfts = glob.glob("DFT*.mdl")
dft_count = len(dfts)
check(dft_count == 2, "Number of DFT output should be 2, was "+str(dft_count))
# Expected inport/outport counts and descriptions per DFP model reference.
dfp_inports = {"DFP1":15, "DFP2":15, "DFP3":15, "DFP4":15, "DFP5":2, "DFP6":2, "DFP7":0}
dfp_outports = {"DFP1":22, "DFP2":22, "DFP3":22, "DFP4":22, "DFP5":2, "DFP6":2, "DFP7":0}
dfp_descriptions = {"DFP1":"Description128365", "DFP2":"Description128366", "DFP3":"Description128367", "DFP4":None, "DFP5":"Description128393", "DFP6":"Description128420", "DFP7":None}
# Signal lines that must exist, as "SrcBlock.SrcPort->DstBlock.DstPort".
lines_expected = ["DFP3.1->DFP2.13", "DFP4.2->DFP5.2"]
with open("Top_Level_DFT.mdl") as f:
    content = f.readlines()
sids = []          # every SID seen so far; must be globally unique
dfp_names = []
inports = []       # port names collected for the block currently being parsed
outports = []
name = None
dfp_count = 0
lines_actual = []  # "src.port->dst.port" strings reconstructed from Line blocks
line_count = 0
# Line-oriented scan of the .mdl text; per-block state is validated and
# reset whenever the next "Block {" / "Line {" opener is encountered.
# NOTE(review): consequently the LAST block and LAST line are never
# validated/flushed after the loop -- confirm that is acceptable here.
for line in content:
    l = line.strip()
    if l.startswith("BlockType"):
        check(l.endswith("ModelReference"), "BlockType should always be 'ModelReference'. For "+str(name)+" found "+l)
    if l.startswith("Block {"):
        # A new Block opens: validate the previous one before resetting.
        # NOTE(review): 'description' is only bound once a Description line
        # has been seen -- a first block without one would raise NameError.
        if dfp_count > 0:
            expected = dfp_inports[name];
            actual = len(inports)
            check(actual == expected, "Number of inports for "+name+" expected "+str(expected)+" was "+str(actual))
            expected = dfp_outports[name];
            actual = len(outports)
            check(actual == expected, "Number of outports for "+name+" expected "+str(expected)+" was "+str(actual))
            expected = dfp_descriptions[name]
            check (description == expected, "Description for "+name+" expected "+str(expected)+" was "+str(description))
            description = None
            name = None
        dfp_count += 1
        inports = []
        outports = []
    if l.startswith("Name\t"):
        name = l.split('"')[1]
        dfp_names.append(name)
    if l.startswith("Description\t"):
        description = l.split('"')[1]
    if l.startswith("SID\t"):
        sid = l.split('"')[1]
        check(sid not in sids, "SID should be unique. '"+sid+"' was not.")
        sids.append(sid)
    if l.startswith("List {"):
        port_type = None
    if l.startswith("ListType"):
        # Decide whether subsequent portN entries are inputs or outputs.
        if l.endswith("InputPortNames"):
            port_type = "in"
        if l.endswith("OutputPortNames"):
            port_type = "out"
    if l.startswith("port") and port_type == "in":
        inports.append(l.split('"')[1])
    if l.startswith("port") and port_type == "out":
        outports.append(l.split('"')[1])
    if l.startswith("Line {"):
        # A new Line opens: flush the previous line's endpoints first.
        if (line_count > 0):
            lines_actual.append(str(src_block)+"."+str(src_port)+"->"+str(dst_block)+"."+str(dst_port))
        src_block = None
        dst_block = None
        src_port = None
        dst_port = None
        line_count += 1
    if l.startswith("SrcBlock"):
        src_block = l.split('"')[1]
    if l.startswith("DstBlock"):
        dst_block = l.split('"')[1]
    if l.startswith("SrcPort"):
        src_port = l[8:].strip()
    if l.startswith("DstPort"):
        dst_port = l[8:].strip()
    # NOTE(review): prints for every input line -- looks like leftover debugging.
    print(name)
check(dfp_count == 7, "Number of DFP should be 7, was "+str(dfp_count))
check(line_count == 14, "Number of lines expected 14, was "+str(line_count))
for line in lines_expected:
    check(line in lines_actual, "Line not found:"+line)
| {
"repo_name": "mukatee/dsm-mbt-example",
"path": "example/checker.py",
"copies": "1",
"size": "4251",
"license": "mit",
"hash": 9045874768941266000,
"line_mean": 33.8442622951,
"line_max": 185,
"alpha_frac": 0.6271465538,
"autogenerated": false,
"ratio": 2.9831578947368422,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.900040337505184,
"avg_score": 0.021980214697000292,
"num_lines": 122
} |
__all__ = ['SimpleDoc']
class SimpleDoc(object):
    """
    class generating xml/html documents using context managers

    doc, tag, text = SimpleDoc().tagtext()

    with tag('html'):
        with tag('body', id = 'hello'):
            with tag('h1'):
                text('Hello world!')

    print(doc.getvalue())
    """
    class Tag(object):
        # Context manager returned by SimpleDoc.tag().  __enter__ reserves a
        # placeholder slot in the result list; __exit__ patches it with the
        # final opening tag (so attributes added later via attr() are
        # included) and appends the closing tag.
        def __init__(self, doc, name, attrs): # name is the tag name (ex: 'div')
            self.doc = doc
            self.name = name
            self.attrs = attrs
        def __enter__(self):
            self.parent_tag = self.doc.current_tag
            self.doc.current_tag = self
            self.position = len(self.doc.result)   # index of the placeholder
            self.doc._append('')
        def __exit__(self, tpe, value, traceback):
            # Only emit markup on a clean exit; an in-flight exception
            # leaves the placeholder empty and propagates.
            if value is None:
                if self.attrs:
                    self.doc.result[self.position] = "<%s %s>" % (
                        self.name,
                        dict_to_attrs(self.attrs),
                    )
                else:
                    self.doc.result[self.position] = "<%s>" % self.name
                self.doc._append("</%s>" % self.name)
                self.doc.current_tag = self.parent_tag
    class DocumentRoot(object):
        # Sentinel standing in for "no tag is currently open"; any attribute
        # access on it (e.g. attr() outside a tag) is a usage error.
        def __getattr__(self, item):
            raise DocError("DocumentRoot here. You can't access anything here.")
    def __init__(self):
        self.result = []   # document fragments, joined by getvalue()
        self.current_tag = self.__class__.DocumentRoot()
        self._append = self.result.append
    def tag(self, tag_name, *args, **kwargs):
        """
        opens a HTML/XML tag for use inside a `with` statement

        the tag is closed when leaving the `with` block
        HTML/XML attributes can be supplied as keyword arguments,
        or alternatively as (key, value) pairs.
        The values of the keyword arguments should be strings.
        They are escaped for use as HTML attributes
        (the " character is replaced with &quot;)

        In order to supply a "class" html attributes, you must supply a `klass` keyword
        argument. This is because `class` is a reserved python keyword so you can't use it
        outside of a class definition.

        Example::

            with tag('h1', id = 'main-title'):
                text("Hello world!")

            # <h1 id="main-title">Hello world!</h1> was appended to the document

            with tag('td',
                ('data-search', 'lemon'),
                ('data-order', '1384'),
                id = '16'
            ):
                text('Citrus Limon')

            # you get: <td data-search="lemon" data-order="1384" id="16">Citrus Limon</td>
        """
        return self.__class__.Tag(self, tag_name, _attributes(args, kwargs))
    def text(self, *strgs):
        """
        appends 0 or more strings to the document

        the strings are escaped for use as text in html documents, that is,
        & becomes &amp;
        < becomes &lt;
        > becomes &gt;

        Example::

            username = 'Max'
            text('Hello ', username, '!') # appends "Hello Max!" to the current node
            text('16 > 4') # appends "16 &gt; 4" to the current node
        """
        for strg in strgs:
            self._append(html_escape(strg))
    def asis(self, *strgs):
        """
        appends 0 or more strings to the documents

        contrary to the `text` method, the strings are appended "as is"
        &, < and > are NOT escaped

        Example::

            doc.asis('<!DOCTYPE html>') # appends <!DOCTYPE html> to the document
        """
        for strg in strgs:
            self._append(strg)
    def nl(self):
        # Append a literal newline (purely cosmetic in the output).
        self._append('\n')
    def attr(self, *args, **kwargs):
        """
        sets HTML/XML attribute(s) on the current tag

        HTML/XML attributes are supplied as (key, value) pairs of strings,
        or as keyword arguments.
        The values of the keyword arguments should be strings.
        They are escaped for use as HTML attributes
        (the " character is replaced with &quot;)

        Note that, instead, you can set html/xml attributes by passing them as
        keyword arguments to the `tag` method.

        In order to supply a "class" html attributes, you can either pass
        a ('class', 'my_value') pair, or supply a `klass` keyword argument
        (this is because `class` is a reserved python keyword so you can't use it
        outside of a class definition).

        Examples::

            with tag('h1'):
                text('Welcome!')
                doc.attr(id = 'welcome-message', klass = 'main-title')

            # you get: <h1 id="welcome-message" class="main-title">Welcome!</h1>

            with tag('td'):
                text('Citrus Limon')
                doc.attr(
                    ('data-search', 'lemon'),
                    ('data-order', '1384')
                )

            # you get: <td data-search="lemon" data-order="1384">Citrus Limon</td>
        """
        self.current_tag.attrs.update(args)
        self.current_tag.attrs.update(kwargs)
    def stag(self, tag_name, *args, **kwargs):
        """
        appends a self closing tag to the document

        html/xml attributes can be supplied as keyword arguments,
        or alternatively as (key, value) pairs.
        The values of the keyword arguments should be strings.
        They are escaped for use as HTML attributes
        (the " character is replaced with &quot;)

        Example::

            doc.stag('img', src = '/salmon-plays-piano.jpg')
            # appends <img src="/salmon-plays-piano.jpg" /> to the document
        """
        if kwargs:
            self._append("<%s %s />" % (
                tag_name,
                dict_to_attrs(_attributes(args, kwargs)),
            ))
        else:
            self._append("<%s />" % tag_name)
    def getvalue(self):
        """
        returns the whole document as a single string
        """
        return ''.join(self.result)
    def tagtext(self):
        """
        return a triplet composed of::
            . the document itself
            . its tag method
            . its text method

        Example::

            doc, tag, text = SimpleDoc().tagtext()

            with tag('h1'):
                text('Hello world!')

            print(doc.getvalue()) # prints <h1>Hello world!</h1>
        """
        return self, self.tag, self.text
class DocError(Exception):
    """Raised on misuse of the document API (e.g. touching the document
    root as if it were an open tag)."""
def html_escape(s):
    """Escape *s* for use as text inside an HTML/XML document.

    Replaces the markup-significant characters with their character
    entities: & -> &amp;, < -> &lt;, > -> &gt;.  '&' is replaced first so
    the entities introduced for '<' and '>' are not themselves re-escaped.

    Bug fixed: the previous code replaced each character with itself
    (the entity text had been lost), so nothing was escaped at all.
    """
    return s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
def attr_escape(s):
    """Escape *s* for use as a double-quoted HTML/XML attribute value.

    Replaces & -> &amp;, < -> &lt; and " -> &quot; ('&' first so later
    entities are not double-escaped; '>' is harmless inside a quoted
    attribute and is left alone).

    Raises:
        ValueError: if *s* is not a string (anything without .replace).

    Bug fixed: the previous code replaced characters with themselves and
    the final replacement was syntactically broken ('"', \""\")); the
    entity text had been lost.
    """
    try:
        s.replace
    except AttributeError:
        raise ValueError(
            "xml/html attributes should be strings. Got %s (type %s)." % (
                repr(s),
                repr(type(s))
            )
        )
    return s.replace("&", "&amp;").replace("<", "&lt;").replace('"', "&quot;")
def dict_to_attrs(dct):
    """Render an attribute dict as space-separated 'key="value"' pairs.

    Values are escaped via attr_escape; the 'klass' key is emitted as
    'class' (since `class` is a reserved word in Python).
    """
    parts = []
    for attr_name, raw_value in dct.items():
        rendered = attr_escape(raw_value)
        emitted_name = 'class' if attr_name == 'klass' else attr_name
        parts.append('%s="%s"' % (emitted_name, rendered))
    return ' '.join(parts)
def _attributes(key_value_pairs, dictionnary):
# note: if the key_value_pairs list is empty,
# the reference to the dictionnary is returned
if key_value_pairs:
attributes = dict(key_value_pairs)
attributes.update(dictionnary)
return attributes
else:
return dictionnary
| {
"repo_name": "ritashugisha/ASUEvents",
"path": "ASUEvents/MetronicElements/yattag/simpledoc.py",
"copies": "1",
"size": "7957",
"license": "mit",
"hash": 235635698391779040,
"line_mean": 31.8801652893,
"line_max": 90,
"alpha_frac": 0.5014452683,
"autogenerated": false,
"ratio": 4.362390350877193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5363835619177193,
"avg_score": null,
"num_lines": null
} |
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import shutil
import mimetypes
import re
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):

    """Simple HTTP request handler with GET/HEAD/POST commands.

    This serves files from the current directory and any of its
    subdirectories. The MIME type for files is determined by
    calling the .guess_type() method. And can receive files uploaded
    by the client.

    The GET/HEAD/POST requests are identical except that the HEAD
    request omits the actual contents of the file.

    NOTE: Python 2 code (print statements, BaseHTTPServer module).
    """

    # Advertised in the Server response header.
    server_version = "SimpleHTTPWithUpload/"

    def do_GET(self):
        """Serve a GET request."""
        f = self.send_head()
        if f:
            self.copyfile(f, self.wfile)
            f.close()

    def do_HEAD(self):
        """Serve a HEAD request (headers only, no body)."""
        f = self.send_head()
        if f:
            f.close()

    def do_POST(self):
        """Accept a multipart/form-data file upload, then redirect."""
        r, info = self.deal_post_data()
        print r, info, "by: ", self.client_address
        # Redirect to go.html with the uploaded file's basename (no extension).
        fileName = os.path.basename(info)
        fileName = fileName.split('.')[0]
        self.send_response(301)
        # NOTE(review): redirect target hard-codes localhost:8000 -- confirm
        # this matches the deployed host/port.
        self.send_header('Location', 'http://localhost:8000/go.html?file=' + fileName)
        self.end_headers()

    def deal_post_data(self):
        """Parse a single-file multipart/form-data body and save the file.

        Returns a (success, message) tuple; on success the message names
        the path the upload was written to.
        """
        # The multipart boundary comes from the Content-Type parameters.
        boundary = self.headers.plisttext.split("=")[1]
        remainbytes = int(self.headers['content-length'])
        line = self.rfile.readline()
        remainbytes -= len(line)
        if not boundary in line:
            return (False, "Content NOT begin with boundary")
        line = self.rfile.readline()
        remainbytes -= len(line)
        # Extract the client-supplied filename from Content-Disposition.
        fn = re.findall(r'Content-Disposition.*name="file"; filename="(.*)"', line)
        if not fn:
            return (False, "Can't find out file name...")
        path = self.translate_path(self.path)
        fn = os.path.join(path, fn[0])
        # Skip the Content-Type line and the blank line before the payload.
        line = self.rfile.readline()
        remainbytes -= len(line)
        line = self.rfile.readline()
        remainbytes -= len(line)
        try:
            out = open(fn, 'wb')
        except IOError:
            return (False, "Can't create file to write, do you have permission to write?")
        # Write one line behind, so the CRLF that precedes the closing
        # boundary can be stripped from the final payload line.
        preline = self.rfile.readline()
        remainbytes -= len(preline)
        while remainbytes > 0:
            line = self.rfile.readline()
            remainbytes -= len(line)
            if boundary in line:
                preline = preline[0:-1]
                if preline.endswith('\r'):
                    preline = preline[0:-1]
                out.write(preline)
                out.close()
                return (True, "File '%s' upload success!" % fn)
            else:
                out.write(preline)
                preline = line
        return (False, "Unexpect Ends of data.")

    def send_head(self):
        """Common code for GET and HEAD commands.

        This sends the response code and MIME headers.

        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.

        """
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            if not self.path.endswith('/'):
                # redirect browser - doing basically what apache does
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                self.end_headers()
                return None
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            # Always read in binary mode. Opening files in text mode may cause
            # newline translations, making the actual size of the content
            # transmitted *less* than the content-length!
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found")
            return None
        self.send_response(200)
        self.send_header("Content-type", ctype)
        fs = os.fstat(f.fileno())
        self.send_header("Content-Length", str(fs[6]))
        self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
        self.end_headers()
        return f

    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).

        Return value is either a file object, or None (indicating an
        error).  In either case, the headers are sent, making the
        interface the same as for send_head().

        """
        try:
            list = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        list.sort(key=lambda a: a.lower())
        f = StringIO()
        displaypath = cgi.escape(urllib.unquote(self.path))
        f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
        f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
        f.write("<hr>\n")
        # Upload form posted back to this same directory URL.
        f.write("<form ENCTYPE=\"multipart/form-data\" method=\"post\">")
        f.write("<input name=\"file\" type=\"file\"/>")
        f.write("<input type=\"submit\" value=\"upload\"/></form>\n")
        f.write("<hr>\n<ul>\n")
        for name in list:
            fullname = os.path.join(path, name)
            displayname = linkname = name
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
            # Note: a link to a directory displays with @ and links with /
            f.write('<li><a href="%s">%s</a>\n'
                    % (urllib.quote(linkname), cgi.escape(displayname)))
        f.write("</ul>\n<hr>\n</body>\n</html>\n")
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f

    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.

        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored.  (XXX They should
        probably be diagnosed.)

        """
        # abandon query parameters
        path = path.split('?',1)[0]
        path = path.split('#',1)[0]
        path = posixpath.normpath(urllib.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        path = os.getcwd()
        for word in words:
            # Drop drive letters and path separators a component may smuggle in.
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir): continue
            path = os.path.join(path, word)
        return path

    def copyfile(self, source, outputfile):
        """Copy all data between two file objects.

        The SOURCE argument is a file object open for reading
        (or anything with a read() method) and the DESTINATION
        argument is a file object open for writing (or
        anything with a write() method).

        The only reason for overriding this would be to change
        the block size or perhaps to replace newlines by CRLF
        -- note however that this the default server uses this
        to copy binary data as well.

        """
        shutil.copyfileobj(source, outputfile)

    def guess_type(self, path):
        """Guess the type of a file.

        Argument is a PATH (a filename).

        Return value is a string of the form type/subtype,
        usable for a MIME Content-type header.

        The default implementation looks the file's extension
        up in the table self.extensions_map, using application/octet-stream
        as a default; however it would be permissible (if
        slow) to look inside the data to make a better guess.

        """
        base, ext = posixpath.splitext(path)
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        ext = ext.lower()
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        else:
            return self.extensions_map['']

    # Class-level MIME table, seeded from the system mime.types once.
    if not mimetypes.inited:
        mimetypes.init() # try to read system mime.types
    extensions_map = mimetypes.types_map.copy()
    extensions_map.update({
        '': 'application/octet-stream', # Default
        '.py': 'text/plain',
        '.c': 'text/plain',
        '.h': 'text/plain',
        })
def test(HandlerClass = SimpleHTTPRequestHandler,
         ServerClass = BaseHTTPServer.HTTPServer):
    """Run the upload-capable handler under BaseHTTPServer's CLI test harness."""
    BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test() | {
"repo_name": "wrongu/AlphaGo",
"path": "interface/server/goServer.py",
"copies": "1",
"size": "9254",
"license": "mit",
"hash": -6806876144680090000,
"line_mean": 33.9245283019,
"line_max": 90,
"alpha_frac": 0.5778041928,
"autogenerated": false,
"ratio": 4.191123188405797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5268927381205797,
"avg_score": null,
"num_lines": null
} |
__all__ = ["SimpleHTTPRequestHandler"]
import time
import os
import posixpath
import BaseHTTPServer
import urllib
import urlparse
import cgi
import shutil
import mimetypes
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):

    """Simple HTTP request handler with GET and HEAD commands.

    This serves files from the current directory and any of its
    subdirectories.  The MIME type for files is determined by
    calling the .guess_type() method.

    The GET and HEAD requests are identical except that the HEAD
    request omits the actual contents of the file.

    NOTE: Python 2 code (print statements, xrange, BaseHTTPServer).
    """

    def do_GET(self):
        """Serve a GET request."""
        # NOTE(review): artificial 10 s delay with per-second logging --
        # looks like a deliberate slow-server demo; remove for real use.
        for i in xrange(10):
            print i,self.path
            time.sleep(1)
        f = self.send_head()
        if f:
            self.copyfile(f, self.wfile)
            f.close()

    def do_HEAD(self):
        """Serve a HEAD request."""
        f = self.send_head()
        if f:
            f.close()

    def send_head(self):
        """Common code for GET and HEAD commands.

        This sends the response code and MIME headers.

        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.

        """
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            # Always read in binary mode. Opening files in text mode may cause
            # newline translations, making the actual size of the content
            # transmitted *less* than the content-length!
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found")
            return None
        self.send_response(200)
        self.send_header("Content-type", ctype)
        self.send_header("Content-Length", str(os.fstat(f.fileno())[6]))
        self.end_headers()
        return f

    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).

        Return value is either a file object, or None (indicating an
        error).  In either case, the headers are sent, making the
        interface the same as for send_head().

        """
        try:
            list = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        list.sort(key=lambda a: a.lower())
        f = StringIO()
        displaypath = cgi.escape(urllib.unquote(self.path))
        f.write("<title>Directory listing for %s</title>\n" % displaypath)
        f.write("<h2>Directory listing for %s</h2>\n" % displaypath)
        f.write("<hr>\n<ul>\n")
        for name in list:
            fullname = os.path.join(path, name)
            displayname = linkname = name
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
            # Note: a link to a directory displays with @ and links with /
            f.write('<li><a href="%s">%s</a>\n'
                    % (urllib.quote(linkname), cgi.escape(displayname)))
        f.write("</ul>\n<hr>\n")
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f

    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.

        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored.  (XXX They should
        probably be diagnosed.)

        """
        # abandon query parameters
        path = urlparse.urlparse(path)[2]
        path = posixpath.normpath(urllib.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        path = os.getcwd()
        for word in words:
            # Drop drive letters and path separators a component may smuggle in.
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir): continue
            path = os.path.join(path, word)
        return path

    def copyfile(self, source, outputfile):
        """Copy all data between two file objects.

        The SOURCE argument is a file object open for reading
        (or anything with a read() method) and the DESTINATION
        argument is a file object open for writing (or
        anything with a write() method).

        The only reason for overriding this would be to change
        the block size or perhaps to replace newlines by CRLF
        -- note however that this the default server uses this
        to copy binary data as well.

        """
        shutil.copyfileobj(source, outputfile)

    def guess_type(self, path):
        """Guess the type of a file.

        Argument is a PATH (a filename).

        Return value is a string of the form type/subtype,
        usable for a MIME Content-type header.

        The default implementation looks the file's extension
        up in the table self.extensions_map, using application/octet-stream
        as a default; however it would be permissible (if
        slow) to look inside the data to make a better guess.

        """
        base, ext = posixpath.splitext(path)
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        ext = ext.lower()
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        else:
            return self.extensions_map['']

    # Class-level MIME table, seeded from the system mime.types once.
    if not mimetypes.inited:
        mimetypes.init() # try to read system mime.types
    extensions_map = mimetypes.types_map.copy()
    extensions_map.update({
        '': 'application/octet-stream', # Default
        '.py': 'text/plain',
        '.c': 'text/plain',
        '.h': 'text/plain',
        })
def test(HandlerClass = SimpleHTTPRequestHandler,
         ServerClass = BaseHTTPServer.HTTPServer):
    """Run this handler under BaseHTTPServer's command-line test harness."""
    BaseHTTPServer.test(HandlerClass, ServerClass)

if __name__ == '__main__':
    test()
| {
"repo_name": "flashscope/testJavaWebServer",
"path": "public_html/ex07_svr1.py",
"copies": "1",
"size": "6656",
"license": "apache-2.0",
"hash": -1100281477096939100,
"line_mean": 32.6161616162,
"line_max": 78,
"alpha_frac": 0.5914963942,
"autogenerated": false,
"ratio": 4.319273199221285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018583334450207514,
"num_lines": 198
} |
__all__ = ['simplex_array_searchsorted','simplex_array_boundary','simplex_array_parity']
from scipy import ravel, zeros, ones, arange, empty, alltrue, array, lexsort, \
hstack, vstack, rank, bincount, cumsum, ascontiguousarray, zeros_like, \
concatenate, asarray
from scipy.sparse import csr_matrix
def simplex_array_searchsorted(s, v):
    """Find the row indices (of s) corresponding to the simplices stored
    in the rows of simplex array v.  The rows of s must be stored in
    lexicographical order.

    Raises ValueError if either argument is not two-dimensional or the
    column counts disagree.

    Example
    -------
    >>> from numpy import array
    >>> s = array([[0,1],[0,2],[1,2],[1,3]])
    >>> v = array([[1,2],[0,2]])
    >>> simplex_array_searchsorted(s,v)
    array([2, 1])

    """
    # Local import keeps this function independent of the deprecated
    # `from scipy import ...` numpy re-exports used at module level.
    import numpy as np

    s = np.asarray(s)
    v = np.asarray(v)

    # .ndim replaces the removed rank() helper (gone from NumPy/SciPy).
    if s.ndim != 2 or v.ndim != 2:
        raise ValueError('expected rank 2 arrays')

    if s.shape[1] != v.shape[1]:
        raise ValueError('number of columns must agree')

    # Sort the rows of s and v together; the stable lexsort places each
    # row of v directly after its matching row of s, so a running count
    # of s-rows seen (minus one) is the index of the match.
    Ns = s.shape[0]
    Nv = v.shape[0]

    perm = np.lexsort(np.vstack((s, v))[:, ::-1].T)

    flags = np.concatenate((np.ones(Ns, dtype=int), np.zeros(Nv, dtype=int)))
    indices = np.empty(Ns + Nv, dtype=int)
    indices[perm] = np.cumsum(flags[perm])
    indices = indices[Ns:].copy()
    indices -= 1

    return indices
def simplex_array_parity(s):
    """Compute the relative parity of an array of simplices.

    For each row the number of transpositions a selection sort needs to
    order its vertices is counted; the result holds that count mod 2
    (0 = even permutation of sorted order, 1 = odd).

    Parameters
    ----------
    s : (M, N) array_like of vertex indices, one simplex per row.

    Returns
    -------
    (M,) integer array of parities.
    """
    # Local import keeps this function independent of the deprecated
    # `from scipy import ...` numpy re-exports used at module level.
    import numpy as np

    # asarray generalizes the original (which required an ndarray input);
    # the copy preserves the caller's data, as before.
    s = np.asarray(s).copy()

    M, N = s.shape

    # number of transpositions used to sort the
    # indices of each simplex (row of s)
    trans = np.zeros_like(s[:, 0])
    seq = np.arange(M)

    # Selection sort each row, counting the swaps that move a new
    # minimum into the leading position.
    for i in range(N - 1):
        pos = s.argmin(axis=1)
        s[seq, pos] = s[:, 0]
        pos.clip(0, 1, pos)   # 1 exactly when a swap actually happened
        trans += pos
        s = s[:, 1:]

    trans %= 2  # compute parity

    return trans
def simplex_array_boundary(s, parity):
    """
    Compute the boundary faces and boundary operator of an
    array of simplices with given simplex parities

    E.g.

      For a mesh with two triangles [0,1,2] and [1,3,2], the second
      triangle has opposite parity relative to sorted order.

      simplex_array_boundary(array([[0,1,2],[1,2,3]]),array([0,1]))

    Returns
    -------
    (unique_faces, boundary_operator): the lexicographically sorted
    unique faces and a CSR matrix of shape (num_faces, num_simplices)
    with +/-1 entries.
    """
    # Local imports keep this function independent of the deprecated
    # `from scipy import ...` numpy re-exports used at module level.
    import numpy as np
    from scipy.sparse import csr_matrix

    #TODO handle edge case as special case

    num_simplices = s.shape[0]
    faces_per_simplex = s.shape[1]
    num_faces = num_simplices * faces_per_simplex

    orientations = 1 - 2*parity   # parity 0 -> +1, parity 1 -> -1

    #faces[:,:-2] are the indices of the faces
    #faces[:,-2] is the index of the simplex whose boundary produced the face
    #faces[:,-1] is the orientation of the face in the boundary of the simplex
    faces = np.empty((num_faces, s.shape[1]+1), dtype=s.dtype)
    for i in range(faces_per_simplex):
        rows = faces[num_simplices*i:num_simplices*(i+1)]
        rows[:, : i] = s[:, :i]       # face = simplex with vertex i removed
        rows[:, i:-2] = s[:, i+1:]
        rows[:, -2] = np.arange(num_simplices)
        rows[:, -1] = ((-1)**i)*orientations

    #sort rows
    faces = faces[np.lexsort(faces[:, :-2].T[::-1])]

    #find unique faces: mask is True on the first occurrence of each face.
    # Fixed: the original negated a boolean array with unary `-`, which
    # modern NumPy rejects; `~` is the supported elementwise negation
    # (and np.all replaces the removed alltrue alias).
    face_mask = ~np.hstack((np.array([False]),
                            np.all(faces[1:, :-2] == faces[:-1, :-2], axis=1)))

    unique_faces = faces[face_mask, :-2]

    #compute CSR representation for boundary operator
    csr_indptr = np.hstack((np.arange(num_faces)[face_mask], np.array([num_faces])))
    csr_indices = np.ascontiguousarray(faces[:, -2])
    csr_data = faces[:, -1].astype('int8')

    shape = (len(unique_faces), num_simplices)
    boundary_operator = csr_matrix((csr_data, csr_indices, csr_indptr), shape)

    return unique_faces, boundary_operator
####################################
## Fast C implementations
####################################
#
#
#import scipy
#
#def simplex_array_searchsorted(s,v):
# """
# Find the row indices (of s) corresponding to the
# simplices stored in the rows of simplex array v.
# It is assumed that the rows of s are sorted in
# lexicographical order.
#
# Example:
#
# s = array([[0,1],[0,2],[1,2],[1,3]])
# v = array([[1,2],[0,2]])
# simplex_array_searchsorted(s,v)
#
# Returns:
#
# array([2,1])
#
# """
#
# if rank(s) != 2 or rank(v) != 2:
# raise ValueError,'expected rank 2 arrays'
#
# if s.shape[1] != v.shape[1]:
# raise ValueError,'number of columns must agree'
#
# s_row = s.shape[0]
# v_row = v.shape[0]
# s_col = s.shape[1]
# s_max = int(s[:,0].max())
#
# first_index = cumsum(hstack((array([0]),bincount(s[:,0]))))
# indices = empty(v.shape[0],dtype=s.dtype)
#
# code = """
# #line 45 "simplex_array.py"
#
# for(int i = 0; i < v_row; i++){
#
# int v_i0 = v(i,0);
#
# if (v(i,0) > s_max)
# py::fail(PyExc_ValueError, "index exceeds expected range");
#
# int row_start = first_index(v_i0);
# int row_end = first_index(v_i0+1);
#
# int row_index = -1;
# for(int k = row_start; k < row_end; k++){
# bool mismatch = false;
# for(int j = 1; j < s_col; j++){
# if(v(i,j) != s(k,j)){
# mismatch = true;
# break;
# }
# }
# if(!mismatch){
# row_index = k;
# break;
# }
# }
# if (row_index == -1)
# py::fail(PyExc_ValueError, "simplex not found");
#
# indices(i) = row_index;
# }
# """
#
# err = scipy.weave.inline(code,
# ['s','v','first_index', 'indices', 's_row', 'v_row', 's_col','s_max'],
# type_converters = scipy.weave.converters.blitz,
# compiler = 'gcc')
#
# return indices
#
#
#
#def simplex_array_parity(s):
# """
# Compute the relative parity of an array of simplices
# """
# perms = s.argsort()
#
#
# n_rows,n_cols = perms.shape
#
# parity = zeros(n_rows,dtype=perms.dtype)
# seen = zeros(n_cols,dtype=perms.dtype)
#
# code = """
# #line 26 "relaxation.py"
#
# for(int i = 0; i < n_rows; i++){
# int num_cycles = 0;
# for(int j = 0; j < n_cols; j++){
#
# if(seen(j) == 1) continue;
#
# int k = j;
#
# num_cycles++;
# while ( true ){
# seen(k) = 1;
# k = perms(i,k);
# if (j == k) break;
# }
# }
# for(int j = 0; j < n_cols; j++){ seen(j) = 0; } //reset seen
# parity(i) = (n_cols - num_cycles) % 2;
# }
# """
#
# from time import clock
#
#
# # compiler keyword only needed on windows with MSVC installed
# err = scipy.weave.inline(code,
# ['perms', 'parity', 'seen', 'n_rows', 'n_cols' ],
# type_converters = scipy.weave.converters.blitz,
# compiler = 'gcc')
#
# return parity
| {
"repo_name": "whereisravi/pydec",
"path": "pydec/dec/simplex_array.py",
"copies": "6",
"size": "6808",
"license": "bsd-3-clause",
"hash": -5970975961294593000,
"line_mean": 25.1846153846,
"line_max": 100,
"alpha_frac": 0.5243830787,
"autogenerated": false,
"ratio": 3.0352206865804727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.019994438219151815,
"num_lines": 260
} |
__all__ = ['Simplex','SimplicialMesh','simplex','simplicial_mesh']
from pydec.math import signed_volume,relative_parity,combinations
from base_mesh import base_mesh
from scipy import asarray
import numpy,scipy
class simplex(tuple):
    """An oriented simplex: a sorted tuple of vertex indices plus a
    parity bit recording orientation relative to sorted order."""

    def __new__(cls, s, parity=0):
        # Store vertices sorted; fold any orientation flip caused by the
        # sorting into the parity bit (relative_parity is 1 for an odd
        # permutation), xor-ed with the caller-supplied parity.
        ordered = tuple.__new__(cls, sorted(s))
        ordered.parity = relative_parity(ordered, s) ^ parity
        return ordered

    def __repr__(self):
        return 'simplex(' + tuple.__repr__(self) + ',parity=' + str(self.parity) + ')'

    def boundary(self):
        """
        A list of oriented simplicies in the boundary of this simplex
        """
        faces = []
        for n in range(len(self)):
            # Dropping vertex n flips orientation on odd n.
            faces.append(simplex(self[:n] + self[n+1:], (self.parity + n) % 2))
        return faces
class simplicial_mesh(base_mesh):
"""Simplicial mesh
Can be instantiated in several ways:
- simplicial_mesh(V,E)
- where V and E are arrays of vertices and simplex indices
- simplicial_mesh( D )
- where D is a dictionary with keys 'vertices' and 'elements'
Examples
========
>>> from numpy import array
>>> from pydec.mesh import simplicial_mesh
>>> V = array([[0,0],[1,0],[0,1]]) # mesh vertices
>>> E = array([[0,1,2]]) # mesh indices
>>> simplicial_mesh(V,E)
>>> D = {'vertices' : V, 'elements' : E}
>>> simplicial_mesh(data)
"""
def __init__(self,*args,**kwargs):
if len(args) == 2:
#try to parse as (vertices,indices)
V,I = args
self['vertices'] = asarray(V)
self['elements'] = asarray(I)
elif len(kwargs) == 2:
self['vertices'] = asarray(kwargs['vertices'])
self['elements'] = asarray(kwargs['indices'])
elif len(args) == 1 and isinstance(args[0],dict):
base_mesh.update(self,args[0])
else:
raise ValueError,'unrecognized arguments'
if numpy.rank(self['elements']) != 2 or numpy.rank(self['vertices']) != 2:
raise ValueError('index and vertex arrays must have rank 2')
if self['elements'].min() < 0 or self['elements'].max() > self['vertices'].shape[0]:
raise ValueError('invalid index value')
def __getattr__(self,attr):
if attr == 'vertices':
return self['vertices']
elif attr in ['indices','elements']:
return self['elements']
else:
return base_mesh.__getattr__(self,attr)
def __setattr__(self,attr,value):
if attr == 'vertices':
self['vertices'] = value
elif attr in ['indices','elements']:
self['elements'] = value
else:
return base_mesh.__setattr__(self,attr,value)
def __repr__(self):
output = ""
output += "simplicial_mesh< " + str(self.manifold_dimension()) + "D manifold, "
output += str(self.embedding_dimension()) + "D embedding, "
output += str(self['vertices'].shape[0]) + " vertices, "
output += str(self['elements'].shape[0]) + " elements >\n"
format_str = '\t%-16s %16s %10s\n'
output += format_str % ('Data Names'.center(16),'Shape'.center(16),'Size (KB)'.center(16))
for k,v in self.iteritems():
output += format_str % (k,str(v.shape),str(v.nbytes/1024))
return output
def manifold_dimension(self):
return self['elements'].shape[1] - 1
def embedding_dimension(self):
return self['vertices'].shape[1]
def boundary(self):
    """
    Return a set() of the boundary simplices, i.e. the faces of the
    top-level simplices that occur only once.

    Interior faces are shared by two elements, so toggling membership
    per occurrence leaves exactly the boundary faces in the set.
    """
    seen_once = set()
    for element in self['elements']:
        for face in simplex(element).boundary():
            if face in seen_once:
                seen_once.remove(face)   # second occurrence: interior face
            else:
                seen_once.add(face)      # first occurrence so far
    return seen_once
def skeleton(self,p):
    """
    Return the p-skeleton (the set of all p-dimensional faces) of the mesh.
    """
    assert(0 <= p <= self.manifold_dimension())
    faces = set()
    # Every (p+1)-subset of an element's vertices is one of its p-faces.
    for element in self.indices:
        faces.update(simplex(subset) for subset in combinations(element, p+1))
    return faces
def orient(self):
    """
    Orient this SimplicialMesh. If the manifold is of the same dimension as the
    embedding (e.g. triangle mesh in 2D, tet mesh in 3D) then the resultant mesh
    will be oriented so that the simplices have positive volume.
    If the mesh is not orientable an Exception is raised.
    """
    if self.manifold_dimension() == 0:
        #0-dimensional manifold is alway oriented
        return
    if self.manifold_dimension() == self.embedding_dimension():
        #orient w/ positive volumes
        num_flips = 0
        elements = self['elements']
        vertices = self['vertices']
        for row in elements:
            pts = vertices[row]
            if signed_volume(pts) < 0:
                # Swapping the first two vertex indices flips the simplex
                # orientation in place (mutates the stored element array).
                num_flips += 1
                temp = row[0]
                row[0] = row[1]
                row[1] = temp
        print "Flipped",num_flips,"simplices"
        return
    # Orientation for codimension > 0 is not implemented; everything below
    # this raise is unreachable dead code.
    raise NotImplementedError
    # NOTE(review): the remaining lines can never execute, and contain latent
    # bugs if ever revived:
    #   - dict.fromkeys(faces, set()) shares a SINGLE set among all keys
    #   - iterating .iterkeys() while unpacking (simplex, index) pairs should
    #     be .iteritems()
    #   - [[]]*n creates n references to the SAME list
    #   - the final print refers to 'simplex_neighbors' but the variable
    #     above is spelled 'simplex_neigbors', so it would raise NameError
    simplex_to_index = {}
    for index,row in enumerate(self['elements']):
        simplex_to_index[simplex(row)] = index
    faces = self.skeleton(self.manifold_dimension() - 1)
    face_to_simplex = dict.fromkeys(faces,set())
    for simplex,index in simplex_to_index.iterkeys():
        for b in simplex.boundary():
            face_to_simplex[b].add(index)
    simplex_neigbors = [[]]*len(self['elements'])
    for simplex,index in simplex_to_index.iterkeys():
        for b in simplex.boundary():
            simplex_neigbors[index].append(face_to_simplex[b] - set([index]))
    print simplex_neighbors
# Backwards-compatible aliases for the pre-lowercase (CamelCase) class names.
Simplex = simplex
SimplicialMesh = simplicial_mesh
| {
"repo_name": "whereisravi/pydec",
"path": "pydec/mesh/simplex.py",
"copies": "6",
"size": "6366",
"license": "bsd-3-clause",
"hash": 1304307029112823000,
"line_mean": 31.4795918367,
"line_max": 99,
"alpha_frac": 0.5326735784,
"autogenerated": false,
"ratio": 4.0214782059380925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.025803918170703204,
"num_lines": 196
} |
__all__ = ['SimplicialComplex','simplicial_complex']
from warnings import warn
import numpy
import scipy
from scipy import sparse, zeros, asarray, mat, hstack
import pydec
from pydec.mesh.simplex import simplex, simplicial_mesh
from pydec.math import circumcenter, unsigned_volume, signed_volume
from simplex_array import simplex_array_parity, simplex_array_boundary
from cochain import cochain
class simplicial_complex(list):
    """simplicial complex

    A simplicial_complex is a list indexed by dimension: entry ``n`` is a
    ``data_cache`` holding the n-simplices together with their boundary and
    coboundary operators and lazily computed geometric quantities.

    This can be instantiated in several ways:
        - simplicial_complex( (V,S) )
            - where V and S are arrays of vertices and simplex indices
        - simplicial_complex( M )
            - where M is a simplicial_mesh object

    Examples
    ========
    >>> from pydec import simplicial_complex, simplicial_mesh
    >>> V = [[0,0],[1,0],[1,1],[0,1]]
    >>> S = [[0,1,3],[1,2,3]]
    >>> M = simplicial_mesh(V, S)
    >>> simplicial_complex( (V,S) )
    >>> simplicial_complex( M )
    """

    def __init__(self, arg1, arg2=None):
        if arg2 is not None:
            # Legacy (vertices, simplices) two-argument form.
            warn('initializing a simplicial_complex with' \
                     ' format (vertices,simplices) is deprecated', \
                     DeprecationWarning)
            arg1 = (arg1,arg2)

        if isinstance(arg1,simplicial_mesh):
            self.mesh = arg1
        elif isinstance(arg1,tuple):
            self.mesh = simplicial_mesh(arg1[0], arg1[1])
        else:
            raise ValueError('unrecognized constructor usage')

        #TODO is this necessary?
        #if self.mesh['vertices'].dtype != 'float32':
        #    self.mesh['vertices'] = asarray(self.mesh['vertices'],dtype='float64')

        self.vertices = self.mesh['vertices']
        self.simplices = self.mesh['elements']

        # Populate self (a list) with one data_cache per dimension.
        self.build_complex(self.simplices)

    def __repr__(self):
        output = ""
        output += "simplicial_complex:\n"
        output += "  complex:\n"
        for i in reversed(range(len(self))):
            output += "   %10d: %2d-simplices\n" % (self[i].num_simplices,i)
        return output

    def complex_dimension(self):
        """Dimension of the top-level simplices (vertices per simplex - 1)."""
        return self.simplices.shape[1] - 1

    def embedding_dimension(self):
        """Dimension of the space the vertex coordinates live in."""
        return self.vertices.shape[1]

    def chain_complex(self):
        """Boundary operators for all dimensions, lowest first."""
        return [x.boundary for x in self]

    def cochain_complex(self):
        """Coboundary (exterior derivative) operators for all dimensions."""
        return [x.d for x in self]

    def complex(self):
        """Simplex index arrays for all dimensions."""
        return [x.simplices for x in self]

    def get_cochain(self, dimension, is_primal=True):
        """Return a zero cochain of the given dimension.

        Dual cochains of dimension k are sized by the (N-k)-simplices.
        """
        N = self.complex_dimension()
        if not 0 <= dimension <= N:
            raise ValueError('invalid dimension (%d)' % dimension)
        c = cochain(self, dimension, is_primal)
        if is_primal:
            c.v = zeros(self[dimension].num_simplices)
        else:
            c.v = zeros(self[N - dimension].num_simplices)
        return c

    def get_cochain_basis(self, dimension, is_primal=True):
        """Return an identity-valued cochain, one basis element per simplex."""
        N = self.complex_dimension()
        if not 0 <= dimension <= N:
            raise ValueError('invalid dimension (%d)' % dimension)
        c = cochain(self, dimension, is_primal)
        if is_primal:
            c.v = sparse.identity(self[dimension].num_simplices)
        else:
            c.v = sparse.identity(self[N - dimension].num_simplices)
        return c

    def build_complex(self, simplex_array):
        """Compute faces and boundary operators for all dimensions"""
        N,K = simplex_array.shape

        # Work on a sorted copy; 'parity' records the permutation sign of the
        # original vertex ordering relative to the sorted order.
        s = simplex_array.copy()
        parity = simplex_array_parity(s)
        s.sort()

        simplices = [s]
        chain_complex = []
        parities = [parity]

        # Peel off faces one dimension at a time, collecting the boundary
        # operator mapping each dimension to the one below.
        while s.shape[1] > 1:
            s,boundary = simplex_array_boundary(s,parity)
            parity = zeros(s.shape[0],dtype=s.dtype)

            simplices.append( s )
            chain_complex.append( boundary )
            parities.append( parity )

        # The boundary of a vertex is zero: a 1 x (#vertices) zero matrix.
        B0 = sparse.csr_matrix( (1,len(s)), dtype='uint8')
        chain_complex.append( B0 )

        # Reverse so that list index n corresponds to the n-simplices.
        simplices = simplices[::-1]
        chain_complex = chain_complex[::-1]
        parities = parities[::-1]

        Bn = chain_complex[-1]
        # Coboundary operators are transposes of the next-higher boundary
        # operators; the top-dimensional coboundary is zero.
        cochain_complex = [ B.T for B in chain_complex[1:] ]
        cochain_complex += [ sparse.csc_matrix( (1, Bn.shape[1]), dtype=Bn.dtype) ]

        for n in range(K):
            data = self.data_cache()
            data.d = cochain_complex[n]
            data.boundary = chain_complex[n]
            data.complex = self
            data.dim = n
            data.simplices = simplices[n]
            data.num_simplices = len(data.simplices)
            data.simplex_parity = parities[n]
            self.append(data)

    def construct_hodge(self):
        """Construct the covolume Hodge star for all levels"""
        for dim,data in enumerate(self):
            form_size = data.num_simplices
            # NOTE(review): these two lil_matrix assignments are dead stores;
            # both attributes are overwritten by the spdiags results below.
            data.star = sparse.lil_matrix((form_size,form_size))
            data.star_inv = sparse.lil_matrix((form_size,form_size))

            stardiag = data.dual_volume / data.primal_volume
            N = len(stardiag)
            data.star = sparse.spdiags([stardiag], [0], N, N, format='csr')
            data.star_inv = sparse.spdiags([1.0/stardiag],[0], N, N, format='csr')

            #Choose sign of star_inv to satisfy (star_inv * star) = -1 ^(k*(n-k))
            n,k = self.complex_dimension(),dim
            data.star_inv *= (-1) ** (k * (n - k))

    def compute_circumcenters(self,dim):
        """Compute circumcenters for all simplices at a given dimension
        """
        data = self[dim]
        data.circumcenter = zeros((data.num_simplices,self.embedding_dimension()))
        for i,s in enumerate(data.simplices):
            pts = self.vertices[[x for x in s],:]
            data.circumcenter[i] = circumcenter(pts)[0]

    def compute_primal_volume(self,dim):
        """Compute the volume of all simplices for a given dimension

        If the top simplex is of the same dimension as its embedding,
        the signed volume is computed.
        """
        data = self[dim]
        data.primal_volume = zeros((data.num_simplices,))
        if dim == self.embedding_dimension():
            # NOTE(review): this branch iterates the top-level simplices
            # (self.simplices); it only runs when dim equals both the
            # complex and embedding dimensions, where the two coincide.
            for i,s in enumerate(self.simplices):
                pts = self.vertices[s,:]
                data.primal_volume[i] = signed_volume(pts)
        else:
            for i,s in enumerate(data.simplices):
                pts = self.vertices[s,:]
                data.primal_volume[i] = unsigned_volume(pts)

    def compute_dual_volume(self):
        """Compute dual volumes for simplices of all dimensions
        """
        for dim,data in enumerate(self):
            data.dual_volume = zeros((data.num_simplices,))
        # Scratch array of circumcenters, one per dimension, reused as the
        # recursion descends each top-level simplex's face lattice.
        temp_centers = zeros((self.complex_dimension()+1,self.embedding_dimension()))
        for i,s in enumerate(self.simplices):
            self.__compute_dual_volume(simplex(s),temp_centers,self.complex_dimension())

    def __compute_dual_volume(self,s,pts,dim):
        # Accumulate the volume of the dual cell fragment spanned by the
        # circumcenters collected so far, then recurse into the faces.
        data = self[dim]
        index = data.simplex_to_index[s]
        pts[dim] = data.circumcenter[index]
        data.dual_volume[index] += unsigned_volume(pts[dim:,:])
        if dim > 0:
            for bs in s.boundary():
                self.__compute_dual_volume(bs,pts,dim-1)

    def boundary(self):
        """Return a list of the boundary simplices, i.e. the faces of the top level simplices that occur only once
        """
        # NOTE: this module targets Python 2 (iterkeys/iteritems).
        assert(self.complex_dimension() > 0)
        face_count = dict.fromkeys(self[self.complex_dimension() - 1].simplex_to_index.iterkeys(),0)
        for s in self[self.complex_dimension()].simplex_to_index.iterkeys():
            for f in s.boundary():
                face_count[f] += 1
        boundary_faces = [f for f,count in face_count.iteritems() if count == 1]
        return boundary_faces

    class data_cache:
        """caches the result of costly operations"""
        def __getattr__(self,attr):
            #print "constructing: ",attr
            if attr == "d":
                # Only reached when 'd' was never assigned by build_complex;
                # NOTE(review): in that case self.d recurses into __getattr__.
                return self.d.tocsr()
            elif attr == "star":
                self.complex.construct_hodge()
                return self.star
            elif attr == "star_inv":
                self.complex.construct_hodge()
                return self.star_inv
            elif attr == "circumcenter":
                self.complex.compute_circumcenters(self.dim)
                return self.circumcenter
            elif attr == "primal_volume":
                self.complex.compute_primal_volume(self.dim)
                return self.primal_volume
            elif attr == "dual_volume":
                self.complex.compute_dual_volume()
                return self.dual_volume
            elif attr == "simplex_to_index":
                self.simplex_to_index = dict(zip([simplex(x) for x in self.simplices],xrange(len(self.simplices))))
                return self.simplex_to_index
            elif attr == "index_to_simplex":
                # (bug fix) this branch previously stored the inverted
                # mapping in self.simplex_to_index, clobbering that cache
                # and recomputing index_to_simplex on every access.
                self.index_to_simplex = dict(zip(xrange(len(self.simplices)),[simplex(x) for x in self.simplices]))
                return self.index_to_simplex
            else:
                # Parenthesized raise: required syntax under Python 3.
                raise AttributeError(attr + " not found")
# Backwards-compatible alias for the pre-lowercase class name.
SimplicialComplex = simplicial_complex
| {
"repo_name": "DongliangGao/pydec",
"path": "pydec/dec/simplicial_complex.py",
"copies": "6",
"size": "9674",
"license": "bsd-3-clause",
"hash": 2920776700342406000,
"line_mean": 33.304964539,
"line_max": 115,
"alpha_frac": 0.5461029564,
"autogenerated": false,
"ratio": 3.9055308841340333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7451633840534032,
"avg_score": null,
"num_lines": null
} |
__all__ = ['simplicial_grid_2d','cube_grid']
from scipy import zeros,resize,arange,ravel,concatenate,matrix, \
transpose,prod,mgrid,ndindex,sum,array,cumprod,tile,ones
import scipy
def simplicial_grid_2d(n):
    """
    Create an NxN 2d triangulated grid in the unit square.

    The number of vertices along each axis is (N+1) for a total of
    (N+1)x(N+1) vertices.

    A tuple (vertices,indices) of arrays is returned: vertices has shape
    ((N+1)**2, 2) and indices has shape (2*N**2, 3), two triangles per cell.
    """
    # Vertex coordinates: x varies fastest, then y; scaled into [0,1].
    vertices = zeros(((n+1)**2,2))
    vertices[:,0] = ravel(resize(arange(n+1),(n+1,n+1)))
    vertices[:,1] = ravel(transpose(resize(arange(n+1),(n+1,n+1))))
    vertices /= n

    # 'int32' replaces the removed scipy.int32 alias (identical dtype).
    indices = zeros((2*(n**2),3),'int32')

    # Triangle pairs for the bottom row of cells...
    t1 = transpose(concatenate((matrix(arange(n)),matrix(arange(1,n+1)),matrix(arange(n+2,2*n+2))),axis=0))
    t2 = transpose(concatenate((matrix(arange(n)),matrix(arange(n+2,2*n+2)),matrix(arange(n+1,2*n+1))),axis=0))
    first_row = concatenate((t1,t2))

    # ...shifted up one row of vertices at a time (range replaces the
    # Python 2-only xrange; identical iteration).
    for i in range(n):
        indices[(2*n*i):(2*n*(i+1)),:] = first_row + i*(n+1)

    return (vertices,indices)
def cube_grid(dims):
    """
    Return a regular nD-cube mesh with given shape.

    Eg.
      cube_grid_nd((2,2))   -> 2x2   - 2d mesh (x,y)
      cube_grid_nd((4,3,2)) -> 4x3x2 - 3d mesh (x,y,z)

    Eg.

      v,i = cube_grid_nd((2,1))

      v =
      array([[ 0.,  0.],
             [ 1.,  0.],
             [ 2.,  0.],
             [ 0.,  1.],
             [ 1.,  1.],
             [ 2.,  1.]])

      i =
      array([[[0, 3],
              [1, 4]],

             [[1, 4],
              [2, 5]]])
    """
    dims = tuple(dims)
    vert_dims = tuple(x+1 for x in dims)
    N = len(dims)

    # Vertex coordinates: axis 0 varies fastest.
    vertices = zeros((prod(vert_dims),N))
    grid = mgrid[tuple(slice(0,x,None) for x in reversed(vert_dims))]
    for i in range(N):
        vertices[:,i] = ravel(grid[N-i-1])

    #construct one cube to be tiled
    cube = zeros((2,)*N,dtype='i')
    # 'cycle' holds the flat-index stride of each axis; the original
    # recomputed it a second time after this loop (redundant, removed).
    cycle = array([1] + list(cumprod(vert_dims)[:-1]),dtype='i')
    for i in ndindex(*((2,)*N)):
        cube[i] = sum(array(i) * cycle)

    #indices of all vertices which are the lower corner of a cube
    interior_indices = arange(prod(vert_dims)).reshape(tuple(reversed(vert_dims))).T
    interior_indices = interior_indices[tuple(slice(0,x,None) for x in dims)]

    # Tile the template cube over every lower-corner vertex index.
    indices = tile(cube,(prod(dims),) + (1,)*N) + interior_indices.reshape((prod(dims),) + (1,)*N)

    return (vertices,indices)
| {
"repo_name": "whereisravi/pydec",
"path": "pydec/mesh/generation.py",
"copies": "7",
"size": "2495",
"license": "bsd-3-clause",
"hash": 2301288058153898000,
"line_mean": 27.3522727273,
"line_max": 111,
"alpha_frac": 0.5386773547,
"autogenerated": false,
"ratio": 2.9352941176470586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6973971472347059,
"avg_score": null,
"num_lines": null
} |
__all__ = ["simulate_displacement_field"]
import numpy as np
from ..core import ants_image as iio
from .. import utils
def simulate_displacement_field(domain_image,
                                field_type="bspline",
                                number_of_random_points=1000,
                                sd_noise=10.0,
                                enforce_stationary_boundary=True,
                                number_of_fitting_levels=4,
                                mesh_size=1,
                                sd_smoothing=4.0):
    """
    simulate displacement field using either b-spline or exponential transform

    ANTsR function: `simulateDisplacementField`

    Arguments
    ---------
    domain_image : ANTsImage
        Domain image

    field_type : string
        Either "bspline" or "exponential".

    number_of_random_points : integer
        Number of displacement points.

    sd_noise : float
        Standard deviation of the displacement field noise.

    enforce_stationary_boundary : boolean
        Determines fixed boundary conditions.

    number_of_fitting_levels : integer
        Number of fitting levels (b-spline only).

    mesh_size : integer or n-D tuple
        Determines fitting resolution at base level (b-spline only).

    sd_smoothing : float
        Standard deviation of the Gaussian smoothing in mm (exponential only).

    Returns
    -------
    ANTs vector image.

    Example
    -------
    >>> import ants
    >>> domain = ants.image_read( ants.get_ants_data('r16'))
    >>> exp_field = ants.simulate_displacement_field(domain, field_type="exponential")
    >>> bsp_field = ants.simulate_displacement_field(domain, field_type="bspline")
    >>> bsp_xfrm = ants.transform_from_displacement_field(bsp_field * 3)
    >>> domain_warped = ants.apply_ants_transform_to_image(bsp_xfrm, domain, domain)
    """
    image_dimension = domain_image.dimension

    if field_type == 'bspline':
        # mesh_size may be a scalar (applied to every dimension) or a
        # per-dimension sequence of length image_dimension.
        if not isinstance(mesh_size, int) and len(mesh_size) != image_dimension:
            raise ValueError("Incorrect specification for mesh_size.")

        spline_order = 3
        if isinstance(mesh_size, int):
            number_of_control_points = np.repeat(mesh_size + spline_order, image_dimension)
        else:
            # The original 'mesh_size + spline_order' raised TypeError for the
            # documented tuple input; add the spline order element-wise.
            number_of_control_points = np.asarray(mesh_size) + spline_order

        libfn = utils.get_lib_fn("simulateBsplineDisplacementField%iD" % image_dimension)
        field = libfn(domain_image.pointer, number_of_random_points, sd_noise,
                      enforce_stationary_boundary, number_of_fitting_levels, number_of_control_points)
        bspline_field = iio.ANTsImage(pixeltype='float',
                                      dimension=image_dimension, components=image_dimension,
                                      pointer=field).clone('float')
        return bspline_field

    elif field_type == 'exponential':
        libfn = utils.get_lib_fn("simulateExponentialDisplacementField%iD" % image_dimension)
        field = libfn(domain_image.pointer, number_of_random_points, sd_noise,
                      enforce_stationary_boundary, sd_smoothing)
        exp_field = iio.ANTsImage(pixeltype='float',
                                  dimension=image_dimension, components=image_dimension,
                                  pointer=field).clone('float')
        return exp_field

    else:
        raise ValueError("Unrecognized field type.")
| {
"repo_name": "ANTsX/ANTsPy",
"path": "ants/utils/simulate_displacement_field.py",
"copies": "1",
"size": "3368",
"license": "apache-2.0",
"hash": -1728118725614469000,
"line_mean": 35.6086956522,
"line_max": 102,
"alpha_frac": 0.6199524941,
"autogenerated": false,
"ratio": 4.142681426814268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5262633920914268,
"avg_score": null,
"num_lines": null
} |
"""All single flight related functionalities and representations"""
from flight_genie.utils import (
get_value_by_key_in_pairs_list,
get_numerical_value,
month_day_from_date,
weekday_from_date,
country_from_airport,
city_code_from_airport,
days_in_range,
)
class Flight(object):
    """Simple representation of a flight. Just containing properties."""

    # Canonical attribute order for every flight record.
    PARAMETERS = (
        'date',
        'dayofmonth',
        'weekday',
        'outbounddate',
        'outbounddayofmonth',
        'outboundweekday',
        'inbounddate',
        'inbounddayofmonth',
        'inboundweekday',
        'originairport',
        'origincitycode',
        'origincountry',
        'destinationairport',
        'destinationcitycode',
        'destinationcountry',
        'carriercode',
        'carriertype',
        'adults',
        'children',
        'daystodeparture',
        'dayslengthofstay',
        'priceusd',
        'platform',
        'isota'
    )

    def __init__(self, pairs_list):
        """Sets the flight properties from a list of (name, value) pairs."""
        self.__pairs_list = pairs_list

    @classmethod
    def get_from_core_data(cls, pairs_list):
        """Get a full flight from the core data.

        Infers the values of any missing parameters from their core inputs
        via INFERRING_FUNCTIONS. See README for more details.
        """
        full_pairs_list = []
        current_parameters = [p[0] for p in pairs_list]
        for param in cls.PARAMETERS:
            if param in current_parameters:
                param_val = get_value_by_key_in_pairs_list(pairs_list, param)
            else:
                # Derive the missing parameter from its declared core inputs.
                inferring_dict = cls.INFERRING_FUNCTIONS[param]
                core_vals = [get_value_by_key_in_pairs_list(pairs_list, c)
                             for c in inferring_dict['core']]
                param_val = inferring_dict['function'](*core_vals)
            full_pairs_list.append((param, param_val))
        return cls(full_pairs_list)

    def get_attribute(self, attr_name):
        """Gets the value of the attribute labeled attr_name."""
        return get_value_by_key_in_pairs_list(self.__pairs_list, attr_name)

    def to_numerical_list(self, excluded_attributes=()):
        """Return a list of numbers in attribute order.

        excluded_attributes: attribute names to skip.  (The default is an
        immutable tuple rather than the original mutable [] to avoid the
        shared-mutable-default pitfall; membership semantics are unchanged.)
        """
        return [get_numerical_value(pair[1])
                for pair in self.__pairs_list if
                pair[0] not in excluded_attributes]

    def to_string_list(self, excluded_attributes=()):
        """Return an array of strings by a certain order."""
        return [str(v) for v in self.to_numerical_list(excluded_attributes)]

    def get_travellers_count(self):
        """Return the number of adults + children for the purchase."""
        return (float(self.get_attribute("adults")) +
                float(self.get_attribute("children")))

    def get_price_per_ticket(self):
        """Get the price for a single ticket in the purchase."""
        return (float(self.get_attribute('priceusd')) /
                self.get_travellers_count())

    def __str__(self):
        """A good representation as a string."""
        # join() avoids quadratic += concatenation; output (including the
        # trailing space) is identical to the original loop.
        return ''.join('{}: {} '.format(name, value)
                       for name, value in self.__pairs_list)

    # Maps each derivable parameter to its core input attributes and the
    # function that computes it from them.
    INFERRING_FUNCTIONS = {
        'dayofmonth': {
            'core': ['date'],
            'function': month_day_from_date
        },
        'weekday': {
            'core': ['date'],
            'function': weekday_from_date
        },
        'outbounddayofmonth': {
            'core': ['outbounddate'],
            'function': month_day_from_date
        },
        'outboundweekday': {
            'core': ['outbounddate'],
            'function': weekday_from_date
        },
        'inbounddayofmonth': {
            'core': ['inbounddate'],
            'function': month_day_from_date
        },
        'inboundweekday': {
            'core': ['inbounddate'],
            'function': weekday_from_date
        },
        'origincitycode': {
            'core': ['originairport'],
            'function': city_code_from_airport
        },
        'origincountry': {
            'core': ['originairport'],
            'function': country_from_airport
        },
        'destinationcitycode': {
            'core': ['destinationairport'],
            'function': city_code_from_airport
        },
        'destinationcountry': {
            'core': ['destinationairport'],
            'function': country_from_airport
        },
        'daystodeparture': {
            'core': ['date', 'outbounddate'],
            'function': days_in_range
        },
        'dayslengthofstay': {
            'core': ['outbounddate', 'inbounddate'],
            'function': days_in_range
        },
    }
| {
"repo_name": "lyuboraykov/flight-genie",
"path": "flight_genie/flight.py",
"copies": "1",
"size": "4732",
"license": "mit",
"hash": 5350743397279617000,
"line_mean": 31.1904761905,
"line_max": 79,
"alpha_frac": 0.5422654269,
"autogenerated": false,
"ratio": 3.986520640269587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 147
} |
__all__ = ['sisotool']
from control.exception import ControlMIMONotImplemented
from .freqplot import bode_plot
from .timeresp import step_response
from .lti import issiso, isdtime
from .xferfcn import TransferFunction
from .bdalg import append, connect
import matplotlib
import matplotlib.pyplot as plt
import warnings
def sisotool(sys, kvect=None, xlim_rlocus=None, ylim_rlocus=None,
             plotstr_rlocus='C0', rlocus_grid=False, omega=None, dB=None,
             Hz=None, deg=None, omega_limits=None, omega_num=None,
             margins_bode=True, tvect=None):
    """
    Sisotool style collection of plots inspired by MATLAB's sisotool.
    The left two plots contain the bode magnitude and phase diagrams.
    The top right plot is a clickable root locus plot, clicking on the
    root locus will change the gain of the system. The bottom left plot
    shows a closed loop time response.

    Parameters
    ----------
    sys : LTI object
        Linear input/output systems. If sys is SISO, use the same
        system for the root locus and step response. If it is desired to
        see a different step response than feedback(K*loop,1), sys can be
        provided as a two-input, two-output system (e.g. by using
        :func:`bdgalg.connect' or :func:`iosys.interconnect`). Sisotool
        inserts the negative of the selected gain K between the first output
        and first input and uses the second input and output for computing
        the step response. This allows you to see the step responses of more
        complex systems, for example, systems with a feedforward path into the
        plant or in which the gain appears in the feedback path.
    kvect : list or ndarray, optional
        List of gains to use for plotting root locus
    xlim_rlocus : tuple or list, optional
        control of x-axis range, normally with tuple
        (see :doc:`matplotlib:api/axes_api`).
    ylim_rlocus : tuple or list, optional
        control of y-axis range
    plotstr_rlocus : :func:`matplotlib.pyplot.plot` format string, optional
        plotting style for the root locus plot(color, linestyle, etc)
    rlocus_grid : boolean (default = False)
        If True plot s- or z-plane grid.
    omega : array_like
        List of frequencies in rad/sec to be used for bode plot
    dB : boolean
        If True, plot result in dB for the bode plot
    Hz : boolean
        If True, plot frequency in Hz for the bode plot (omega must be provided in rad/sec)
    deg : boolean
        If True, plot phase in degrees for the bode plot (else radians)
    omega_limits : array_like of two values
        Limits of the to generate frequency vector.
        If Hz=True the limits are in Hz otherwise in rad/s. Ignored if omega
        is provided, and auto-generated if omitted.
    omega_num : int
        Number of samples to plot. Defaults to
        config.defaults['freqplot.number_of_samples'].
    margins_bode : boolean
        If True, plot gain and phase margin in the bode plot
    tvect : list or ndarray, optional
        List of timesteps to use for closed loop step response

    Examples
    --------
    >>> sys = tf([1000], [1,25,100,0])
    >>> sisotool(sys)
    """
    # Imported here rather than at module level — NOTE(review): presumably
    # to avoid a circular import with the rlocus module; confirm.
    from .rlocus import root_locus

    # sys as loop transfer function if SISO; otherwise require the 2x2
    # (loop + step-response channel) form described in the docstring.
    if not sys.issiso():
        if not (sys.ninputs == 2 and sys.noutputs == 2):
            raise ControlMIMONotImplemented(
                'sys must be SISO or 2-input, 2-output')

    # Setup sisotool figure or superimpose if one is already present.
    # The window title doubles as the marker for an existing sisotool figure.
    fig = plt.gcf()
    if fig.canvas.manager.get_window_title() != 'Sisotool':
        plt.close(fig)
        fig,axes = plt.subplots(2, 2)
        fig.canvas.manager.set_window_title('Sisotool')

    # Extract bode plot parameters; passed through to bode_plot on every
    # subsequent gain update as well.
    bode_plot_params = {
        'omega': omega,
        'dB': dB,
        'Hz': Hz,
        'deg': deg,
        'omega_limits': omega_limits,
        'omega_num' : omega_num,
        'sisotool': True,
        'fig': fig,
        'margins': margins_bode
    }

    # First time call to setup the bode and step response plots
    # (initial gain is 1 unless a gain vector was supplied).
    _SisotoolUpdate(sys, fig,
        1 if kvect is None else kvect[0], bode_plot_params)

    # Setup the root-locus plot window; root_locus re-invokes
    # _SisotoolUpdate on every click with the newly selected gain.
    root_locus(sys, kvect=kvect, xlim=xlim_rlocus,
               ylim=ylim_rlocus, plotstr=plotstr_rlocus, grid=rlocus_grid,
               fig=fig, bode_plot_params=bode_plot_params, tvect=tvect, sisotool=True)
def _SisotoolUpdate(sys, fig, K, bode_plot_params, tvect=None):
    """Redraw the bode, root-locus labels, and step-response axes for gain K.

    Called once at startup and again on every root-locus click.
    """
    title_font_size = 10
    label_font_size = 8

    # Get the subaxes and clear them (fixed layout created by sisotool():
    # [0]=magnitude, [1]=root locus, [2]=phase, [3]=step response).
    ax_mag, ax_rlocus, ax_phase, ax_step = \
        fig.axes[0], fig.axes[1], fig.axes[2], fig.axes[3]

    # Catch matplotlib 2.1.x and higher userwarnings when clearing a log axis
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        ax_step.clear(), ax_mag.clear(), ax_phase.clear()

    # For the 2x2 form, channel (0,0) is the open-loop transfer function.
    sys_loop = sys if sys.issiso() else sys[0,0]

    # Update the bodeplot with the loop gain applied.
    bode_plot_params['syslist'] = sys_loop*K.real
    bode_plot(**bode_plot_params)

    # Set the titles and labels
    ax_mag.set_title('Bode magnitude',fontsize = title_font_size)
    ax_mag.set_ylabel(ax_mag.get_ylabel(), fontsize=label_font_size)
    ax_mag.tick_params(axis='both', which='major', labelsize=label_font_size)

    ax_phase.set_title('Bode phase',fontsize=title_font_size)
    ax_phase.set_xlabel(ax_phase.get_xlabel(),fontsize=label_font_size)
    ax_phase.set_ylabel(ax_phase.get_ylabel(),fontsize=label_font_size)
    ax_phase.get_xaxis().set_label_coords(0.5, -0.15)
    # Share the frequency axis between magnitude and phase plots.
    ax_phase.get_shared_x_axes().join(ax_phase, ax_mag)
    ax_phase.tick_params(axis='both', which='major', labelsize=label_font_size)

    ax_step.set_title('Step response',fontsize = title_font_size)
    ax_step.set_xlabel('Time (seconds)',fontsize=label_font_size)
    ax_step.set_ylabel('Output',fontsize=label_font_size)
    ax_step.get_xaxis().set_label_coords(0.5, -0.15)
    ax_step.get_yaxis().set_label_coords(-0.15, 0.5)
    ax_step.tick_params(axis='both', which='major', labelsize=label_font_size)

    ax_rlocus.set_title('Root locus',fontsize = title_font_size)
    ax_rlocus.set_ylabel('Imag', fontsize=label_font_size)
    ax_rlocus.set_xlabel('Real', fontsize=label_font_size)
    ax_rlocus.get_xaxis().set_label_coords(0.5, -0.15)
    ax_rlocus.get_yaxis().set_label_coords(-0.15, 0.5)
    ax_rlocus.tick_params(axis='both', which='major',labelsize=label_font_size)

    # Generate the step response and plot it
    if sys.issiso():
        sys_closed = (K*sys).feedback(1)
    else:
        # Append -K as a third subsystem, then wire output 1 -> gain input
        # and gain output -> input 1 to close the loop; the remaining
        # input/output pair (2,2) is the step-response channel.
        sys_closed = append(sys, -K)
        connects = [[1, 3],
                    [3, 1]]
        sys_closed = connect(sys_closed, connects, 2, 2)
    if tvect is None:
        tvect, yout = step_response(sys_closed, T_num=100)
    else:
        tvect, yout = step_response(sys_closed, tvect)
    # Discrete-time responses are drawn as samples, continuous as a line.
    if isdtime(sys_closed, strict=True):
        ax_step.plot(tvect, yout, '.')
    else:
        ax_step.plot(tvect, yout)
    # Reference line at the steady-state target of a unit step.
    ax_step.axhline(1.,linestyle=':',color='k',zorder=-20)

    # Manually adjust the spacing and draw the canvas
    fig.subplots_adjust(top=0.9,wspace = 0.3,hspace=0.35)
    fig.canvas.draw()
| {
"repo_name": "python-control/python-control",
"path": "control/sisotool.py",
"copies": "1",
"size": "7184",
"license": "bsd-3-clause",
"hash": -7654380755537825000,
"line_mean": 39.3595505618,
"line_max": 91,
"alpha_frac": 0.6570155902,
"autogenerated": false,
"ratio": 3.3320964749536177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9446746300298009,
"avg_score": 0.008473152971121754,
"num_lines": 178
} |
__all__ = ['sisotool']
from .freqplot import bode_plot
from .timeresp import step_response
from .lti import issiso, isdtime
import matplotlib
import matplotlib.pyplot as plt
import warnings
def sisotool(sys, kvect = None, xlim_rlocus = None, ylim_rlocus = None,
             plotstr_rlocus = 'b' if int(matplotlib.__version__[0]) == 1 else 'C0',
             rlocus_grid = False, omega = None, dB = None, Hz = None,
             deg = None, omega_limits = None, omega_num = None,
             margins_bode = True, tvect=None):
    # NOTE: the plotstr_rlocus default is evaluated once at import time and
    # picks 'b' for matplotlib 1.x (which lacks the 'C0' color cycle alias).
    """
    Sisotool style collection of plots inspired by MATLAB's sisotool.
    The left two plots contain the bode magnitude and phase diagrams.
    The top right plot is a clickable root locus plot, clicking on the
    root locus will change the gain of the system. The bottom left plot
    shows a closed loop time response.

    Parameters
    ----------
    sys : LTI object
        Linear input/output systems (SISO only)
    kvect : list or ndarray, optional
        List of gains to use for plotting root locus
    xlim_rlocus : tuple or list, optional
        control of x-axis range, normally with tuple (see matplotlib.axes)
    ylim_rlocus : tuple or list, optional
        control of y-axis range
    plotstr_rlocus : Additional options to matplotlib
        plotting style for the root locus plot(color, linestyle, etc)
    rlocus_grid: boolean (default = False)
        If True plot s-plane grid.
    omega : freq_range
        Range of frequencies in rad/sec for the bode plot
    dB : boolean
        If True, plot result in dB for the bode plot
    Hz : boolean
        If True, plot frequency in Hz for the bode plot (omega must be provided in rad/sec)
    deg : boolean
        If True, plot phase in degrees for the bode plot (else radians)
    omega_limits: tuple, list, ... of two values
        Limits of the to generate frequency vector.
        If Hz=True the limits are in Hz otherwise in rad/s.
    omega_num: int
        number of samples
    margins_bode : boolean
        If True, plot gain and phase margin in the bode plot
    tvect : list or ndarray, optional
        List of timesteps to use for closed loop step response

    Examples
    --------
    >>> sys = tf([1000], [1,25,100,0])
    >>> sisotool(sys)
    """
    # Imported here rather than at module level — NOTE(review): presumably
    # to avoid a circular import with the rlocus module; confirm.
    from .rlocus import root_locus

    # Check if it is a single SISO system (raises otherwise).
    issiso(sys,strict=True)

    # Setup sisotool figure or superimpose if one is already present;
    # the window title marks an existing sisotool figure.
    fig = plt.gcf()
    if fig.canvas.get_window_title() != 'Sisotool':
        plt.close(fig)
        fig,axes = plt.subplots(2, 2)
        fig.canvas.set_window_title('Sisotool')

    # Extract bode plot parameters; reused on every gain update.
    bode_plot_params = {
        'omega': omega,
        'dB': dB,
        'Hz': Hz,
        'deg': deg,
        'omega_limits': omega_limits,
        'omega_num' : omega_num,
        'sisotool': True,
        'fig': fig,
        'margins': margins_bode
    }

    # First time call to setup the bode and step response plots
    # (initial gain is 1 unless a gain vector was supplied).
    _SisotoolUpdate(sys, fig,1 if kvect is None else kvect[0],bode_plot_params)

    # Setup the root-locus plot window; clicks re-invoke _SisotoolUpdate.
    root_locus(sys,kvect=kvect,xlim=xlim_rlocus,ylim = ylim_rlocus,plotstr=plotstr_rlocus,grid = rlocus_grid,fig=fig,bode_plot_params=bode_plot_params,tvect=tvect,sisotool=True)
def _SisotoolUpdate(sys,fig,K,bode_plot_params,tvect=None):
    """Redraw the bode, axis labels, and step-response plot for gain K.

    Called once at startup and again on every root-locus click.
    """
    # Font sizes differ between matplotlib 1.x and later releases.
    if int(matplotlib.__version__[0]) == 1:
        title_font_size = 12
        label_font_size = 10
    else:
        title_font_size = 10
        label_font_size = 8

    # Get the subaxes and clear them (fixed layout created by sisotool():
    # [0]=magnitude, [1]=root locus, [2]=phase, [3]=step response).
    ax_mag,ax_rlocus,ax_phase,ax_step = fig.axes[0],fig.axes[1],fig.axes[2],fig.axes[3]

    # Catch matplotlib 2.1.x and higher userwarnings when clearing a log axis
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        ax_step.clear(), ax_mag.clear(), ax_phase.clear()

    # Update the bodeplot with the loop gain applied.
    bode_plot_params['syslist'] = sys*K.real
    bode_plot(**bode_plot_params)

    # Set the titles and labels
    ax_mag.set_title('Bode magnitude',fontsize = title_font_size)
    ax_mag.set_ylabel(ax_mag.get_ylabel(), fontsize=label_font_size)

    ax_phase.set_title('Bode phase',fontsize=title_font_size)
    ax_phase.set_xlabel(ax_phase.get_xlabel(),fontsize=label_font_size)
    ax_phase.set_ylabel(ax_phase.get_ylabel(),fontsize=label_font_size)
    ax_phase.get_xaxis().set_label_coords(0.5, -0.15)
    # Share the frequency axis between magnitude and phase plots.
    ax_phase.get_shared_x_axes().join(ax_phase, ax_mag)

    ax_step.set_title('Step response',fontsize = title_font_size)
    ax_step.set_xlabel('Time (seconds)',fontsize=label_font_size)
    ax_step.set_ylabel('Amplitude',fontsize=label_font_size)
    ax_step.get_xaxis().set_label_coords(0.5, -0.15)
    ax_step.get_yaxis().set_label_coords(-0.15, 0.5)

    ax_rlocus.set_title('Root locus',fontsize = title_font_size)
    ax_rlocus.set_ylabel('Imag', fontsize=label_font_size)
    ax_rlocus.set_xlabel('Real', fontsize=label_font_size)
    ax_rlocus.get_xaxis().set_label_coords(0.5, -0.15)
    ax_rlocus.get_yaxis().set_label_coords(-0.15, 0.5)

    # Generate the step response and plot it (unity negative feedback).
    sys_closed = (K*sys).feedback(1)
    if tvect is None:
        tvect, yout = step_response(sys_closed, T_num=100)
    else:
        tvect, yout = step_response(sys_closed,tvect)
    # Discrete-time responses are drawn as samples, continuous as a line.
    if isdtime(sys_closed, strict=True):
        ax_step.plot(tvect, yout, 'o')
    else:
        ax_step.plot(tvect, yout)
    # Reference line at the steady-state target of a unit step.
    ax_step.axhline(1.,linestyle=':',color='k',zorder=-20)

    # Manually adjust the spacing and draw the canvas
    fig.subplots_adjust(top=0.9,wspace = 0.3,hspace=0.35)
    fig.canvas.draw()
| {
"repo_name": "roryyorke/python-control",
"path": "control/sisotool.py",
"copies": "1",
"size": "5605",
"license": "bsd-3-clause",
"hash": -2541408984538417700,
"line_mean": 36.119205298,
"line_max": 177,
"alpha_frac": 0.6508474576,
"autogenerated": false,
"ratio": 3.2268278641335635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9307267143648315,
"avg_score": 0.014081635617049705,
"num_lines": 151
} |
__all__ = ['SitemapIndex', 'Sitemap']
import os
import xml.etree.ElementTree as ET
from datetime import datetime
from lxml import etree
class BaseSiteMap(object):
    """Common machinery for sitemap XML documents backed by a file on disk."""

    _init_content = ''

    # Allowed values for a sitemap entry's <changefreq> element.
    CHANGEFREQ_ALWAYS = 'always'
    CHANGEFREQ_HOURLY = 'hourly'
    CHANGEFREQ_DAILY = 'daily'
    CHANGEFREQ_WEEKLY = 'weekly'
    CHANGEFREQ_MONTHLY = 'monthly'
    CHANGEFREQ_YEARLY = 'yearly'
    CHANGEFREQ_NEVER = 'never'

    def __init__(self, filename, timezone='Z'):
        """Load *filename*, seeding it with the subclass skeleton if absent."""
        self.filename = filename
        self.timezone = timezone
        if not os.path.exists(filename):
            self._init_file()
        self._read_file()

    def _init_file(self):
        # Write the subclass's empty-document skeleton to a brand-new file.
        with open(self.filename, 'w') as handle:
            handle.write(self._init_content)

    def _read_file(self):
        # Parse the on-disk XML (lxml) and keep both the tree and its root.
        self.tree = etree.parse(self.filename)
        self.root = self.tree.getroot()

    def save(self):
        """Serialize the current XML tree back to the backing file."""
        self.tree.write(self.filename, encoding='utf-8', xml_declaration=True)

    def _format_iso8601(self, obj):
        # W3C datetime (minute precision) suffixed with this instance's
        # timezone designator, e.g. '2020-01-02T03:04Z'.
        fmt = '%Y-%m-%dT%H:%M' + self.timezone
        return obj.strftime(fmt)
class SitemapIndex(BaseSiteMap):
    """A sitemap index document: a flat list of <sitemap><loc> entries."""
    _init_content = '''<sitemapindex xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"></sitemapindex>'''

    def add(self, index_url):
        """Append a <sitemap> entry whose <loc> is *index_url*.

        Note: ``ET.SubElement`` both creates the node AND attaches it to
        ``self.root``; the former extra ``self.root.append(sitemap)`` call
        duplicated every entry under ``xml.etree`` semantics, so it has
        been removed.
        """
        sitemap = ET.SubElement(self.root, 'sitemap')
        loc = ET.SubElement(sitemap, 'loc')
        loc.text = index_url
class Sitemap(BaseSiteMap):
    """A sitemap document: <url> entries with optional metadata children."""
    _init_content = '''<urlset xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:n="http://www.google.com/schemas/sitemap-news/0.9"
xmlns:image="http://www.google.com/schemas/sitemap-image/1.1"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd"></urlset>'''

    def add(self, url, lastmod=None, changefreq=None, priority=None, **kwargs):
        """Append a <url> entry for *url*.

        lastmod    -- datetime; written as a W3C datetime when given.
        changefreq -- one of the CHANGEFREQ_* constants.
        priority   -- 0.0-1.0; coerced to str so numeric values serialize.

        Note: ``ET.SubElement`` both creates the node AND attaches it to
        ``self.root``; the former extra ``self.root.append(sitemap)`` call
        duplicated every entry under ``xml.etree`` semantics, so it has
        been removed.
        """
        entry = ET.SubElement(self.root, 'url')
        loc = ET.SubElement(entry, 'loc')
        loc.text = url
        if lastmod and isinstance(lastmod, datetime):
            lm = ET.SubElement(entry, 'lastmod')
            lm.text = self._format_iso8601(lastmod)
        if changefreq:
            cf = ET.SubElement(entry, 'changefreq')
            cf.text = changefreq
        if priority:
            p = ET.SubElement(entry, 'priority')
            # str() keeps string callers unchanged while letting numeric
            # priorities serialize instead of failing at write time.
            p.text = str(priority)
| {
"repo_name": "inabhi9/pysitemapgen",
"path": "pysitemapgen/__init__.py",
"copies": "1",
"size": "2519",
"license": "mit",
"hash": -7470767598461785000,
"line_mean": 30.8860759494,
"line_max": 90,
"alpha_frac": 0.6292179436,
"autogenerated": false,
"ratio": 3.394878706199461,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9522656541136716,
"avg_score": 0.00028802173254891054,
"num_lines": 79
} |
__all__ = ['sm']
from math import exp, log10
def sm(hm, z=0):
"""stellar mass given halo mass, redshift (Eq. 3 of Behroozi+13)"""
a = 1.0/(1.0+z)
loghm = log10(hm)
logm1 = _logm1(a)
logsm = _logeps(a) + logm1 + _eff(loghm-logm1, a) - _eff(0., a)
return 10.0**logsm
def _eff(x, a):
alpha, delta, gamma = _alpha(a), _delta(a), _gamma(a)
f1 = -log10(10.0**(alpha*x)+1.0)
try:
f2 = delta*( log10(1.0+exp(x)) )**gamma / (1.+exp(10.0**(-x)))
except OverflowError:
f2 = 0.
return f1 + f2
def _nu(a):
return exp(-4.0*a*a)
def _logeps(a, eps_0=-1.777, eps_a=-0.006, eps_z=-0.000, eps_a2=-0.119):
return eps_0 + (eps_a*(a-1.0) + eps_z*(1.0/a-1.0))*_nu(a) + eps_a2*(a-1.0)
def _logm1(a, m1_0=11.514, m1_a=-1.793, m1_z=-0.251):
return m1_0 + (m1_a*(a-1.0) + m1_z*(1.0/a-1.0))*_nu(a)
def _alpha(a, alpha_0=-1.412, alpha_a=0.731):
return alpha_0 + (alpha_a*(a-1.0))*_nu(a)
def _delta(a, delta_0=3.508, delta_a=2.608, delta_z=-0.043):
return delta_0 + (delta_a*(a-1.0) + delta_z*(1.0/a-1.0))*_nu(a)
def _gamma(a, gamma_0=0.316, gamma_a=1.319, gamma_z=0.279):
return gamma_0 + (gamma_a*(a-1.0) + gamma_z*(1.0/a-1.0))*_nu(a)
def _xi(a, xi_0=0.218, xi_a=-0.023):
return xi_0 + xi_a*(a-1.0)
| {
"repo_name": "yymao/slackbots",
"path": "smhm.py",
"copies": "1",
"size": "1270",
"license": "mit",
"hash": 5018075728328070000,
"line_mean": 30.75,
"line_max": 78,
"alpha_frac": 0.5346456693,
"autogenerated": false,
"ratio": 1.9568567026194146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7946332116186949,
"avg_score": 0.009034051146493135,
"num_lines": 40
} |
__all__ = ['SortedDict']
from itertools import imap as _imap
class SortedDict(dict):
    """A ``dict`` that preserves key insertion order.

    With ``kvio=False`` (the default) the mapping keeps Key Insertion Order
    (KIO): re-assigning an existing key leaves its position unchanged.  With
    ``kvio=True`` it keeps Key Value Insertion Order (KVIO): re-assigning an
    existing key moves it to the back.  Keys may also be read/written as
    attributes; names beginning with ``_`` live on the instance itself.
    """
    def __init__(self, value=None, kvio=False):
        """
        SortedDict will implement Key Insertion Order (KIO: updates of values
        do not affect the position of the key), Key Value Insertion Order (KVIO,
        an existing key's position is removed and put at the back)
        """
        self._kvio = kvio
        self._fields = []   # keys, in insertion order
        self.update(value or {})
    def __setitem__(self, key, value, append=False, dict_setitem=dict.__setitem__):
        # New keys go to the back; existing keys move only under KVIO
        # (or when `append` is forced).
        if key not in self:
            self._fields.append(key)
        elif self._kvio or append:
            self._fields.remove(key)
            self._fields.append(key)
        return dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        # Underscore names are instance attributes, not mapping entries.
        if key.startswith('_'):
            del self.__dict__[key]
        else:
            dict_delitem(self, key)
            self._fields.remove(key)
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        for k in self._fields:
            yield k
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        for k in reversed(self._fields):
            yield k
    def __getattr__(self, key):
        # Missing keys read as None rather than raising AttributeError
        # (deliberate convenience behaviour of this class).
        try:
            return self.__getitem__(key)
        except KeyError:
            return None
    def __setattr__(self, key, value):
        if key.startswith('_'):
            self.__dict__[key] = value
        else:
            self.__setitem__(key, value)
    def __delattr__(self, key):
        try:
            self.__delitem__(key)
        except KeyError as k:
            raise AttributeError(k)
    def keys(self):
        # Returns the ordered key list itself (a list, not a view).
        return self._fields
    def values(self):
        return [self[k] for k in self._fields]
    def iterkeys(self):
        return iter(self)
    def itervalues(self):
        for k in self:
            yield self[k]
    def update(self, value):
        for k, v in value.items():
            self.__setitem__(k, v)
    def items(self):
        return [(k, self[k]) for k in self._fields]
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def pop(self, key, default=None):
        v = dict.pop(self, key, default)
        if key in self._fields:
            self._fields.remove(key)
        return v
    def __repr__(self):
        return '<%s {%s}>' % (self.__class__.__name__, ', '.join(['%r:%r' % (k, v) for k, v in sorted(self.items())]))
    def dict(self):
        return self
    def copy(self):
        return self.__class__(self)
    def setdefault(self, key, value=None):
        if key in self:
            return self[key]
        else:
            self[key] = value
            return value
    def clear(self):
        dict.clear(self)
        self._fields = []
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, SortedDict):
            # BUG FIX: the original used ``all(_imap(_eq, self, other))`` with
            # ``_eq`` undefined, so comparing two SortedDicts raised NameError.
            # Compare the key sequences pairwise for order sensitivity.
            return dict.__eq__(self, other) and all(
                p == q for p, q in zip(self, other))
        return dict.__eq__(self, other)
    def __ne__(self, other):
        'od.__ne__(y) <==> od!=y'
        return not self == other
| {
"repo_name": "limodou/uliweb",
"path": "uliweb/utils/sorteddict.py",
"copies": "2",
"size": "3624",
"license": "bsd-2-clause",
"hash": 4390575032423411700,
"line_mean": 27.2258064516,
"line_max": 118,
"alpha_frac": 0.4950331126,
"autogenerated": false,
"ratio": 4.151202749140894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5646235861740894,
"avg_score": null,
"num_lines": null
} |
"""All sorts of geo utils for measurement project
"""
import math, sys, getopt
def distance(origin, destination, radius=6371):
    """Great-circle distance between two (lat, lon) points, via Haversine.

    Coordinates are decimal degrees; the result is in the units of *radius*
    (kilometres by default, the FAI aviation value of 6371.0 km).
    """
    # Haversine gives great-circle distances on a sphere; on the real Earth it
    # is approximate (WGS84: 6378.137 km equatorial vs 6356.752 km polar).
    lat1, lon1 = origin
    lat2, lon2 = destination
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    half_chord = (math.sin(dlat / 2) * math.sin(dlat / 2)
                  + math.cos(math.radians(lat1))
                  * math.cos(math.radians(lat2))
                  * math.sin(dlon / 2) * math.sin(dlon / 2))
    central_angle = 2 * math.atan2(math.sqrt(half_chord),
                                   math.sqrt(1 - half_chord))
    return radius * central_angle
if __name__ == "__main__":
    # Self-test / demo entry point.  NOTE(review): this file is Python 2
    # (print statements, comma-style except clause) -- keep it that way
    # unless the whole module is ported.
    # parse command line options
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
    except getopt.error, msg:
        print msg
        print "for help use --help"
        sys.exit(2)
    # process options
    for o, a in opts:
        if o in ("-h", "--help"):
            # -h/--help prints the module docstring and exits successfully.
            print __doc__
            sys.exit(0)
    # Demo: distance between downtown Seattle and Olympia, WA (lat, lon).
    seattle = [47.621800, -122.350326]
    olympia = [47.041917, -122.893766]
    print "distance:", distance(seattle, olympia)
## {{{ http://code.activestate.com/recipes/577360/ (r1)
import threading
def threaded_map(func, data, timeout=None):
    """
    Similar to the builtin function map(), but spawn a thread for each argument
    and apply `func` concurrently.

    Note: unlike map(), we cannot take an iterable argument; `data` should be an
    indexable sequence.  When `timeout` is given it is applied to each join()
    individually, so the total wait can reach timeout * len(data), and slots
    whose call did not finish in time are left as None in the result.
    """
    N = len(data)
    result = [None] * N
    # wrapper to dispose the result in the right slot
    def task_wrapper(i):
        result[i] = func(data[i])
    # One thread per element (Python 2: xrange).  Daemon threads so stragglers
    # cannot keep the interpreter alive after the timeout path returns.
    threads = [threading.Thread(target=task_wrapper, args=(i,)) for i in xrange(N)]
    for t in threads:
        t.daemon = True
        t.start()
    for t in threads:
        t.join(timeout) if timeout else t.join()
    return result
## end of http://code.activestate.com/recipes/577360/ }}}
def outputException(e):
    """Pretty-print an exception with special-casing for rpyc errors.

    NOTE(review): the bare ``return`` as the first statement of the try block
    short-circuits the whole function, so everything below it is dead code --
    presumably a quick way to silence the diagnostics; confirm intent before
    re-enabling.  Also Python 2 only (print statements, ``rpyc.core.async``).
    """
    import rpyc, traceback
    try:
        return
        if type(e) is rpyc.core.async.AsyncResultTimeout:
            print 'Result Timeout'
        elif type(e) is EOFError:
            print e, '-----------'
        else:
            print e
        # Dump the traceback of whatever exception is currently being handled.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback)
    except:
        # Diagnostics must never raise themselves.
        return
| {
"repo_name": "nebgnahz/CS268NetworkMeasurement",
"path": "king/utilities.py",
"copies": "1",
"size": "2627",
"license": "bsd-2-clause",
"hash": -3475045028332974600,
"line_mean": 30.6506024096,
"line_max": 162,
"alpha_frac": 0.6478873239,
"autogenerated": false,
"ratio": 3.231242312423124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9148596396431673,
"avg_score": 0.046106647978290234,
"num_lines": 83
} |
__all__ = ['sources_list', 'newsApi_API', 'further_read_theNextWeb', 'invalid', 'thankyou', 'notready']
#functions
import json
from pprint import pprint
from urllib.request import urlopen
import binascii
from image_retriever import return_image
import re
from apikey import key
# Build the per-source newsapi.org endpoint URLs once at import time; the
# menu numbers in sources_list() index into this mapping.
apiKey = key()
keys = {'1': "https://newsapi.org/v1/articles?source=the-next-web&sortBy=latest&apiKey={}".format(apiKey)
    , '2': "https://newsapi.org/v1/articles?source=google-news&sortBy=top&apiKey={}".format(apiKey)}
def sources_list():
    """Show the news-source menu and return the user's numeric choice."""
    menu = ("Possible news sources:\n"
            "\t1: The Next Web\n"
            "\t2: Google News\n"
            "\t0: Exit\n")
    print(menu)
    return int(input("Enter your choice: "))
def newsApi_API(x):
    """Fetch headlines for menu choice *x*, list them, then offer follow-ups."""
    endpoint = keys[str(x)]
    print()
    payload = json.loads(urlopen(endpoint).read())
    # Numbered (1-based) headline listing.
    for idx, title in enumerate((d['title'] for d in payload['articles']), start=1):
        print("{}: {}".format(idx, title))
    print("\n")
    # Keep offering articles until the user declines.
    while True:
        answer = input("Would you like to read more of any of these articles? (y/n) ")
        if answer in ("y", "Y"):
            further_read_theNextWeb(payload)
        else:
            print("Exiting The Next Web... \n")
            break
def further_read_theNextWeb(data):
    """Show one article in detail: ASCII-art image, title, author, body, URL.

    *data* is the decoded newsapi.org payload; the user picks an article by
    its 1-based number from the listing printed by newsApi_API().
    """
    resp2 = int(input("Which one? "))-1
    s = data['articles'][resp2]
    url = s['url']
    html = urlopen(url).read()
    #correction begins here
    #string_lst = ['\\n', '\\r', '\\xc0', '\\xc1', 'sc2']
    # NOTE(review): scrapes the article body by splitting the raw HTML on the
    # site's "bt:body" meta tag -- brittle; any markup change breaks this, and
    # an IndexError results when the tag is missing.
    description = str(html).split("meta property=\"bt:body\" content=", 1)[1].split("\">", 1)[0]
    # Strip the escape sequences that survive the str(bytes) round-trip.
    description = description.replace("\\r\\n\\r\\n", ' ').replace("\\r\\n", ' ').replace("\\\'", "\'").replace("\\xc2", ' ').replace("\\xc0", ' ').replace("\\t", ' ').replace("\\xa0", ' ')
    #description = re.sub(r"(?=("+'|'.join(string_lst)+r"))", ' ', description)
    #escapes = ''.join([chr(char) for char in range(1, 32)])
    #description = description.translate(None, escapes)
    #correction ends here
    print(image(s['urlToImage']))
    print("\nTitle: {}\n\n\
Author: {}\n\n\
Description: {}\n\n\
To read more, go to {} \n".format(s['title'], s['author'], description, s['url']))
def image(url):
    # Thin wrapper: render the article image via image_retriever.return_image.
    # NOTE(review): 0.05 and 5 are passed straight through -- presumably a
    # scale factor and a size/contrast knob; confirm against return_image's
    # signature before changing.
    return return_image(url, 0.05, 5)
def invalid():
    """Tell the user their menu selection was not recognized."""
    # One write producing the same bytes as the original three print() calls.
    print("\nInvalid option, try again!\n\n")
def thankyou():
    """Print the farewell message shown on exit."""
    # One write producing the same bytes as the original three print() calls.
    print("\nThank you for using our news app!\n\n")
def notready():
    """Placeholder message for features that are not implemented yet."""
    # One write producing the same bytes as the original three print() calls.
    print("\nNot ready yet, check back later!\n\n")
"repo_name": "thepiprogrammer/PyNewsApp",
"path": "functions.py",
"copies": "1",
"size": "2418",
"license": "mit",
"hash": 77128028462996720,
"line_mean": 26.4886363636,
"line_max": 186,
"alpha_frac": 0.6244830438,
"autogenerated": false,
"ratio": 2.7539863325740317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8620584445985222,
"avg_score": 0.05157698607776198,
"num_lines": 88
} |
__all__ = ['splinequad']
import numpy as np
def splinequad(x):
    """Return the quadrature weights when a function is interpolated with cubic splines

    The function returns the values of the weights for a function that is
    evaluated at the locations x, so that

        int(f(x), x=a..b) = sum(w * f(x)),  with x a partition of [a, b]

    It is based on the formulae developed in "The use of quadrature weights in
    cubic spline integration", C. M. Leung, R. W. Quan, International Journal
    of Mathematical Education in Science and Technology, Volume 15, Issue 3, 1984
    http://www.tandfonline.com/doi/abs/10.1080/0020739840150306

    Args:
        x (array): abscissas where the function is evaluated (at least 4 points)

    Returns:
        float: array with the weights
    """
    x = np.asarray(x, dtype=float)
    n = len(x)
    dx = np.diff(x)                    # interval widths h_i = x[i+1] - x[i]
    w1 = np.zeros(n)                   # trapezoidal part of the weights
    w2 = np.zeros(n)                   # -h^3/24 correction coefficients
    a = np.zeros(n)
    b = np.zeros(n)
    c = np.zeros(n)
    alpha = np.zeros(n)
    beta = np.zeros(n)
    gamma = np.zeros(n)
    w1[0] = 0.5 * dx[0]
    w1[n-1] = 0.5 * dx[n-2]
    w2[0] = -1.0 / 24.0 * dx[0]**3
    w2[n-1] = -1.0 / 24.0 * dx[n-2]**3
    # First and last rows of U encode the spline end conditions.
    a[0] = -1.0 / dx[0]
    a[n-1] = -1.0 / dx[n-3]
    c[0] = -1.0 / dx[1]
    c[n-1] = -1.0 / dx[n-2]
    b[0] = -(a[0] + c[0])
    # BUG FIX: the original wrote ``a[n-1] = -(a[n-1] + c[n-1])``, clobbering
    # the end-condition coefficient set just above and leaving b[n-1] == 0.
    # The last row must mirror the first one.
    b[n-1] = -(a[n-1] + c[n-1])
    for i in range(1, n-1):
        w1[i] = 0.5 * (dx[i-1] + dx[i])
        w2[i] = -1.0 / 24.0 * (dx[i-1]**3 + dx[i]**3)
        a[i] = dx[i-1]
        c[i] = dx[i]
        b[i] = 2.0 * (a[i] + c[i])
        alpha[i] = 6.0 / dx[i-1]
        gamma[i] = 6.0 / dx[i]
        beta[i] = -(alpha[i] + gamma[i])
    # Assemble U*M = T*f, whose solution M gives the spline second
    # derivatives at the nodes.
    U = np.zeros((n, n))
    T = np.zeros((n, n))
    U[0, 0:3] = np.asarray([a[0], b[0], c[0]])
    U[n-1, -3:] = np.asarray([a[n-1], b[n-1], c[n-1]])
    for i in range(1, n-1):
        U[i, i-1:i+2] = np.asarray([a[i], b[i], c[i]])
        T[i, i-1:i+2] = np.asarray([alpha[i], beta[i], gamma[i]])
    # w = w1 + w2 . U^{-1} T ; solve() is cheaper and more stable than
    # explicitly forming inv(U).
    return w1 + w2.dot(np.linalg.solve(U, T))
"repo_name": "aasensio/pyiacsun",
"path": "pyiacsun/util/splinequad.py",
"copies": "1",
"size": "2002",
"license": "mit",
"hash": 7561980533589249000,
"line_mean": 26.0675675676,
"line_max": 129,
"alpha_frac": 0.507992008,
"autogenerated": false,
"ratio": 2.3608490566037736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8273944246467819,
"avg_score": 0.018979363627190847,
"num_lines": 74
} |
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
# These are in the API for fitpack even if not used in fitpack.py itself.
from ._fitpack_impl import bisplrep, bisplev, dblint
from . import _fitpack_impl as _impl
from ._bsplines import BSpline
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
            full_output=0, nest=None, per=0, quiet=1):
    """
    Find the B-spline representation of an N-D curve.

    Given a list of N rank-1 arrays, `x`, which represent a curve in
    N-D space parametrized by `u`, find a smooth approximating
    spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.

    Parameters
    ----------
    x : array_like
        A list of sample vector arrays representing the curve.
    w : array_like, optional
        Strictly positive rank-1 array of weights the same length as `x[0]`.
        The weights are used in computing the weighted least-squares spline
        fit. If the errors in the `x` values have standard-deviation given by
        the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
    u : array_like, optional
        An array of parameter values. If not given, these values are
        calculated automatically as ``M = len(x[0])``, where

            v[0] = 0

            v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)

            u[i] = v[i] / v[M-1]
    ub, ue : int, optional
        The end-points of the parameters interval. Defaults to
        u[0] and u[-1].
    k : int, optional
        Degree of the spline. Cubic splines are recommended.
        Even values of `k` should be avoided especially with a small s-value.
        ``1 <= k <= 5``, default is 3.
    task : int, optional
        If task==0 (default), find t and c for a given smoothing factor, s.
        If task==1, find t and c for another value of the smoothing factor, s.
        There must have been a previous call with task=0 or task=1
        for the same set of data.
        If task=-1 find the weighted least square spline for a given set of
        knots, t.
    s : float, optional
        A smoothing condition. The amount of smoothness is determined by
        satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
        where g(x) is the smoothed interpolation of (x,y). The user can
        use `s` to control the trade-off between closeness and smoothness
        of fit. Larger `s` means more smoothing while smaller values of `s`
        indicate less smoothing. Recommended values of `s` depend on the
        weights, w. If the weights represent the inverse of the
        standard-deviation of y, then a good `s` value should be found in
        the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
        data points in x, y, and w.
    t : int, optional
        The knots needed for task=-1.
    full_output : int, optional
        If non-zero, then return optional outputs.
    nest : int, optional
        An over-estimate of the total number of knots of the spline to
        help in determining the storage space. By default nest=m/2.
        Always large enough is nest=m+k+1.
    per : int, optional
        If non-zero, data points are considered periodic with period
        ``x[m-1] - x[0]`` and a smooth periodic spline approximation is
        returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
    quiet : int, optional
        Non-zero to suppress messages.
        This parameter is deprecated; use standard Python warning filters
        instead.

    Returns
    -------
    tck : tuple
        (t,c,k) a tuple containing the vector of knots, the B-spline
        coefficients, and the degree of the spline.
    u : array
        An array of the values of the parameter.
    fp : float
        The weighted sum of squared residuals of the spline approximation.
    ier : int
        An integer flag about splrep success. Success is indicated
        if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
        Otherwise an error is raised.
    msg : str
        A message corresponding to the integer flag, ier.

    See Also
    --------
    splrep, splev, sproot, spalde, splint,
    bisplrep, bisplev
    UnivariateSpline, BivariateSpline
    BSpline
    make_interp_spline

    Notes
    -----
    See `splev` for evaluation of the spline and its derivatives.
    The number of dimensions N must be smaller than 11.

    The number of coefficients in the `c` array is ``k+1`` less then the number
    of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
    the array of coefficients to have the same length as the array of knots.
    These additional coefficients are ignored by evaluation routines, `splev`
    and `BSpline`.

    References
    ----------
    .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
        parametric splines, Computer Graphics and Image Processing",
        20 (1982) 171-184.
    .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
        parametric splines", report tw55, Dept. Computer Science,
        K.U.Leuven, 1981.
    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
        Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------
    Generate a discretization of a limacon curve in the polar coordinates:

    >>> phi = np.linspace(0, 2.*np.pi, 40)
    >>> r = 0.5 + np.cos(phi)         # polar coords
    >>> x, y = r * np.cos(phi), r * np.sin(phi)    # convert to cartesian

    And interpolate:

    >>> from scipy.interpolate import splprep, splev
    >>> tck, u = splprep([x, y], s=0)
    >>> new_points = splev(u, tck)

    Notice that (i) we force interpolation by using `s=0`,
    (ii) the parameterization, ``u``, is generated automatically.
    Now plot the result:

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax.plot(x, y, 'ro')
    >>> ax.plot(new_points[0], new_points[1], 'r-')
    >>> plt.show()

    """
    # Pure delegation: all argument handling and FITPACK calls live in
    # scipy.interpolate._fitpack_impl.
    res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
                        quiet)
    return res
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
           full_output=0, per=0, quiet=1):
    """
    Find the B-spline representation of a 1-D curve.

    Given the set of data points ``(x[i], y[i])`` determine a smooth spline
    approximation of degree k on the interval ``xb <= x <= xe``.

    Parameters
    ----------
    x, y : array_like
        The data points defining a curve y = f(x).
    w : array_like, optional
        Strictly positive rank-1 array of weights the same length as x and y.
        The weights are used in computing the weighted least-squares spline
        fit. If the errors in the y values have standard-deviation given by the
        vector d, then w should be 1/d. Default is ones(len(x)).
    xb, xe : float, optional
        The interval to fit. If None, these default to x[0] and x[-1]
        respectively.
    k : int, optional
        The degree of the spline fit. It is recommended to use cubic splines.
        Even values of k should be avoided especially with small s values.
        1 <= k <= 5
    task : {1, 0, -1}, optional
        If task==0 find t and c for a given smoothing factor, s.
        If task==1 find t and c for another value of the smoothing factor, s.
        There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored an used internally)
        If task=-1 find the weighted least square spline for a given set of
        knots, t. These should be interior knots as knots on the ends will be
        added automatically.
    s : float, optional
        A smoothing condition. The amount of smoothness is determined by
        satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
        is the smoothed interpolation of (x,y). The user can use s to control
        the tradeoff between closeness and smoothness of fit. Larger s means
        more smoothing while smaller values of s indicate less smoothing.
        Recommended values of s depend on the weights, w. If the weights
        represent the inverse of the standard-deviation of y, then a good s
        value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
        the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
        weights are supplied. s = 0.0 (interpolating) if no weights are
        supplied.
    t : array_like, optional
        The knots needed for task=-1. If given then task is automatically set
        to -1.
    full_output : bool, optional
        If non-zero, then return optional outputs.
    per : bool, optional
        If non-zero, data points are considered periodic with period x[m-1] -
        x[0] and a smooth periodic spline approximation is returned. Values of
        y[m-1] and w[m-1] are not used.
    quiet : bool, optional
        Non-zero to suppress messages.
        This parameter is deprecated; use standard Python warning filters
        instead.

    Returns
    -------
    tck : tuple
        A tuple (t,c,k) containing the vector of knots, the B-spline
        coefficients, and the degree of the spline.
    fp : array, optional
        The weighted sum of squared residuals of the spline approximation.
    ier : int, optional
        An integer flag about splrep success. Success is indicated if ier<=0.
        If ier in [1,2,3] an error occurred but was not raised. Otherwise an
        error is raised.
    msg : str, optional
        A message corresponding to the integer flag, ier.

    See Also
    --------
    UnivariateSpline, BivariateSpline
    splprep, splev, sproot, spalde, splint
    bisplrep, bisplev
    BSpline
    make_interp_spline

    Notes
    -----
    See `splev` for evaluation of the spline and its derivatives. Uses the
    FORTRAN routine ``curfit`` from FITPACK.

    The user is responsible for assuring that the values of `x` are unique.
    Otherwise, `splrep` will not return sensible results.

    If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
    i.e., there must be a subset of data points ``x[j]`` such that
    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.

    This routine zero-pads the coefficients array ``c`` to have the same length
    as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
    by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
    `splprep`, which does not zero-pad the coefficients.

    References
    ----------
    Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:

    .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
       integration of experimental data using spline functions",
       J.Comp.Appl.Maths 1 (1975) 165-184.
    .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
       grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
       1286-1304.
    .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
       functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
    .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
       Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------
    You can interpolate 1-D points with a B-spline curve.
    Further examples are given in
    :ref:`in the tutorial <tutorial-interpolate_splXXX>`.

    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import splev, splrep
    >>> x = np.linspace(0, 10, 10)
    >>> y = np.sin(x)
    >>> spl = splrep(x, y)
    >>> x2 = np.linspace(0, 10, 200)
    >>> y2 = splev(x2, spl)
    >>> plt.plot(x, y, 'o', x2, y2)
    >>> plt.show()

    """
    # Pure delegation: all argument handling and FITPACK calls live in
    # scipy.interpolate._fitpack_impl.
    res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
    return res
def splev(x, tck, der=0, ext=0):
    """
    Evaluate a B-spline or its derivatives.

    Given the knots and coefficients of a B-spline representation, evaluate
    the value of the smoothing polynomial and its derivatives. This is a
    wrapper around the FORTRAN routines splev and splder of FITPACK.

    Parameters
    ----------
    x : array_like
        An array of points at which to return the value of the smoothed
        spline or its derivatives. If `tck` was returned from `splprep`,
        then the parameter values, u should be given.
    tck : 3-tuple or a BSpline object
        If a tuple, then it should be a sequence of length 3 returned by
        `splrep` or `splprep` containing the knots, coefficients, and degree
        of the spline. (Also see Notes.)
    der : int, optional
        The order of derivative of the spline to compute (must be less than
        or equal to k, the degree of the spline).
    ext : int, optional
        Controls the value returned for elements of ``x`` not in the
        interval defined by the knot sequence.

        * if ext=0, return the extrapolated value.
        * if ext=1, return 0
        * if ext=2, raise a ValueError
        * if ext=3, return the boundary value.

        The default value is 0.

    Returns
    -------
    y : ndarray or list of ndarrays
        An array of values representing the spline function evaluated at
        the points in `x`. If `tck` was returned from `splprep`, then this
        is a list of arrays representing the curve in an N-D space.

    Notes
    -----
    Manipulating the tck-tuples directly is not recommended. In new code,
    prefer using `BSpline` objects.

    See Also
    --------
    splprep, splrep, sproot, spalde, splint
    bisplrep, bisplev
    BSpline

    References
    ----------
    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
        Theory, 6, p.50-62, 1972.
    .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
        Applics, 10, p.134-149, 1972.
    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
        on Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------
    Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.

    """
    if not isinstance(tck, BSpline):
        # Legacy tck-tuple path: delegate straight to the FITPACK wrapper.
        return _impl.splev(x, tck, der, ext)
    if tck.c.ndim > 1:
        warnings.warn("Calling splev() with BSpline objects with c.ndim > 1 is "
                      "not recommended. Use BSpline.__call__(x) instead.",
                      DeprecationWarning)
    # remap the out-of-bounds behavior
    try:
        extrapolate = {0: True, }[ext]
    except KeyError as e:
        raise ValueError("Extrapolation mode %s is not supported "
                         "by BSpline." % ext) from e
    return tck(x, der, extrapolate=extrapolate)
def splint(a, b, tck, full_output=0):
    """
    Evaluate the definite integral of a B-spline between two given points.

    Parameters
    ----------
    a, b : float
        The end-points of the integration interval.
    tck : tuple or a BSpline instance
        If a tuple, then it should be a sequence of length 3, containing the
        vector of knots, the B-spline coefficients, and the degree of the
        spline (see `splev`).
    full_output : int, optional
        Non-zero to return optional output.

    Returns
    -------
    integral : float
        The resulting integral.
    wrk : ndarray
        An array containing the integrals of the normalized B-splines
        defined on the set of knots.
        (Only returned if `full_output` is non-zero)

    Notes
    -----
    `splint` silently assumes that the spline function is zero outside the data
    interval (`a`, `b`).

    Manipulating the tck-tuples directly is not recommended. In new code,
    prefer using the `BSpline` objects.

    See Also
    --------
    splprep, splrep, sproot, spalde, splev
    bisplrep, bisplev
    BSpline

    References
    ----------
    .. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines",
        J. Inst. Maths Applics, 17, p.37-41, 1976.
    .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
        on Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------
    Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.

    """
    if isinstance(tck, BSpline):
        if tck.c.ndim > 1:
            mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
                    "not recommended. Use BSpline.integrate() instead.")
            warnings.warn(mesg, DeprecationWarning)
        if full_output != 0:
            mesg = ("full_output = %s is not supported. Proceeding as if "
                    "full_output = 0" % full_output)
            # BUG FIX: the message above was constructed but never emitted,
            # so the caller's full_output request was silently ignored.
            warnings.warn(mesg)
        return tck.integrate(a, b, extrapolate=False)
    else:
        return _impl.splint(a, b, tck, full_output)
def sproot(tck, mest=10):
    """
    Find the roots of a cubic B-spline.

    Given the knots (>=8) and coefficients of a cubic B-spline return the
    roots of the spline.

    Parameters
    ----------
    tck : tuple or a BSpline object
        If a tuple, then it should be a sequence of length 3, containing the
        vector of knots, the B-spline coefficients, and the degree of the
        spline.
        The number of knots must be >= 8, and the degree must be 3.
        The knots must be a montonically increasing sequence.
    mest : int, optional
        An estimate of the number of zeros (Default is 10).

    Returns
    -------
    zeros : ndarray
        An array giving the roots of the spline.

    Notes
    -----
    Manipulating the tck-tuples directly is not recommended. In new code,
    prefer using the `BSpline` objects.

    See also
    --------
    splprep, splrep, splint, spalde, splev
    bisplrep, bisplev
    BSpline

    References
    ----------
    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
        Theory, 6, p.50-62, 1972.
    .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
        Applics, 10, p.134-149, 1972.
    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
        on Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------
    Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.

    """
    if not isinstance(tck, BSpline):
        return _impl.sproot(tck, mest)
    if tck.c.ndim > 1:
        warnings.warn("Calling sproot() with BSpline objects with c.ndim > 1 is "
                      "not recommended.",
                      DeprecationWarning)
    t, c, k = tck.tck
    # _impl.sproot expects the interpolation axis to be last, so move axis 0
    # there (a no-op when c is 1-D).
    return _impl.sproot((t, np.moveaxis(c, 0, -1), k), mest)
def spalde(x, tck):
    """
    Evaluate all derivatives of a B-spline.

    Given the knots and coefficients of a cubic B-spline compute all
    derivatives up to order k at a point (or set of points).

    Parameters
    ----------
    x : array_like
        A point or a set of points at which to evaluate the derivatives.
        Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
    tck : tuple
        A tuple ``(t, c, k)``, containing the vector of knots, the B-spline
        coefficients, and the degree of the spline (see `splev`).

    Returns
    -------
    results : {ndarray, list of ndarrays}
        An array (or a list of arrays) containing all derivatives
        up to order k inclusive for each point `x`.

    See Also
    --------
    splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
    BSpline

    References
    ----------
    .. [1] C. de Boor: On calculating with b-splines, J. Approximation Theory
        6 (1972) 50-62.
    .. [2] M. G. Cox : The numerical evaluation of b-splines, J. Inst. Maths
        applics 10 (1972) 134-149.
    .. [3] P. Dierckx : Curve and surface fitting with splines, Monographs on
        Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------
    Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.

    """
    # Only the legacy tck-tuple representation is supported here.
    if isinstance(tck, BSpline):
        raise TypeError("spalde does not accept BSpline instances.")
    return _impl.spalde(x, tck)
def insert(x, tck, m=1, per=0):
    """Insert knots into a B-spline.

    Given the knots and coefficients of a B-spline representation, create a
    new B-spline with a knot inserted `m` times at point `x`.
    This is a wrapper around the FORTRAN routine insert of FITPACK.

    Parameters
    ----------
    x (u) : array_like
        A 1-D point at which to insert a new knot(s). If `tck` was returned
        from ``splprep``, then the parameter values, u should be given.
    tck : a `BSpline` instance or a tuple
        If tuple, then it is expected to be a tuple (t,c,k) containing
        the vector of knots, the B-spline coefficients, and the degree of
        the spline.
    m : int, optional
        The number of times to insert the given knot (its multiplicity).
        Default is 1.
    per : int, optional
        If non-zero, the input spline is considered periodic.

    Returns
    -------
    BSpline instance or a tuple
        A new B-spline with knots t, coefficients c, and degree k.
        ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
        In case of a periodic spline (``per != 0``) there must be
        either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
        or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
        A tuple is returned iff the input argument `tck` is a tuple, otherwise
        a BSpline object is constructed and returned.

    Notes
    -----
    Based on algorithms from [1]_ and [2]_.  Manipulating the tck-tuples
    directly is not recommended; in new code, prefer using `BSpline` objects.

    References
    ----------
    .. [1] W. Boehm, "Inserting new knots into b-spline curves.",
        Computer Aided Design, 12, p.199-201, 1980.
    .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
        Numerical Analysis", Oxford University Press, 1993.
    """
    if not isinstance(tck, BSpline):
        return _impl.insert(x, tck, m, per)

    t, c, k = tck.tck
    # FITPACK expects the interpolation axis to be last, so roll it over.
    # For 1-D coefficient arrays both transposes below are no-ops.
    axes = tuple(range(c.ndim))
    coeffs = c.transpose(axes[1:] + (0,))
    t_new, c_new, k_new = _impl.insert(x, (t, coeffs, k), m, per)
    # Roll the interpolation axis back to the front.
    c_new = np.asarray(c_new).transpose((axes[-1],) + axes[:-1])
    return BSpline(t_new, c_new, k_new)
def splder(tck, n=1):
    """Compute the spline representation of the derivative of a given spline.

    Parameters
    ----------
    tck : BSpline instance or a tuple of (t, c, k)
        Spline whose derivative to compute
    n : int, optional
        Order of derivative to evaluate. Default: 1

    Returns
    -------
    `BSpline` instance or tuple
        Spline of order k2=k-n representing the derivative of the input
        spline.  A tuple is returned iff the input argument `tck` is a
        tuple, otherwise a BSpline object is constructed and returned.

    Notes
    -----
    .. versionadded:: 0.13.0

    See Also
    --------
    splantider, splev, spalde, BSpline
    """
    # BSpline objects carry their own differentiation logic; tuples are
    # routed through the legacy FITPACK-based implementation.
    if isinstance(tck, BSpline):
        return tck.derivative(n)
    return _impl.splder(tck, n)
def splantider(tck, n=1):
    """Compute the spline for the antiderivative (integral) of a given spline.

    Parameters
    ----------
    tck : BSpline instance or a tuple of (t, c, k)
        Spline whose antiderivative to compute
    n : int, optional
        Order of antiderivative to evaluate. Default: 1

    Returns
    -------
    BSpline instance or a tuple of (t2, c2, k2)
        Spline of order k2=k+n representing the antiderivative of the input
        spline.  A tuple is returned iff the input argument `tck` is a
        tuple, otherwise a BSpline object is constructed and returned.

    See Also
    --------
    splder, splev, spalde, BSpline

    Notes
    -----
    The `splder` function is the inverse operation of this function:
    ``splder(splantider(tck))`` is identical to `tck`, modulo rounding
    error.

    .. versionadded:: 0.13.0
    """
    # Mirror image of splder: BSpline objects integrate themselves,
    # tck tuples go through the legacy implementation.
    if isinstance(tck, BSpline):
        return tck.antiderivative(n)
    return _impl.splantider(tck, n)
| {
"repo_name": "WarrenWeckesser/scipy",
"path": "scipy/interpolate/fitpack.py",
"copies": "16",
"size": "26807",
"license": "bsd-3-clause",
"hash": -8576654773767334000,
"line_mean": 34.1336828309,
"line_max": 80,
"alpha_frac": 0.6148767113,
"autogenerated": false,
"ratio": 3.614264527436969,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00006876688833875378,
"num_lines": 763
} |
__all__ = ["sqlite_dump", "sqlite_merge"]
from random import Random
import math
def random_expectations(depth=0, breadth=3, low=1, high=10, random=Random()):
    """Generate a depth x breadth array of random numbers where each
    innermost row sums to `high`, with a minimum of `low` per entry.
    """
    if depth > 0:
        # Recurse: build `breadth` sub-arrays one level shallower.
        return [random_expectations(depth - 1, breadth, low, high, random)
                for _ in range(breadth)]
    remaining = high + 1
    row = []
    for idx in range(breadth - 1):
        # Cap each draw so every later slot can still receive at least `low`.
        drawn = random.randint(low, remaining - (low * (breadth - idx)))
        remaining -= drawn
        row.append(drawn)
    row.append(remaining - low)
    random.shuffle(row)
    return row
def rescale(new_low, new_high, low, diff, x):
    """Linearly map `x` from the range [low, low + diff] onto the range
    [new_low, new_high]."""
    proportion = (new_high - new_low) * (x - low)
    return proportion / diff + new_low
def weighted_random_choice(choices, weights, random=Random()):
    """Pick one of `choices` with probability proportional to its weight.

    Each choice is replicated int(weight) times before drawing uniformly.
    """
    pool = []
    for value, weight in zip(choices, weights):
        pool.extend([value] * int(weight))
    return random.choice(pool)
def multinomial(probabilities, draws=1, random=Random()):
    """Draw `draws` samples from a multinomial distribution.

    Returns a list of counts, one per category, summing to `draws`.
    `probabilities` is assumed to sum to 1.
    """
    def pick_index():
        point = random.random()
        cumulative = 0.
        for index, p in enumerate(probabilities):
            cumulative += p
            if point < cumulative:
                return index
        # Floating-point shortfall: the cumulative sum came up just
        # short of 1.0, so fall back on the last category.
        return index

    counts = [0] * len(probabilities)
    for _ in range(draws):
        counts[pick_index()] += 1
    return counts
def logistic_random(loc, scale, random=Random()):
    """Return a random number from the logistic distribution with the
    given location and scale, via inverse-CDF (logit) sampling."""
    u = random.random()
    return loc + scale * math.log(u / (1 - u))
def shuffled(target, random=Random()):
    """Return a shuffled (slice-)copy of `target`; the argument itself
    is left unmodified."""
    clone = target[:]
    random.shuffle(clone)
    return clone
def make_pbs_script(kwargs, hours=60, mins=0, ppn=16, script_name=None):
    """
    Generate a PBS run script to be submitted.

    Builds the script text from this process's own command line
    (``sys.argv``), rewriting the argument-file name so each PBS array
    job picks up its own numbered arguments file.

    Arguments:
        kwargs -- parsed options object; ``kwargs.kwargs[0]`` is read as a
                  path pattern for argument files and ``kwargs.file_name``
                  as the results file name.
        hours, mins -- requested walltime.
        ppn -- processors per node to request.
        script_name -- if given, the last array job deletes this file.

    Returns a ``(script_text, count)`` tuple where count is the number of
    matching argument files (i.e. the array size).
    """
    from disclosuregame.Util.sqlite_merge import list_matching
    from os.path import split
    # Count the argument files matching the pattern; this determines the
    # size of the PBS job array.
    args_dir, name = split(kwargs.kwargs[0])
    kwargs_files = list_matching(args_dir, name)
    count = len(kwargs_files)
    import sys
    # Reuse this process's own command line, substituting the PBS array
    # id into the wildcard and the results file name.
    args = sys.argv[1:]
    args = " ".join(args)
    args = args.replace("*", "${PBS_ARRAYID}")
    args = args.replace(" %s " % kwargs.file_name, " ${PBS_ARRAYID}_%s " % kwargs.file_name)
    if kwargs.file_name == "":
        # No results file name given: fall back on the array id alone.
        args += " -f ${PBS_ARRAYID}"
    interpreter = sys.executable
    run_script = ["#!/bin/bash -vx", "#PBS -l walltime=%d:%d:00" % (hours, mins), "#PBS -l nodes=1:ppn=%d" % ppn,
                  "module load python"]
    # Doesn't work on multiple nodes, sadly
    # Set up the call
    run_call = "%s -m disclosuregame.run %s" % (interpreter, args)
    run_script.append(run_call)
    # Cleanup after all jobs have run
    if script_name is not None:
        run_script.append("if [$PBS_ARRAYID -eq %d]" % count)
        run_script.append("then")
        run_script.append("\trm %s" % script_name)
        run_script.append("fi")
    return '\n'.join(run_script), count
# ${python} Run.py -R 100 -s ${sig} -r ${resp} --pickled-arguments ../experiment_args/sensitivity_${PBS_ARRAYID}.args -f ${PBS_ARRAYID}_sensitivity -i 1000 -d ${dir} -g ${game}
| {
"repo_name": "greenape/risky-aging-model",
"path": "disclosuregame/Util/__init__.py",
"copies": "1",
"size": "3317",
"license": "mpl-2.0",
"hash": -4898991236380945000,
"line_mean": 27.8434782609,
"line_max": 180,
"alpha_frac": 0.5963219777,
"autogenerated": false,
"ratio": 3.4516129032258065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45479348809258063,
"avg_score": null,
"num_lines": null
} |
__all__ = ["sqlite_dump", "sqlite_merge"]
from random import Random
def random_expectations(depth=0, breadth=3, low=1, high=10, random=Random()):
    """Build a depth x breadth nested array of random integers; every
    innermost row sums to `high` and each entry is at least `low`.
    """
    if depth != 0:
        return [random_expectations(depth - 1, breadth, low, high, random)
                for _ in range(breadth)]
    budget = high + 1
    values = []
    for slot in range(breadth - 1):
        # Reserve `low` for each of the remaining slots before drawing.
        pick = random.randint(low, budget - low * (breadth - slot))
        budget -= pick
        values.append(pick)
    values.append(budget - low)
    random.shuffle(values)
    return values
def weighted_random_choice(choices, weights, random=Random()):
    """Select one value from `choices`, weighted by the matching weight
    (each value is replicated int(weight) times before drawing)."""
    pool = []
    for value, weight in zip(choices, weights):
        pool += [value] * int(weight)
    return random.choice(pool)
def shuffled(target, random=Random()):
    """Return a new list with the elements of `target` in random order;
    `target` itself is left unmodified."""
    items = list(target)
    random.shuffle(items)
    return items
def make_pbs_script(kwargs, hours=60, mins=0, ppn=16, script_name=None):
    """
    Generate a PBS run script to be submitted.

    Builds the script text from this process's own command line
    (``sys.argv``), rewriting the argument-file name so each PBS array
    job picks up its own numbered arguments file.

    Arguments:
        kwargs -- parsed options object; ``kwargs.kwargs[0]`` is read as a
                  path pattern for argument files and ``kwargs.file_name``
                  as the results file name.
        hours, mins -- requested walltime.
        ppn -- processors per node to request.
        script_name -- if given, the last array job deletes this file.

    Returns a ``(script_text, count)`` tuple where count is the number of
    matching argument files (i.e. the array size).
    """
    from disclosuregame.Util.sqlite_merge import list_matching
    from os.path import split
    # Count the argument files matching the pattern; this determines the
    # size of the PBS job array.
    args_dir, name = split(kwargs.kwargs[0])
    kwargs_files = list_matching(args_dir, name)
    count = len(kwargs_files)
    import sys
    # Reuse this process's own command line, substituting the PBS array
    # id into the wildcard and the results file name.
    args = sys.argv[1:]
    args = " ".join(args)
    args = args.replace("*", "${PBS_ARRAYID}")
    args = args.replace(" %s " % kwargs.file_name, " ${PBS_ARRAYID}_%s " % kwargs.file_name)
    if kwargs.file_name == "":
        # No results file name given: fall back on the array id alone.
        args += " -f ${PBS_ARRAYID}"
    interpreter = sys.executable
    run_script = ["#!/bin/bash -vx"]
    run_script.append("#PBS -l walltime=%d:%d:00" % (hours, mins))
    #Doesn't work on multiple nodes, sadly
    run_script.append("#PBS -l nodes=1:ppn=%d" % ppn)
    run_script.append("module load python")
    #Set up the call
    run_call = "%s -m disclosuregame.run %s" % (interpreter, args)
    run_script.append(run_call)
    #Cleanup after all jobs have run
    if script_name is not None:
        run_script.append("if [$PBS_ARRAYID -eq %d]" % count)
        run_script.append("then")
        run_script.append("\trm %s" % script_name)
        run_script.append("fi")
    return('\n'.join(run_script), count)
#${python} Run.py -R 100 -s ${sig} -r ${resp} --pickled-arguments ../experiment_args/sensitivity_${PBS_ARRAYID}.args -f ${PBS_ARRAYID}_sensitivity -i 1000 -d ${dir} -g ${game}
| {
"repo_name": "greenape/disclosure-game-module",
"path": "disclosuregame/Util/__init__.py",
"copies": "1",
"size": "2545",
"license": "mpl-2.0",
"hash": 1928217043758980000,
"line_mean": 32.4868421053,
"line_max": 179,
"alpha_frac": 0.6141453831,
"autogenerated": false,
"ratio": 3.2881136950904395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4402259078190439,
"avg_score": null,
"num_lines": null
} |
"""All sql pragmas to generate the sqlite database."""
# Pragmas relating to adsorbates
PRAGMA_ADSORBATES = """
DROP TABLE IF EXISTS "adsorbates";
CREATE TABLE "adsorbates" (
`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`name` TEXT NOT NULL UNIQUE
);
"""
PRAGMA_ADSORBATE_PROPERTIES = """
DROP TABLE IF EXISTS "adsorbate_properties";
CREATE TABLE `adsorbate_properties` (
`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`ads_id` INTEGER NOT NULL,
`type` TEXT NOT NULL,
`value` REAL NOT NULL,
FOREIGN KEY(`ads_id`) REFERENCES `adsorbates`(`id`),
FOREIGN KEY(`type`) REFERENCES `adsorbate_properties_type`(`type`)
);
"""
PRAGMA_ADSORBATE_PROPERTIES_TYPE = """
DROP TABLE IF EXISTS "adsorbate_properties_type";
CREATE TABLE `adsorbate_properties_type` (
`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`type` TEXT NOT NULL UNIQUE,
`unit` TEXT,
`description` TEXT
);
"""
# Pragmas relating to materials
PRAGMA_MATERIALS = """
DROP TABLE IF EXISTS "materials";
CREATE TABLE "materials" (
`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`name` TEXT NOT NULL UNIQUE
);
"""
PRAGMA_MATERIAL_PROPERTIES = """
DROP TABLE IF EXISTS "material_properties";
CREATE TABLE "material_properties" (
`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`mat_id` INTEGER NOT NULL,
`type` TEXT NOT NULL,
`value` REAL NOT NULL,
FOREIGN KEY(`mat_id`) REFERENCES `materials`(`id`),
FOREIGN KEY(`type`) REFERENCES 'material_properties_type'('type')
);
"""
PRAGMA_MATERIAL_PROPERTY_TYPE = """
DROP TABLE IF EXISTS "material_properties_type";
CREATE TABLE "material_properties_type" (
`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`type` TEXT NOT NULL UNIQUE,
`unit` TEXT,
`description` TEXT
);
"""
# Pragmas relating to isotherms
PRAGMA_ISOTHERMS = """
DROP TABLE IF EXISTS "isotherms";
CREATE TABLE "isotherms" (
`id` TEXT NOT NULL PRIMARY KEY UNIQUE,
`iso_type` TEXT NOT NULL,
`material` TEXT NOT NULL,
`adsorbate` TEXT NOT NULL,
`temperature` REAL NOT NULL,
FOREIGN KEY(`iso_type`) REFERENCES `isotherm_type`(`type`),
FOREIGN KEY(`material`) REFERENCES `materials`(`name`),
FOREIGN KEY(`adsorbate`) REFERENCES `adsorbates`(`name`)
);
"""
PRAGMA_ISOTHERM_TYPE = """
DROP TABLE IF EXISTS "isotherm_type";
CREATE TABLE "isotherm_type" (
`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`type` TEXT NOT NULL UNIQUE,
`description` TEXT
);
"""
PRAGMA_ISOTHERM_PROPERTIES = """
DROP TABLE IF EXISTS "isotherm_properties";
CREATE TABLE "isotherm_properties" (
`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`iso_id` INTEGER NOT NULL,
`type` TEXT NOT NULL,
`value` REAL NOT NULL,
FOREIGN KEY(`iso_id`) REFERENCES `isotherms`(`id`)
);
"""
PRAGMA_ISOTHERM_DATA = """
DROP TABLE IF EXISTS "isotherm_data";
CREATE TABLE "isotherm_data" (
`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`iso_id` INTEGER NOT NULL,
`type` TEXT NOT NULL,
`data` BLOB NOT NULL,
FOREIGN KEY(`iso_id`) REFERENCES `isotherms`(`id`)
);
"""
# List of pragmas
PRAGMAS = [
PRAGMA_ADSORBATES,
PRAGMA_ADSORBATE_PROPERTIES_TYPE,
PRAGMA_ADSORBATE_PROPERTIES,
PRAGMA_MATERIALS,
PRAGMA_MATERIAL_PROPERTY_TYPE,
PRAGMA_MATERIAL_PROPERTIES,
PRAGMA_ISOTHERM_TYPE,
PRAGMA_ISOTHERMS,
PRAGMA_ISOTHERM_PROPERTIES,
PRAGMA_ISOTHERM_DATA,
]
| {
"repo_name": "pauliacomi/pyGAPS",
"path": "src/pygaps/utilities/sqlite_db_pragmas.py",
"copies": "1",
"size": "4288",
"license": "mit",
"hash": -8629820648865162000,
"line_mean": 29.1971830986,
"line_max": 81,
"alpha_frac": 0.5583022388,
"autogenerated": false,
"ratio": 3.4720647773279354,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9529937610425427,
"avg_score": 0.00008588114050154586,
"num_lines": 142
} |
# All square roots are periodic when written as continued fractions and can
# be written in the form:
# sqrt(N) = a0 + 1
# ------------------
# a1 + 1
# -------------
# a2 + 1
# --------
# a3 + ...
# For example, let us consider sqrt(23):
# sqrt(23) = 4 + sqrt(23) - 4 = 4 + 1 = 4 + 1
# ------------ ----------------
# 1 sqrt(23) - 3
# ------------ 1 + ------------
# sqrt(23) - 4 7
# If we continue we would get the following expansion:
# sqrt(23) = 4 + 1
# -------------------
# 1 + 1
# ---------------
# 3 + 1
# -----------
# 1 + 1
# -------
# 8 + ...
# The process can be summarised as follows:
# 1 sqrt(23) + 4 sqrt(23) - 3
# a0 = 4, ------------ = ------------ = 1 + ------------
# sqrt(23) - 4 7 7
# 7 7(sqrt(23) + 3) sqrt(23) - 3
# a1 = 1, ------------ = --------------- = 3 + ------------
# sqrt(23) - 3 14 2
# 2 2(sqrt(23) + 3) sqrt(23) - 4
# a2 = 3, ------------ = --------------- = 1 + ------------
# sqrt(23) - 3 14 7
# 7 7(sqrt(23) + 4)
# a3 = 1, ------------ = --------------- = 8 + sqrt(23) - 4
# sqrt(23) - 4 7
# 1 sqrt(23) + 4 sqrt(23) - 3
# a4 = 8, ------------ = ------------ = 1 + ------------
# sqrt(23) - 4 7 7
# 7 7(sqrt(23) + 3) sqrt(23) - 3
# a5 = 1, ------------ = --------------- = 3 + ------------
# sqrt(23) - 3 14 2
# 2 2(sqrt(23) + 3) sqrt(23) - 4
# a6 = 3, ------------ = --------------- = 1 + ------------
# sqrt(23) - 3 14 7
# 7 7(sqrt(23) + 4)
# a7 = 1, ------------ = --------------- = 8 + sqrt(23) - 4
# sqrt(23) - 4 7
# It can be seen that the sequence is repeating. For conciseness, we use the
# notation sqrt(23) = [4;(1,3,1,8)], to indicate that the block (1,3,1,8)
# repeats indefinitely.
# The first ten continued fraction representations of (irrational) square
# roots are:
# sqrt(2) = [1;(2)], period = 1
# sqrt(3) = [1;(1,2)], period = 2
# sqrt(5) = [2;(4)], period = 1
# sqrt(6) = [2;(2,4)], period = 2
# sqrt(7) = [2;(1,1,1,4)], period = 4
# sqrt(8) = [2;(1,4)], period = 2
# sqrt(10) = [3;(6)], period = 1
# sqrt(11) = [3;(3,6)], period = 2
# sqrt(12) = [3;(2,6)], period = 2
# sqrt(13) = [3;(1,1,1,1,6)], period = 5
# Exactly four continued fractions, for N <= 13, have an odd period.
# How many continued fractions for N <= 10000 have an odd period?
from math import sqrt
def is_square(num):
    """Return True if num is a perfect square.

    Compares num against the squares of the two integer candidates
    around sqrt(num) instead of testing float equality
    (``int(sqrt(num)) == sqrt(num)``), which is fragile once the float
    square root carries representation error.
    """
    root = int(sqrt(num))
    # int() truncates, so the true root is either `root` or `root + 1`.
    return num in (root * root, (root + 1) * (root + 1))
def get_period_length(num):
    """Return the period of the continued-fraction expansion of sqrt(num).

    Uses the standard (m, d, a) recurrence for quadratic irrationals and
    stops at the first repeated state.  `num` must not be a perfect
    square (the expansion of a square terminates instead of repeating).
    """
    a0 = int(sqrt(num))
    m, d, a = 0, 1, a0
    seen = []
    while True:
        m = d * a - m
        # d always divides num - m*m exactly in this recurrence; floor
        # division keeps the state integral under Python 3 as well
        # (plain `/` would silently turn d and a into floats there).
        d = (num - m * m) // d
        a = (a0 + m) // d
        if (a, m, d) in seen:
            return len(seen) - seen.index((a, m, d))
        seen.append((a, m, d))
# Count how many N <= 10000 have a continued-fraction expansion of
# sqrt(N) with an odd period (Project Euler problem 64).
ans = 0
for N in range(2, 10001):
    # Perfect squares have terminating (non-periodic) expansions: skip.
    if not is_square(N):
        if get_period_length(N) % 2 == 1:
            ans += 1
# Parenthesized so the script runs under both Python 2 and Python 3
# (the original `print ans` statement is a syntax error on Python 3).
print(ans)
| {
"repo_name": "cloudzfy/euler",
"path": "src/64.py",
"copies": "1",
"size": "3644",
"license": "mit",
"hash": 1198390766332313900,
"line_mean": 31.2477876106,
"line_max": 76,
"alpha_frac": 0.3240944018,
"autogenerated": false,
"ratio": 3.1742160278745644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3998310429674564,
"avg_score": null,
"num_lines": null
} |
__all__ = ['squash']
from operator import add, eq
def squash(predicate, list_):
    """Squash multiple consecutive list elements into one.

    For each sequence of elements, optionally interspersed with whitespace
    or empty strings, for which the predicate returns True, leave the first
    element in the sequence and drop the rest.

    Arguments:
      predicate -- function of one argument returning True for squashable
                   elements
      list_     -- list of elements to modify. Every list element, for which
                   the predicate returns False, must be a string.

    Return value:
      A new list with squashed elements. The input list remains unchanged.
    """
    # Validate input eagerly. The original used `assert not filter(...)`,
    # which is always true on Python 3 (filter returns a lazy, truthy
    # iterator), so the check never fired there; `all` works on both.
    assert all(predicate(x) or isinstance(x, str) for x in list_)

    def find(index, seq):
        """Return the index of the first element of seq[index:] which
        satisfies the predicate, or -1 if there is none."""
        for i in range(index, len(seq)):
            if predicate(seq[i]):
                return i
        return -1

    result = list_[:]
    base = 0
    while True:
        base = find(base, result)
        if base < 0:
            break
        n = find(base + 1, result)
        if n < 0:
            break
        # The elements between two matches are all strings (asserted
        # above); ''.join replaces reduce(add, ...), which would need a
        # functools import on Python 3.
        if not ''.join(result[base + 1:n]).strip():
            # Only whitespace / empty strings separate the two matches:
            # drop everything up to and including the second match.
            result[base + 1:n + 1] = []
        else:
            base = n
    return result
| {
"repo_name": "alco/numspell",
"path": "numspell/squash.py",
"copies": "1",
"size": "1668",
"license": "mit",
"hash": -960326404658227800,
"line_mean": 27.2711864407,
"line_max": 78,
"alpha_frac": 0.5959232614,
"autogenerated": false,
"ratio": 4.459893048128342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 59
} |
__all__ = ['SSTable']
import os
import sys
import mmap
import time
import struct
from .index import Index
from .offset import Offset
class SSTable(object):
    """On-disk table snapshot: rows packed sequentially in a data file.

    Row byte positions are tracked by a companion Offset file and by one
    Index per indexed column tuple (the full primary key plus each
    primary-key column on its own, the latter for ranged queries).
    Writing (w_open/_add_rows/w_close) and reading (open/get*/close) are
    separate modes; reads go through an mmap of the data file.
    """
    def __init__(self, table, t=None, rows=None):
        # `t` is a timestamp string used to name this snapshot's files;
        # default to the current time with 4 decimal places.
        self.table = table
        if not t: t = '%.4f' % time.time()
        self.t = t
        self.opened = False
        # offset
        offset = Offset(self, t)
        self.offset = offset
        # indexes
        self.indexes = {}
        # index by primary key
        indexed_columns = (tuple(table.schema.primary_key),)
        # index each column in primary key
        # used for ranged queries
        indexed_columns += tuple((n,) for n in table.schema.primary_key)
        for n in indexed_columns:
            index = Index(self, t, n)
            self.indexes[n] = index
        # File handle and mmap are only populated while reading (open()).
        self.f = None
        self.mm = None
        # rows
        # If initial rows are supplied, write them all out immediately.
        if rows:
            self.w_open()
            self._add_rows(rows)
            self.w_close()
    def __repr__(self):
        return '<%s db: %r, table: %r, t: %r>' % (
            self.__class__.__name__,
            self.table.db.db_name,
            self.table.table_name,
            self.t,
        )
    def __enter__(self):
        '''
        Used only on data writing to file.
        '''
        self.open()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        '''
        Used only on data writing to file.
        '''
        self.close()
        # Never suppress exceptions raised inside the with-block.
        return False
    def __add__(self, other):
        # FIXME:
        pass
    def get_path(self):
        '''Return the data file path for this snapshot, derived from the
        table's directory and the snapshot timestamp.'''
        filename = 'sstable-%s.data' % self.t
        path = os.path.join(self.table.get_path(), filename)
        return path
    def is_opened(self):
        '''Return True while the read-mode file/mmap handles are open.'''
        return self.opened
    def open(self):
        '''
        Used only on data reading from file.
        Maps the data file into memory and opens the offset and index
        files alongside it.
        '''
        self.f = open(self.get_path(), 'r+b')
        self.mm = mmap.mmap(self.f.fileno(), 0)
        self.offset.open()
        for column_names, index in self.indexes.items():
            index.open()
        self.opened = True
    def close(self):
        '''
        Used only on data reading from file.
        Closes indexes, offset, mmap and file handle (reverse of open()).
        '''
        for column_names, index in self.indexes.items():
            index.close()
        self.offset.close()
        self.mm.close()
        self.f.close()
        self.opened = False
    def w_open(self):
        '''
        Open file for writing.
        '''
        self.f = open(self.get_path(), 'wb')
        self.offset.w_open()
        for column_names, index in self.indexes.items():
            index.w_open()
    def w_close(self):
        '''
        Close file for writing.
        '''
        for column_names, index in self.indexes.items():
            index.w_close()
        self.offset.w_close()
        self.f.close()
    def _add_rows(self, rows):
        # Append each row in order; file position advances as we go.
        for row in rows:
            self._add_row(row)
    def _add_row(self, row):
        # sstable
        # Record the row's starting byte position before writing it.
        sstable_pos = self.f.tell()
        self._write_row(row)
        # offset
        self.offset._write_sstable_pos(sstable_pos)
        # index
        for column_names, index in self.indexes.items():
            index._write_key(row, sstable_pos)
    def _write_row(self, row):
        '''Serialize one row dict: each schema column is packed by its
        column type, then the blob is prefixed with its 8-byte
        big-endian length.'''
        table = self.table
        row_blob_items = []
        for c, t in table.schema:
            # Missing columns are packed as None.
            v = row.get(c, None)
            b = t._get_column_packed(v)
            row_blob_items.append(b)
        _row_blob = b''.join(row_blob_items)
        _row_size = struct.pack(b'!Q', len(_row_blob))
        self.f.write(_row_size)
        self.f.write(_row_blob)
    def _read_row(self, pos):
        '''Deserialize the row starting at byte `pos` of the mmap;
        inverse of _write_row.'''
        row_blob_len, = struct.unpack_from('!Q', self.mm, pos)
        row = {}
        # Skip the 8-byte length prefix, then unpack column by column.
        p = pos + 8
        for c, t in self.table.schema:
            v, p = t._get_column_unpacked(self.mm, p)
            row[c] = v
        return row
    def get(self, key, columns=None):
        '''Exact-match lookup of `key` in the index over `columns`
        (default: the full primary key).  Returns
        (row, offset_pos, sstable_pos).'''
        if columns:
            columns = tuple(columns)
        else:
            columns = tuple(self.table.schema.primary_key)
        index = self.indexes[columns]
        offset_pos, sstable_pos = index.get_sstable_pos(key)
        row = self._read_row(sstable_pos)
        return row, offset_pos, sstable_pos
    def get_lt(self, key, columns=None):
        '''Like get(), but returns the entry strictly less than `key`.'''
        if columns:
            columns = tuple(columns)
        else:
            columns = tuple(self.table.schema.primary_key)
        index = self.indexes[columns]
        offset_pos, sstable_pos = index.get_lt_sstable_pos(key)
        row = self._read_row(sstable_pos)
        return row, offset_pos, sstable_pos
    def get_le(self, key, columns=None):
        '''Like get(), but returns the entry less than or equal to `key`.'''
        if columns:
            columns = tuple(columns)
        else:
            columns = tuple(self.table.schema.primary_key)
        index = self.indexes[columns]
        offset_pos, sstable_pos = index.get_le_sstable_pos(key)
        row = self._read_row(sstable_pos)
        return row, offset_pos, sstable_pos
    def get_gt(self, key, columns=None):
        '''Like get(), but returns the entry strictly greater than `key`.'''
        if columns:
            columns = tuple(columns)
        else:
            columns = tuple(self.table.schema.primary_key)
        index = self.indexes[columns]
        offset_pos, sstable_pos = index.get_gt_sstable_pos(key)
        row = self._read_row(sstable_pos)
        return row, offset_pos, sstable_pos
    def get_ge(self, key, columns=None):
        '''Like get(), but returns the entry greater than or equal to `key`.'''
        if columns:
            columns = tuple(columns)
        else:
            columns = tuple(self.table.schema.primary_key)
        index = self.indexes[columns]
        offset_pos, sstable_pos = index.get_ge_sstable_pos(key)
        row = self._read_row(sstable_pos)
        return row, offset_pos, sstable_pos
| {
"repo_name": "yadb/yadb",
"path": "backup/store/sstable.py",
"copies": "1",
"size": "5681",
"license": "mit",
"hash": 16020724616903260,
"line_mean": 24.9406392694,
"line_max": 72,
"alpha_frac": 0.5271959162,
"autogenerated": false,
"ratio": 3.6627981947130883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9674799748502083,
"avg_score": 0.0030388724822011525,
"num_lines": 219
} |
__all__ = ['standardize_agent_name', 'standardize_db_refs', 'get_standard_name',
'standardize_name_db_refs', 'get_standard_agent']
import logging
from copy import deepcopy
from collections import defaultdict
from indra.statements.agent import default_ns_order, get_grounding, Agent
from indra.statements.validate import assert_valid_db_refs
logger = logging.getLogger(__name__)
default_ns_priorities = {ns: idx for idx, ns in enumerate(default_ns_order)}
def prioritize(ns1, ns2, ns_order=None):
    """Return True if namespace `ns2` takes precedence over `ns1`.

    Precedence is position in `ns_order` (earlier wins); when `ns_order`
    is None, the module-level default ordering is used.  A namespace not
    present in the ordering never outranks one that is.
    """
    if ns_order is None:
        priorities = default_ns_priorities
    else:
        priorities = {ns: idx for idx, ns in enumerate(ns_order)}
    rank1 = priorities.get(ns1)
    rank2 = priorities.get(ns2)
    return rank2 is not None and (rank1 is None or rank2 < rank1)
def _get_mappings_dict(mappings):
md = defaultdict(list)
for db_ns, db_id in mappings:
md[db_ns].append(db_id)
return md
def get_standard_agent(name, db_refs, ontology=None, ns_order=None, **kwargs):
    """Get a standard agent based on the name, db_refs, and any other kwargs.

    Parameters
    ----------
    name : str
        The name of the agent that may not be standardized.
    db_refs : dict
        A dict of db refs that may not be standardized, i.e., may be
        missing an available UP ID corresponding to an existing HGNC ID.
    ontology : Optional[indra.ontology.IndraOntology]
        An IndraOntology object, if not provided, the default BioOntology
        is used.
    ns_order : Optional[list]
        A list of namespaces which are in order of priority with higher
        priority namespaces appearing earlier in the list.
    kwargs :
        Keyword arguments to pass to :func:`Agent.__init__`.

    Returns
    -------
    Agent
        A standard agent
    """
    standard_name, db_refs = standardize_name_db_refs(db_refs,
                                                      ontology=ontology,
                                                      ns_order=ns_order)
    # Fall back on the caller-supplied name when no standard name exists.
    if standard_name:
        name = standard_name
    assert_valid_db_refs(db_refs)
    return Agent(name, db_refs=db_refs, **kwargs)
def standardize_db_refs(db_refs, ontology=None, ns_order=None):
    """Return a standardized db refs dict for a given db refs dict.

    Parameters
    ----------
    db_refs : dict
        A dict of db refs that may not be standardized, i.e., may be
        missing an available UP ID corresponding to an existing HGNC ID.
    ontology : Optional[indra.ontology.IndraOntology]
        An IndraOntology object, if not provided, the default BioOntology
        is used.
    ns_order : Optional[list]
        A list of namespaces which are in order of priority with higher
        priority namespaces appearing earlier in the list.

    Returns
    -------
    dict
        The db_refs dict with standardized entries.
    """
    if ontology is None:
        from indra.ontology.bio import bio_ontology
        ontology = bio_ontology
    # Entries are added to the input dict in place; iterate over a deep
    # copy so newly added mappings are not themselves re-expanded.
    for src_ns, src_id in deepcopy(db_refs).items():
        src_id = _preprocess_for_mapping(src_ns, src_id)
        # Collect this entry's xref mappings, keyed by target namespace.
        xrefs = _get_mappings_dict(ontology.get_mappings(src_ns, src_id))
        for tgt_ns, tgt_ids in xrefs.items():
            # A mapping is applied when the target namespace is not yet
            # present, or when the target namespace outranks the source
            # namespace, in which case the existing entry is overwritten.
            if tgt_ns not in db_refs or \
                    prioritize(tgt_ns, src_ns, ns_order=ns_order):
                db_refs[tgt_ns] = sorted(tgt_ids)[0]
    return db_refs
def _preprocess_for_mapping(db_ns, db_id):
if db_ns == 'UP' and db_id is not None and '-' in db_id:
return db_id.split('-')[0]
return db_id
def standardize_name_db_refs(db_refs, ontology=None, ns_order=None):
    """Return a standardized name and db refs dict for a given db refs dict.

    Parameters
    ----------
    db_refs : dict
        A dict of db refs that may not be standardized, i.e., may be
        missing an available UP ID corresponding to an existing HGNC ID.
    ontology : Optional[indra.ontology.IndraOntology]
        An IndraOntology object, if not provided, the default BioOntology
        is used.
    ns_order : Optional[list]
        A list of namespaces which are in order of priority with higher
        priority namespaces appearing earlier in the list.

    Returns
    -------
    str or None
        The standard name based on the db refs, None if not available.
    dict
        The db_refs dict with standardized entries.
    """
    # Standardize the refs first so the name is derived from the
    # fully-mapped groundings.
    standardized = standardize_db_refs(db_refs, ontology=ontology,
                                       ns_order=ns_order)
    standard_name = get_standard_name(standardized, ontology=ontology,
                                      ns_order=ns_order)
    return standard_name, standardized
def get_standard_name(db_refs, ontology=None, ns_order=None):
    """Return a standardized name for a given db refs dict.

    Parameters
    ----------
    db_refs : dict
        A dict of db refs that may not be standardized, i.e., may be
        missing an available UP ID corresponding to an existing HGNC ID.
    ontology : Optional[indra.ontology.IndraOntology]
        An IndraOntology object, if not provided, the default BioOntology
        is used.
    ns_order : Optional[list]
        A list of namespaces which are in order of priority with higher
        priority namespaces appearing earlier in the list.

    Returns
    -------
    str or None
        The standard name based on the db refs, None if not available.
    """
    if ontology is None:
        from indra.ontology.bio import bio_ontology
        ontology = bio_ontology
    # Find the highest-priority grounding; without one there is nothing
    # to standardize against.
    db_ns, db_id = get_grounding(db_refs, ns_order=ns_order)
    if not db_ns or not db_id:
        return None
    name = ontology.get_name(db_ns, db_id)
    # Special case: if a UPPRO feature has no name of its own, fall back
    # on regular gene/protein naming.
    if not name and db_ns == 'UPPRO':
        db_ns, db_id = get_grounding(db_refs, ns_order=['HGNC', 'UP'])
        if not db_ns or not db_id:
            return None
        name = ontology.get_name(db_ns, db_id)
    # In the rare case that no name is available, return None.
    return name if name else None
def standardize_agent_name(agent, standardize_refs=True, ontology=None,
                           ns_order=None):
    """Standardize the name of an Agent based on grounding information.

    The namespace used as the basis for the standard name is chosen
    according to the priority order given by ns_order (or the default
    namespace order when not provided).

    Parameters
    ----------
    agent : indra.statements.Agent
        An INDRA Agent whose name attribute should be standardized based
        on grounding information.
    standardize_refs : Optional[bool]
        If True, this function assumes that the Agent's db_refs need to
        be standardized, e.g., HGNC mapped to UP.
        Default: True
    ontology : Optional[indra.ontology.IndraOntology]
        An IndraOntology object, if not provided, the default BioOntology
        is used.
    ns_order : Optional[list]
        A list of namespaces which are in order of priority with higher
        priority namespaces appearing earlier in the list.

    Returns
    -------
    bool
        True if a new name was set, False otherwise.
    """
    # If the Agent is None, we return immediately
    if agent is None:
        return False
    if standardize_refs:
        # Forward ns_order so that reference standardization uses the same
        # namespace priorities as the naming step below (previously ns_order
        # was dropped here and the default order was always applied).
        agent.db_refs = standardize_db_refs(agent.db_refs, ontology=ontology,
                                            ns_order=ns_order)
    # We next try to get a standard name based on the Agent's grounding
    standard_name = get_standard_name(agent.db_refs, ontology=ontology,
                                      ns_order=ns_order)
    # If we got a proper standard name, we apply it
    if standard_name:
        agent.name = standard_name
        return True
    return False
| {
"repo_name": "bgyori/indra",
"path": "indra/ontology/standardize.py",
"copies": "3",
"size": "8718",
"license": "bsd-2-clause",
"hash": 2306476647895578000,
"line_mean": 36.4163090129,
"line_max": 80,
"alpha_frac": 0.6425785731,
"autogenerated": false,
"ratio": 3.8422212428382547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00005298574683410162,
"num_lines": 233
} |
# Public API of this module.
__all__ = ['standardize_agent_name', 'standardize_db_refs', 'get_standard_name',
           'standardize_name_db_refs']
import logging
from copy import deepcopy
from collections import defaultdict
from indra.statements.agent import default_ns_order, get_grounding
logger = logging.getLogger(__name__)
# Precomputed rank for each namespace in the default priority order; a lower
# rank means a higher priority (used by prioritize below).
default_ns_priorities = {ns: idx for idx, ns in enumerate(default_ns_order)}
def prioritize(ns1, ns2, ns_order=None):
    """Return True if namespace ns2 takes priority over namespace ns1.

    Priorities are ranks in ns_order (earlier means higher priority); when
    ns_order is None, the module-level default priorities are used. A
    namespace missing from the order has no priority at all.
    """
    if ns_order is None:
        priorities = default_ns_priorities
    else:
        priorities = {name: rank for rank, name in enumerate(ns_order)}
    rank1 = priorities.get(ns1)
    rank2 = priorities.get(ns2)
    # ns2 wins only if it is ranked at all, and either ns1 is unranked or
    # ns2 appears earlier in the order (smaller rank).
    if rank2 is None:
        return False
    return rank1 is None or rank2 < rank1
def _get_mappings_dict(mappings):
md = defaultdict(list)
for db_ns, db_id in mappings:
md[db_ns].append(db_id)
return md
def standardize_db_refs(db_refs, ontology=None, ns_order=None):
    """Return a standardized db refs dict for a given db refs dict.

    Parameters
    ----------
    db_refs : dict
        A dict of db refs that may not be standardized, i.e., may be
        missing an available UP ID corresponding to an existing HGNC ID.
    ontology : Optional[indra.ontology.IndraOntology]
        An IndraOntology object, if not provided, the default BioOntology
        is used.
    ns_order : Optional[list]
        A list of namespaces which are in order of priority with higher
        priority namespaces appearing earlier in the list.

    Returns
    -------
    dict
        The db_refs dict with standardized entries.
    """
    if ontology is None:
        from indra.ontology.bio import bio_ontology
        ontology = bio_ontology
    # Iterate over a snapshot of the entries so that mappings added below
    # are not themselves expanded in this same pass.
    for src_ns, src_id in deepcopy(db_refs).items():
        src_id = _preprocess_for_mapping(src_ns, src_id)
        # Collect all xrefs of this entry, grouped by target namespace.
        xrefs_by_ns = _get_mappings_dict(
            ontology.get_mappings(src_ns, src_id))
        for target_ns, target_ids in xrefs_by_ns.items():
            # Add the mapping when the target namespace is not yet present
            # in db_refs, or overwrite the existing entry when the source
            # namespace outranks the target namespace in priority.
            if target_ns not in db_refs or \
                    prioritize(target_ns, src_ns, ns_order=ns_order):
                db_refs[target_ns] = sorted(target_ids)[0]
    return db_refs
def _preprocess_for_mapping(db_ns, db_id):
if db_ns == 'UP' and db_id is not None and '-' in db_id:
return db_id.split('-')[0]
return db_id
def standardize_name_db_refs(db_refs, ontology=None, ns_order=None):
    """Return a standardized name and db refs dict for a given db refs dict.

    Parameters
    ----------
    db_refs : dict
        A dict of db refs that may not be standardized, i.e., may be
        missing an available UP ID corresponding to an existing HGNC ID.
    ontology : Optional[indra.ontology.IndraOntology]
        An IndraOntology object, if not provided, the default BioOntology
        is used.
    ns_order : Optional[list]
        A list of namespaces which are in order of priority with higher
        priority namespaces appearing earlier in the list.

    Returns
    -------
    str or None
        The standard name based on the db refs, None if not available.
    dict
        The db_refs dict with standardized entries.
    """
    db_refs = standardize_db_refs(db_refs, ontology=ontology,
                                  ns_order=ns_order)
    # Forward ontology and ns_order so that naming is consistent with the
    # standardization above (previously these arguments were dropped here,
    # so a caller-supplied ontology/order was silently ignored for naming).
    name = get_standard_name(db_refs, ontology=ontology, ns_order=ns_order)
    return name, db_refs
def get_standard_name(db_refs, ontology=None, ns_order=None):
    """Return a standardized name for a given db refs dict.

    Parameters
    ----------
    db_refs : dict
        A dict of db refs that may not be standardized, i.e., may be
        missing an available UP ID corresponding to an existing HGNC ID.
    ontology : Optional[indra.ontology.IndraOntology]
        An IndraOntology object, if not provided, the default BioOntology
        is used.
    ns_order : Optional[list]
        A list of namespaces which are in order of priority with higher
        priority namespaces appearing earlier in the list.

    Returns
    -------
    str or None
        The standard name based on the db refs, None if not available.
    """
    if ontology is None:
        from indra.ontology.bio import bio_ontology
        ontology = bio_ontology
    # Without a prioritized grounding no standard name can be determined
    grounding = get_grounding(db_refs, ns_order=ns_order)
    if not all(grounding):
        return None
    db_ns, db_id = grounding
    name = ontology.get_name(db_ns, db_id)
    # UPPRO entries may not have a feature name available; fall back on
    # regular gene/protein naming in that case
    if db_ns == 'UPPRO' and not name:
        grounding = get_grounding(db_refs, ns_order=['HGNC', 'UP'])
        if not all(grounding):
            return None
        name = ontology.get_name(*grounding)
    return name or None
def standardize_agent_name(agent, standardize_refs=True, ontology=None,
                           ns_order=None):
    """Standardize the name of an Agent based on grounding information.

    The namespace used as the basis for the standard name is chosen
    according to the priority order given by ns_order (or the default
    namespace order when not provided).

    Parameters
    ----------
    agent : indra.statements.Agent
        An INDRA Agent whose name attribute should be standardized based
        on grounding information.
    standardize_refs : Optional[bool]
        If True, this function assumes that the Agent's db_refs need to
        be standardized, e.g., HGNC mapped to UP.
        Default: True
    ontology : Optional[indra.ontology.IndraOntology]
        An IndraOntology object, if not provided, the default BioOntology
        is used.
    ns_order : Optional[list]
        A list of namespaces which are in order of priority with higher
        priority namespaces appearing earlier in the list.

    Returns
    -------
    bool
        True if a new name was set, False otherwise.
    """
    # If the Agent is None, we return immediately
    if agent is None:
        return False
    if standardize_refs:
        # Forward ns_order so that reference standardization uses the same
        # namespace priorities as the naming step below (previously ns_order
        # was dropped here and the default order was always applied).
        agent.db_refs = standardize_db_refs(agent.db_refs, ontology=ontology,
                                            ns_order=ns_order)
    # We next try to get a standard name based on the Agent's grounding
    standard_name = get_standard_name(agent.db_refs, ontology=ontology,
                                      ns_order=ns_order)
    # If we got a proper standard name, we apply it
    if standard_name:
        agent.name = standard_name
        return True
    return False
| {
"repo_name": "johnbachman/belpy",
"path": "indra/ontology/standardize.py",
"copies": "1",
"size": "7402",
"license": "mit",
"hash": 8135058347529703000,
"line_mean": 35.8258706468,
"line_max": 80,
"alpha_frac": 0.6465820049,
"autogenerated": false,
"ratio": 3.817431665807117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9963706564264023,
"avg_score": 0.00006142128861863521,
"num_lines": 201
} |
"""AllStars URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
# Root URL table: the Django admin, each app's REST API mounted under its own
# namespace, and the Swagger API docs at /api/docs/. Media files are appended
# from MEDIA_ROOT at MEDIA_URL.
# NOTE(review): Django's static() helper serves files only when DEBUG is
# enabled; in production MEDIA_ROOT should be served by the web server.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^api/activity/', include('activities.urls', namespace='activities')),
    url(r'^api/admin/', include('administrator.urls', namespace='administrator')),
    url(r'^api/docs/', include('rest_framework_swagger.urls')),
    url(r'^api/employee/', include('employees.urls', namespace='employees')),
    url(r'^api/event/', include('events.urls', namespace='events')),
    url(r'^api/category/', include('categories.urls', namespace='categories')),
    url(r'^api/star/', include('stars.urls', namespace='stars')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"repo_name": "belatrix/BackendAllStars",
"path": "AllStars/urls.py",
"copies": "1",
"size": "1433",
"license": "apache-2.0",
"hash": 8650061700512537000,
"line_mean": 46.7666666667,
"line_max": 82,
"alpha_frac": 0.7013258897,
"autogenerated": false,
"ratio": 3.6186868686868685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48200127583868685,
"avg_score": null,
"num_lines": null
} |
__all__ = ['StateChecker']
# FEATURES
# Allow overriding limits - CHECK
# Allow captcha or block not to be used - CHECK
# Allow overriding expiration times - CHECK
# Allow changing format of storage keys - CHECK
# Easy to use asynchronously (client should not need to wait for the throttling module before returning an answer to the user) - CHECK
# Independent of the storage method (accept any object following memcache interface) - CHECK
# Allow captcha from services - CHECK
# First time showing a login page to a user, no login attempt has been made, but it should be possible to show the throttling state to the user (a captcha form or a block timeout) - CHECK
# Allow throttling of other requests besides login (registration, password recovery, POSTs in general) - CHECK
# Different storage keys for counters of different functionalities (registration, password recovery) - CHECK
# Provide helper functions for most common implementations (using memcache, synchronous, etc.)
class StateChecker(object):
    """
    Use this class to check the throttling state of client requests.

    :param counters_storage: Storage used to keep the throttling counters.
    :type counters_storage: :class:`securitylib.throttling.common.CountersStorage`
    :param session_storage: Storage used to keep sessions.
    :type session_storage: :class:`securitylib.throttling.common.SessionStorage`
    """

    def __init__(self, counters_storage, session_storage):
        self.counters_storage = counters_storage
        self.session_storage = session_storage

    def check_state(self, ip, user=None, pwd=None, session_id=None, ctx=''):
        """
        Return the throttling state for a given request.

        Call this before processing a request to learn whether it must be
        blocked, whether a captcha must be validated first, or whether no
        action is needed. It can also be called before rendering a page to
        decide what to show the user: a blocked message, a captcha form, or
        neither.

        :param ip: The ip of the client that made the request.
        :type ip: :class:`str`
        :param user: The user sent in the client's login request (used for
                login attempts).
        :type user: :class:`str`
        :param pwd: The password sent in the client's login request (used
                for login attempts).
        :type pwd: :class:`str`
        :param session_id: The session_id for the client's session, generated
                e.g. with :func:`~securitylib.random.get_random_token` and
                stored in a cookie in the client's browser. It is used for
                throttling purposes only, so unlike a typical session it
                should persist even after the user logs out. Only used for
                login requests and can be omitted for other requests.
        :type session_id: :class:`str`
        :param ctx: The context of the request. Use this to keep separate
                throttling counters for different parts of your application
                (e.g. so a user blocked from login can still try a password
                recovery). Always use the same string for a given context.
        :type ctx: :class:`str`
        :returns: :class:`dict` -- A dictionary with the requested throttling
                state. Its 'state' key is one of 'ok', 'captcha' and 'block'.
                When 'state' is 'block', an additional 'unblock_timestamp'
                key holds the timestamp at which the block ends, which can be
                used to tell the client when he will be unblocked.
        """
        # A session that has already seen a valid login for this user is
        # trusted outright and is never throttled.
        if session_id:
            session = self.session_storage.get(session_id)
            if session and user and session.has_valid_login(user):
                return {'state': 'ok'}
        # Otherwise the state is derived from the stored counters for this
        # (ip, user, pwd, context) combination.
        counters = self.counters_storage.get(ip, user, pwd, ctx)
        return counters.get_info()
| {
"repo_name": "sapo/securitylib-python",
"path": "securitylib/throttling/client.py",
"copies": "1",
"size": "4510",
"license": "mit",
"hash": -1359517365153365500,
"line_mean": 56.0886075949,
"line_max": 187,
"alpha_frac": 0.7019955654,
"autogenerated": false,
"ratio": 4.4831013916500995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.56850969570501,
"avg_score": null,
"num_lines": null
} |
# Public API of this module.
__all__ = ['statement_to_annotations']
from indra.assemblers.english import EnglishAssembler
from indra.databases import identifiers
from indra.statements.agent import get_grounding, default_ns_order
# Namespaces eligible for agent grounding links, in priority order: the
# standard INDRA namespace order extended with some additional databases.
grounding_ns = default_ns_order + \
    ['NCIT', 'PUBCHEM', 'CHEMBL']
def statement_to_annotations(stmt, annotate_agents=True):
    """Return a list of annotation structures for a Statement's evidences.

    Each evidence that can be annotated (has text and a resolvable URL)
    yields one entry; all entries share the same English-assembled
    annotation text for the statement.
    """
    text = get_annotation_text(stmt, annotate_agents=annotate_agents)
    results = []
    for evidence in stmt.evidence:
        entry = evidence_to_annotation(evidence)
        if entry is not None:
            entry['annotation'] = text
            results.append(entry)
    return results
def evidence_to_annotation(evidence):
    """Return an annotation dict for an Evidence object, or None.

    An annotation requires both evidence text and a resolvable article URL;
    URL preference is PMC, then PubMed, then a raw URL in text_refs.
    """
    if not evidence.text:
        return None
    pmcid = evidence.text_refs.get('PMCID')
    if pmcid:
        url = 'https://www.ncbi.nlm.nih.gov/pmc/articles/%s/' % pmcid
    elif evidence.pmid:
        url = 'https://pubmed.ncbi.nlm.nih.gov/%s/' % evidence.pmid
    else:
        url = evidence.text_refs.get('URL')
        if not url:
            return None
    return {'url': url,
            'target_text': evidence.text,
            'tags': [evidence.source_api]}
def get_annotation_text(stmt, annotate_agents=True):
    """Return English-assembled annotation text for a Statement.

    Parameters
    ----------
    stmt : indra.statements.Statement
        The Statement to generate annotation text for.
    annotate_agents : Optional[bool]
        If True, agent names in the text are replaced by markdown links
        pointing to identifiers.org URLs for their grounding. Default: True

    Returns
    -------
    str
        The assembled (and optionally agent-annotated) sentence.
    """
    ea = EnglishAssembler(stmts=[stmt])
    annotation_text = ea.make_model()
    if annotate_agents:
        # Each link insertion grows the text, so we record (start, length
        # delta) pairs and shift later agents' coordinates accordingly.
        inserts = []
        for agent_wc in ea.stmt_agents[0]:
            # Shift this agent's coordinates by every earlier insertion
            # that began before its (current) start position.
            for insert_begin, insert_len in inserts:
                if insert_begin < agent_wc.coords[0]:
                    agent_wc.update_coords(insert_len)
            db_ns, db_id = get_grounding(agent_wc.db_refs,
                                         grounding_ns)
            if not db_ns:
                # Ungrounded agents are left as plain text.
                continue
            identifiers_url = \
                identifiers.get_identifiers_url(db_ns, db_id)
            # Replace the agent's span with a markdown link to its grounding.
            grounding_text = '[%s](%s)' % (agent_wc.name, identifiers_url)
            insert_len = len(grounding_text) - agent_wc.coords[1] + \
                agent_wc.coords[0]
            inserts.append((agent_wc.coords[0], insert_len))
            before_part = annotation_text[:agent_wc.coords[0]]
            after_part = annotation_text[agent_wc.coords[1]:]
            annotation_text = ''.join([before_part, grounding_text,
                                       after_part])
    return annotation_text
| {
"repo_name": "sorgerlab/indra",
"path": "indra/sources/hypothesis/annotator.py",
"copies": "3",
"size": "2404",
"license": "bsd-2-clause",
"hash": 6818856717971041000,
"line_mean": 34.3529411765,
"line_max": 74,
"alpha_frac": 0.578202995,
"autogenerated": false,
"ratio": 3.6646341463414633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 68
} |
# Public API of this module.
__all__ = ['State']
from ..connection import MAVLinkMessage, MAVLinkConnection
from ...state import State as BaseState
from ...state import var
# These are the MAVLink message types we should later listen to.
# NOTE(review): this list is not referenced anywhere in this module —
# presumably intended for filtering incoming messages; confirm before use.
IMPORTANT_MESSAGE_NAMES = [
    'GLOBAL_POSITION_INT',
    'ATTITUDE',
    'VFR_HUD',
    'SYS_STATUS',
    'EKF_STATUS_REPORT',
    'HEARTBEAT',
    'HOME_POSITION',
]
class State(BaseState):
    """Observable state of an ArduPilot copter.

    Each ``var`` declaration reads a live value from the connection held by
    the sandbox (``c.conn``); ``noise`` gives the tolerance used when
    comparing two measurements of that variable.
    """
    # Home position in degrees.
    home_latitude = var(float,
                        lambda c: c.conn.home_location.lat,
                        noise=0.0005)
    home_longitude = var(float,
                         lambda c: c.conn.home_location.lon,
                         noise=0.0005)
    # Current position in the global-relative frame.
    altitude = var(float,
                   lambda c: c.conn.location.global_relative_frame.alt,  # noqa: pycodestyle
                   noise=0.5)
    latitude = var(float,
                   lambda c: c.conn.location.global_relative_frame.lat,  # noqa: pycodestyle
                   noise=0.0005)
    longitude = var(float,
                    lambda c: c.conn.location.global_relative_frame.lon,  # noqa: pycodestyle
                    noise=0.0005)
    # Arming and mode status.
    armable = var(bool, lambda c: c.conn.is_armable)
    armed = var(bool, lambda c: c.conn.armed)
    mode = var(str, lambda c: c.conn.mode.name)
    # Velocity components -- units per dronekit; TODO confirm (m/s assumed).
    vx = var(float, lambda c: c.conn.velocity[0], noise=0.3)
    vy = var(float, lambda c: c.conn.velocity[1], noise=0.3)
    vz = var(float, lambda c: c.conn.velocity[2], noise=0.3)
    # Attitude angles.
    pitch = var(float, lambda c: c.conn.attitude.pitch, noise=0.2)
    yaw = var(float, lambda c: c.conn.attitude.yaw, noise=0.2)
    roll = var(float, lambda c: c.conn.attitude.roll, noise=0.2)
    heading = var(float, lambda c: c.conn.heading, noise=15)  # FIXME high?
    airspeed = var(float, lambda c: c.conn.airspeed, noise=1.0)
    groundspeed = var(float, lambda c: c.conn.groundspeed, noise=1.0)
    ekf_ok = var(bool, lambda c: c.conn.ekf_ok)
    # throttle_channel = var(float, lambda c: c.conn.channels['3'])
    # roll_channel = var(float, lambda c: c.conn.channels['1'])

    def evolve(self,
               message: MAVLinkMessage,
               time_offset: float,
               connection: MAVLinkConnection
               ) -> 'State':
        """Return a fresh snapshot of all variables read from the connection.

        NOTE(review): ``message`` is not consulted here -- every variable is
        re-read from the live connection regardless of the message type.
        """
        values = {name: v.read(connection)
                  for (name, v) in self.variables.items()}
        values['time_offset'] = time_offset
        state_new = self.__class__(**values)
        return state_new
| {
"repo_name": "squaresLab/Houston",
"path": "houston/ardu/copter/state.py",
"copies": "1",
"size": "2409",
"license": "mit",
"hash": 3167014822209871400,
"line_mean": 39.15,
"line_max": 93,
"alpha_frac": 0.5873806559,
"autogenerated": false,
"ratio": 3.345833333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44332139892333333,
"avg_score": null,
"num_lines": null
} |
# Public API of this module.
__all__ = ['State']
from ...state import State as BaseState
from ...state import var
class State(BaseState):
    """Observable state of an ArduPilot rover.

    Each ``var`` declaration reads a live value from the connection held by
    the sandbox (``c.connection``); ``noise`` gives the tolerance used when
    comparing two measurements of that variable.
    """
    # Home position is hard-coded rather than read from the vehicle (see
    # the TODO markers below).
    home_latitude = var(float, lambda c: -35.362938)  # TODO: fixed
    home_longitude = var(float, lambda c: 149.165085)  # TODO: fixed
    # Current position in the global-relative frame.
    altitude = var(float,
                   lambda c: c.connection.location.global_relative_frame.alt,  # noqa: pycodestyle
                   noise=1.0)
    latitude = var(float,
                   lambda c: c.connection.location.global_relative_frame.lat,  # noqa: pycodestyle
                   noise=0.0005)
    longitude = var(float,
                    lambda c: c.connection.location.global_relative_frame.lon,  # noqa: pycodestyle
                    noise=0.0005)
    # Arming and mode status.
    armable = var(bool, lambda c: c.connection.is_armable)
    armed = var(bool, lambda c: c.connection.armed)
    mode = var(str, lambda c: c.connection.mode.name)
    # Attitude angles.
    pitch = var(float, lambda c: c.connection.attitude.pitch, noise=0.05)
    yaw = var(float, lambda c: c.connection.attitude.yaw, noise=0.05)
    roll = var(float, lambda c: c.connection.attitude.roll, noise=0.05)
    heading = var(float, lambda c: c.connection.heading, noise=2)
    groundspeed = var(float, lambda c: c.connection.groundspeed, noise=0.05)
    ekf_ok = var(bool, lambda c: c.connection.ekf_ok)
    # Raw RC channel values for throttle and roll.
    throttle_channel = var(float, lambda c: c.connection.channels['3'])
    roll_channel = var(float, lambda c: c.connection.channels['1'])
| {
"repo_name": "squaresLab/Houston",
"path": "houston/ardu/rover/state.py",
"copies": "1",
"size": "1441",
"license": "mit",
"hash": 3697347207647927000,
"line_mean": 48.6896551724,
"line_max": 99,
"alpha_frac": 0.6426092991,
"autogenerated": false,
"ratio": 3.3126436781609194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4455252977260919,
"avg_score": null,
"num_lines": null
} |
# Public API of this module.
__all__ = ['State', 'var', 'Variable']
from typing import Dict, Any, Optional, Union, TypeVar, Generic, Type, \
Callable, FrozenSet, Iterator
import logging
import copy
import json
import math
import attr
from . import exceptions
from .connection import Message
# Module-level logger; DEBUG is enabled so variable/property construction in
# StateMeta is traced.
logger = logging.getLogger(__name__)  # type: logging.Logger
logger.setLevel(logging.DEBUG)
# Type variable for the value type produced by a variable's getter.
K = TypeVar('K')
class Variable(object):
    """A single named state variable with typed, possibly noisy readings."""

    def __init__(self,
                 name: str,
                 typ: Type,
                 getter,  # FIXME add annotation
                 noise: Optional[Union[int, float]] = None
                 ) -> None:
        """
        Constructs a new state variable.

        Parameters:
            name: the name of this variable.
            typ: the type of this variable.
            getter: a lambda function, used to obtain the value of this
                variable.
            noise: the inherent level of noise when measuring this variable.
        """
        assert noise is None or noise >= 0
        self.__name = name
        self.__typ = typ
        self.__read = getter
        self.__noise = noise

    @property
    def typ(self) -> Type:
        """The type of values this variable takes."""
        return self.__typ

    @property
    def is_noisy(self) -> bool:
        """Whether measurements of this variable carry inherent noise."""
        return self.__noise is not None

    @property
    def noise(self) -> Optional[Union[int, float]]:
        """The expected measurement noise, or None if noise-free."""
        return self.__noise

    @property
    def _field(self) -> str:
        """The name of the field used to store the value of this variable."""
        return "__{}".format(self.__name)

    @property
    def name(self) -> str:
        """The name of this variable."""
        return self.__name

    def eq(self, x, y) -> bool:
        """True if two measurements are equal, up to the noise level."""
        if self.__noise is None:
            return x == y
        return math.fabs(x - y) <= self.__noise

    def neq(self, x, y) -> bool:
        """True if two measurements are not considered equal."""
        return not self.eq(x, y)

    def gt(self, x, y) -> bool:
        """True if x is strictly greater than y."""
        return x > y

    def lt(self, x, y) -> bool:
        """True if x is strictly less than y."""
        return x < y

    def leq(self, x, y) -> bool:
        """True if x is less than or equal to y."""
        return not self.gt(x, y)

    def geq(self, x, y) -> bool:
        """True if x is greater than or equal to y."""
        return not self.lt(x, y)

    def read(self, sandbox):
        """Measure the current value of this variable from the sandbox."""
        return self.__read(sandbox)
@attr.s(frozen=True)
class VariableBuilder(Generic[K]):
    """Immutable recipe for a Variable, captured before its name is known.

    Instances are created via ``var(...)`` in a State class body; StateMeta
    later calls :meth:`build` with the attribute name to produce the
    actual Variable.
    """
    typ = attr.ib(type=Type[K])
    getter = attr.ib(type=Callable[['Sandbox'], K])
    noise = attr.ib(type=Optional[Union[int, float]])

    def build(self, name: str) -> Variable:
        """Return a Variable with the given name and this recipe's fields."""
        return Variable(name, self.typ, self.getter, self.noise)
def var(typ: Type,
        getter: Callable[['Sandbox'], K],
        noise: Optional[Union[int, float]] = None
        ) -> VariableBuilder:
    """Declare a state variable inside a State class body.

    Returns a builder that StateMeta turns into a Variable named after the
    class attribute it is assigned to.
    """
    return VariableBuilder(typ=typ, getter=getter, noise=noise)
class StateMeta(type):
    """Metaclass that turns VariableBuilder class attributes into Variables.

    For every ``var(...)`` declaration in the class body it builds a
    Variable, records them all in a ``variables`` dict on the class, and
    replaces each declaration with a read-only property over the backing
    field.
    """

    def __new__(mcl,
                cls_name: str,
                bases,  # FIXME
                ns: Dict[str, Any]
                ):
        # collect the variable declarations from the class namespace
        builders = {
            name: value for name, value in ns.items()
            if isinstance(value, VariableBuilder)
        }  # type: Dict[str, VariableBuilder]
        for name in builders:
            logger.debug("found variable: %s", name)
        logger.debug("building variables")
        # FIXME build frozen dictionary
        variables = {
            name: builder.build(name) for name, builder in builders.items()
        }  # type: Dict[str, Variable]
        logger.debug("built variables: %s", variables)
        logger.debug("storing variables in variables property")
        ns['variables'] = variables
        logger.debug("stored variables in variables property")
        logger.debug("constructing properties")
        for name, variable in variables.items():
            # bind the field name now (via a default argument) so each
            # property reads its own backing field
            ns[variable.name] = property(
                lambda self, f=variable._field: getattr(self, f))
        logger.debug("constructed properties")
        return super().__new__(mcl, cls_name, bases, ns)
class State(object, metaclass=StateMeta):
    """
    Describes the state of the system at a given moment in time, in terms of
    its internal and external variables.
    """
    @classmethod
    def from_file(cls: Type['State'], fn: str) -> 'State':
        """
        Constructs a system state from a given file, containing a JSON-based
        description of its contents.
        """
        with open(fn, "r") as f:
            jsn = json.load(f)
        # NOTE(review): from_json is not defined in this class -- presumably
        # provided by a subclass or mixin; confirm (from_dict may be meant).
        return cls.from_json(jsn)

    @classmethod
    def from_dict(cls: Type['State'], d: Dict[str, Any]) -> 'State':
        """Constructs a system state from a dict of variable values."""
        return cls(**d)

    def __init__(self, *args, **kwargs) -> None:
        """
        Accepts keyword arguments only: 'time_offset' plus one value per
        declared state variable.

        Raises:
            TypeError: if positional arguments are given, if time_offset or
                any variable value is missing, or if unexpected keyword
                arguments are supplied.
        """
        cls_name = self.__class__.__name__
        variables = self.__class__.variables  # type: Dict[str, Variable]
        try:
            self.__time_offset = kwargs['time_offset']
        except KeyError:
            msg = "missing keyword argument [time_offset] to constructor [{}]"
            msg = msg.format(cls_name)
            raise TypeError(msg)
        # were any positional arguments passed to the constructor?
        if args:
            msg = "constructor [{}] accepts no positional arguments but {} {} given"  # noqa: pycodestyle
            msg = msg.format(cls_name,
                             "was" if len(args) == 1 else "were",
                             len(args))
            raise TypeError(msg)
        # set values for each variable
        for name, v in variables.items():
            try:
                val = kwargs[name]
            except KeyError:
                msg = "missing keyword argument [{}] to constructor [{}]"
                msg = msg.format(name, cls_name)
                raise TypeError(msg)
            # TODO perform run-time type checking?
            setattr(self, v._field, val)
        # did we pass any unexpected keyword arguments?
        if len(kwargs) > len(variables) + 1:
            actual_args = set(n for n in kwargs)
            expected_args = \
                set(name for name in variables) | {'time_offset'}
            unexpected_arguments = list(actual_args - expected_args)
            msg = "unexpected keyword arguments [{}] supplied to constructor [{}]"  # noqa: pycodestyle
            msg = msg.format('; '.join(unexpected_arguments), cls_name)
            raise TypeError(msg)

    @property
    def time_offset(self) -> float:
        """Time offset of this state snapshot."""
        return self.__time_offset

    def equiv(self, other: 'State') -> bool:
        """True if all variable values match (time_offset is ignored)."""
        if type(self) != type(other):
            msg = "illegal comparison of states: [{}] vs. [{}]"
            # BUG FIX: this previously referenced an undefined name 'state',
            # raising NameError instead of the intended HoustonException.
            msg = msg.format(self.__class__.__name__,
                             other.__class__.__name__)
            raise exceptions.HoustonException(msg)
        for n, v in self.__class__.variables.items():
            if self.__dict__[v._field] != other.__dict__[v._field]:
                return False
        return True

    def exact(self, other: 'State') -> bool:
        """True if states are equivalent and share the same time_offset."""
        return self.equiv(other) and self.time_offset == other.time_offset

    __eq__ = exact

    def __hash__(self) -> int:
        all_vars = (self.time_offset,)
        all_vars += tuple(self[v] for v in self.__class__.variables)
        return hash(all_vars)

    def __getitem__(self, name: str) -> Any:
        """Returns the value of the named state variable.

        Raises:
            KeyError: if no variable with that name exists.
        """
        try:
            var = self.__class__.variables[name]
        # BUG FIX: dict indexing raises KeyError, not StopIteration, and the
        # result of msg.format(...) was previously discarded, so the raised
        # message was the unformatted template.
        except KeyError:
            msg = "no variable [{}] in state [{}]"
            msg = msg.format(name, self.__class__.__name__)
            raise KeyError(msg)
        return getattr(self, var._field)

    def to_dict(self) -> Dict[str, Any]:
        """Returns a dict of all variable values plus time_offset."""
        fields = {}  # type: Dict[str, Any]
        fields['time_offset'] = self.__time_offset
        for name, var in self.__class__.variables.items():
            fields[name] = getattr(self, var._field)
        return fields

    def __repr__(self) -> str:
        fields = self.to_dict()
        for (name, val) in fields.items():
            if isinstance(val, float):
                s = "{:.3f}".format(val)
            else:
                s = str(val)
            # BUG FIX: the formatted string was previously discarded
            # (fields[name] = val), making the float formatting dead code.
            fields[name] = s
        s = '; '.join(["{}: {}".format(k, v) for (k, v) in fields.items()])
        s = "{}({})".format(self.__class__.__name__, s)
        return s

    def __iter__(self) -> Iterator[Variable]:
        """Iterates over the Variable objects declared for this state."""
        yield from self.__class__.variables.values()

    def evolve(self, message: Message, time_offset: float) -> 'State':
        """
        Create a new state object evolved from this state based on the
        received message.
        """
        raise NotImplementedError
| {
"repo_name": "squaresLab/Houston",
"path": "houston/state.py",
"copies": "1",
"size": "8906",
"license": "mit",
"hash": -7561222528326115000,
"line_mean": 30.8071428571,
"line_max": 105,
"alpha_frac": 0.5488434763,
"autogenerated": false,
"ratio": 4.136553646075244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000059523809523809524,
"num_lines": 280
} |
# All steps for configuring an SPNG Backend server (except for the JAAS J2C Authentication entries which require a save and server restart to take effect)
# NOTE(review): this is a wsadmin (Jython 2.x) script; AdminConfig and
# AdminTask are globals injected by WebSphere's wsadmin tool.
import os
# Target server and node come from the environment; the cell name follows
# the "<node>Cell" convention.
myServerName = os.environ["WAS_PROFILE_SERVER_NAME"]
myNodeName = os.environ["MY_WAS_NODE"]
myCellName = myNodeName+"Cell"
# Marker printed after each completed step -- presumably parsed by a
# driving process; TODO confirm.
doneMarker = "#"
def createJDBCProvider():
    """Create the server-scoped DB2 Universal JDBC (XA) provider if it does
    not already exist, then set the DB2UNIVERSAL_* WebSphere variables that
    the provider's classpath references."""
    providerName = "DB2 Universal JDBC Driver Provider (XA)"
    print "Checking for JDBC Provider " + providerName
    # Test to see if the provider has already been created.
    jdbcdb2provider = AdminConfig.getid("/JDBCProvider:" + providerName + "/")
    if len(jdbcdb2provider) == 0:
        print 'Creating %s on %s' % (providerName, myServerName)
        # The classpath uses WebSphere variables that are assigned real
        # filesystem paths further down in this function.
        args = '[-scope Server='+myServerName+' -databaseType DB2 -providerType "DB2 Universal JDBC Driver Provider" \
        -implementationType "XA data source" -name "DB2 Universal JDBC Driver Provider (XA)" \
        -description "XA DB2 Universal JDBC Driver-compliant Provider. Datasources created under this provider support the use of XA to perform 2-phase commit processing." \
        -classpath "${DB2UNIVERSAL_JDBC_DRIVER_PATH}/db2jcc.jar;${UNIVERSAL_JDBC_DRIVER_PATH}/db2jcc_license_cu.jar;${DB2UNIVERSAL_JDBC_DRIVER_PATH}/db2jcc_license_cisuz.jar" \
        -nativePath "${DB2UNIVERSAL_JDBC_DRIVER_NATIVEPATH}"]'
        AdminTask.createJDBCProvider(args)
    else:
        print 'DB2 provider already exists, not created.'
    # that won't all work without setting up the websphere variables for the DB2UNIVERSAL* items
    print "updating DB2UNIVERSAL* variables"
    # Driver jars live under <DATABASE_PATH>/java, native libs under .../lib.
    myDbPath = os.environ["DATABASE_PATH"]
    myDriverPath = myDbPath+"/java"
    myNativePath = myDbPath+"/lib"
    driverPathVarName = "DB2UNIVERSAL_JDBC_DRIVER_PATH"
    nativePathVarName = "DB2UNIVERSAL_JDBC_DRIVER_NATIVEPATH"
    AdminTask.setVariable('[ -scope Server='+myServerName+' -variableName '+driverPathVarName+' -variableValue "'+myDriverPath+'"]')
    AdminTask.setVariable('[ -scope Server='+myServerName+' -variableName '+nativePathVarName+' -variableValue "'+myNativePath+'"]')
    print doneMarker
#enddef createJDBCProvider
def createDataSources():
    """Create the application datasources under the DB2 XA provider on this
    server. Each datasource is skipped if one of the same name already
    exists; raises if the required JDBC provider has not been created."""
    print "Creating data sources..."
    def createDataSource(server, provider, providerName, dataSourceName, dsJNDIName, cmaAlias, dbServerName, dbPortNum, dbName) :
        # Create a single DB2 type-4 (driverType 4) datasource unless one of
        # the same name already exists. cmaAlias serves as both the
        # component-managed auth alias and the XA recovery alias.
        dataSource = AdminConfig.getid("/Server:"+server+"/JDBCProvider:"+providerName+"/DataSource:"+dataSourceName+"/")
        if len(dataSource) == 0:
            print "Creating datasource %s" % (dataSourceName)
            # Set the datasource attributes
            args = ('[-name %s -jndiName %s -dataStoreHelperClassName com.ibm.websphere.rsadapter.DB2UniversalDataStoreHelper \
            -containerManagedPersistence true -componentManagedAuthenticationAlias "%s" -xaRecoveryAuthAlias "%s" \
            -configureResourceProperties [[databaseName java.lang.String %s][driverType java.lang.Integer 4] \
            [serverName java.lang.String %s][portNumber java.lang.Integer %s]]]') \
                % (dataSourceName, dsJNDIName, cmaAlias, cmaAlias, dbName, dbServerName, dbPortNum)
            dataSource = AdminTask.createDatasource(provider, args)
        else:
            print "Datasource %s already exists, no changes" % (dataSourceName)
        return
    #enddef
    # Backend database hosts and the J2C auth aliases used to reach them.
    server08 = "rslaix08b.dub.usoh.ibm.com"
    auth08 = "DbAuth08b"
    server23 = "wccaix23b.dub.usoh.ibm.com"
    auth23 = "DbAuth23b"
    providerName = "DB2 Universal JDBC Driver Provider (XA)"
    # fortunately, this doesn't have to have been saved before you can retrieve it
    db2provider = AdminConfig.getid('/Server:'+myServerName+'/JDBCProvider:'+providerName+'/')
    if len(db2provider) == 0:
        raise Exception("Required provider %s does not exist! Abandoning..." % (providerName))
    # createDataSource(server, provider, providerName, dataSourceName, dsJNDIName, cmaAlias, dbServerName, dbPortNum, dbName) :
    createDataSource(myServerName, db2provider, providerName, "GcsDB", "jdbc/GcsDB", auth08, server08, 50030, "GCS1")
    createDataSource(myServerName, db2provider, providerName, "QuestDB", "jdbc/QuestDB", auth23, server23, 50002, "SYSSUPT")
    createDataSource(myServerName, db2provider, providerName, "SCSIDB", "jdbc/SCSIDB", auth23, server23, 50000, "SCSI")
    createDataSource(myServerName, db2provider, providerName, "SPEDB", "jdbc/SPEDB", auth08, server08, 50030, "SPE")
    createDataSource(myServerName, db2provider, providerName, "SPE_CST", "jdbc/SPEDB_CST_Editor", auth23, server23, 50000, "SPE")
    createDataSource(myServerName, db2provider, providerName, "SPE_TILT", "jdbc/SPEDB_TILT", auth23, server23, 50000, "SPE")
    print doneMarker
#enddef createDataSources
def applySSLCerts():
print "installing SSL Certs"
cwd = os.getcwd()
ssldir=cwd+"/sslcerts"
files = os.listdir(ssldir)
for aFile in files:
alias, fileExtension = os.path.splitext(aFile)
args = '[-keyStoreName NodeDefaultTrustStore -keyStoreScope (cell):%s:(node):%s -certificateFilePath "%s/%s" -base64Encoded true -certificateAlias "%s"]' \
% (myCellName, myNodeName, ssldir, aFile, alias)
AdminTask.addSignerCertificate(args)
print "SSL certificate added for " + aFile
print doneMarker
#enddef applySSLCerts
def createServletCaches():
print "creating servlet caches"
myServerPath = '/Server:'+myServerName+'/'
s1 = AdminConfig.getid(myServerPath)
# setup checkbox to enable servlet caching
wc = AdminConfig.list('WebContainer', s1)
serEnable = [['enableServletCaching', 'true']]
AdminConfig.modify(wc, serEnable)
cp = AdminConfig.list('CacheProvider', s1)
# currently, we use name as jndi name, so just define name once and desired size (I'd suggest a dictionary for these if we add jndi name as different)
# sigh: AdminTask.createServletCacheInstance is not capable of taking a size argument. If we really need this, we'll need to find another way.
# size setting will be ignored for now
caches = [
("hw-quest-cache", 2000),
("icons-content-cache", 2000),
("lookahead", 2000),
("quick-subscribe-cache", 2000),
("scsi-webidentity-cache", 2000),
("search-content-cache", 2000),
]
for cache in caches:
print "Creating servlet cache %s" % (cache[0])
args = "[-name %s -jndiName %s]" % (cache[0], cache[0])
AdminTask.createServletCacheInstance(cp, args)
print doneMarker
#enddef createServletCaches
def createWorkManagers():
print "creating work managers"
server = AdminConfig.getid('/Server:'+myServerName+'/WorkManagerProvider:WorkManagerProvider/')
# work managers to create (use Python dict to make values clear and easier to update/change)
# unless you want to change defaults, only first line needs updating. Some less common attributes left out.
workmanagers = [
{"name":"scsi.entitlements.provider", "jndiName":"wm/entitlements-provider", "description":"SCSI entitlements provider work manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"common.search.work.manager", "jndiName":"wm/cs", "description":"Common Search Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"chat.service.work.manager", "jndiName":"wm/chat", "description":"Chat Service Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"seb.service.work.manager", "jndiName":"wm/sebwebservice", "description":"SEB Web Service Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"bookmark.work.manager", "jndiName":"wm/bookmark", "description":"Bookmarks Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"nfluentStat.work.manager", "jndiName":"wm/nfluentStat", "description":"nFluent Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"social.media.data.work.manager", "jndiName":"wm/social_media_data", "description":"Social Media Data Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"shortcut.links.work.manager", "jndiName":"wm/shortcut_links", "description":"Shortcuts Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"client.side.tools.work.manager", "jndiName":"wm/client_side_tools", "description":"Client Side Tools Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"ecc.upload.command.work.manager", "jndiName":"wm/ecc_upload", "description":"ECC upload service Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"productContext.provider.work.manager", "jndiName":"wm/product_context_provider", "description":"Product Context Provider Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"contract.data.provider.work.manager", "jndiName":"wm/contract_data_provider", "description":"Contract Data Provider Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"consolidatedInventory.provider.work.manager", "jndiName":"wm/consolidated_inventory_provider", "description":"Consolidated Inventory Data Provider Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"notificationSubscriptions.provider.work.manager", "jndiName":"wm/notification_subscriptions_provider", "description":"Notification Subscriptions Data Provider Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"inventoryTranslator.work.manager", "jndiName":"wm/inventory_translator", "description":"Inventory Translator Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"chweSystems.work.manager", "jndiName":"wm/chwe_systems", "description":"CHWE Systems Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"chweProbRpt.work.manager", "jndiName":"wm/chwe_problem_report", "description":"CHWE ProblemReport Work Manager",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"chweRecentEvents.work.manager", "jndiName":"wm/recent_events", "description":"CHWE Recent Events",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"chweHeartbeatEvents.work.manager", "jndiName":"wm/heartbeat_events", "description":"CHWE Heartbeat Events ",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"chweSystemSummary.work.manager", "jndiName":"wm/system_summary", "description":"CHWE System Summary",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"chweEventSummary.work.manager", "jndiName":"wm/event_summary", "description":"CHWE Event Summary",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
{"name":"chweApprovePendingSystem.work.manager", "jndiName":"wm/approve_pending_system", "description":"CHWE Approve a pending system",
"workReqQFullAction":"0", "minThreads":"0", "numAlarmThreads":"2", "workReqQSize":"0", "maxThreads":"2", "isGrowable":"true",
"threadPriority":"5", "workTimeout":"0"},
]
for wm in workmanagers:
wmattrs = \
'[\
[name "%s"] \
[jndiName "%s"] \
[description "%s"] \
[minThreads "%s"] \
[maxThreads "%s"] \
[threadPriority "%s"] \
[isGrowable "%s"] \
[numAlarmThreads "%s"] \
[workReqQSize "%s"] \
[workTimeout "%s"] \
[workReqQFullAction "%s"] \
]' % (wm["name"], wm["jndiName"], wm["description"], wm["minThreads"], wm["maxThreads"], wm["threadPriority"], wm["isGrowable"],
wm["numAlarmThreads"], wm["workReqQSize"], wm["workTimeout"], wm["workReqQFullAction"], )
AdminConfig.create('WorkManagerInfo', server, wmattrs)
print "Created %s work managers" % len(workmanagers)
print doneMarker
#enddef createWorkManagers
def testDataSources():
print "testing data sources"
def testDataSource(server, providerName, dataSourceName) :
# check to see if the datasource already exists
dataSource = AdminConfig.getid("/Server:"+server+"/JDBCProvider:"+providerName+"/DataSource:"+dataSourceName+"/")
print 'Testing datasource %s connection...' % (dataSourceName)
print AdminControl.testConnection(dataSource)
return
#enddef
providerName = "DB2 Universal JDBC Driver Provider (XA)"
db2provider = AdminConfig.getid('/Server:'+myServerName+'/JDBCProvider:'+providerName+'/')
testDataSource(myServerName, providerName, "GcsDB")
testDataSource(myServerName, providerName, "QuestDB")
testDataSource(myServerName, providerName, "SCSIDB")
testDataSource(myServerName, providerName, "SPEDB")
testDataSource(myServerName, providerName, "SPE_CST")
testDataSource(myServerName, providerName, "SPE_TILT")
print doneMarker
#enddef testDataSources
# start configuration process: each step below is idempotent where possible,
# so the script can be re-run after a partial failure.
createJDBCProvider()
createDataSources()
applySSLCerts()
createServletCaches()
createWorkManagers()
# Can't test data sources until after we save: AdminControl.testConnection
# works against the persisted configuration. If we got here, should be
# error free, so...
print "Everything is looking good. Saving changes so we can test the data sources..."
AdminConfig.save()
testDataSources()
print "Yay! Your server is configured"
| {
"repo_name": "millarde/wasadmin-automation",
"path": "ConfigureServer.py",
"copies": "1",
"size": "16418",
"license": "mit",
"hash": 1827793537246454300,
"line_mean": 52.4788273616,
"line_max": 187,
"alpha_frac": 0.6623827506,
"autogenerated": false,
"ratio": 3.5583008235804074,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47206835741804076,
"avg_score": null,
"num_lines": null
} |
__all__ = ['stmts_from_json', 'stmts_from_json_file', 'stmts_to_json',
'stmts_to_json_file', 'draw_stmt_graph', 'pretty_print_stmts',
'UnresolvedUuidError', 'InputError',
'set_pretty_print_max_width', 'print_stmt_summary']
import json
import logging
from collections import Counter
from typing import Collection, List, Optional
from indra.statements.statements import Statement, Unresolved
logger = logging.getLogger(__name__)
def stmts_from_json(json_in, on_missing_support='handle'):
    """Get a list of Statements from Statement jsons.

    For pre-assembled Statements carrying `supports`/`supported_by` uuid
    lists, each uuid is replaced with the corresponding Statement object
    from `json_in` where possible; `on_missing_support` controls what
    happens when a uuid cannot be resolved.

    Parameters
    ----------
    json_in : iterable[dict]
        A json list containing json dict representations of INDRA
        Statements, as produced by the `to_json` methods of subclasses of
        Statement, or equivalently by `stmts_to_json`.
    on_missing_support : Optional[str]
        One of 'handle' (default: wrap unresolved uuids in `Unresolved`
        Statement objects), 'ignore' (drop unresolvable uuids), or 'error'
        (raise on the first unresolvable uuid).

    Returns
    -------
    stmts : list[:py:class:`Statement`]
        A list of INDRA Statements.
    """
    uuid_to_stmt = {}
    stmts = []
    for stmt_json in json_in:
        try:
            stmt = Statement._from_json(stmt_json)
        except Exception as err:
            logger.warning("Error creating statement: %s" % err)
            continue
        stmts.append(stmt)
        uuid_to_stmt[stmt.uuid] = stmt
    # Second pass: with every statement indexed by uuid, rewrite the support
    # links in place from uuid strings to Statement objects.
    for stmt in stmts:
        _promote_support(stmt.supports, uuid_to_stmt, on_missing_support)
        _promote_support(stmt.supported_by, uuid_to_stmt, on_missing_support)
    return stmts
def stmts_from_json_file(fname, format='json'):
    """Return a list of statements loaded from a JSON file.

    Parameters
    ----------
    fname : str
        Path to the JSON file to load statements from.
    format : Optional[str]
        One of 'json' to assume regular JSON formatting or
        'jsonl' assuming each statement is on a new line.

    Returns
    -------
    list[indra.statements.Statement]
        The list of INDRA Statements loaded from the JSON file.
    """
    with open(fname, 'r') as fh:
        if format == 'json':
            return stmts_from_json(json.load(fh))
        # Any other value is treated as JSON-lines: one statement per line.
        # Iterate the handle directly rather than readlines() so the raw
        # lines are not additionally materialized as a list.
        return stmts_from_json([json.loads(line) for line in fh])
def stmts_to_json_file(stmts, fname, format='json', **kwargs):
    """Serialize a list of INDRA Statements into a JSON file.

    Parameters
    ----------
    stmts : list[indra.statement.Statements]
        The list of INDRA Statements to serialize into the JSON file.
    fname : str
        Path to the JSON file to serialize Statements into.
    format : Optional[str]
        One of 'json' to use regular JSON with indent=1 formatting or
        'jsonl' to put each statement on a new line without indents.
    """
    stmts_json = stmts_to_json(stmts, **kwargs)
    with open(fname, 'w') as fh:
        if format == 'json':
            # Single JSON document, lightly indented for readability.
            json.dump(stmts_json, fh, indent=1)
        else:
            # JSON-lines: one compact statement per line.
            for stmt_json in stmts_json:
                json.dump(stmt_json, fh)
                fh.write('\n')
def stmts_to_json(stmts_in, use_sbo=False, matches_fun=None):
    """Return the JSON-serialized form of one or more INDRA Statements.

    Parameters
    ----------
    stmts_in : Statement or list[Statement]
        A Statement or list of Statement objects to serialize into JSON.
    use_sbo : Optional[bool]
        If True, SBO annotations are added to each applicable element of
        the JSON. Default: False
    matches_fun : Optional[function]
        A custom function which, if provided, is used to construct the
        matches key which is then hashed and put into the return value.
        Default: None

    Returns
    -------
    json_dict : dict
        JSON-serialized INDRA Statements.
    """
    if isinstance(stmts_in, list):
        return [stmt.to_json(use_sbo=use_sbo, matches_fun=matches_fun)
                for stmt in stmts_in]
    # Single Statement: serialize to one dict.
    # NOTE(review): matches_fun is only forwarded in the list branch, matching
    # the long-standing behavior of this function -- confirm before changing.
    return stmts_in.to_json(use_sbo=use_sbo)
def _promote_support(sup_list, uuid_dict, on_missing='handle'):
"""Promote the list of support-related uuids to Statements, if possible."""
valid_handling_choices = ['handle', 'error', 'ignore']
if on_missing not in valid_handling_choices:
raise InputError('Invalid option for `on_missing_support`: \'%s\'\n'
'Choices are: %s.'
% (on_missing, str(valid_handling_choices)))
for idx, uuid in enumerate(sup_list):
if uuid in uuid_dict.keys():
sup_list[idx] = uuid_dict[uuid]
elif on_missing == 'handle':
sup_list[idx] = Unresolved(uuid)
elif on_missing == 'ignore':
sup_list.remove(uuid)
elif on_missing == 'error':
raise UnresolvedUuidError("Uuid %s not found in stmt jsons."
% uuid)
return
def draw_stmt_graph(stmts):
    """Render the attributes of a list of Statements as directed graphs.

    The layout works well for a single Statement or a few Statements at a time.
    This function displays the plot of the graph using plt.show().

    Parameters
    ----------
    stmts : list[indra.statements.Statement]
        A list of one or more INDRA Statements whose attribute graph should
        be drawn.
    """
    import networkx
    # matplotlib and pygraphviz are optional: bail out gracefully if missing.
    try:
        import matplotlib.pyplot as plt
    except Exception:
        logger.error('Could not import matplotlib, not drawing graph.')
        return
    try:  # This checks whether networkx has this package to work with.
        import pygraphviz
    except Exception:
        logger.error('Could not import pygraphviz, not drawing graph.')
        return
    import numpy
    # Merge each Statement's attribute graph into one graph for display.
    g = networkx.compose_all([stmt.to_graph() for stmt in stmts])
    plt.figure()
    plt.ion()
    # Lay the tree out left-to-right using graphviz 'dot'.
    g.graph['graph'] = {'rankdir': 'LR'}
    pos = networkx.drawing.nx_agraph.graphviz_layout(g, prog='dot')
    # Undirected copy for drawing; positions were computed on the digraph.
    g = g.to_undirected()
    # Draw nodes
    options = {
        'marker': 'o',
        's': 200,
        'c': [0.85, 0.85, 1],
        'facecolor': '0.5',
        'lw': 0,
    }
    ax = plt.gca()
    nodelist = list(g)
    xy = numpy.asarray([pos[v] for v in nodelist])
    node_collection = ax.scatter(xy[:, 0], xy[:, 1], **options)
    # Keep nodes above the edges.
    node_collection.set_zorder(2)
    # Draw edges
    networkx.draw_networkx_edges(g, pos, arrows=False, edge_color='0.5')
    # Draw labels
    edge_labels = {(e[0], e[1]): e[2].get('label') for e in g.edges(data=True)}
    networkx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels)
    node_labels = {n[0]: n[1].get('label') for n in g.nodes(data=True)}
    # Break long node labels roughly in half so they stay readable.
    for key, label in node_labels.items():
        if len(label) > 25:
            parts = label.split(' ')
            parts.insert(int(len(parts)/2), '\n')
            label = ' '.join(parts)
            node_labels[key] = label
    networkx.draw_networkx_labels(g, pos, labels=node_labels)
    # Hide the axes: only the graph itself should be visible.
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    plt.show()
pretty_print_max_width = None
def set_pretty_print_max_width(new_max):
    """Set the max display width for pretty prints, in characters.

    Pass an int to cap the width, or None to remove the cap.

    Raises
    ------
    ValueError
        If `new_max` is neither an int nor None.
    """
    global pretty_print_max_width
    if not (new_max is None or isinstance(new_max, int)):
        raise ValueError("Max width must be an integer or None.")
    pretty_print_max_width = new_max
def pretty_print_stmts(stmt_list: List[Statement],
                       stmt_limit: Optional[int] = None,
                       ev_limit: Optional[int] = 5,
                       width: Optional[int] = None) -> None:
    """Print a formatted list of statements along with evidence text.

    Requires the tabulate package (https://pypi.org/project/tabulate).

    Parameters
    ----------
    stmt_list : List[Statement]
        The list of INDRA Statements to be printed.
    stmt_limit : Optional[int]
        The maximum number of INDRA Statements to be printed. If None, all
        Statements are printed. (Default is None)
    ev_limit : Optional[int]
        The maximum number of Evidence to print for each Statement. If None, all
        evidence will be printed for each Statement. (Default is 5)
    width : Optional[int]
        Manually set the width of the table. If `None` the function will try to
        match the current terminal width using `os.get_terminal_size()`. If
        this fails the width defaults to 80 characters. The maximum width can
        be controlled by setting :data:`pretty_print_max_width` using the
        :func:`set_pretty_print_max_width` function. This is useful in
        Jupyter notebooks where the environment returns a terminal size
        of 80 characters regardless of the width of the window. (Default
        is None).
    """
    # Import some modules helpful for text formatting.
    from textwrap import TextWrapper
    from tabulate import tabulate
    from os import get_terminal_size
    # Try to get the actual number of columns in the terminal.
    if width is None:
        width = 80
        try:
            width = get_terminal_size().columns
        except Exception as e:
            logger.debug(f"Failed to get terminal size (using default "
                         f"{width}): {e}.")
        # Apply the maximum.
        if pretty_print_max_width is not None:
            assert isinstance(pretty_print_max_width, int)
            width = min(width, pretty_print_max_width)
    # Parameterize the text wrappers that format the ev text and the metadata.
    # Metadata gets a fixed 16-char column; evidence gets the remainder
    # (minus 2 characters of table padding).
    stmt_tr = TextWrapper(width=width)
    metadata_tr = TextWrapper(width=16)
    evidence_tr = TextWrapper(width=width - metadata_tr.width - 2)
    # Print the table.
    for i, s in enumerate(stmt_list[:stmt_limit]):
        # Print the Statement heading.
        stmt_str = f"[LIST INDEX: {i}] " + str(s)
        print(stmt_tr.fill(stmt_str))
        print("="*width)
        # Print the evidence
        for j, ev in enumerate(s.evidence[:ev_limit]):
            # Gather the metadata we want to display.
            metadata = [("EV INDEX", j), ("SOURCE", ev.source_api)]
            # Show only the first reference id found, in priority order.
            for id_type in ['PMID', 'PMCID', 'DOI']:
                if id_type in ev.text_refs:
                    metadata.append((id_type, ev.text_refs[id_type]))
                    break
            # Form the metadata string to fill out its allocated space.
            metadata_str = '\n'.join(line + ' '*(metadata_tr.width - len(line))
                                     for k, v in metadata
                                     for line in metadata_tr.wrap(f"{k}: {v}"))
            # Form the evidence string.
            if ev.text:
                text_str = evidence_tr.fill(ev.text)
            else:
                text_str = evidence_tr.fill("(No evidence text)")
            # Print the entire thing
            full_str = tabulate([[metadata_str, text_str]], tablefmt='plain')
            print(full_str)
            print('-'*width)
        print()
def print_stmt_summary(statements: Collection[Statement]):
    """Print a summary of a list of statements by statement type.

    Requires the tabulate package (https://pypi.org/project/tabulate).

    Parameters
    ----------
    statements : List[Statement]
        The list of INDRA Statements to be printed.
    """
    from tabulate import tabulate
    # Count statements by class name, most frequent first.
    type_counts = Counter(type(stmt).__name__ for stmt in statements)
    print(tabulate(type_counts.most_common(),
                   headers=["Statement Type", "Count"],
                   tablefmt='github'))
class UnresolvedUuidError(Exception):
    """Raised when a `supports`/`supported_by` uuid cannot be resolved to a
    Statement and the 'error' handling mode was requested."""
    pass
class InputError(Exception):
    """Raised when an invalid option value is passed to an IO function."""
    pass
| {
"repo_name": "bgyori/indra",
"path": "indra/statements/io.py",
"copies": "3",
"size": "12526",
"license": "bsd-2-clause",
"hash": -4281784553078690000,
"line_mean": 34.4844192635,
"line_max": 80,
"alpha_frac": 0.6084144978,
"autogenerated": false,
"ratio": 3.9601644008852355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6068578898685235,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Store']
import os
import sys
import thread
import threading
from collections import defaultdict, deque
from .database import Database
from .transaction import Transaction
class Store(object):
    """A container of Databases with per-thread transaction tracking.

    The store opens lazily: entering it as a context manager, or creating a
    database or transaction, opens it if needed. Closing the store closes
    every database that is still open.
    """

    def __init__(self, data_path=None):
        # Filesystem location backing this store (may be None).
        self.data_path = data_path
        self.opened = False
        # Open Database objects keyed by database name.
        self.databases = {}
        # Per-thread stacks of active transactions, keyed by thread id.
        self.transactions = defaultdict(deque)
        self.commiting_transactions = set()
        self.check_lock = threading.Lock()

    def __enter__(self):
        # Lazily open on entering the context.
        if not self.is_opened():
            self.open()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.is_opened():
            self.close()
        # Never suppress exceptions from the with-body.
        return False

    def get_path(self):
        """Return the configured data path."""
        return self.data_path

    def is_opened(self):
        """Return True once open() has been called and close() has not."""
        return self.opened

    def open(self):
        """Mark the store as opened."""
        self.opened = True

    def close(self):
        """Close every still-open database, then mark the store closed."""
        for db_name, db in self.databases.items():
            if db.is_opened():
                db.close()
        self.opened = False

    def database(self, db_name):
        """Return a new Database bound to this store, opening the store first
        if necessary. The database is registered under its name."""
        if not self.is_opened():
            self.open()
        db = Database(self, db_name)
        self.databases[db_name] = db
        return db

    def transaction(self):
        """Return a new Transaction bound to this store, opening the store
        first if necessary."""
        if not self.is_opened():
            self.open()
        return Transaction(self)

    def get_current_transaction(self):
        """Return the transaction currently running in the calling thread.

        Raises IndexError if the calling thread has no active transaction.
        """
        return self.transactions[thread.get_ident()][-1]
| {
"repo_name": "yadb/yadb",
"path": "backup/store/store.py",
"copies": "1",
"size": "1632",
"license": "mit",
"hash": 1701569158208975000,
"line_mean": 21.985915493,
"line_max": 57,
"alpha_frac": 0.568627451,
"autogenerated": false,
"ratio": 4.173913043478261,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0022356360384529397,
"num_lines": 71
} |
__all__ = ['string_converter', 'datetuple_converter', 'boolean_converter',
'file_converter','json_converter']
import csv
from cStringIO import StringIO
from datetime import date, datetime, time
from simplegeneric import generic
import schemaish
# Decimal support is optional: on interpreters without the decimal module,
# the Decimal-based converter defined later is simply omitted.
try:
    import decimal
    haveDecimal = True
except ImportError:
    haveDecimal = False
from convertish.util import SimpleTZInfo
class ConvertError(Exception):
    """
    Exception to indicate failure in converting values.
    """
    def __init__(self, message):
        Exception.__init__(self, message)
        self.message = message

    def __str__(self):
        return self.message

    # On Python 2, unicode(err) behaves the same as str(err).
    __unicode__ = __str__

    # Hide Python 2.6 deprecation warnings.
    # BaseException.message was deprecated in 2.6; shadowing it with a
    # property avoids the DeprecationWarning while keeping the attribute.
    def _get_message(self): return self._message
    def _set_message(self, message): self._message = message
    message = property(_get_message, _set_message)
class Converter(object):
    """Base class for bidirectional value converters.

    A converter translates between a schema type and an external
    representation (typically a string). Subclasses implement `from_type`
    and `to_type`.
    """

    def __init__(self, schema_type, **k):
        self.schema_type = schema_type
        # Optional per-instance options consumed by subclasses.
        self.converter_options = k.pop('converter_options', {})

    def from_type(self, value, converter_options={}):
        """
        convert from i.e. for NumberToString converter - from number to string
        """
        raise NotImplementedError()

    def to_type(self, value, converter_options={}):
        """
        convert to i.e. for NumberToString converter - to number from string
        """
        raise NotImplementedError()
class NullConverter(Converter):
    """Identity converter: values pass through unchanged in both directions."""

    def from_type(self, value, converter_options={}):
        return value

    def to_type(self, value, converter_options={}):
        return value
class NumberToStringConverter(Converter):
    """Convert between a numeric type and its string representation.

    Subclasses set `cast` to the numeric constructor and `type_string`
    to the human-readable type name used in error messages.
    """
    cast = None
    type_string = 'number'

    def from_type(self, value, converter_options={}):
        """Render the number as a string; None passes through unchanged."""
        if value is None:
            return None
        return str(value)

    def to_type(self, value, converter_options={}):
        """Parse the string into a number; None passes through unchanged."""
        if value is None:
            return None
        value = value.strip()
        # "Cast" the value to the correct type. For some strange reason,
        # Python's decimal.Decimal type raises an ArithmeticError when it's
        # given a dodgy value.
        try:
            return self.cast(value)
        except (ValueError, ArithmeticError):
            raise ConvertError("Not a valid %s"%self.type_string)
class IntegerToStringConverter(NumberToStringConverter):
    # Parse with int(); report failures as "Not a valid integer".
    cast = int
    type_string = 'integer'
class FloatToStringConverter(NumberToStringConverter):
    # Parse with float(); inherits the default "number" error wording.
    cast = float
# Only defined when the optional decimal module imported successfully above.
if haveDecimal:
    class DecimalToStringConverter(NumberToStringConverter):
        # decimal.Decimal raises ArithmeticError subclasses on bad input,
        # which the base class's to_type already catches.
        cast = decimal.Decimal
class FileToStringConverter(Converter):
    """
    Convert between a text File and a string.

    The file's content is assumed to be a UTF-8 encoded string. Anything else
    will almost certainly break the code and/or page.

    Converting from a string to a File instance returns a new File with a
    default name, content.txt, of type text/plain.
    """
    def from_type(self, value, converter_options={}):
        # None passes through, matching the other converters in this module.
        if value is None:
            return None
        if not value.file:
            raise ValueError('Cannot convert to string without a file-like '
                             'object to read from')
        # Decode the raw bytes as UTF-8 (see class docstring).
        return value.file.read().decode('utf-8')

    def to_type(self, value, converter_options={}):
        if value is None:
            return None
        value = value.strip()
        # Wrap the encoded text in an in-memory file with a default name/type.
        return schemaish.type.File(StringIO(value.encode('utf-8')),
                                   'content.txt', 'text/plain')
class BooleanToStringConverter(Converter):
    """Convert between a boolean and the literal strings 'True'/'False'."""

    def from_type(self, value, converter_options={}):
        """Render the boolean; None passes through unchanged."""
        if value is None:
            return None
        return 'True' if value else 'False'

    def to_type(self, value, converter_options={}):
        """Parse 'True'/'False' (after stripping); None passes through."""
        if value is None:
            return None
        value = value.strip()
        # Only the two exact literals are accepted.
        if value not in ('True', 'False'):
            raise ConvertError('%r should be either True or False'%value)
        return value == 'True'
class DateToStringConverter(Converter):
    """Convert between datetime.date and an ISO-8601 'YYYY-MM-DD' string."""

    def from_type(self, value, converter_options={}):
        # None passes through unchanged.
        return None if value is None else value.isoformat()

    def to_type(self, value, converter_options={}):
        if value is None:
            return None
        # Strip surrounding whitespace before parsing.
        return _parse_date(value.strip())
class TimeToStringConverter(Converter):
    """Convert between datetime.time and an ISO-8601 time string."""

    def from_type(self, value, converter_options={}):
        # None passes through unchanged.
        return None if value is None else value.isoformat()

    def to_type(self, value, converter_options={}):
        if value is None:
            return None
        # Strip surrounding whitespace before parsing.
        return _parse_time(value.strip())
class DateTimeToStringConverter(Converter):
    """Convert between datetime.datetime and an ISO-8601 string.

    Matches the None-passthrough and whitespace-stripping contract of every
    other converter in this module (previously this class alone raised
    AttributeError on None input).
    """

    def from_type(self, value, converter_options={}):
        # None passes through, consistent with the sibling converters.
        if value is None:
            return None
        return value.isoformat()

    def to_type(self, value, converter_options={}):
        if value is None:
            return None
        value = value.strip()
        # ISO format separates the date and time parts with 'T'.
        d, t = value.split('T')
        d = _parse_date(d)
        t = _parse_time(t)
        return datetime(d.year, d.month, d.day, t.hour, t.minute, t.second,
                        t.microsecond, t.tzinfo)
def _parse_date(value):
    # Parse an ISO-style 'YYYY-MM-DD' string into a datetime.date.
    # Raises ConvertError on malformed input.
    try:
        y, m, d = [int(p) for p in value.split('-')]
    except ValueError:
        # Non-integer parts or the wrong number of '-'-separated fields.
        raise ConvertError('Invalid date')
    try:
        value = date(y, m, d)
    except ValueError, e:
        # Out-of-range components (e.g. month 13).
        raise ConvertError('Invalid date: '+str(e))
    return value
def _parse_time(value):
    # Parse an ISO-style time string ('HH:MM[:SS][.frac][+HH:MM]') into a
    # datetime.time. Raises ConvertError on malformed input.
    # Parse the timezone offset
    if '+' in value:
        value, tz = value.split('+')
        tzdir = 1
    elif '-' in value:
        # NOTE(review): assumes '-' only ever appears as the timezone sign;
        # a stray '-' elsewhere in the value would be misparsed -- confirm.
        value, tz = value.split('-')
        tzdir = -1
    else:
        tz = None
    if tz:
        hours, minutes = tz.split(':')
        tz = SimpleTZInfo(tzdir*((int(hours)*60) + int(minutes)))
    # Parse milliseconds.
    # NOTE(review): the fractional part is passed straight to time() as
    # microseconds without scaling (e.g. '.5' becomes 5us) -- confirm intended.
    if '.' in value:
        value, ms = value.split('.')
    else:
        ms = 0
    # Parse hours, minutes and seconds.
    try:
        parts = value.split(':')
        if len(parts)<2 or len(parts)>3:
            raise ValueError()
        if len(parts) == 2:
            # Seconds are optional and default to zero.
            h, m = parts
            s = 0
        else:
            h, m, s = parts
        h, m, s, ms = int(h), int(m), int(s), int(ms)
    except:
        # HACK: bare except also hides unexpected errors; kept as-is.
        raise ConvertError('Invalid time')
    try:
        value = time(h, m, s, ms, tz)
    except ValueError, e:
        # Out-of-range components (e.g. hour 25).
        raise ConvertError('Invalid time: '+str(e))
    return value
class DateToDateTupleConverter(Converter):
    """Convert between datetime.date and a (year, month, day) tuple."""

    def from_type(self, value, converter_options={}):
        # None passes through unchanged.
        if value is None:
            return None
        return value.year, value.month, value.day

    def to_type(self, value, converter_options={}):
        if value is None:
            return None
        try:
            try:
                # Each component must parse as an integer.
                V = [int(v) for v in value]
            except ValueError:
                raise ConvertError('Invalid Number')
            value = date(*V)
        except (TypeError, ValueError), e:
            # Wrong arity or out-of-range components.
            raise ConvertError('Invalid date: '+str(e))
        return value
def getDialect(delimiter=','):
    """Return a csv dialect based on csv.excel with a custom field delimiter."""
    class Dialect(csv.excel):
        def __init__(self, *a, **k):
            # Pull our extra keyword out before delegating to csv.excel.
            custom_delimiter = k.pop('delimiter', ',')
            self.delimiter = custom_delimiter
            csv.excel.__init__(self, *a, **k)
    return Dialect(delimiter=delimiter)
def convert_csvrow_to_list(row, delimiter=','):
    # Parse a single CSV row (unicode) into a list of unicode cells.
    sf = StringIO()
    # Python 2's csv module works on byte strings, so encode before parsing
    # and decode each cell afterwards.
    sf.write(row.encode('utf-8'))
    sf.seek(0,0)
    reader = csv.reader(sf, dialect=getDialect(delimiter=delimiter))
    return list(_decode_row(reader.next()))
def convert_list_to_csvrow(l, delimiter=','):
    # Serialize a list of unicode cells into a single CSV row (unicode).
    sf = StringIO()
    writer = csv.writer(sf, dialect=getDialect(delimiter=delimiter))
    # Encode cells to bytes for Python 2's csv module; decode the result back.
    writer.writerow(list(_encode_row(l)))
    sf.seek(0,0)
    return sf.read().strip().decode('utf-8')
def _encode_row(row, encoding='utf-8'):
for cell in row:
if cell is not None:
cell = cell.encode(encoding)
yield cell
def _decode_row(row, encoding='utf-8'):
for cell in row:
yield cell.decode(encoding)
class SequenceToStringConverter(Converter):
    """
    Convert a schemaish Sequence to and from CSV text.

    I'd really like to have the converter options on the init but ruledispatch
    won't let me pass keyword arguments
    """
    def __init__(self, schema_type, **k):
        Converter.__init__(self, schema_type, **k)

    def from_type(self, value, converter_options={}):
        # Serialize a (possibly nested) sequence to CSV text; None passes
        # through unchanged.
        if value is None:
            return None
        delimiter = converter_options.get('delimiter',',')
        if isinstance(self.schema_type.attr, schemaish.Sequence):
            # Sequence of sequences: one CSV line per inner sequence, every
            # cell converted with the inner item type's string converter.
            out = []
            for line in value:
                lineitems = [
                    string_converter(self.schema_type.attr.attr).from_type(item) \
                    for item in line]
                linestring = convert_list_to_csvrow( \
                    lineitems, delimiter=delimiter)
                out.append(linestring)
            return '\n'.join(out)
        elif isinstance(self.schema_type.attr, schemaish.Tuple):
            # Sequence of tuples: each position converted with its own type.
            out = []
            for line in value:
                lineitems = [
                    string_converter(self.schema_type.attr.attrs[n]).from_type(item) \
                    for n,item in enumerate(line) ]
                linestring = convert_list_to_csvrow( \
                    lineitems, delimiter=delimiter)
                out.append(linestring)
            return '\n'.join(out)
        else:
            # Flat sequence: a single CSV row.
            value = [string_converter(self.schema_type.attr).from_type(v) \
                     for v in value]
            return convert_list_to_csvrow(value, delimiter=delimiter)

    def to_type(self, value, converter_options={}):
        # Parse CSV text back into a (possibly nested) sequence; None passes
        # through unchanged.
        if value is None:
            return None
        value = value.strip()
        delimiter = converter_options.get('delimiter',',')
        if isinstance(self.schema_type.attr, schemaish.Sequence):
            # One inner list per CSV line.
            out = []
            for line in value.split('\n'):
                l = convert_csvrow_to_list(line, delimiter=delimiter)
                convl = [
                    string_converter(self.schema_type.attr.attr).to_type(v) \
                    for v in l]
                out.append( convl )
            return out
        if isinstance(self.schema_type.attr, schemaish.Tuple):
            # One inner tuple per CSV line, positions typed individually.
            out = []
            for line in value.split('\n'):
                l = convert_csvrow_to_list(line, delimiter=delimiter)
                convl = [string_converter(self.schema_type.attr.attrs[n]).to_type(v) \
                    for n,v in enumerate(l)]
                out.append( tuple(convl) )
            return out
        else:
            # A flat sequence must fit on one line unless the delimiter
            # itself is a newline.
            if delimiter != '\n' and len(value.split('\n')) > 1:
                raise ConvertError("More than one line found" \
                       " for csv with delimiter=\'%s\'"%delimiter)
            if delimiter == '\n':
                out = value.splitlines()
            else:
                out = convert_csvrow_to_list(value, delimiter=delimiter)
            return [string_converter(self.schema_type.attr).to_type(v) \
                    for v in out]
class TupleToStringConverter(Converter):
    """
    Convert a tuple to and from a string.
    XXX tim: I'd really like to have the converter options on the init but ruledispatch
    won't let me pass keyword arguments
    XXX matt: the default to_type items should be configurable but None is
    better than '' because it doesn't crash the item's converter ;-).
    """
    def __init__(self, schema_type, **k):
        Converter.__init__(self, schema_type, **k)
    def from_type(self, value, converter_options={}):
        """Render the tuple as one CSV row, converting each position with its
        own attr converter; None passes through."""
        if value is None:
            return None
        delimiter = converter_options.get('delimiter',',')
        lineitems = [string_converter(self.schema_type.attrs[n]).from_type(item) \
                     for n,item in enumerate(value)]
        return convert_list_to_csvrow(lineitems, delimiter=delimiter)
    def to_type(self, value, converter_options={}):
        """Parse one CSV row back into a tuple; the cell count must match the
        schema's attr count exactly.  Blank cells become None."""
        if value is None:
            return None
        value = value.strip()
        delimiter = converter_options.get('delimiter',',')
        l = convert_csvrow_to_list(value, delimiter=delimiter)
        if len(l) > len(self.schema_type.attrs):
            raise ConvertError('Too many arguments')
        if len(l) < len(self.schema_type.attrs):
            raise ConvertError('Too few arguments')
        def convert_or_none(n, v):
            # Empty cells map to None instead of '' (see class docstring).
            v = v.strip()
            if not v:
                return None
            return string_converter(self.schema_type.attrs[n]).to_type(v)
        return tuple(convert_or_none(n, v) for (n, v) in enumerate(l))
class TupleToListConverter(Converter):
    """Convert a tuple to a plain list and back (e.g. for JSON round-trips)."""
    def __init__(self, schema_type, **k):
        Converter.__init__(self, schema_type, **k)
    def from_type(self, value, converter_options={}):
        """Return *value* as a list; None passes through unchanged."""
        return None if value is None else list(value)
    def to_type(self, value, converter_options={}):
        """Return *value* as a tuple; None passes through unchanged."""
        return None if value is None else tuple(value)
####
#
# String Converter
@generic
def string_converter(schema_type):
    """Generic dispatcher: return the Converter that maps *schema_type* to
    and from a string representation.  Concrete cases are registered below
    via ``when_type``."""
    pass
@string_converter.when_type(schemaish.String)
def string_to_string(schema_type):
    # Strings need no conversion at all.
    return NullConverter(schema_type)
@string_converter.when_type(schemaish.Integer)
def int_to_string(schema_type):
    return IntegerToStringConverter(schema_type)
@string_converter.when_type(schemaish.Float)
def float_to_string(schema_type):
    return FloatToStringConverter(schema_type)
@string_converter.when_type(schemaish.Decimal)
def decimal_to_string(schema_type):
    return DecimalToStringConverter(schema_type)
@string_converter.when_type(schemaish.Date)
def date_to_string(schema_type):
    return DateToStringConverter(schema_type)
@string_converter.when_type(schemaish.Time)
def time_to_string(schema_type):
    return TimeToStringConverter(schema_type)
@string_converter.when_type(schemaish.DateTime)
def datetime_to_string(schema_type):
    return DateTimeToStringConverter(schema_type)
@string_converter.when_type(schemaish.Sequence)
def sequence_to_string(schema_type):
    # Sequences are rendered as CSV rows/lines.
    return SequenceToStringConverter(schema_type)
@string_converter.when_type(schemaish.Tuple)
def tuple_to_string(schema_type):
    return TupleToStringConverter(schema_type)
@string_converter.when_type(schemaish.Boolean)
def boolean_to_string(schema_type):
    return BooleanToStringConverter(schema_type)
@string_converter.when_type(schemaish.File)
def file_to_string(schema_type):
    return FileToStringConverter(schema_type)
####
#
# Date Tuple Converter
@generic
def datetuple_converter(schema_type):
    """Generic dispatcher: Converter mapping *schema_type* to/from a
    (year, month, day) style tuple."""
    pass
@datetuple_converter.when_type(schemaish.Date)
def date_to_datetuple(schema_type):
    return DateToDateTupleConverter(schema_type)
####
#
# Boolean Converter
@generic
def boolean_converter(schema_type):
    """Generic dispatcher: Converter mapping *schema_type* to/from a bool."""
    pass
@boolean_converter.when_type(schemaish.Boolean)
def boolean_to_boolean(schema_type):
    # Booleans are already booleans; nothing to convert.
    return NullConverter(schema_type)
@generic
def file_converter(schema_type):
    """Generic dispatcher: Converter mapping *schema_type* to/from a file."""
    pass
@file_converter.when_type(schemaish.File)
def file_to_file(schema_type):
    return NullConverter(schema_type)
####
#
# JSON Converter
class DateToJSONConverter(Converter):
    """Convert ``datetime.date`` to/from a JSON-friendly dict.

    NOTE(review): Python 2 only -- uses ``except (..., ...), e`` syntax.
    """
    def from_type(self, value, converter_options={}):
        # Tag the dict with '__type__' so it can be recognized on the way back.
        if value is None:
            return None
        return {'__type__':'date','year':value.year, 'month':value.month, 'day':value.day}
    def to_type(self, value, converter_options={}):
        # Rebuild a date from the dict; both bad numbers and impossible
        # dates surface as ConvertError.
        if value is None:
            return None
        try:
            try:
                year, month, day = int(value['year']) ,int(value['month']) ,int(value['day']) ,
            except ValueError:
                raise ConvertError('Invalid Number')
            value = date(year, month, day)
        except (TypeError, ValueError), e:
            raise ConvertError('Invalid date: '+str(e))
        return value
class TimeToJSONConverter(Converter):
    """Convert ``datetime.time`` to/from a JSON-friendly dict.

    NOTE(review): Python 2 only -- uses ``except (..., ...), e`` syntax.
    """
    def from_type(self, value, converter_options={}):
        if value is None:
            return None
        return {'__type__':'time','hour':value.hour, 'minute':value.minute, 'second':value.second,'microsecond':value.microsecond}
    def to_type(self, value, converter_options={}):
        if value is None:
            return None
        try:
            try:
                # 'microsecond' is optional in the incoming dict; default 0.
                h, m, s, ms = int(value['hour']), int(value['minute']), int(value['second']) ,int(value.get('microsecond',0))
            except ValueError:
                raise ConvertError('Invalid Number')
            value = time(h, m, s, ms)
        except (TypeError, ValueError), e:
            raise ConvertError('Invalid time: '+str(e))
        return value
@generic
def json_converter(schema_type):
    """Generic dispatcher: Converter mapping *schema_type* to/from
    JSON-serializable values.  Concrete cases are registered below."""
    pass
@json_converter.when_type(schemaish.String)
def string_to_json(schema_type):
    return NullConverter(schema_type)
@json_converter.when_type(schemaish.Integer)
def int_to_json(schema_type):
    return NullConverter(schema_type)
@json_converter.when_type(schemaish.Float)
def float_to_json(schema_type):
    return NullConverter(schema_type)
# XXX
@json_converter.when_type(schemaish.Decimal)
def decimal_to_json(schema_type):
    # NOTE(review): Decimal is passed through unconverted (NullConverter);
    # the XXX above suggests this was known to be provisional.
    return NullConverter(schema_type)
@json_converter.when_type(schemaish.Date)
def date_to_json(schema_type):
    return DateToJSONConverter(schema_type)
@json_converter.when_type(schemaish.Time)
def time_to_json(schema_type):
    return TimeToJSONConverter(schema_type)
@json_converter.when_type(schemaish.Sequence)
def sequence_to_json(schema_type):
    return NullConverter(schema_type)
@json_converter.when_type(schemaish.Tuple)
def tuple_to_json(schema_type):
    # JSON has no tuple type; round-trip through a list.
    return TupleToListConverter(schema_type)
@json_converter.when_type(schemaish.Boolean)
def boolean_to_json(schema_type):
    return NullConverter(schema_type)
@json_converter.when_type(schemaish.File)
def file_to_json(schema_type):
    return FileToStringConverter(schema_type)
| {
"repo_name": "ish/convertish",
"path": "convertish/convert.py",
"copies": "1",
"size": "18069",
"license": "bsd-3-clause",
"hash": -279441433810245900,
"line_mean": 28.6699507389,
"line_max": 130,
"alpha_frac": 0.6055122032,
"autogenerated": false,
"ratio": 3.982587612960106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5088099816160105,
"avg_score": null,
"num_lines": null
} |
# Demo script: basic Python string features (creation, escapes, join/split,
# replace, repr).  All strings in python are unicode strings.
# create string
str1 = "string one"
str2 = 'string two'
print(str1, str2)
# ascii char
c1_m = 'm'
# octal escape for 'm' (0o155 == 109)
c2_m = '\155'
# hex escape for 'm' (0x6D == 109)
c3_m = '\x6D'
# unicode name escape (python 3)
c4_m = '\N{LATIN SMALL LETTER M}'
print(c1_m, c2_m, c3_m, c4_m)
# join some words into a space separated sentence
space_sentence = " ".join(["You", "can't", "always", "get", "what", "you", "want"])
print (space_sentence)
# again with underscores
print("_".join(["You", "can't", "always", "get", "what", "you", "want"]))
# split a sentence into words
words = space_sentence.split()
print (words)
# split() with no argument parses out tabs, new lines, etc...
sentence_2 = "Happy happy\t Joy joy \n lol \r\n"
words = sentence_2.split()
print (words)
# replacing strings: replace() calls can be chained
doc="{my_doc_contents}"
print(doc.replace('{', '<my_doc>\n ').replace('}', '\n</my_doc>\n'))
# Return the string representation of an object - like an object's to string method.
# it returns the formal string representation of the object.
test = [1, 2, 3, '4', ['five', 'six']]
print (repr(test))
# works against python functions and almost any other python object
print (repr(len))
| {
"repo_name": "kevgraham7/toolbox",
"path": "python/samples/basics/strings_demo.py",
"copies": "1",
"size": "1144",
"license": "apache-2.0",
"hash": 6883313253021699000,
"line_mean": 23.8695652174,
"line_max": 84,
"alpha_frac": 0.652972028,
"autogenerated": false,
"ratio": 2.838709677419355,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3991681705419355,
"avg_score": null,
"num_lines": null
} |
__all__ = ['strip_comments']
def strip_comments(contents):
"""Strips the comments from coq code in contents.
The strings in contents are only preserved if there are no
comment-like tokens inside of strings. Stripping should be
successful and correct, regardless of whether or not there are
comment-like tokens in strings.
The behavior of this method is undefined if there are any
notations which change the meaning of '(*', '*)', or '"'.
Note that we take some extra care to leave *) untouched when it
does not terminate a comment.
"""
contents = contents.replace('(*', ' (* ').replace('*)', ' *) ')
tokens = contents.split(' ')
rtn = []
is_string = False
comment_level = 0
for token in tokens:
do_append = (comment_level == 0)
if is_string:
if token.count('"') % 2 == 1: # there are an odd number of '"' characters, indicating that we've ended the string
is_string = False
elif token.count('"') % 2 == 1: # there are an odd number of '"' characters, so we're starting a string
is_string = True
elif token == '(*':
comment_level += 1
do_append = False
elif comment_level > 0 and token == '*)':
comment_level -= 1
if do_append:
rtn.append(token)
return ' '.join(rtn).replace(' (* ', '(*').replace(' *) ', '*)').strip('\n\t ')
| {
"repo_name": "JasonGross/coq-tools",
"path": "strip_comments.py",
"copies": "1",
"size": "1434",
"license": "mit",
"hash": -3887989297886766000,
"line_mean": 38.8333333333,
"line_max": 125,
"alpha_frac": 0.5808926081,
"autogenerated": false,
"ratio": 4.1686046511627906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002193562610229277,
"num_lines": 36
} |
__all__ = ["StructureFile", "StructureFileData"]
from nbtlib import File, CompoundSchema, tag
class StructureFileData(CompoundSchema):
    """Schema that matches the Minecraft structure file format."""
    class BlockState(CompoundSchema):
        # A palette entry: block id plus its (optional) block-state properties.
        schema = {
            "Name": tag.String,
            "Properties": tag.Compound,
        }
    class Block(CompoundSchema):
        # A placed block: index into the palette, position, optional block-entity NBT.
        schema = {
            "state": tag.Int,
            "pos": tag.List[tag.Int],
            "nbt": tag.Compound,
        }
    class Entity(CompoundSchema):
        # An entity: exact position, block position, and its NBT payload.
        schema = {
            "pos": tag.List[tag.Double],
            "blockPos": tag.List[tag.Int],
            "nbt": tag.Compound,
        }
    schema = {
        "DataVersion": tag.Int,
        "author": tag.String,
        "size": tag.List[tag.Int],
        "palette": tag.List[BlockState],
        "palettes": tag.List[tag.List[BlockState]],
        "blocks": tag.List[Block],
        "entities": tag.List[Entity],
    }
class StructureFile(File, CompoundSchema):
    """Class representing a Minecraft structure file.

    The file's single root tag (empty name) holds a StructureFileData
    compound; structure files are always gzip-compressed.
    """
    schema = {"": StructureFileData}
    strict = True
    def __init__(self, structure_data=None, *, filename=None):
        # Wrap the payload under the empty root-tag name; default to an
        # empty compound when no data is given.
        super().__init__({"": structure_data or {}}, gzipped=True, filename=filename)
    @classmethod
    def load(cls, filename):
        """Load a (gzipped) structure file from *filename*."""
        return super().load(filename, gzipped=True)
| {
"repo_name": "vberlier/nbtlib",
"path": "nbtlib/contrib/minecraft/structure.py",
"copies": "1",
"size": "1383",
"license": "mit",
"hash": 1450197719683052000,
"line_mean": 25.5961538462,
"line_max": 85,
"alpha_frac": 0.5726681128,
"autogenerated": false,
"ratio": 3.928977272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022361359570661896,
"num_lines": 52
} |
__all__ = ['SUB']
from .base import heads_precedence, ArithmeticHead
class SubHead(ArithmeticHead):
    """
    SubHead represents subtraction n-ary operation where operands is
    given as a n-tuple of expressions.

    NOTE(review): this class references ``NUMBER`` and the module-level
    singleton ``SUB``, neither of which appears in this module's visible
    imports -- presumably injected into the namespace elsewhere; confirm.
    """
    op_mth = '__sub__'
    op_rmth = '__rsub__'
    def new(self, cls, operands, evaluate=True):
        """Construct a SUB expression; 1 operand collapses to itself,
        0 operands to the number 0."""
        n = len(operands)
        if n==1:
            return operands[0]
        if n==0:
            return cls(NUMBER, 0)
        return cls(self, operands)
    def __repr__(self): return 'SUB'
    def reevaluate(self, cls, operands):
        """Re-fold the operand tuple through the '-' operator."""
        r = operands[0] if operands else cls(NUMBER, 0)
        for op in operands[1:]:
            r -= op
        return r
    def data_to_str_and_precedence(self, cls, operands):
        """Render the subtraction as a string together with its precedence,
        parenthesizing operands of lower/equal precedence."""
        m = len(operands)
        if m==0:
            return '0', heads_precedence.NUMBER
        if m==1:
            op = operands[0]
            return op.head.data_to_str_and_precedence(cls, op.data)
        sub_p = heads_precedence.SUB
        r = ''
        for op in operands:
            t,t_p = op.head.data_to_str_and_precedence(cls, op.data)
            if not r:
                # First operand: no operator, just parenthesize if needed.
                r += '(' + t + ')' if t_p < sub_p else t
            elif t.startswith('-') and t_p > sub_p:
                # Subtracting a negative prints as addition: a - (-b) -> a + b.
                r += ' + ' + t[1:]
            else:
                r += ' - (' + t + ')' if t_p <= sub_p else ' - ' + t
        return r, sub_p
    def walk(self, func, cls, data, target):
        """Apply *func* bottom-up; rebuild the SUB node only when some
        operand actually changed."""
        l = []
        flag = False
        for op in data:
            o = op.head.walk(func, cls, op.data, op)
            if op is not o:
                flag = True
            l.append(o)
        if flag:
            r = SUB.new(cls, l)
            return func(cls, r.head, r.data, r)
        return func(cls, self, data, target)
| {
"repo_name": "pearu/sympycore",
"path": "sympycore/heads/sub.py",
"copies": "1",
"size": "1781",
"license": "bsd-3-clause",
"hash": 4989315778296697000,
"line_mean": 27.7258064516,
"line_max": 68,
"alpha_frac": 0.4862436833,
"autogenerated": false,
"ratio": 3.5197628458498023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9456715270887459,
"avg_score": 0.009858251652468435,
"num_lines": 62
} |
__all__ = ['summarize', 'unite', 'subtract_bg']
import numbers
import os
import sys
import traceback
import ipy_table
import matplotlib
import matplotlib.cm
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from IPython.core.getipython import get_ipython
from IPython.display import display
from mpl_toolkits.axes_grid import make_axes_locatable
from sastool.classes2 import Curve, Exposure
from sastool.libconfig import qunit
from sastool.misc.easylsq import FixedParameter, nonlinear_odr
from sastool.misc.errorvalue import ErrorValue
from .atsas import datcmp
from .calculation import correlmatrix
from .io import get_different_distances, load_exposure
from .plotting import plotsascurve
from .utils import print_abscissavalue, putlogo, writemarkdown
def _collect_data_for_summarization(headers, raw, reintegrate, qrange):
    """Load the 2D exposures for *headers*, derive/load their 1D curves and
    average the exposures.

    Returns (curves, averaged_exposure, headers_used).

    NOTE(review): when *headers* is empty this returns ``None`` (bare
    ``return``) while callers unpack a 3-tuple -- confirm callers never pass
    an empty list.
    """
    ip = get_ipython()
    data1d = []
    data2d = 0  # accumulator; exposures are summed onto this below
    headersout = []
    if not headers:
        return
    for head in headers:
        try:
            # 'mask_override' is an optional user hook in the IPython namespace.
            mo = ip.user_ns['mask_override'](head)
        except KeyError:
            mo = None
        ex = None
        last_exception = None
        try:
            ex = load_exposure(head.fsn, raw=raw, processed=not raw)
            assert isinstance(ex, Exposure)
            if mo is not None:
                try:
                    ex.mask = ex.loader.loadmask(mo)
                except FileNotFoundError:
                    print('Could not load mask: %s' % mo)
                    # Re-raise so the outer handler records this exposure as unloadable.
                    raise FileNotFoundError('Could not load mask: %s' % mo)
        except FileNotFoundError as exc:
            last_exception = sys.exc_info()
        if ex is None:
            # Loading failed: report it and record the FSN as bad.
            print('Could not load {} 2D file for FSN {:d}. Exception: {}'.format(
                ['processed', 'raw'][raw], head.fsn, '\n'.join(traceback.format_exception(*last_exception))))
            ip.user_ns['badfsns'] = set(ip.user_ns['badfsns'])
            ip.user_ns['badfsns'].add(head.fsn)
            continue
        ex.header = head
        curve = None
        if not reintegrate:
            # Try every loader of the matching kind until one yields a curve.
            for l in [l_ for l_ in ip.user_ns['_loaders'] if l_.processed != raw]:
                try:
                    curve = l.loadcurve(head.fsn)
                    break
                except FileNotFoundError:
                    continue
            if curve is None:
                print('Cannot load curve for FSN %d: reintegrating.' % head.fsn)
        if curve is None:
            # this happens if reintegrate==True or if reintegrate==False but the curve could not be loaded.
            curve = ex.radial_average(qrange, errorpropagation=3,
                                      abscissa_errorpropagation=3, raw_result=False)
        curve = curve.sanitize()
        data1d.append(curve)
        data1d[-1].save(os.path.join(ip.user_ns['saveto_dir'], 'curve_%05d.txt' % head.fsn))
        # Also save a bare three-column (q, I, dI) text file.
        mat = np.zeros((len(data1d[-1]), 3))
        mat[:, 0] = data1d[-1].q
        mat[:, 1] = data1d[-1].Intensity
        mat[:, 2] = data1d[-1].Error
        np.savetxt(os.path.join(ip.user_ns['saveto_dir'], 'curve_%s_%05d.dat' % (head.title, head.fsn)), mat)
        del mat
        data2d = data2d + ex
        headersout.append(ex.header)
    # NOTE(review): raises ZeroDivisionError if every exposure failed to load.
    data2d /= len(data1d)
    return data1d, data2d, headersout
def _stabilityassessment(headers, data1d, dist, fig_correlmatrices, correlmatrixaxes, std_multiplier,
                         correlmatrix_colormap,
                         correlmatrix_filename, logarithmic_correlmatrix=True, cormaptest=True):
    """Assess sample stability over the exposures in *data1d*.

    Plots and saves the correlation matrix, builds a per-exposure quality
    table and returns (badfsns, badfsns_datcmp, table, rowavg).
    """
    # calculate and plot correlation matrix
    cmatrix, badidx, rowavg = correlmatrix(data1d, std_multiplier, logarithmic_correlmatrix)
    rowavgmean = rowavg.mean()
    rowavgstd = rowavg.std()
    writemarkdown('#### Assessing sample stability')
    writemarkdown("- Mean of row averages: " + str(rowavgmean))
    writemarkdown("- Std of row averages: " + str(rowavgstd) + ' (%.2f %%)' % (rowavgstd / rowavgmean * 100))
    img = correlmatrixaxes.imshow(cmatrix, interpolation='nearest', cmap=matplotlib.cm.get_cmap(correlmatrix_colormap))
    # Attach the colorbar in a dedicated axes to the right of the matrix.
    cax = make_axes_locatable(correlmatrixaxes).append_axes('right', size="5%", pad=0.1)
    fig_correlmatrices.colorbar(img, cax=cax)
    fsns = [h.fsn for h in headers]
    correlmatrixaxes.set_title('%.2f mm' % dist)
    correlmatrixaxes.set_xticks(list(range(len(data1d))))
    correlmatrixaxes.set_xticklabels([str(f) for f in fsns], rotation='vertical')
    correlmatrixaxes.set_yticks(list(range(len(data1d))))
    correlmatrixaxes.set_yticklabels([str(f) for f in fsns])
    np.savez_compressed(correlmatrix_filename,
                        correlmatrix=cmatrix, fsns=np.array(fsns))
    # Report table on sample stability
    tab = [['FSN', 'Date', 'Discrepancy', 'Relative discrepancy ((x-mean(x))/std(x))', 'Quality', 'Quality (cormap)']]
    badfsns = []
    badfsns_datcmp = []
    if cormaptest:
        matC, matp, matpadj, datcmp_ok = datcmp(*data1d)
    else:
        # Without the cormap test, fall back to the correlation-matrix verdict.
        datcmp_ok = [not x for x in badidx]
    for h, bad, discr, dcmp_ok in zip(headers, badidx, rowavg, datcmp_ok):
        # Check mark for good exposures, crosses for outliers.
        tab.append([h.fsn, h.date.isoformat(), discr, (discr - rowavgmean) / rowavgstd,
                    ["\u2713", "\u2718\u2718\u2718\u2718\u2718"][bad],
                    ["\u2713", "\u2718\u2718\u2718\u2718\u2718"][dcmp_ok != 1]])
        if bad:
            badfsns.append(h.fsn)
        if (not dcmp_ok and not np.isnan(dcmp_ok)):
            badfsns_datcmp.append(h.fsn)
    tab = ipy_table.IpyTable(tab)
    tab.apply_theme('basic')
    return badfsns, badfsns_datcmp, tab, rowavg
def summarize(reintegrate=True, dist_tolerance=3, qranges=None,
              samples=None, raw=False, late_radavg=True, graph_ncols=3,
              std_multiplier=3, graph_extension='png',
              graph_dpi=80, correlmatrix_colormap='coolwarm',
              image_colormap='viridis', correlmatrix_logarithmic=True, cormaptest=True):
    """Summarize scattering patterns and curves for all samples defined
    by the global `allsamplenames`.

    Inputs:
        reintegrate (bool, default=True): if the curves are to be obained
            by reintegrating the patterns. Otherwise 1D curves are loaded.
        dist_tolerance (float, default=3): sample-to-detector distances
            nearer than this are considered the same
        qranges (dict): a dictionary mapping approximate sample-to-detector
            distances (within dist_tolerance) to one-dimensional np.ndarrays
            of the desired q-range of the reintegration.
        samples (list or None): the names of the samples to summarize. If
            None, all samples defined by ``allsamplenames`` are used.
        raw (bool, default=False): if raw images are to be treated instead
            the evaluated ones (default).
        late_radavg (bool, default=True): if the scattering curves are to
            be calculated from the summarized scattering pattern. If False,
            scattering curves are calculated from each pattern and will be
            averaged.
        graph_ncols: the number of columns in graphs (2D patterns,
            correlation matrices)
        std_multiplier: if the absolute value of the relative discrepancy
            is larger than this limit, the exposure is deemed an outlier.
        graph_extension: the extension of the produced hardcopy files.
        graph_dpi: resolution of the graphs
        correlmatrix_colormap: name of the colormap to be used for the
            correlation matrices (resolved by matplotlib.cm.get_cmap())
        image_colormap: name of the colormap to be used for the scattering
            patterns (resolved by matplotlib.cm.get_cmap())
        correlmatrix_logarithmic: if the correlation matrix has to be
            calculated from the logarithm of the intensity.
    """
    if qranges is None:
        qranges = {}
    ip = get_ipython()
    data2d = {}
    data1d = {}
    headers_tosave = {}
    rowavg = {}
    if raw:
        writemarkdown('# Summarizing RAW images.')
        headers = ip.user_ns['_headers']['raw']
        rawpart = '_raw'  # this will be added in the filenames saved
    else:
        writemarkdown('# Summarizing CORRECTED images.')
        headers = ip.user_ns['_headers']['processed']
        rawpart = ''  # nothing will be added in the filenames saved
    if samples is None:
        samples = sorted(ip.user_ns['allsamplenames'])
    for samplename in samples:
        writemarkdown('## ' + samplename)
        headers_sample = [h for h in headers if h.title == samplename]
        data2d[samplename] = {}
        rowavg[samplename] = {}
        data1d[samplename] = {}
        headers_tosave[samplename] = {}
        dists = get_different_distances([h for h in headers if h.title == samplename], dist_tolerance)
        if not dists:
            writemarkdown('No measurements from sample, skipping.')
            continue
        # One figure each for the 2D patterns, the 1D curves and the
        # correlation matrices of this sample.
        fig_2d = plt.figure()
        fig_curves = plt.figure()
        fig_correlmatrices = plt.figure()
        distaxes = {}
        correlmatrixaxes = {}
        ncols = min(len(dists), graph_ncols)
        nrows = int(np.ceil(len(dists) / ncols))
        onedimaxes = fig_curves.add_axes((0.1, 0.3, 0.8, 0.5))
        onedimstdaxes = fig_curves.add_axes((0.1, 0.1, 0.8, 0.2))
        for distidx, dist in enumerate(dists):
            writemarkdown("### Distance " + str(dist) + " mm")
            headers_narrowed = [h for h in headers_sample if abs(float(h.distance) - dist) < dist_tolerance]
            distaxes[dist] = fig_2d.add_subplot(
                nrows, ncols, distidx + 1)
            correlmatrixaxes[dist] = fig_correlmatrices.add_subplot(
                nrows, ncols, distidx + 1)
            # determine the q-range to be used from the qranges argument.
            try:
                distkey_min = min([np.abs(k - dist)
                                   for k in qranges if np.abs(k - dist) < dist_tolerance])
            except ValueError:
                # no matching key in qranges dict
                qrange = None  # request auto-determination of q-range
            else:
                distkey = [
                    k for k in qranges if np.abs(k - dist) == distkey_min][0]
                qrange = qranges[distkey]
            (data1d[samplename][dist], data2d[samplename][dist], headers_tosave[samplename][dist]) = \
                _collect_data_for_summarization(headers_narrowed, raw, reintegrate, qrange)
            badfsns, badfsns_datcmp, tab, rowavg[samplename][dist] = _stabilityassessment(
                headers_tosave[samplename][dist],
                data1d[samplename][dist], dist,
                fig_correlmatrices,
                correlmatrixaxes[dist], std_multiplier, correlmatrix_colormap,
                os.path.join(ip.user_ns['saveto_dir'], 'correlmatrix_%s_%s' % (
                    samplename,
                    ('%.2f' % dist).replace('.', '_')) +
                    rawpart + '.npz'),
                logarithmic_correlmatrix=correlmatrix_logarithmic,
                cormaptest=cormaptest)
            # NOTE(review): because of the 'elif', 'badfsns_datcmp' is NOT
            # initialized when both keys are absent -- the next-but-one line
            # would then raise KeyError.  Likely these should be two
            # independent 'if's; confirm before changing.
            if 'badfsns' not in ip.user_ns:
                ip.user_ns['badfsns'] = {}
            elif 'badfsns_datcmp' not in ip.user_ns:
                ip.user_ns['badfsns_datcmp'] = {}
            ip.user_ns['badfsns'] = set(ip.user_ns['badfsns']).union(badfsns)
            ip.user_ns['badfsns_datcmp'] = set(ip.user_ns['badfsns_datcmp']).union(badfsns_datcmp)
            display(tab)
            # Plot the image
            try:
                data2d[samplename][dist].imshow(axes=distaxes[dist], show_crosshair=False,
                                                norm=matplotlib.colors.LogNorm(),
                                                cmap=matplotlib.cm.get_cmap(image_colormap))
            except ValueError:
                print('Error plotting 2D image for sample %s, distance %.2f' % (samplename, dist))
            distaxes[dist].set_xlabel('q (' + qunit() + ')')
            distaxes[dist].set_ylabel('q (' + qunit() + ')')
            distaxes[dist].set_title(
                '%.2f mm (%d curve%s)' % (dist, len(headers_tosave[samplename][dist]),
                                          ['', 's'][len(headers_tosave[samplename][dist]) > 1]))
            # Plot the curves
            Istd = np.stack([c.Intensity for c in data1d[samplename][dist]], axis=1)
            for c, h in zip(data1d[samplename][dist], headers_tosave[samplename][dist]):
                # Color-code each curve by its quality verdict.
                color = 'green'
                if h.fsn in badfsns_datcmp:
                    color = 'magenta'
                if h.fsn in badfsns:
                    color = 'red'
                c.loglog(axes=onedimaxes, color=color)
            if Istd.shape[1] > 1:
                # Relative standard deviation of the intensity across exposures.
                onedimstdaxes.loglog(data1d[samplename][dist][0].q, Istd.std(axis=1) / Istd.mean(axis=1) * 100, 'b-')
            if not late_radavg:
                data1d[samplename][dist] = Curve.average(
                    *data1d[samplename][dist])
            else:
                data1d[samplename][dist] = (
                    data2d[samplename][dist].radial_average(
                        qrange,
                        errorpropagation=3,
                        abscissa_errorpropagation=3, raw_result=False))
            data1d[samplename][dist].loglog(
                label='Average', lw=2, color='k', axes=onedimaxes)
            ##Saving image, headers, mask and curve
            # data2d[samplename][dist].write(
            #    os.path.join(ip.user_ns['saveto_dir'],
            #                 samplename + '_'+(
            #                     '%.2f' % dist).replace('.', '_') +
            #                 rawpart + '.npz'), plugin='CREDO Reduced')
            # data2d[samplename][dist].header.write(
            #    os.path.join(ip.user_ns['saveto_dir'],
            ###                 samplename + '_'+(
            #                     '%.2f' % dist).replace('.', '_') +
            #                 rawpart +'.log'), plugin='CREDO Reduced')
            # data2d[samplename][dist].mask.write_to_mat(
            #    os.path.join(ip.user_ns['saveto_dir'],
            #                 data2d[samplename][dist].mask.maskid+'.mat'))
            data1d[samplename][dist].save(os.path.join(ip.user_ns['saveto_dir'],
                                                       samplename + '_' + ('%.2f' % dist).replace('.',
                                                                                                  '_') + rawpart + '.txt'))
            # Report on qrange and flux
            q_ = data1d[samplename][dist].q
            qmin = q_[q_ > 0].min()
            writemarkdown('#### Q-range & flux')
            writemarkdown(
                '- $q_{min}$: ' + print_abscissavalue(qmin, headers_tosave[samplename][dist][0].wavelength, dist))
            writemarkdown('- $q_{max}$: ' + print_abscissavalue(data1d[samplename][dist].q.max(),
                                                                headers_tosave[samplename][dist][0].wavelength, dist))
            writemarkdown('- Number of $q$ points: ' + str(len(data1d[samplename][dist])))
            meastime = sum([h.exposuretime for h in headers_tosave[samplename][dist]])
            writemarkdown("- from %d exposures, total exposure time %.0f sec <=> %.2f hr" % (
                len(headers_tosave[samplename][dist]),
                meastime, meastime / 3600.))
            try:
                flux = [h.flux for h in headers_tosave[samplename][dist]]
                flux = ErrorValue(np.mean(flux), np.std(flux))
                writemarkdown("- beam flux (photon/sec): %s" % flux)
            except KeyError:
                writemarkdown("- *No information on beam flux: dealing with raw data.*")
        # Finalize the per-sample figures (labels, grids, hardcopies).
        onedimaxes.set_xlabel('')
        onedimaxes.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
        # plt.legend(loc='best')
        onedimaxes.grid(True, which='both')
        onedimaxes.axis('tight')
        onedimaxes.set_title(samplename)
        onedimstdaxes.set_xlabel('q (' + qunit() + ')')
        onedimstdaxes.set_ylabel('Rel.std.dev. of intensity (%)')
        onedimstdaxes.grid(True, which='both')
        onedimstdaxes.set_xlim(*onedimaxes.get_xlim())
        onedimstdaxes.set_xscale(onedimaxes.get_xscale())
        putlogo(fig_curves)
        putlogo(fig_2d)
        fig_2d.tight_layout()
        fig_correlmatrices.suptitle(samplename)
        fig_correlmatrices.tight_layout()
        fig_2d.savefig(
            os.path.join(ip.user_ns['auximages_dir'],
                         'averaging2D_' +
                         samplename + rawpart + '.' + graph_extension),
            dpi=graph_dpi)
        fig_curves.savefig(
            os.path.join(ip.user_ns['auximages_dir'],
                         'averaging1D_' +
                         samplename + rawpart + '.' + graph_extension),
            dpi=graph_dpi)
        putlogo(fig_correlmatrices)
        fig_correlmatrices.savefig(
            os.path.join(ip.user_ns['auximages_dir'],
                         'correlation_' +
                         samplename + rawpart + '.' + graph_extension),
            dpi=graph_dpi)
        writemarkdown("### Collected images from all distances")
        plt.show()
    writemarkdown("Updated badfsns list:")
    writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns']) + ']')
    writemarkdown("Updated badfsns list using datcmp:")
    writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns_datcmp']) + ']')
    # Publish the results back into the interactive namespace.
    ip.user_ns['_data1d'] = data1d
    ip.user_ns['_data2d'] = data2d
    ip.user_ns['_headers_sample'] = headers_tosave
    ip.user_ns['_rowavg'] = rowavg
def _merge_two_curves(curve1: Curve, curve2: Curve, qmin, qmax, qsep, use_additive_constant=False):
    """Merge two scattering curves

    :param curve1: the first curve (longer distance)
    :type curve1: sastool.classes.curve.GeneralCurve
    :param curve2: the second curve (shorter distance)
    :type curve2: sastool.classes.curve.GeneralCurve
    :param qmin: lower bound of the interval for determining the scaling factor
    :type qmin: float
    :param qmax: upper bound of the interval for determining the scaling factor
    :type qmax: float
    :param qsep: separating (tailoring) point for the merge
    :type qsep: float
    :param use_additive_constant: if True, fit an additive background as well;
        otherwise the background is pinned to zero via FixedParameter.
    :return: merged_curve, factor, background, stat
    :rtype tuple of a sastool.classes2.curve.Curve and a float
    """
    curve1=curve1.sanitize()
    curve2=curve2.sanitize()
    # Interpolate the sparser curve onto the denser curve's abscissa inside
    # the [qmin, qmax] overlap region.
    if len(curve1.trim(qmin, qmax)) > len(curve2.trim(qmin, qmax)):
        curve2_interp = curve2.trim(qmin, qmax)
        curve1_interp = curve1.interpolate(curve2_interp.q)
    else:
        curve1_interp = curve1.trim(qmin, qmax)
        curve2_interp = curve2.interpolate(curve1_interp.q)
    if use_additive_constant:
        bg_init = 0
    else:
        bg_init = FixedParameter(0)
    # Orthogonal distance regression of curve1 onto curve2: I2 ~ I1*factor + bg.
    factor, bg, stat = nonlinear_odr(curve2_interp.Intensity, curve1_interp.Intensity,
                                     curve2_interp.Error, curve1_interp.Error,
                                     lambda x, factor, bg: x * factor + bg, [1.0, bg_init])
    return Curve.merge(curve1 - bg, curve2 * factor, qsep), factor, bg, stat
def _scale_two_exposures(exp1, exp2, qmin, qmax, N=10, use_additive_constant=False):
    """Determine the (factor, background) scaling exp1's radial average onto
    exp2's over [qmin, qmax] sampled at N points, via nonlinear ODR.

    When use_additive_constant is False, the background is pinned to zero
    with FixedParameter.
    """
    qrange = np.linspace(qmin, qmax, N)
    rad1 = exp1.radial_average(qrange=qrange, raw_result=False)
    rad2 = exp2.radial_average(qrange=qrange, raw_result=False)
    if use_additive_constant:
        bg_init = 0
    else:
        bg_init = FixedParameter(0)
    factor, bg, stat = nonlinear_odr(rad2.y, rad1.y, rad2.dy, rad1.dy, lambda x, factor, bg: x * factor + bg,
                                     [1, bg_init])
    return factor, bg
def unite(samplename, uniqmin=[], uniqmax=[], uniqsep=[], graph_ncols=2, graph_subplotpars={'hspace': 0.3},
          graph_extension='png', graph_dpi=80, additive_constant=False):
    """Scale together and merge the 1D curves of *samplename* measured at
    different sample-to-detector distances (processed longest distance first).

    uniqmin/uniqmax/uniqsep give, per merge step, the fitting range and
    tailoring point; missing entries are auto-detected.  The united curve is
    stored in ip.user_ns['_data1dunited'][samplename] and saved to disk.

    NOTE(review): the list defaults are never mutated in place (only
    re-bound via '+'), so the mutable-default pitfall does not bite here.
    """
    ip = get_ipython()
    # Accept scalars as a convenience; normalize to one-element lists.
    if isinstance(uniqmin, numbers.Number):
        uniqmin = [uniqmin]
    if isinstance(uniqmax, numbers.Number):
        uniqmax = [uniqmax]
    if isinstance(uniqsep, numbers.Number):
        uniqsep = [uniqsep]
    data1d = ip.user_ns['_data1d'][samplename]
    print("Uniting measurements of sample %s at different s-d distances" % samplename)
    uniparams = {'qmin': uniqmin, 'qmax': uniqmax, 'qsep': uniqsep}
    # Pad every parameter list with None up to (number of merges) entries.
    for p in uniparams:
        uniparams[p] = uniparams[p] + [None] * \
            max(0, len(data1d) - 1 - len(uniparams[p]))
    dists = list(reversed(sorted(data1d.keys())))
    if len(dists) < 2:
        print("Less than two distances found for sample %s; no point of uniting." % samplename)
        return
    united = None
    graph_nrows = int(
        np.ceil((len(dists)) / (graph_ncols * 1.0)))
    fig = plt.figure()
    unitedaxis = fig.add_subplot(graph_nrows, graph_ncols, 1)
    factor = 1.0
    # Merge pairwise: the running 'united' curve with the next (shorter)
    # distance, accumulating the scaling factor.
    for idx, dist1, dist2, qmin, qmax, qsep in zip(list(range(len(dists) - 1)),
                                                   dists[:-1], dists[1:],
                                                   uniparams['qmin'],
                                                   uniparams['qmax'],
                                                   uniparams['qsep']):
        print(" Scaling together distances %f and %f mm" % (dist1, dist2), flush=True)
        if united is None:
            united = data1d[dist1]
        if qmin is None:
            qmin = data1d[dist2].sanitize().q.min()
            print(" Auto-detected qmin:", qmin, flush=True)
        if qmax is None:
            qmax = data1d[dist1].sanitize().q.max()
            print(" Auto-detected qmax:", qmax, flush=True)
        if qsep is None:
            qsep = 0.5 * (qmin + qmax)
            print(" Auto-detected qsep:", qsep, flush=True)
        ax = fig.add_subplot(graph_nrows, graph_ncols, 2 + idx)
        (factor * data1d[dist1]).loglog(axes=ax, label='%.2f mm' % dist1)
        united, factor1, bg, stat = _merge_two_curves(united,
                                                      data1d[dist2], qmin, qmax, qsep,
                                                      use_additive_constant=additive_constant)
        factor = factor1 * factor
        # Remember the (possibly auto-detected) parameters actually used.
        uniparams['qmin'][idx] = qmin
        uniparams['qmax'][idx] = qmax
        uniparams['qsep'][idx] = qsep
        print(" Scaling factor is", factor.tostring(), flush=True)
        if not additive_constant:
            print(" Additive constant has not been used.", flush=True)
        else:
            print(" Additive constant is:", bg.tostring(), flush=True)
        print(" Reduced Chi^2 of the ODR fit:", stat['Chi2_reduced'], flush=True)
        print(" DoF of the ODR fit:", stat['DoF'], flush=True)
        (data1d[dist2] * factor + bg).loglog(axes=ax, label='%.2f mm' % dist2)
        ax.set_xlabel('q (' + qunit() + ')')
        ax.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
        ax.legend(loc='best')
        # ax.grid(which='both')
        ax.axis('tight')
        ax.set_title('Factor: ' + str(factor))
        lims = ax.axis()
        # Mark the fitting range (red) and the tailoring point (black).
        ax.plot([qmin, qmin], lims[2:], '--r', lw=2)
        ax.plot([qmax, qmax], lims[2:], '--r', lw=2)
        ax.plot([qsep, qsep], lims[2:], '--k')
        ax.grid(True, which='both')
    if '_data1dunited' not in ip.user_ns:
        ip.user_ns['_data1dunited'] = {}
    united.loglog(axes=unitedaxis)
    unitedaxis.set_xlabel('q (' + qunit() + ')')
    unitedaxis.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
    unitedaxis.legend(loc='best')
    unitedaxis.set_title('United scattering of %s' % samplename)
    unitedaxis.grid(True, which='both')
    # unitedaxis.grid(which='both')
    unitedaxis.axis('tight')
    lims = unitedaxis.axis()
    for qs in uniparams['qsep']:
        unitedaxis.plot([qs] * 2, lims[2:], '--r')
    ip.user_ns['_data1dunited'][samplename] = united
    putlogo()
    fig.subplots_adjust(**graph_subplotpars)
    plt.savefig(
        os.path.join(ip.user_ns['auximages_dir'], 'uniting_' + samplename + '.' + graph_extension), dpi=graph_dpi)
    print(" United curve spans the following ranges:")
    print(" q_min: ",
          print_abscissavalue(united.q.min(), ip.user_ns['_headers_sample'][samplename][dists[0]][0].wavelength))
    print(" q_max: ",
          print_abscissavalue(united.q.max(), ip.user_ns['_headers_sample'][samplename][dists[0]][0].wavelength))
    print(" q_max/q_min:", united.q.max() / united.q.min())
    print(" I_min: ", united.Intensity.min(), "cm^{-1}")
    print(" I_max: ", united.Intensity.max(), "cm^{-1}")
    print(" I_max/I_min:", united.Intensity.max() / united.Intensity.min())
    print(" # of points: ", len(united))
    united.save(os.path.join(ip.user_ns['saveto_dir'], 'united_' + samplename + '.txt'))
    plt.show()
def subtract_bg(samplename, bgname, factor=1, distance=None, disttolerance=2,
                subname=None, qrange=(), graph_extension='png', graph_dpi=80):
    """Subtract background from measurements.
    Inputs:
        samplename: the name of the sample
        bgname: the name of the background measurements. Alternatively, it can
            be a numeric value (float or ErrorValue), which will be subtracted.
            If None, this constant will be determined by integrating the
            scattering curve in the range given by qrange.
        factor: the background curve will be multiplied by this. If None and
            bgname is a sample name, the factor is determined once from the
            ratio of the integrated intensities over qrange (and then reused
            for the remaining distances of this loop).
        distance: if None, do the subtraction for all sample-to-detector distances.
            Otherwise give here the value of the sample-to-detector distance.
        qrange: a tuple (qmin, qmax)
        disttolerance: the tolerance in which two distances are considered
            equal.
        subname: the sample name of the background-corrected curve. The default
            is samplename + '-' + bgname
        graph_extension: file extension for the saved comparison plot
        graph_dpi: resolution of the saved comparison plot
    Side effects: updates the IPython user namespace dictionaries (_data1d,
    _data2d, _headers_sample, subtractedsamplenames), writes the corrected
    curve to saveto_dir and a comparison figure to auximages_dir.
    """
    ip = get_ipython()
    data1d = ip.user_ns['_data1d']
    data2d = ip.user_ns['_data2d']
    # lazily create the registry of background-corrected sample names
    if 'subtractedsamplenames' not in ip.user_ns:
        ip.user_ns['subtractedsamplenames'] = set()
    subtractedsamplenames = ip.user_ns['subtractedsamplenames']
    if subname is None:
        if isinstance(bgname, str):
            subname = samplename + '-' + bgname
        else:
            # numeric or None background: no background sample name to append
            subname = samplename + '-const'
    if distance is None:
        dists = data1d[samplename]
    else:
        dists = [d for d in data1d[samplename] if abs(d - distance) < disttolerance]
    for dist in dists:
        if isinstance(bgname, str):
            if not disttolerance:
                # exact matching requested: the background must have been
                # measured at precisely this distance
                if dist not in data1d[bgname]:
                    print(
                        'Warning: Missing distance %g for background measurement (samplename: %s, background samplename: %s)' % (
                            dist, samplename, bgname))
                    continue
                else:
                    bgdist = dist
            else:
                # pick the closest background distance within the tolerance
                bgdist = sorted([(d, r) for (d, r) in [(d, np.abs(d - dist)) for d in list(data1d[bgname].keys())] if
                                 r <= disttolerance], key=lambda x: x[1])[0][0]
        if subname not in data1d:
            data1d[subname] = {}
        if subname not in data2d:
            data2d[subname] = {}
        if subname not in ip.user_ns['_headers_sample']:
            ip.user_ns['_headers_sample'][subname] = {}
        data1_s = data1d[samplename][dist]
        data2_s = data2d[samplename][dist]
        if isinstance(bgname, str):
            data1_bg = data1d[bgname][bgdist]
            data2_bg = data2d[bgname][bgdist]
            if factor is None:
                # auto-scale: ratio of the integrated intensities over qrange.
                # NOTE(review): factor is rebound here, so subsequent distances
                # in this loop reuse the value computed for the first one.
                factor = data1_s.trim(*qrange).momentum(0) / data1_bg.trim(*qrange).momentum(0)
        elif bgname is None:
            # constant background determined from the curve itself
            data1_bg = data1_s.trim(*qrange).momentum(0)
            data2_bg = data1_bg
        else:
            # bgname is a numeric constant (float or ErrorValue)
            data1_bg = bgname
            data2_bg = bgname
            if factor is None:
                factor = 1
        data1d[subname][dist] = data1_s - factor * data1_bg
        data2d[subname][dist] = data2_s - factor * data2_bg
        data1d[subname][dist].save(
            os.path.join(ip.user_ns['saveto_dir'], subname + '_' + ('%.2f' % dist).replace('.', '_') + '.txt'))
        ip.user_ns['_headers_sample'][subname][dist] = ip.user_ns['_headers_sample'][samplename][
            dist]  # ugly hack, I have no better idea.
        plt.figure()
        plotsascurve(samplename, dist=dist)
        if isinstance(bgname, str):
            plotsascurve(bgname, dist=dist, factor=factor)
        plotsascurve(subname, dist=dist)
        plt.savefig(os.path.join(ip.user_ns['auximages_dir'],
                                 'subtractbg_' + samplename + '.' + graph_extension),
                    dpi=graph_dpi)
        subtractedsamplenames.add(subname)
| {
"repo_name": "awacha/credolib",
"path": "credolib/procedures.py",
"copies": "1",
"size": "28890",
"license": "bsd-3-clause",
"hash": 34032802577506990,
"line_mean": 46.9900332226,
"line_max": 129,
"alpha_frac": 0.5670474213,
"autogenerated": false,
"ratio": 3.55656777052813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9614339212908665,
"avg_score": 0.0018551957838931544,
"num_lines": 602
} |
__all__ = ['switch']
__version__ = '0.1'
class _case:
    """Implements the `case` statement used inside a `switch` block.

    Instances are handed out by `switch.__enter__`; they are used either as
    a callable (the case test itself) or through their `default` method.
    """

    def __init__(self, switch):
        """Bind this case dispatcher to its owning `switch`.

        Arguments:
            switch -- the `switch` instance supplied by the context manager
        """
        self.switch = switch

    def _get_args(self, args):
        """Combine switch-wide positional arguments with case-local ones.

        Arguments:
            args -- iterable of extra positional arguments for the callback
        """
        combined = list(self.switch.callback_args)
        combined.extend(args)
        return combined

    def _get_kwargs(self, kwargs):
        """Combine switch-wide keyword arguments with case-local ones.

        Arguments:
            kwargs -- dict of extra keyword arguments for the callback
        """
        merged = self.switch.callback_kwargs.copy()
        merged.update(kwargs)
        return merged

    def __call__(self, assrt, callback=None, pass_through=False, *args, **kwargs):
        """Evaluate one case condition and run its action when it matches
        (or when the previous case matched with `pass_through` enabled).

        Arguments:
            assrt -- value handed to the switch's asserter
            callback -- action to run on a match
            pass_through -- continue evaluating subsequent cases even though
                this call would otherwise exhaust the switch
            *args -- extra positional arguments for the callback
            **kwargs -- extra keyword arguments for the callback
        """
        if self.switch.is_finished:
            raise Exception('Using case aside of context manager it was definied is denied')
        if self.switch.is_exhausted:
            return
        matched = self.switch.asserter(assrt)
        if not (matched or self.switch.pass_through):
            return
        self.switch.pass_through = pass_through
        # a non-pass-through match consumes the switch
        if not pass_through:
            self.switch.is_exhausted = True
        if callback:
            callback(self.switch.value,
                     *self._get_args(args),
                     **self._get_kwargs(kwargs))

    def default(self, callback=None):
        """`default` statement: runs only when no case has matched yet.

        Arguments:
            callback -- action to run with the switch value
        """
        if self.switch.is_exhausted:
            return
        if callback:
            callback(self.switch.value)
class switch:
    """`switch` clause for use in a context manager; also the shared state
    holder for every `case` created from it.
    """

    def __init__(self, value, asserter=None, args=None, kwargs=None):
        """Initialize shared state and select the asserter callable.

        Arguments:
            value -- handed to the asserter on every `case` call and passed
                to each callback run from a `case` block
            asserter -- None: truth-test the case argument; a callable: used
                as-is; True: compare the case argument against `value`
            args -- positional arguments forwarded to every callback
            kwargs -- keyword arguments forwarded to every callback
        """
        self.value = value
        self.is_exhausted = False
        self.is_finished = False
        self.pass_through = False
        self.callback_args = args if args else []
        self.callback_kwargs = kwargs if kwargs else {}
        if asserter is None:
            self.asserter = bool
        elif hasattr(asserter, '__call__'):
            self.asserter = asserter
        elif asserter == True:
            self.asserter = lambda candidate: candidate == value
        else:
            raise TypeError("Can't use given asserter: %r" % (asserter, ))

    def __enter__(self, *a, **b):
        """Context-manager entry: hand out a `_case` bound to this switch."""
        return _case(self)

    def __exit__(self, *a, **b):
        """Context-manager exit: mark the switch exhausted and finished."""
        self.is_exhausted = True
        self.is_finished = True
| {
"repo_name": "mysz/py-simpleswitch",
"path": "simpleswitch.py",
"copies": "2",
"size": "4376",
"license": "mit",
"hash": 5618020379700384000,
"line_mean": 34.008,
"line_max": 104,
"alpha_frac": 0.5804387569,
"autogenerated": false,
"ratio": 4.393574297188755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5974013054088755,
"avg_score": null,
"num_lines": null
} |
# Aggregate counters shared by every FileStats instance:
# symbol code point -> count, and lowercased word -> count, across all files.
all_symbols = {}
all_words = {}

# Stores file information
class FileStats(object):
    """Per-file symbol and word frequency statistics.

    Construction reads the file once and updates both the per-instance
    counters and the module-level aggregate counters.
    """

    def __init__(self, file_path):
        """Analyze the file at *file_path* immediately.

        Arguments:
            file_path -- path of the text file to analyze
        """
        self.symbols = {}  # code point -> count (tab/newline/space excluded)
        self.words = {}    # lowercased word -> count
        self.file_path = file_path
        self.analyze_content()

    def analyze_content(self):
        """Count symbols and words in the file content.

        Fix: the original opened the file twice via ``open(...).read()``,
        re-reading the content and leaking both file handles. The file is
        now read once inside a ``with`` block.
        """
        with open(self.file_path) as handle:
            content = handle.read()
        # count symbols in file content, skipping tab (9), newline (10), space (32)
        for char in content:
            code = ord(char)
            if code not in (9, 10, 32):
                # for a file
                self.symbols[code] = self.symbols.get(code, 0) + 1
                # for all files together
                all_symbols[code] = all_symbols.get(code, 0) + 1
        # count words in file content (str.split never yields empty strings)
        for word in content.split():
            key = word.lower()
            # for a file
            self.words[key] = self.words.get(key, 0) + 1
            # for all files together
            all_words[key] = all_words.get(key, 0) + 1

    def get_counted_content(self):
        """Return a human-readable report of this file's counters."""
        result = self.file_path
        result += '\n\t' + 'Symbols in file'
        for symbol, times in self.symbols.items():
            result += '\n\t\t' + chr(symbol) + ' ' + str(times)
        result += '\n\n\t' + 'Words in file'
        for word, times in self.words.items():
            result += '\n\t\t' + word + ' ' + str(times)
        return result

    @staticmethod
    def get_all_counted_content():
        """Return a human-readable report of the aggregate counters."""
        result = 'All symbols in all files'
        for symbol, times in all_symbols.items():
            result += '\n\t' + chr(symbol) + ' ' + str(times)
        result += '\n\nAll words in all files'
        for word, times in all_words.items():
            result += '\n\t' + word + ' ' + str(times)
        return result
"repo_name": "aistis-/symbol-counting",
"path": "FileStats.py",
"copies": "1",
"size": "2054",
"license": "mit",
"hash": -5794981211948427000,
"line_mean": 26.7702702703,
"line_max": 63,
"alpha_frac": 0.470788705,
"autogenerated": false,
"ratio": 4.149494949494949,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011261261261261261,
"num_lines": 74
} |
__all__ = ["Synchronizer", "NameLock", "_threading", "_thread"]
import os
import sys
import tempfile
try:
import thread as _thread
import threading as _threading
except ImportError:
import dummy_thread as _thread
import dummy_threading as _threading
# check for fcntl module
try:
sys.getwindowsversion()
has_flock = False
except:
try:
import fcntl
has_flock = True
except ImportError:
has_flock = False
from beaker import util
from beaker.exceptions import LockError
class NameLock(object):
    """a proxy for an RLock object that is stored in a name
    based registry.
    Multiple threads can get a reference to the same RLock based on
    the name alone, and synchronize operations related to that name.
    """
    # class-wide registry; weak values let an entry disappear once nobody
    # holds a NameLock for that identifier any more
    locks = util.WeakValuedRegistry()
    class NLContainer:
        """cant put Lock as a weakref"""
        def __init__(self, reentrant):
            # choose the lock flavour once, at construction time
            if reentrant:
                self.lock = _threading.RLock()
            else:
                self.lock = _threading.Lock()
        def __call__(self):
            # calling the container yields the wrapped lock object
            return self.lock
    def __init__(self, identifier = None, reentrant = False):
        # identifier None -> private, unshared lock; otherwise the lock is
        # shared with every other NameLock created under the same name
        self.lock = self._get_lock(identifier, reentrant)
    def acquire(self, wait = True):
        # wait acts as the blocking flag of the underlying lock's acquire()
        return self.lock().acquire(wait)
    def release(self):
        self.lock().release()
    def _get_lock(self, identifier, reentrant):
        # fetch (or lazily create) the container for this identifier
        if identifier is None:
            return NameLock.NLContainer(reentrant)
        return NameLock.locks.get(identifier, lambda: NameLock.NLContainer(reentrant))
class Synchronizer(object):
    """a read-many/single-writer synchronizer which globally synchronizes on a given string name."""
    # registry shared by all Synchronizer instances, keyed by identifier
    conditions = util.WeakValuedRegistry()
    def __init__(self, identifier = None, use_files = False, lock_dir = None, digest_filenames = True):
        # without fcntl/flock support (e.g. on Windows) fall back to
        # in-process condition-based locking
        if not has_flock:
            use_files = False
        if use_files:
            # file locks are not threadsafe, so keep one FileSynchronizer
            # per thread via a ThreadLocal stored under this identifier
            syncs = Synchronizer.conditions.sync_get("file_%s" % identifier, lambda:util.ThreadLocal(creator=lambda: FileSynchronizer(identifier, lock_dir, digest_filenames)))
            self._get_impl = lambda:syncs.get()
        else:
            # a single ConditionSynchronizer is shared across threads
            # under one identifier
            condition = Synchronizer.conditions.sync_get("condition_%s" % identifier, lambda: ConditionSynchronizer(identifier))
            self._get_impl = lambda:condition
    def release_read_lock(self):
        self._get_impl().release_read_lock()
    def acquire_read_lock(self, wait=True):
        return self._get_impl().acquire_read_lock(wait=wait)
    def acquire_write_lock(self, wait=True):
        return self._get_impl().acquire_write_lock(wait=wait)
    def release_write_lock(self):
        self._get_impl().release_write_lock()
class SyncState(object):
    """Tracks the current thread's reader/writer status together with a
    reentrancy counter for nested acquire/release pairs."""

    def __init__(self):
        # start with no lock held, at zero nesting depth
        self.reading = False
        self.writing = False
        self.reentrantcount = 0
class SynchronizerImpl(object):
    """Shared acquire/release bookkeeping for synchronizer back-ends.

    The release/acquire methods are only threadsafe when the `state`
    attribute resolves to a per-thread SyncState; that choice is left to
    subclasses, which also supply the actual locking via the do_* hooks.
    """

    def acquire_read_lock(self, wait = True):
        """Enter (possibly reentrantly) the reading state."""
        state = self.state
        if state.writing:
            raise LockError("lock is in writing state")
        if state.reentrantcount == 0:
            # outermost acquisition: delegate to the backend
            acquired = self.do_acquire_read_lock(wait)
            if wait or acquired:
                state.reentrantcount += 1
                state.reading = True
            return acquired
        if state.reading:
            # nested acquisition: just bump the counter
            state.reentrantcount += 1
            return True

    def release_read_lock(self):
        """Leave the reading state; the backend releases on the last exit."""
        state = self.state
        if state.writing:
            raise LockError("lock is in writing state")
        if not state.reading:
            raise LockError("lock is not in reading state")
        if state.reentrantcount == 1:
            self.do_release_read_lock()
            state.reading = False
        state.reentrantcount -= 1

    def acquire_write_lock(self, wait = True):
        """Enter (possibly reentrantly) the writing state."""
        state = self.state
        if state.reading:
            raise LockError("lock is in reading state")
        if state.reentrantcount == 0:
            acquired = self.do_acquire_write_lock(wait)
            if wait or acquired:
                state.reentrantcount += 1
                state.writing = True
            return acquired
        if state.writing:
            state.reentrantcount += 1
            return True

    def release_write_lock(self):
        """Leave the writing state; the backend releases on the last exit."""
        state = self.state
        if state.reading:
            raise LockError("lock is in reading state")
        if not state.writing:
            raise LockError("lock is not in writing state")
        if state.reentrantcount == 1:
            self.do_release_write_lock()
            state.writing = False
        state.reentrantcount -= 1

    # backend hooks -- subclasses must implement the real locking
    def do_release_read_lock(self):
        raise NotImplementedError()

    def do_acquire_read_lock(self):
        raise NotImplementedError()

    def do_release_write_lock(self):
        raise NotImplementedError()

    def do_acquire_write_lock(self):
        raise NotImplementedError()
class FileSynchronizer(SynchronizerImpl):
    """a synchronizer using lock files.
    as it relies upon flock, its inherently not threadsafe.  The
    Synchronizer container will maintain a unique FileSynchronizer per Synchronizer instance per thread.
    This works out since the synchronizers are all locking on a file on the filesystem.
    """
    def __init__(self, identifier, lock_dir, digest_filenames):
        # plain (non-thread-local) state -- see class docstring
        self.state = SyncState()
        if lock_dir is None:
            lock_dir = tempfile.gettempdir()
        else:
            lock_dir = lock_dir
        # derive the lock-file path from the identifier
        self.filename = util.encoded_path(lock_dir, [identifier], extension = '.lock', digest = digest_filenames)
        self.opened = False
        self.filedesc = None
    def _open(self, mode):
        # lazily open the lock file; reuse the descriptor once open
        if not self.opened:
            self.filedesc = os.open(self.filename, mode)
            self.opened = True
    def do_acquire_read_lock(self, wait):
        """Take a shared flock; non-blocking when wait is false."""
        self._open(os.O_CREAT | os.O_RDONLY)
        if not wait:
            try:
                fcntl.flock(self.filedesc, fcntl.LOCK_SH | fcntl.LOCK_NB)
                ret = True
            except IOError:
                # lock is held elsewhere and we were asked not to block
                ret = False
            return ret
        else:
            fcntl.flock(self.filedesc, fcntl.LOCK_SH)
            return True
    def do_acquire_write_lock(self, wait):
        """Take an exclusive flock; non-blocking when wait is false."""
        self._open(os.O_CREAT | os.O_WRONLY)
        if not wait:
            try:
                fcntl.flock(self.filedesc, fcntl.LOCK_EX | fcntl.LOCK_NB)
                ret = True
            except IOError:
                ret = False
            return ret
        else:
            fcntl.flock(self.filedesc, fcntl.LOCK_EX);
            return True
    def do_release_read_lock(self):
        self.release_all_locks()
    def do_release_write_lock(self):
        self.release_all_locks()
    def release_all_locks(self):
        # unlock and close; the next acquire reopens the file
        if self.opened:
            fcntl.flock(self.filedesc, fcntl.LOCK_UN)
            os.close(self.filedesc)
            self.opened = False
    def __del__(self):
        if not os:
            # os module has already been GCed
            return
        # best-effort removal of the lock file on garbage collection
        if os.access(self.filename, os.F_OK):
            try:
                os.remove(self.filename)
            except OSError:
                # occasionally another thread beats us to it
                pass
class ConditionSynchronizer(SynchronizerImpl):
    """a synchronizer using a Condition.
    this synchronizer is based on threading.Lock() objects and
    therefore must be shared among threads, so it is also threadsafe.
    the "state" variable referenced by the base SynchronizerImpl class
    is turned into a thread local, and all the do_XXXX methods are synchronized
    on the condition object.
    The Synchronizer container will maintain a registry of ConditionSynchronizer
    objects keyed to the name of the synchronizer.

    NOTE(review): this module targets Python 2 -- the attribute name
    "async" below is a reserved keyword from Python 3.7 on.
    """
    def __init__(self, identifier):
        # per-thread SyncState, exposed via the "state" property below
        self.tlocalstate = util.ThreadLocal(creator = lambda: SyncState())
        # counts how many asynchronous methods are executing
        self.async = 0
        # pointer to thread that is the current sync operation
        self.current_sync_operation = None
        # condition object to lock on
        self.condition = _threading.Condition(_threading.Lock())
    state = property(lambda self: self.tlocalstate())
    def do_acquire_read_lock(self, wait = True):
        """Register as an asynchronous reader, deferring to any pending writer."""
        self.condition.acquire()
        # see if a synchronous operation is waiting to start
        # or is already running, in which case we wait (or just
        # give up and return)
        if wait:
            while self.current_sync_operation is not None:
                self.condition.wait()
        else:
            if self.current_sync_operation is not None:
                self.condition.release()
                return False
        self.async += 1
        self.condition.release()
        if not wait: return True
    def do_release_read_lock(self):
        """Drop out of the asynchronous reader pool, waking a waiting writer."""
        self.condition.acquire()
        self.async -= 1
        # check if we are the last asynchronous reader thread
        # out the door.
        if self.async == 0:
            # yes. so if a sync operation is waiting, notifyAll to wake
            # it up
            if self.current_sync_operation is not None:
                self.condition.notifyAll()
        elif self.async < 0:
            raise LockError("Synchronizer error - too many release_read_locks called")
        self.condition.release()
    def do_acquire_write_lock(self, wait = True):
        """Become the single synchronous writer, waiting out readers and writers."""
        self.condition.acquire()
        # here, we are not a synchronous reader, and after returning,
        # assuming waiting or immediate availability, we will be.
        if wait:
            # if another sync is working, wait
            while self.current_sync_operation is not None:
                self.condition.wait()
        else:
            # if another sync is working,
            # we dont want to wait, so forget it
            if self.current_sync_operation is not None:
                self.condition.release()
                return False
        # establish ourselves as the current sync
        # this indicates to other read/write operations
        # that they should wait until this is None again
        self.current_sync_operation = _threading.currentThread()
        # now wait again for asyncs to finish
        if self.async > 0:
            if wait:
                # wait
                self.condition.wait()
            else:
                # we dont want to wait, so forget it
                self.current_sync_operation = None
                self.condition.release()
                return False
        self.condition.release()
        if not wait: return True
    def do_release_write_lock(self):
        """Give up writer status and wake everyone waiting on the condition."""
        self.condition.acquire()
        if self.current_sync_operation != _threading.currentThread():
            raise LockError("Synchronizer error - current thread doesnt have the write lock")
        # reset the current sync operation so
        # another can get it
        self.current_sync_operation = None
        # tell everyone to get ready
        self.condition.notifyAll()
        # everyone go !!
        self.condition.release()
| {
"repo_name": "carlgao/lenga",
"path": "images/lenny64-peon/usr/share/python-support/python-beaker/beaker/synchronization.py",
"copies": "3",
"size": "11680",
"license": "mit",
"hash": -2791819739071680000,
"line_mean": 30.4824797844,
"line_max": 175,
"alpha_frac": 0.5886130137,
"autogenerated": false,
"ratio": 4.422567209390382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6511180223090383,
"avg_score": null,
"num_lines": null
} |
__all__ = ['System']
from typing import List, Dict, FrozenSet, Any, Type
import warnings
import logging
import bugzoo
from bugzoo.client import Client as BugZooClient
from .command import Command
from .configuration import Configuration
from .sandbox import Sandbox
from .state import Variable, State
# Module-level logger, verbose by default (handlers are left to callers).
logger = logging.getLogger(__name__)  # type: logging.Logger
logger.setLevel(logging.DEBUG)
# Global registry of concrete System subclasses keyed by their declared
# name; populated by SystemMeta when a non-abstract subclass is defined.
_NAME_TO_SYSTEM_TYPE = {}  # type: Dict[str, Type[System]]
class SystemMeta(type):
    """Metaclass for System subclasses.

    Validates the class namespace of every non-abstract subclass ('name',
    'state', 'sandbox', 'commands', 'configuration') and registers the
    resulting class in the module-level registry under its declared name.
    """
    def __new__(mcl,
                cls_name: str,
                bases,  # FIXME
                ns: Dict[str, Any]
                ):
        # is this an abstract class?
        if 'is_abstract' in ns:
            is_abstract = ns['is_abstract']
            if not isinstance(ns['is_abstract'], bool):
                tpl = "Unexpected type for 'is_abstract' property: {}"
                typ = type(ns['is_abstract']).__name__
                msg = "expected 'bool' but was '{}'".format(typ)
                msg = tpl.format(msg)
                raise TypeError(msg)
        else:
            is_abstract = False
        # construct an immutable "is_abstract" property
        ns['is_abstract'] = is_abstract
        if not is_abstract:
            if 'name' not in ns:
                msg = "System class definition is missing 'name' property"
                raise TypeError(msg)
            sys_name = ns['name']
            if not isinstance(sys_name, str):
                msg = "was {} but should be str".format(type(sys_name))
                msg = "Unexpected type for 'name' property: {}".format(msg)
                raise TypeError(msg)
            if sys_name == '':
                msg = "System name must be a non-empty string."
                raise TypeError(msg)
            ns['name'] = sys_name
            if 'state' not in ns:
                msg = "System class definition is missing 'state' property"
                raise TypeError(msg)
            if not issubclass(ns['state'], State):
                typ = ns['state'].__name__
                msg = "was {} but should be a subclass of State".format(typ)
                msg = "Unexpected class for 'state' property: {}".format(msg)
                raise TypeError(msg)
            if 'sandbox' not in ns:
                msg = "System class definition is missing 'sandbox' property"
                raise TypeError(msg)
            if not issubclass(ns['sandbox'], Sandbox):
                typ = ns['sandbox'].__name__
                msg = "was {} but should be a subclass of Sandbox".format(typ)
                msg = "Unexpected class for 'sandbox' property: {}".format(msg)
                raise TypeError(msg)
            if 'commands' not in ns:
                msg = "System class definition is missing 'commands' property"
                raise TypeError(msg)
            if not isinstance(ns['commands'], list) \
                    or any(not issubclass(x, Command) for x in ns['commands']):
                msg = "was {} but should be List[Type[Command]]"
                msg = msg.format(ns['commands'])
                msg = "Unexpected type for 'commands' property: {}".format(msg)
                # FIX: this branch previously built the error message but
                # never raised it, letting a malformed 'commands' value fall
                # through to the dict comprehension below and crash there.
                raise TypeError(msg)
            # TODO convert to a frozen dictionary
            ns['commands'] = {c.name: c for c in ns['commands']}
            if 'configuration' not in ns:
                msg = "System class definition is missing 'configuration' property"  # noqa: pycodestyle
                raise TypeError(msg)
            if not issubclass(ns['configuration'], Configuration):
                msg = "was {} but should be a subclass of Configuration"
                msg = msg.format(ns['configuration'].__name__)
                tpl = "Unexpected class for 'configuration' property: {}"
                msg = tpl.format(msg)
                raise TypeError(msg)
        return super().__new__(mcl, cls_name, bases, ns)

    def __init__(cls, cls_name: str, bases, ns: Dict[str, Any]):
        # abstract classes are not registered
        if cls.is_abstract:
            return super().__init__(cls_name, bases, ns)
        # register system type
        if cls.name in _NAME_TO_SYSTEM_TYPE:
            # FIX: the template previously used "%s" with str.format, which
            # performs no substitution and left the name out of the message.
            msg = "System already registered under given name: {}"
            msg = msg.format(cls.name)
            raise TypeError(msg)
        _NAME_TO_SYSTEM_TYPE[cls.name] = cls
        return super().__init__(cls_name, bases, ns)
class System(object, metaclass=SystemMeta):
    """
    Instances of the System class are used to provide a description of a
    system-under-test, and to provide an interface for safely interacting
    with the system-under-test in an idempotent manner.
    """
    # abstract classes bypass SystemMeta's namespace validation/registration
    is_abstract = True
    @staticmethod
    def get_by_name(name: str) -> 'Type[System]':
        """
        Attempts to find the type definition for a system with a given name.
        Raises:
            KeyError: if no system type is registered under the given name.
        """
        # the registry is filled by SystemMeta for every non-abstract subclass
        return _NAME_TO_SYSTEM_TYPE[name]
| {
"repo_name": "squaresLab/Houston",
"path": "houston/system.py",
"copies": "1",
"size": "4934",
"license": "mit",
"hash": -57739860505749840,
"line_mean": 37.2480620155,
"line_max": 104,
"alpha_frac": 0.5545196595,
"autogenerated": false,
"ratio": 4.347136563876652,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5401656223376652,
"avg_score": null,
"num_lines": null
} |
__all__ = ["TableDesc", "Row", "Table"]
class TableDesc(object):
    """Immutable description of a table's column layout.

    Maps column indexes to names and back.  Instances are hashable and
    compare equal when their column name tuples are equal.
    """
    __slots__ = ["__nameFromIdx", "__idxFromName", "__getitem__", "idxFromName"]
    def __init__(self, *colNames):
        self.__nameFromIdx = colNames
        self.__idxFromName = {}
        for n, name in enumerate(colNames):
            assert isinstance(name, basestring)
            self.__idxFromName[name] = n
        # expose the two lookup directions as bound methods
        self.idxFromName = self.__idxFromName.__getitem__
        self.__getitem__ = self.__nameFromIdx.__getitem__
    def __len__(self):
        return len(self.__nameFromIdx)
    def __iter__(self):
        return iter(self.__nameFromIdx)
    def __repr__(self):
        return "TableDesc%r" % tuple(self)
    def __eq__(self, other):
        if self is other:
            return True
        # FIX: this previously tested isinstance(other, TupleDesc) -- a
        # misspelled, undefined name -- so comparing two distinct TableDesc
        # instances always raised NameError instead of returning a result.
        if not isinstance(other, TableDesc):
            return False
        return self.__nameFromIdx == other.__nameFromIdx
    def __ne__(self, other):
        # Geez.
        return not (self == other)
    def __hash__(self):
        return hash(self.__nameFromIdx)
class Row(object):
    """A single immutable table row: tuple data plus named access through a
    TableDesc.  Column values are reachable both by index and as attributes.
    (Python 2 code: note the __getslice__ slot, removed in Python 3.)
    """
    __slots__ = ["__desc", "__data", "__length_hint__",
                 "__getitem__", "__iter__", "__getslice__"]
    def __init__(self, desc, data):
        self.__desc = desc
        self.__data = tuple(data)
        # Shh, don't tell anyone
        self.__length_hint__ = len(self.__data)
        # Forward methods
        self.__getitem__ = self.__data.__getitem__
        self.__iter__ = self.__data.__iter__
        self.__getslice__ = self.__data.__getslice__
    def __getattr__(self, name):
        # attribute access falls back to column lookup by name
        try:
            idx = self.__desc.idxFromName(name)
        except KeyError:
            return object.__getattr__(self, name)
        return self.__data[idx]
    def __str__(self):
        # render as "{col: value, ...}" following the descriptor's order
        out = ["{"]
        for n, name in enumerate(self.__desc):
            if n != 0:
                out.append(", ")
            out.append(name)
            out.append(": %r" % self.__data[n])
        out.append("}")
        return "".join(out)
    def __repr__(self):
        return "Row(%r, %r)" % (self.__desc, self.__data)
    def __eq__(self, other):
        # rows are equal only when both descriptor and data match
        if not isinstance(other, Row):
            return False
        return self.__desc == other.__desc and self.__data == other.__data
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        return hash(self.__desc) ^ hash(self.__data)
class Table(object):
    """Abstract base for tabular data: a TableDesc plus a tuples() source.

    Subclasses must provide a tuples() method (asserted in __init__);
    iteration wraps each tuple in a Row.  `extra` carries free-form
    per-table metadata (e.g. the "<col>Align" hints used by toText).
    """
    def __init__(self, desc, extra = None):
        self.__desc = desc
        if extra == None:
            extra = {}
        self.__extra = extra
        # subclasses are required to supply the row source
        assert hasattr(self, "tuples")
    # Accessors
    @property
    def desc(self):
        return self.__desc
    @property
    def extra(self):
        return self.__extra
    def __iter__(self):
        # iterate rows, not raw tuples
        for row in self.tuples():
            yield Row(self.__desc, row)
    # Constructors
    @staticmethod
    def fromIterable(iterable, desc, extra = None):
        """Construct a table from an iterable object.  This object
        must support repeated iteration (it must not be, for example,
        a generator)."""
        return IterTable(iterable, desc, extra)
    def __compileProj(self, proj, varDict, colSet = None):
        # Compile one projection spec into (output name, eval expression).
        # "i" in the expression is bound to the current input tuple.
        if isinstance(proj, tuple):
            name, proj = proj
            haveName = True
        else:
            haveName = False
        if isinstance(proj, basestring):
            # column referenced by name
            idx = self.__desc.idxFromName(proj)
            if colSet != None:
                colSet.add(idx)
            return name if haveName else proj, "i[%d]" % idx
        if isinstance(proj, int):
            # column referenced by index
            if colSet != None:
                colSet.add(proj)
            return name if haveName else self.__desc[proj], "i[%d]" % proj
        if hasattr(proj, "__call__"):
            # computed column: stash the callable under a fresh variable name
            if not haveName or not name:
                raise ValueError("Function projection must provide column name")
            fvar = "_%d" % len(varDict)
            varDict[fvar] = proj
            # XXX Create just one row object
            return name, "%s(Row(desc, i))" % fvar
        raise ValueError("Projection of unknown type %s" % type(proj))
    def project(self, *projs):
        """Construct a new table by projecting each row of this table
        into a new row.  Each argument specifies a projection that
        will produce zero or more columns in the output table.  Each
        argument is a tuple of the form (output column name,
        projection).  A basic projection is a string or integer, in
        which case that column will be copied verbatim into the output
        table.  For basic projections, the output column name may be
        omitted.  The projection can also be a function that takes a
        Row object and returns the value for that column in the new
        row.
        An argument may also be the string "*", which stands for all
        columns of this table that have not appeared as a basic
        projection in some other argument.  To suppress columns from
        appearing in the output table despite "*", list them with None
        as their output column name."""
        varDict = {"Row" : Row, "desc" : self.__desc,
                   "__builtins__" : __builtins__}
        colSet = set()
        exprs = [self.__compileProj(proj, varDict, colSet)
                 if proj != "*" else ("*", None)
                 for proj in projs]
        # Expand "*"'s and build output column names
        outNames = []
        outExprs = []
        for outName, expr in exprs:
            if outName == None:
                # explicitly suppressed column
                pass
            elif outName == "*":
                # every column not already consumed by a basic projection
                for n, col in enumerate(self.__desc):
                    if n not in colSet:
                        outNames.append(col)
                        outExprs.append("i[%d]," % n)
            else:
                outNames.append(outName)
                outExprs.append(expr + ",")
        # the trailing commas make the compiled expression a tuple literal
        code = compile("(" + "".join(outExprs) + ")", "<generated>", "eval")
        return ProjectionTable(self, code, varDict, TableDesc(*outNames))
    # Rendering
    def renderText(self):
        return "\n".join(toText(self))
class IterTable(Table):
    """Table backed by a re-iterable container of tuples (see
    Table.fromIterable -- the container must support repeated iteration)."""

    def __init__(self, iterable, desc, extra):
        Table.__init__(self, desc, extra)
        self.__iterable = iterable

    def tuples(self):
        # hand out a fresh iterator on every call
        return iter(self.__iterable)

    def __len__(self):
        return len(self.__iterable)
class ProjectionTable(Table):
    """Lazy projection over a base table.

    Each output tuple is produced by evaluating a pre-compiled tuple
    expression (built by Table.project) against the corresponding input
    tuple of the base table.
    """
    def __init__(self, base, code, varDict, desc):
        Table.__init__(self, desc, base.extra)
        self.__base = base
        self.__code = code
        self.__varDict = varDict
    def tuples(self):
        # bind the current input tuple as "i" and evaluate the projection
        for tup in self.__base.tuples():
            self.__varDict["i"] = tup
            yield eval(self.__code, self.__varDict)
    def __len__(self):
        # FIX: this previously read "self.base", an attribute that is never
        # set (the base table is stored in the private, name-mangled
        # self.__base), so len() always raised AttributeError.
        return len(self.__base)
def toText(x):
    """Render a value as a list of text lines (Python 2 code: uses
    basestring, long and list-returning map).

    Handles None, strings, numbers, lists (bulleted), single-line tuples
    (comma-joined) and Table instances (aligned columns, honoring the
    "<col>Align" hints in table.extra); raises ValueError otherwise.
    """
    if x == None:
        return [""]
    if isinstance(x, basestring):
        return x.split("\n")
    if isinstance(x, int) or isinstance(x, long):
        return [str(x)]
    if isinstance(x, float):
        return ["%g" % x]
    if isinstance(x, list):
        # bullet each element; continuation lines are indented under it
        res = []
        for e in x:
            lines = toText(e)
            for i, l in enumerate(lines):
                if i == 0:
                    res.append("- " + l)
                else:
                    res.append("  " + l)
        return res
    if isinstance(x, tuple):
        # tuples render on one line only; multi-line members are an error
        liness = map(toText, x)
        if all(len(lines) == 1 for lines in liness):
            return [", ".join(lines[0] for lines in liness)]
        raise ValueError("Can't render multi-line value in tuple")
    if isinstance(x, Table):
        # Get alignment hints
        align = []
        for colName in x.desc:
            a = x.extra.get(colName + "Align", "l")
            if a == "l":
                align.append(str.ljust)
            elif a == "r":
                align.append(str.rjust)
            else:
                raise ValueError("Unknown alignment type %s" % a)
        # Textify all cells
        cells = [[[col] for col in x.desc]]
        if len(x.desc):
            # mark the header row as a comment-style line
            cells[0][0][0] = "# " + cells[0][0][0]
        for r in x.tuples():
            cells.append(map(toText, r))
        # Figure out column spacing
        widths = [0] * len(x.desc)
        for row in cells:
            for i, cell in enumerate(row):
                if len(cell):
                    cellWidth = max(len(line) for line in cell)
                else:
                    cellWidth = 0
                widths[i] = max(widths[i], cellWidth)
        # Add one space of padding between columns
        widths = [w + 1 for w in widths]
        # Render rows
        res = []
        for row in cells:
            # a logical row spans as many text lines as its tallest cell
            height = max(len(lines) for lines in row)
            for n in range(height):
                resLine = []
                for i, cell in enumerate(row):
                    line = cell[n] if n < len(cell) else ""
                    resLine.append(align[i](line, widths[i]))
                res.append("".join(resLine))
        return res
    raise ValueError("Don't know how to render %s" % type(x))
| {
"repo_name": "KMU-embedded/mosbench-ext",
"path": "mparts/table.py",
"copies": "1",
"size": "9105",
"license": "mit",
"hash": -6259416222173680000,
"line_mean": 31.8700361011,
"line_max": 80,
"alpha_frac": 0.5185063152,
"autogenerated": false,
"ratio": 4.097659765976598,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5116166081176597,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Table']
import os
import sys
from pprint import pprint
from collections import defaultdict
from .column import Column
from .schema import Schema
from .memtable import MemTable
from .sstable import SSTable
from .query import Query
from .deferred import Deferred
from .expr import Expr
class Table(object):
    """An LSM-tree style table.

    Writes are buffered in an in-memory MemTable and flushed to an
    immutable on-disk SSTable once MEMTABLE_LIMIT_N_ITEMS rows have
    accumulated.  Point lookups consult the memtable first, then the
    SSTables from newest to oldest.

    NOTE(review): this module is Python 2 code (``print`` statements,
    ``cmp`` and ``list.sort(cmp=...)``).
    """
    # Memtable flush threshold (number of buffered rows).
    MEMTABLE_LIMIT_N_ITEMS = 100
    def __init__(self, db, table_name, type_fields=None):
        """Attach to *db* and load any SSTables already present on disk."""
        self.store = db.store
        self.db = db
        self.table_name = table_name
        self.opened = False
        # schema
        self.schema = Schema(self, type_fields)
        # memtable
        self.memtable = MemTable(self)
        # sstables: discover existing 'sstable-<id>.data' files in the
        # table directory and open one SSTable per file.
        self.sstables = []
        table_path = self.get_path()
        for filename in os.listdir(table_path):
            if not filename.startswith('sstable-'):
                continue
            # Extract <id> from 'sstable-<id>.data'.
            s = filename.index('sstable-') + len('sstable-')
            e = filename.index('.data')
            t = filename[s:e]
            # sstable
            sst = SSTable(self, t)
            sst.open() # FIXME: lazy open in SSTable only if required
            self.sstables.append(sst)
    def __getattr__(self, attr):
        # Unknown attributes are delegated to the schema, so columns can
        # be referenced as ``table.<column_name>``.
        c = getattr(self.schema, attr)
        return c
    def get_path(self):
        """Return the directory holding this table's data files."""
        return os.path.join(self.db.get_path(), self.table_name)
    def is_opened(self):
        return self.opened
    def open(self):
        self.opened = True
    def close(self):
        """Close any opened SSTables and mark the table closed."""
        for sst in self.sstables:
            if sst.is_opened():
                sst.close()
        self.opened = False
    def commit_if_required(self):
        # Flush the memtable once it reaches the size threshold.
        if len(self.memtable) >= self.MEMTABLE_LIMIT_N_ITEMS:
            self.commit()
    def commit(self):
        """Flush the memtable into a brand-new SSTable and reset it."""
        # get sorted rows by primary_key
        columns = self.schema.primary_key
        rows = self.memtable.get_sorted_rows(columns)
        # create new sstable
        sst = SSTable(self, rows=rows)
        sst.open()
        self.sstables.append(sst)
        # clear memtable
        self.memtable = MemTable(self)
    @property
    def query(self):
        # Fresh Query bound to the underlying store.
        q = Query(self.db.store)
        return q
    def insert(self, **row):
        """Queue an insert; applied when the current transaction commits."""
        # tx
        tx = self.store.get_current_transaction()
        tx.log((self.db, self, Table._commit_insert, (self,), row))
    def _commit_insert(self, **row):
        """Transaction callback: validate *row* and put it in the memtable."""
        # check if all columns exist in table's schema
        # compare against schema
        for k, v in row.items():
            if k not in self.schema:
                # NOTE(review): 'filed' in the message looks like a typo
                # for 'field' (runtime string, left untouched here).
                raise Exception(
                    'filed %r is not defined in schema for table %r' % (
                        k,
                        self.table_name,
                    )
                )
        # set default columns to None
        for k, v in self.schema:
            if k not in row:
                row[k] = None
        # build key
        key = tuple(row[k] for k in self.schema.primary_key)
        # insert key
        self.memtable.set(key, row)
        # commit if required
        self.commit_if_required()
    def get(self, *args):
        """Queue a point lookup by primary key; returns a Deferred."""
        # key
        key = args
        # deferred
        d = Deferred()
        # tx
        tx = self.store.get_current_transaction()
        # NOTE(review): Table defines no ``table`` attribute, so
        # ``self.table`` would fall into __getattr__ and resolve against
        # the schema; this looks like it should be ``self`` -- confirm.
        tx.log((self.db, self.table, Table._commit_get, (self, d, key), {}))
        return d
    def _commit_get(self, d, key):
        # Transaction callback: resolve the deferred with the row value.
        v, op, sp = self._get(key)
        d.set(v)
    def select(self, *args):
        """Queue a select; returns a Query whose deferred receives rows."""
        # deferred, queue
        d = Deferred()
        # NOTE(review): the ``query`` property builds Query(self.db.store)
        # without a deferred; verify the intended Query signature.
        q = Query(self.store, d)
        # tx
        tx = self.store.get_current_transaction()
        # NOTE(review): ``self.table`` -- see note in ``get``.
        tx.log((self.db, self.table, Table._commit_select, (self, d, q), {}))
        return q
    def _cmp_ranges(self, a, b):
        """Comparator for (column, op, value) range triples.

        Orders primarily by column name; within a column, two upper
        bounds ('<', '<=') or two lower bounds ('>', '>=') compare by
        value, and mixed-direction pairs compare by value with
        overlapping ranges sorting as equal.
        """
        ac, aop, av = a
        bc, bop, bv = b
        if ac == bc:
            if aop in ('<', '<=') and bop in ('<', '<='):
                return cmp(av, bv)
            elif aop in ('>=', '>') and bop in ('>', '>='):
                return cmp(av, bv)
            elif aop in ('>=', '>') and bop in ('<', '<='):
                if av < bv:
                    return -1
                elif av > bv:
                    return 1
                else:
                    return 0
            elif aop in ('<', '<=') and bop in ('>=', '>'):
                # NOTE(review): av < bv yields 0 here, while the mirrored
                # branch above yields -1 -- confirm the asymmetry is
                # intentional.
                if av < bv:
                    return 0
                elif av > bv:
                    return 1
                else:
                    return 0
            else:
                return 0
        else:
            return cmp(ac, bc)
    def _eval_expr(self, expr):
        """Recursively turn a where-clause Expr into a list of
        (column, op, value) range triples; for 'and' nodes, adjacent
        ranges on the same column are merged/tightened.
        """
        # print '_eval_expr:', expr
        ranges = []
        if expr.op == 'and':
            l_ranges = self._eval_expr(expr.left)
            r_ranges = self._eval_expr(expr.right)
            _ranges = list(set(l_ranges + r_ranges))
            _ranges.sort(cmp=lambda a, b: self._cmp_ranges(a, b))
            # print '_ranges[0]:', _ranges
            # group by column name
            ranges_by_column = {}
            for r in _ranges:
                c, op, v = r
                if c in ranges_by_column:
                    rs = ranges_by_column[c]
                    rs.append(r)
                else:
                    ranges_by_column[c] = [r]
            # print 'ranges_by_column[0]:', ranges_by_column
            # optimize ranges by column
            for c, rs in ranges_by_column.items():
                if len(rs) == 1:
                    continue
                _rs = []
                # Merge each adjacent pair of (sorted) ranges: depending
                # on the operator pair, keep the tighter bound, keep both
                # bounds, collapse to '==', or drop contradictory ranges
                # (by appending nothing).
                for a, b in zip(rs[:-1], rs[1:]):
                    ac, aop, av = a
                    bc, bop, bv = b
                    if aop == '<':
                        if bop == '<':
                            r = (ac, '<', min(av, bv))
                            _rs.append(r)
                        elif bop == '<=':
                            if av <= bv:
                                _rs.append(a)
                            else:
                                _rs.append(b)
                        elif bop == '==':
                            if av > bv:
                                _rs.append(b)
                        elif bop == '>=':
                            if av > bv:
                                _rs.append(b)
                                _rs.append(a)
                        elif bop == '>':
                            if av > bv:
                                _rs.append(b)
                                _rs.append(a)
                    elif aop == '<=':
                        if bop == '<':
                            if av < bv:
                                _rs.append(a)
                            else:
                                _rs.append(b)
                        elif bop == '<=':
                            r = (ac, '<=', min(av, bv))
                            _rs.append(r)
                        elif bop == '==':
                            if av >= bv:
                                _rs.append(b)
                        elif bop == '>=':
                            if av == bv:
                                # <=x and >=x collapses to ==x
                                r = (ac, '==', av)
                                _rs.append(r)
                            elif av > bv:
                                _rs.append(b)
                                _rs.append(a)
                        elif bop == '>':
                            if av > bv:
                                _rs.append(b)
                                _rs.append(a)
                    elif aop == '==':
                        if bop == '<':
                            if av < bv:
                                _rs.append(a)
                        elif bop == '<=':
                            if av <= bv:
                                _rs.append(a)
                        elif bop == '==':
                            if av == bv:
                                _rs.append(a)
                        elif bop == '>=':
                            if av >= bv:
                                _rs.append(a)
                        elif bop == '>':
                            if av > bv:
                                _rs.append(a)
                    elif aop == '>=':
                        if bop == '<':
                            if av < bv:
                                _rs.append(a)
                                _rs.append(b)
                        elif bop == '<=':
                            if av == bv:
                                # >=x and <=x collapses to ==x
                                r = (ac, '==', av)
                                _rs.append(r)
                            elif av < bv:
                                _rs.append(a)
                                _rs.append(b)
                        elif bop == '==':
                            if av == bv:
                                _rs.append(b)
                        elif bop == '>=':
                            r = (ac, '>=', max(av, bv))
                            _rs.append(r)
                        elif bop == '>':
                            if av <= bv:
                                _rs.append(b)
                            else:
                                _rs.append(a)
                    elif aop == '>':
                        if bop == '<':
                            if av < bv:
                                _rs.append(a)
                                _rs.append(b)
                        elif bop == '<=':
                            if av < bv:
                                _rs.append(a)
                                _rs.append(b)
                        elif bop == '==':
                            if av < bv:
                                _rs.append(b)
                        elif bop == '>=':
                            if av < bv:
                                _rs.append(b)
                            else:
                                _rs.append(a)
                        elif bop == '>':
                            # NOTE(review): merging two strict '>' bounds
                            # produces a non-strict '>=' here -- looks like
                            # it should stay '>'; confirm.
                            r = (ac, '>=', max(av, bv))
                            _rs.append(r)
                ranges_by_column[c] = _rs
            # print 'ranges_by_column[1]:', ranges_by_column
            _ranges = []
            for c in sorted(ranges_by_column.keys()):
                rs = ranges_by_column[c]
                _ranges.extend(rs)
            # print '_ranges[1]:', _ranges
            ranges.extend(_ranges)
        elif expr.op == 'or':
            # FIXME: OR is not planned for now, but have it in mind
            pass
        else:
            # Leaf comparison: column <op> value.
            k = (expr.right,)
            cs = (expr.left.name,)
            if expr.op == '==':
                v, op, sp = self._get(k, cs)
            elif expr.op == '<':
                v, op, sp = self._get_lt(k, cs)
            elif expr.op == '<=':
                v, op, sp = self._get_le(k, cs)
            elif expr.op == '>':
                v, op, sp = self._get_gt(k, cs)
            elif expr.op == '>=':
                v, op, sp = self._get_ge(k, cs)
            ranges.append((cs[0], expr.op, k[0]))
        return ranges
    def _commit_select(self, d, q):
        """Transaction callback for select.

        NOTE(review): range-to-row resolution is unfinished -- ``rows``
        is never populated, so the deferred always resolves to [].
        """
        rows = [] # get from keys
        keys = [] # get from ranges
        ranges = self._eval_expr(q.where_clause)
        print 'ranges:', ranges
        ranges_by_column = {}
        for r in ranges:
            c, op, v = r
            if c in ranges_by_column:
                rs = ranges_by_column[c]
                rs.append(r)
            else:
                ranges_by_column[c] = [r]
        print 'ranges_by_column:', ranges_by_column
        keys_by_column = {}
        for c in sorted(ranges_by_column.keys()):
            rs = ranges_by_column[c]
            if len(rs) == 1:
                c, op, v = rs[0]
                key = (v,)
                columns = (c,)
                # row, offset_pos, sstable_pos = self._get(key, columns)
            elif len(rs) == 2:
                a, b = rs
                ac, aop, av = a
                bc, bop, bv = b
            else:
                raise ValueError
        d.set(rows)
    def _get(self, key, columns=None):
        """Exact lookup of *key*: memtable first, then SSTables newest
        to oldest.  Returns a (row, offset_pos, sstable_pos) triple (per
        the commented-out unpacking in _commit_select); raises KeyError
        if the key is found nowhere.
        """
        try:
            v, op, sp = self.memtable.get(key, columns)
        except KeyError as e:
            for sst in reversed(self.sstables):
                try:
                    v, op, sp = sst.get(key, columns)
                    break
                except KeyError as e:
                    pass
            else:
                raise KeyError
        return v, op, sp
    def _get_eq(self, key, columns=None):
        # NOTE(review): byte-for-byte duplicate of _get; candidates for
        # consolidation.
        try:
            v, op, sp = self.memtable.get(key, columns)
        except KeyError as e:
            for sst in reversed(self.sstables):
                try:
                    v, op, sp = sst.get(key, columns)
                    break
                except KeyError as e:
                    pass
            else:
                raise KeyError
        return v, op, sp
    def _get_lt(self, key, columns=None):
        """Like _get but using the stores' get_lt (greatest key < *key*)."""
        try:
            v, op, sp = self.memtable.get_lt(key, columns)
        except KeyError as e:
            for sst in reversed(self.sstables):
                try:
                    v, op, sp = sst.get_lt(key, columns)
                    break
                except KeyError as e:
                    pass
            else:
                raise KeyError
        return v, op, sp
    def _get_le(self, key, columns=None):
        """Like _get but using the stores' get_le (greatest key <= *key*)."""
        try:
            v, op, sp = self.memtable.get_le(key, columns)
        except KeyError as e:
            for sst in reversed(self.sstables):
                try:
                    v, op, sp = sst.get_le(key, columns)
                    break
                except KeyError as e:
                    pass
            else:
                raise KeyError
        return v, op, sp
    def _get_gt(self, key, columns=None):
        """Like _get but using the stores' get_gt (least key > *key*)."""
        try:
            v, op, sp = self.memtable.get_gt(key, columns)
        except KeyError as e:
            for sst in reversed(self.sstables):
                try:
                    v, op, sp = sst.get_gt(key, columns)
                    break
                except KeyError as e:
                    pass
            else:
                raise KeyError
        return v, op, sp
    def _get_ge(self, key, columns=None):
        """Like _get but using the stores' get_ge (least key >= *key*)."""
        try:
            v, op, sp = self.memtable.get_ge(key, columns)
        except KeyError as e:
            for sst in reversed(self.sstables):
                try:
                    v, op, sp = sst.get_ge(key, columns)
                    break
                except KeyError as e:
                    pass
            else:
                raise KeyError
        return v, op, sp
| {
"repo_name": "yadb/yadb",
"path": "backup/store/table.py",
"copies": "1",
"size": "14645",
"license": "mit",
"hash": 2001666128140889900,
"line_mean": 29.7668067227,
"line_max": 77,
"alpha_frac": 0.3640150222,
"autogenerated": false,
"ratio": 4.392621475704859,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002503822041637168,
"num_lines": 476
} |
__all__ = ['Tag', 'DataTag', 'TreeTag', 'ExplicitTag',
'parse_rbml_data', 'load_tags']
class Tag:
    """Base class for every RBML tag; wraps the raw byte span of the tag."""

    def __init__(self, reader, _tag_map):
        # Hold an independent cursor over this tag's payload bytes.
        self.bin = reader.clone()

    @classmethod
    def read_len(klass, reader):
        # Default wire format: payload length is a variable-length uint prefix.
        return read_vuint(reader)

    @property
    def tag_name(self):
        return type(self).__name__

    def __repr__(self):
        size = self.bin.size()
        if size > 16:
            return '%s(..%d)' % (self.tag_name, size)
        return '%s(%r)' % (self.tag_name, self.bin.data)

    __str__ = __repr__
class DataTag(Tag):
    """Leaf tag whose payload is interpreted as a big-endian unsigned int."""

    @property
    def value(self):
        acc = 0
        for octet in self.bin.data:
            acc = (acc << 8) + octet
        return acc
class TreeTag(Tag):
    """Tag whose payload is itself a sequence of child tags."""

    def __init__(self, reader, tag_map):
        super().__init__(reader, tag_map)
        klass = type(self)
        # Record a snapshot whenever a tag type re-enters itself.
        if klass in TAG_STACK:
            RECURSED.append(TAG_STACK[:])
        TAG_STACK.append(klass)
        self.children = parse_rbml_data(reader.clone(), tag_map)
        TAG_STACK.pop()

    def child_tagged(self, child_tag):
        # First child that is an instance of *child_tag*, or None.
        matches = (c for c in self.children if isinstance(c, child_tag))
        return next(matches, None)

    def __repr__(self):
        return '%s(%d children)' % (self.tag_name, len(self.children))

    __str__ = __repr__
class ExplicitTag(DataTag):
    """Data tag whose payload length is fixed by the subclass, not the stream."""

    @classmethod
    def read_len(klass, _reader):
        # No length prefix on the wire; the class dictates the size.
        return klass.explicit_len
def read_vuint(reader):
    """Decode a variable-length unsigned int (1-4 bytes).

    The position of the highest set bit in the first byte selects the
    total width: 0x80 -> 1 byte, 0x40 -> 2, 0x20 -> 3, 0x10 -> 4; the
    remaining bits plus the trailing bytes form a big-endian value.
    """
    first = reader.read(1)[0]
    for extra, mask in enumerate((0x7f, 0x3f, 0x1f, 0x0f)):
        if first & (0x80 >> extra):
            value = first & mask
            for _ in range(extra):
                value = (value << 8) + reader.read(1)[0]
            return value
    raise ValueError('invalid code %d' % first)
def read_rbml_tag(reader):
    """Decode a tag id: one byte below 0xf0, otherwise a two-byte form
    ((first & 0x0f) << 8 | second).  0xf0 itself is a rejected overlong
    encoding.
    """
    head = reader.read(1)[0]
    if head < 0xf0:
        return head
    if head == 0xf0:
        raise ValueError('overlong form')
    tail = reader.read(1)[0]
    return ((head & 0x0f) << 8) + tail
# Shared module-level parser state:
TAG_STACK = []  # tag classes currently being parsed (TreeTag uses this to detect re-entry)
RECURSED = []  # snapshots of TAG_STACK taken whenever a tag type recursed into itself
PARSED = []  # every tag instance created by parse_rbml_data, in parse order
def parse_rbml_data(reader, tag_map):
    """Parse a flat sequence of RBML tags from *reader*.

    Each tag is (id, length, payload); the payload is handed to the tag
    class looked up in *tag_map*.  Every instance is also appended to
    the module-level PARSED list.  Returns the list of parsed tags.
    """
    parsed = []
    while reader.size():
        tag_class = tag_map[read_rbml_tag(reader)]
        body = reader.sublet(tag_class.read_len(reader))
        node = tag_class(body, tag_map)
        PARSED.append(node)
        parsed.append(node)
    return parsed
class RbmlTags:
    """Declarative registry of RBML tag classes, keyed by on-the-wire
    tag id (collected into a dict via load_tags)."""

    # -- fixed-width scalar payloads (ids 0x00-0x0d) --
    class ExplicitU8(ExplicitTag):
        tag = 0x00
        explicit_len = 1

    class ExplicitU16(ExplicitTag):
        tag = 0x01
        explicit_len = 2

    class ExplicitU32(ExplicitTag):
        tag = 0x02
        explicit_len = 4

    class ExplicitU64(ExplicitTag):
        tag = 0x03
        explicit_len = 8

    class ExplicitI8(ExplicitTag):
        tag = 0x04
        explicit_len = 1

    class ExplicitI16(ExplicitTag):
        tag = 0x05
        explicit_len = 2

    class ExplicitI32(ExplicitTag):
        tag = 0x06
        explicit_len = 4

    class ExplicitI64(ExplicitTag):
        tag = 0x07
        explicit_len = 8

    class ExplicitBool(ExplicitTag):
        tag = 0x08
        explicit_len = 1

    class ExplicitChar(ExplicitTag):
        tag = 0x09
        explicit_len = 4

    class ExplicitF32(ExplicitTag):
        tag = 0x0a
        explicit_len = 4

    class ExplicitF64(ExplicitTag):
        tag = 0x0b
        explicit_len = 8

    class ExplicitSub8(ExplicitTag):
        tag = 0x0c
        explicit_len = 1

    class ExplicitSub32(ExplicitTag):
        tag = 0x0d
        explicit_len = 4

    # ids 0x0e-0x0f are unassigned

    # -- variable-length payloads --
    class String(DataTag):
        tag = 0x10

    class Enum(TreeTag):
        tag = 0x11

    class Vec(TreeTag):
        tag = 0x12

    class VecElt(TreeTag):
        tag = 0x13

    class Map(TreeTag):
        tag = 0x14

    class MapKey(DataTag):
        tag = 0x15

    class MapVal(DataTag):
        tag = 0x16

    class Opaque(DataTag):
        tag = 0x17

    # ids 0x18-0x1f are unassigned
def load_tags(klass):
    """Collect every Tag subclass declared on *klass* (and its bases)
    into a {tag_id: tag_class} dict.

    Base classes are walked first so that subclasses may shadow
    inherited tag ids.
    """
    tag_map = {}
    for owner in reversed(klass.__mro__):
        for member in owner.__dict__.values():
            # Skip non-class members and classes outside the Tag hierarchy.
            if isinstance(member, type) and issubclass(member, Tag):
                tag_map[member.tag] = member
    return tag_map
| {
"repo_name": "arielb1/rtypes",
"path": "src/metadata/rbml.py",
"copies": "1",
"size": "4298",
"license": "mit",
"hash": 3707033288099061000,
"line_mean": 28.0405405405,
"line_max": 70,
"alpha_frac": 0.5635179153,
"autogenerated": false,
"ratio": 3.212257100149477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.921944870980367,
"avg_score": 0.011265261129161283,
"num_lines": 148
} |
__all__ = ['TAG_TUPLE_STRING_PARSERS']
import re
# Registry of tag-string parser callables; each takes a tag string and
# returns a version tuple, or None when it cannot parse the tag.
TAG_TUPLE_STRING_PARSERS = []
def pep440(tag_str):
    """
    Parse tag_str in accordance with PEP440.

    https://www.python.org/dev/peps/pep-0440/

    Returns a tuple of the release-segment integers (epoch stripped),
    or None when any segment is not a plain integer -- pre-release
    (a/b/rc), post- and dev-release segments are deliberately
    unsupported.
    """
    # Strip the common non-numeric 'v' prefix (e.g. 'v1.2.3').
    tag_str = tag_str.lstrip('v')
    # Pull the epoch ('N!rest') from the tag string; it is parsed so a
    # malformed epoch still raises, but discarded from the result below.
    epoch = 0
    if '!' in tag_str:
        index = tag_str.index('!')
        epoch = int(tag_str[:index])
        tag_str = tag_str[index + 1:]
    version_list = [epoch]
    for chunk in tag_str.split('.'):
        try:
            version_list.append(int(chunk))
        except ValueError:
            # Drop support for a/b/rc/post/dev releases.
            # (The legacy regex-based parsing of those segments that used
            # to follow was unreachable after this return and has been
            # removed.)
            return None
    return tuple(version_list[1:])  # Drop epoch
# Register the PEP 440 parser.
TAG_TUPLE_STRING_PARSERS.append(pep440)
| {
"repo_name": "logston/plottags",
"path": "plottags/tag_parsers.py",
"copies": "1",
"size": "1367",
"license": "bsd-3-clause",
"hash": -1763200939413015000,
"line_mean": 29.3777777778,
"line_max": 68,
"alpha_frac": 0.5369422092,
"autogenerated": false,
"ratio": 3.6164021164021163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4653344325602117,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Takeoff']
import time
import math
from pymavlink import mavutil
from ..connection import CommandLong
from ...configuration import Configuration
from ...state import State
from ...specification import Specification
from ...environment import Environment
from ...command import Parameter, Command
from ...specification import Specification, Idle
from ...valueRange import ContinuousValueRange
def timeout(a, s, e, c) -> float:
    """Timeout for reaching the commanded altitude: distance to climb
    times the configured seconds-per-metre, plus a constant offset."""
    # FIXME add rate of ascent
    climb = abs(a['altitude'] - s.altitude)
    return climb * c.time_per_metre_travelled + c.constant_timeout_offset
TakeoffNormally = Specification(
'normal',
"""
(and
(= _armed true)
(= _mode "GUIDED")
(< _altitude 0.3))
""",
"""
(and
(= _longitude __longitude)
(= _latitude __latitude)
(= __altitude $altitude)
(= __vz 0.0))
""",
timeout)
class Takeoff(Command):
    """Guided-mode takeoff to a target altitude for ArduCopter."""
    uid = 'ardu:copter:takeoff'
    name = 'takeoff'
    parameters = [
        Parameter('altitude', ContinuousValueRange(0.3, 100.0))
    ]
    specifications = [
        TakeoffNormally,
        Idle
    ]

    def dispatch(self,
                 sandbox: 'Sandbox',
                 state: State,
                 environment: Environment,
                 config: Configuration
                 ) -> None:
        """Send a MAV_CMD_NAV_TAKEOFF over the sandbox's MAVLink link."""
        connection = sandbox.connection
        takeoff_msg = connection.message_factory.command_long_encode(
            0, 0,
            mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,
            0, 1, 0, 0, 0, 0, 0, self.altitude)
        connection.send_mavlink(takeoff_msg)

    def to_message(self) -> CommandLong:
        """Encode this command as a COMMAND_LONG message."""
        return CommandLong(
            target_system=0,
            target_component=0,
            cmd_id=mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,
            param_1=0,
            param_7=self.altitude)
| {
"repo_name": "squaresLab/Houston",
"path": "houston/ardu/copter/takeoff.py",
"copies": "1",
"size": "1876",
"license": "mit",
"hash": 2622813509021494300,
"line_mean": 24.0133333333,
"line_max": 63,
"alpha_frac": 0.5751599147,
"autogenerated": false,
"ratio": 3.8840579710144927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4959217885714493,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.