text stringlengths 0 1.05M | meta dict |
|---|---|
"""An IPython notebook manager with GridFS as the backend."""
from tornado import web
import pymongo
import gridfs
import datetime
import json
from IPython.html.services.contents.manager import ContentsManager
from IPython.utils import tz
from IPython import nbformat
from IPython.utils.traitlets import Unicode
class GridFSContentsManager(ContentsManager):
    """An IPython contents manager that stores notebooks in MongoDB GridFS.

    Notebooks are stored as flat GridFS files (GridFS has no directory
    hierarchy, so only the collection root acts as a directory), and
    checkpoint metadata is kept in separate Mongo collections.
    """

    mongo_uri = Unicode(
        'mongodb://localhost:27017/',
        config=True,
        help="The URI to connect to the MongoDB instance. "
             "Defaults to 'mongodb://localhost:27017/'")
    notebook_collection = Unicode(
        'ipynb',
        config=True,
        help="Collection in mongo where notebook files are stored")
    checkpoint_collection = Unicode(
        'ipynb_checkpoints',
        config=True,
        help="Collection in mongo where notebook checkpoints are stored")
    checkpoints_history = Unicode(
        'ipynb_cphistory',
        config=True,
        help="Collection for checkpoints history")

    def __init__(self, **kwargs):
        super(GridFSContentsManager, self).__init__(**kwargs)
        # Parse the URI once so username/password/database are available
        # for per-collection authentication later.
        self.MONGODB_DETAILS = pymongo.uri_parser.parse_uri(self.mongo_uri)
        self.mongo_username = self.MONGODB_DETAILS['username'] or ''
        self.mongo_password = self.MONGODB_DETAILS['password'] or ''
        # Fall back to 'ipython' if the URI does not name a database.
        self.database_name = self.MONGODB_DETAILS['database'] or 'ipython'
        self._conn = self._connect_server()

    # Mongo connectors
    def _connect_server(self):
        """Return a mongo client instance."""
        return pymongo.MongoClient(self.mongo_uri)

    def _connect_collection(self, collection):
        """Return the named collection, authenticating when credentials exist."""
        db = self._conn[self.database_name]
        # Authenticate against database
        if self.mongo_username and self.mongo_password:
            db.authenticate(self.mongo_username, self.mongo_password)
        return db[collection]

    def _get_fs_instance(self):
        """Return a GridFS instance over the notebook collection.

        NOTE(review): opens a fresh MongoClient on every call (kept from the
        original implementation); reusing ``self._conn`` may be preferable.
        """
        # The original wrapped this in a try/except that only re-raised;
        # that added nothing, so it has been removed.
        db = getattr(pymongo.MongoClient(self.mongo_uri), self.database_name)
        return gridfs.GridFS(db, collection=self.notebook_collection)

    def file_exists(self, path):
        """Does a file exist at the given collection in gridFS?

        Like os.path.exists.

        Parameters
        ----------
        path : string
            The name of the file in the gridfs

        Returns
        -------
        exists : bool
            Whether the target exists.
        """
        path = path.strip('/')
        if path == '':
            # The empty path denotes the root "directory", never a file.
            return False
        return path in self._get_fs_instance().list()

    def exists(self, path):
        """Does a file or dir exist at the given collection in gridFS?

        We do not have real directories, so only the root counts as a dir.

        Parameters
        ----------
        path : string
            The relative path to the file's directory (with '/' as separator)

        Returns
        -------
        exists : bool
            Whether the target exists.
        """
        path = path.strip('/')
        return self.file_exists(path) or self.dir_exists(path)

    def dir_exists(self, path=''):
        """GridFS has no directories; only the collection root counts.

        If path is blank, returns True because, technically, the collection
        root itself is a directory.
        """
        return path == ''

    def is_hidden(self, path=''):
        """gridfs doesn't hide anything."""
        return False

    def get(self, path, content=True, type=None, format=None):
        """Take a filename for an entity and return its model.

        Parameters
        ----------
        path : str
            the filename that is expected to be in gridfs
        content : bool
            Whether to include the contents in the reply
        type : str, optional
            The requested type - 'notebook'.
            Will raise HTTPError 400 if the content doesn't match.
        format : str, optional
            The requested format for file contents. 'text' or 'base64'.
            Ignored if this returns a notebook or directory model.

        Returns
        -------
        model : dict
            the contents model. If content=True, returns the contents
            of the file or directory as well.
        """
        path = path.strip('/')
        if not self.exists(path):
            # Typo fix: was "directorys" in the original message.
            raise web.HTTPError(404, u'No such file or directory: %s' % path)
        if path == '':
            # Root path: serve the single pseudo-directory listing.
            if type not in (None, 'directory'):
                raise web.HTTPError(400, u'%s is a directory, not a %s' % (
                    path, type), reason='bad type')
            model = self._dir_model(path, content=content)
        elif type == 'notebook' or (type is None and path.endswith('.ipynb')):
            model = self._notebook_model(path, content=content)
        else:
            # Plain (non-notebook) files are not supported by this backend.
            # The original had an unreachable self._file_model(...) call
            # after this raise, referencing a method that does not exist;
            # that dead line has been removed.
            raise web.HTTPError(400, u'%s is not a directory' % path,
                                reason='bad type')
        return model

    # public checkpoint API
    def create_checkpoint(self, path=''):
        """Create a checkpoint for the notebook at *path*.

        Returns a dict with the checkpoint ``id`` and ``last_modified``.
        """
        path = path.strip('/')
        spec = {
            'path': path,
        }
        notebook = self._get_fs_instance().get_version(path)
        chid = str(notebook._id)
        # Checkpoint ids are simply the running count of checkpoints
        # recorded so far for this path.
        cp_id = str(self._connect_collection(
            self.checkpoint_collection).find(spec).count())
        spec['cp'] = cp_id
        spec['id'] = chid
        last_modified = datetime.datetime.utcnow()
        spec['lastModified'] = last_modified
        newnotebook = {'$set': {'_id': chid}}
        self.log.info("Saving checkpoint for notebook %s" % path)
        self._connect_collection(
            self.checkpoint_collection).update(spec, newnotebook, upsert=True)
        # return the checkpoint info
        return dict(id=cp_id, last_modified=last_modified)

    def list_checkpoints(self, path=''):
        """List all checkpoints for the notebook at *path*."""
        path = path.strip('/')
        spec = {
            'path': path,
        }
        checkpoints = list(self._connect_collection(
            self.checkpoint_collection).find(spec))
        return [dict(
            id=c['cp'], last_modified=c['lastModified']) for c in checkpoints]

    def save(self, model, path=''):
        """Save the file model and return the model with no content."""
        path = path.strip('/')
        if 'type' not in model:
            raise web.HTTPError(400, u'No file type provided')
        if 'content' not in model and model['type'] != 'directory':
            raise web.HTTPError(400, u'No file content provided')
        # One checkpoint should always exist
        if self.file_exists(path) and not self.list_checkpoints(path):
            self.create_checkpoint(path)
        self.log.debug("Saving %s", path)
        self.run_pre_save_hook(model=model, path=path)
        try:
            if model['type'] == 'notebook':
                nb = nbformat.from_dict(model['content'])
                self.check_and_sign(nb, path)
                self._save_notebook(path, nb)
            elif model['type'] == 'file':
                # NOTE(review): _save_file is not defined in this class --
                # presumably inherited or missing; verify before relying on it.
                self._save_file(path, model['content'], model.get('format'))
            elif model['type'] == 'directory':
                # NOTE(review): _save_directory is likewise not defined here.
                self._save_directory(path, model, path)
            else:
                raise web.HTTPError(
                    400, "Unhandled contents type: %s" % model['type'])
        except web.HTTPError:
            raise
        except Exception as e:
            self.log.error(
                u'Error while saving file: %s %s', path, e, exc_info=True)
            raise web.HTTPError(
                500, u'Unexpected error while saving file: %s %s' % (path, e))
        validation_message = None
        if model['type'] == 'notebook':
            self.validate_notebook_model(model)
            validation_message = model.get('message', None)
        model = self.get(path, content=False)
        if validation_message:
            model['message'] = validation_message
        return model

    def rename(self, old_path, new_path):
        """Rename a notebook by re-inserting it under the new name."""
        old_path = old_path.strip('/')
        new_path = new_path.strip('/')
        fs = self._get_fs_instance()
        if new_path == old_path:
            return
        if self.file_exists(new_path):
            raise web.HTTPError(409, u'Notebook already exists: %s' % new_path)
        # Move the file: copy content to the new name, then delete the old.
        try:
            grid_file = fs.get_version(old_path)._id
            nb = nbformat.from_dict(
                json.loads(fs.get(grid_file).read()))
            fs.put(json.dumps(nb), filename=new_path)
            self.delete(old_path)
        except Exception as e:
            raise web.HTTPError(500, u'Unknown error renaming file: %s %s' % (
                old_path, e))
        # Move the checkpoints so history follows the renamed notebook.
        spec = {
            'path': old_path,
        }
        modify = {
            '$set': {
                'path': new_path,
            }
        }
        self._connect_collection(
            self.checkpoint_collection).update(spec, modify, multi=True)

    def delete(self, path):
        """Delete a notebook from GridFS (no-op if it does not exist)."""
        # Removed a stray debug `print "hello"` from the original.
        if not self.file_exists(path):
            return
        path = path.strip('/')
        fs = self._get_fs_instance()
        fs.delete(fs.get_version(path)._id)

    def _save_notebook(self, os_path, nb):
        """Save a notebook to GridFS under filename *os_path*."""
        self._get_fs_instance().put(
            json.dumps(nb, sort_keys=True), filename=os_path)

    def _base_model(self, path):
        """Build the common base of a contents model."""
        # NOTE(review): timestamps are generated at model-build time, not
        # taken from the stored file's metadata.
        last_modified = tz.utcnow()
        created = tz.utcnow()
        # Create the base model.
        model = {}
        model['name'] = path.rsplit('/', 1)[-1]
        model['path'] = path
        model['last_modified'] = last_modified
        model['created'] = created
        model['content'] = None
        model['format'] = None
        model['mimetype'] = None
        model['writable'] = True
        return model

    def _dir_model(self, path, content=True):
        """Build a model listing every file stored in gridfs.

        If content is requested, includes a listing of the "directory".
        """
        model = self._base_model(path)
        model['type'] = 'directory'
        model['content'] = contents = []
        file_collection = self._get_fs_instance().list()
        for name in file_collection:
            contents.append(self.get(
                path='%s' % (name),
                content=content)
            )
        model['format'] = 'json'
        return model

    def _notebook_model(self, path, content=True):
        """Build a notebook model.

        If content is requested, the notebook content will be populated
        as a JSON structure (not double-serialized).
        """
        model = self._base_model(path)
        model['type'] = 'notebook'
        if content:
            nb = self._read_notebook(path, as_version=4)
            self.mark_trusted_cells(nb, path)
            model['content'] = nb
            model['format'] = 'json'
            self.validate_notebook_model(model)
        return model

    def _read_notebook(self, path, as_version=4):
        """Read a notebook file from gridfs."""
        fs = self._get_fs_instance()
        file_id = fs.get_last_version(path)._id
        try:
            filename = fs.get(file_id)
        except Exception as e:
            # Typo fix: was "occured" in the original message.
            raise web.HTTPError(
                400,
                u"An error occurred while reading the Notebook "
                u"on GridFS: %s %r" % (path, e),
            )
        try:
            return nbformat.read(filename, as_version=as_version)
        except Exception as e:
            raise web.HTTPError(
                400,
                u"Unreadable Notebook: %s %r" % (path, e),
            )
| {
"repo_name": "gaumire/ipynbstore-gridfs",
"path": "ipynbstore_gridfs/__init__.py",
"copies": "2",
"size": "12704",
"license": "mit",
"hash": -2485769811300935000,
"line_mean": 32.0833333333,
"line_max": 79,
"alpha_frac": 0.5479376574,
"autogenerated": false,
"ratio": 4.334356874786762,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002603134359584214,
"num_lines": 384
} |
"""an irc bridge
relays messages between in-world chat, and an irc channel.
notifies of users joining & leaving the scene on irc too.
"""
import naali
import circuits
import ircclient
from circuits.net.protocols import irc #IRC, PRIVMSG, USER, NICK, JOIN, Nick
IRCCHANNEL = "#realxtend-dev"
NICKNAME = "tundraserver"
IRCSERVER = "irc.freenode.net"
PORT = 6667
class ServerRelay(circuits.Component):
channel = "ircclient"
def __init__(self, nick=NICKNAME, ircchannel=IRCCHANNEL, host=IRCSERVER, port=PORT):
if not naali.server.IsAboutToStart():
return
circuits.Component.__init__(self)
self.client = ircclient.Client(nick, ircchannel, host, port)
self += self.client #so that we can handle irc events here too
self.client.connect()
self.chatapp = None
self.scene = None
self.chatapp = None
naali.server.connect("UserConnected(int, UserConnection*)", self.onUserConnected)
naali.server.connect("UserDisconnected(int, UserConnection*)", self.onUserDisconnected)
#when live reloading, chatapp is already there:
self.scene = naali.getDefaultScene()
if self.scene is not None:
self.chatapp = self.scene.GetEntityByNameRaw("ChatApplication")
self.init_chatapp_handlers()
self.cmds = {
'help': self.help,
'users': self.users,
'fps': self.fps
}
def help(self):
msg = "i know: %s" % self.cmds.keys()
self.say(msg)
def users(self):
msg = "i have %d users (client connections) in-world" % len(naali.server.GetConnectionIDs())
self.say(msg)
def fps(self):
self.say("60fps, of course! (no, sorry, actually i didn't check")
def init_chatapp_handlers(self):
csm = self.chatapp.Action("ClientSendMessage")
csm.connect("Triggered(QString, QString, QString, QStringList)", self.onClientSendMessage)
#a naali event via circuits
def on_sceneadded(self, name): #XXX port to SceneAPI SceneAdded signal
if self.scene is not None: #XXX what to do with scene changes? aren't supposed to happen on server i guess?
print "IRC: WARNING -- reiniting with new scene, not really written for scene changing though!"
self.scene = naali.getScene(name)
self.scene.connect("EntityCreated(Scene::Entity*, AttributeChange::Type)", self.onNewEntity)
#a qt signal handler
def onNewEntity(self, entity, changeType):
#print entity.name
if entity.name == "ChatApplication":
self.chatapp = entity
print "IRC: Found ChatApp!"
self.scene.disconnect("EntityCreated(Scene::Entity*, AttributeChange::Type)", self.onNewEntity)
self.init_chatapp_handlers()
def onUserConnected(self, connid, user):
self.note("New user connected: %s" % user.GetProperty("username"))
def onUserDisconnected(self, connid, user):
self.note("User %s disconnected." % user.GetProperty("username"))
def onClientSendMessage(self, sender, msg):
print "IRC onClientSendMessage:", sender, msg
toirc = "[%s] %s" % (sender, msg)
self.say(toirc)
def say(self, msg):
self.push(irc.PRIVMSG(IRCCHANNEL, msg))
def note(self, msg):
self.push(irc.NOTICE(IRCCHANNEL, msg))
#a circuits event from the underlying irc client (self.client)
def message(self, source, target, message):
if target[0] == "#":
s = "<%s:%s> %s" % (target, source[0], message)
else:
s = "-%s- %s" % (source, message)
print "IRC:", s
if self.chatapp is not None:
self.chatapp.Exec(4, "ServerSendMessage", s)
nick = self.client.nick
#copy-paste from kbd -- consider just using kbd: https://bitbucket.org/prologic/kdb/src/0982b3f52af0/kdb/plugin.py
print "MSG, NICK:", message, nick
if message.startswith(nick):
message = message[len(nick):]
while len(message) > 0 and message[0] in [",", ":", " "]:
message = message[1:]
words = message.split()
cmd = words[0]
print "CMD", cmd
#args = words[1:]
if cmd in self.cmds:
self.cmds[cmd]() #(args)
| {
"repo_name": "antont/tundra",
"path": "src/Application/PythonScriptModule/pymodules_old/irc/serverrelay.py",
"copies": "1",
"size": "4373",
"license": "apache-2.0",
"hash": -3468946457752006000,
"line_mean": 34.5528455285,
"line_max": 122,
"alpha_frac": 0.6139949691,
"autogenerated": false,
"ratio": 3.6200331125827816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4734028081682782,
"avg_score": null,
"num_lines": null
} |
"""An ISAPI extension base class implemented using a thread-pool."""
# $Id$
import sys
import time
from isapi import isapicon, ExtensionError
import isapi.simple
from win32file import GetQueuedCompletionStatus, CreateIoCompletionPort, \
PostQueuedCompletionStatus, CloseHandle
from win32security import SetThreadToken
from win32event import INFINITE
from pywintypes import OVERLAPPED
import threading
import traceback
ISAPI_REQUEST = 1
ISAPI_SHUTDOWN = 2
class WorkerThread(threading.Thread):
    """A pool worker that services requests posted to the IO completion port."""

    def __init__(self, extension, io_req_port):
        self.running = False
        self.io_req_port = io_req_port
        self.extension = extension
        threading.Thread.__init__(self)
        # We wait 15 seconds for a thread to terminate, but if it fails to,
        # we don't want the process to hang at exit waiting for it...
        # Fix: setDaemon() is deprecated since Python 3.10 (removed in 3.13);
        # assign the daemon attribute directly instead.
        self.daemon = True

    def run(self):
        """Pull completion packets off the port until told to shut down."""
        self.running = True
        while self.running:
            errCode, bytes, key, overlapped = \
                GetQueuedCompletionStatus(self.io_req_port, INFINITE)
            # A shutdown packet carries ISAPI_SHUTDOWN with no OVERLAPPED.
            if key == ISAPI_SHUTDOWN and overlapped is None:
                break
            # Let the parent extension handle the command.
            dispatcher = self.extension.dispatch_map.get(key)
            if dispatcher is None:
                raise RuntimeError("Bad request '%s'" % (key,))
            dispatcher(errCode, bytes, key, overlapped)

    def call_handler(self, cblock):
        """Hand a control block straight to the extension's Dispatch."""
        self.extension.Dispatch(cblock)
# A generic thread-pool based extension, using IO Completion Ports.
# Sub-classes can override one method to implement a simple extension, or
# may leverage the CompletionPort to queue their own requests, and implement a
# fully asynch extension.
class ThreadPoolExtension(isapi.simple.SimpleExtension):
    "Base class for an ISAPI extension based around a thread-pool"
    max_workers = 20
    worker_shutdown_wait = 15000  # 15 seconds for workers to quit...

    def __init__(self):
        self.workers = []
        # extensible dispatch map, for sub-classes that need to post their
        # own requests to the completion port.
        # Each of these functions is called with the result of
        # GetQueuedCompletionStatus for our port.
        self.dispatch_map = {
            ISAPI_REQUEST: self.DispatchConnection,
        }

    def GetExtensionVersion(self, vi):
        """Create the completion port and start the worker threads."""
        isapi.simple.SimpleExtension.GetExtensionVersion(self, vi)
        # As per Q192800, the CompletionPort should be created with the number
        # of processors, even if the number of worker threads is much larger.
        # Passing 0 means the system picks the number.
        self.io_req_port = CreateIoCompletionPort(-1, None, 0, 0)
        # start up the workers
        self.workers = []
        for i in range(self.max_workers):
            worker = WorkerThread(self, self.io_req_port)
            worker.start()
            self.workers.append(worker)

    def HttpExtensionProc(self, control_block):
        """Queue the request to the pool and tell IIS the request is pending."""
        overlapped = OVERLAPPED()
        overlapped.object = control_block
        PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_REQUEST, overlapped)
        return isapicon.HSE_STATUS_PENDING

    def TerminateExtension(self, status):
        """Signal all workers to stop, wait briefly, then close the port."""
        for worker in self.workers:
            worker.running = False
        for worker in self.workers:
            PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_SHUTDOWN, None)
        # wait for them to terminate - pity we aren't using 'native' threads
        # as then we could do a smart wait - but now we need to poll....
        end_time = time.time() + self.worker_shutdown_wait / 1000
        alive = self.workers
        while alive:
            if time.time() > end_time:
                # xxx - might be nice to log something here.
                break
            time.sleep(0.2)
            # Fix: Thread.isAlive() was removed in Python 3.9; is_alive()
            # is the supported spelling.
            alive = [w for w in alive if w.is_alive()]
        self.dispatch_map = {}  # break circles
        CloseHandle(self.io_req_port)

    # This is the one operation the base class supports - a simple
    # Connection request. We setup the thread-token, and dispatch to the
    # sub-class's 'Dispatch' method.
    def DispatchConnection(self, errCode, bytes, key, overlapped):
        """Impersonate the request's user, then run the sub-class Dispatch."""
        control_block = overlapped.object
        # setup the correct user for this request
        hRequestToken = control_block.GetImpersonationToken()
        SetThreadToken(None, hRequestToken)
        try:
            try:
                self.Dispatch(control_block)
            except:
                self.HandleDispatchError(control_block)
        finally:
            # reset the security context
            SetThreadToken(None, None)

    def Dispatch(self, ecb):
        """Overridden by the sub-class to handle connection requests.

        This class creates a thread-pool using a Windows completion port,
        and dispatches requests via this port. Sub-classes can generally
        implement each connection request using blocking reads and writes, and
        the thread-pool will still provide decent response to the end user.

        The sub-class can set a max_workers attribute (default is 20). Note
        that this generally does *not* mean 20 threads will all be concurrently
        running, via the magic of Windows completion ports.

        There is no default implementation - sub-classes must implement this.
        """
        raise NotImplementedError("sub-classes should override Dispatch")

    def HandleDispatchError(self, ecb):
        """Handles errors in the Dispatch method.

        When a Dispatch method call fails, this method is called to handle
        the exception. The default implementation formats the traceback
        in the browser.
        """
        ecb.HttpStatusCode = isapicon.HSE_STATUS_ERROR
        #control_block.LogData = "we failed!"
        exc_typ, exc_val, exc_tb = sys.exc_info()
        limit = None
        try:
            try:
                # Fix: cgi.escape was removed in Python 3.8 (and the cgi
                # module itself in 3.13); html.escape with quote=False
                # matches cgi.escape's default behavior.
                import html
                ecb.SendResponseHeaders("200 OK", "Content-type: text/html\r\n\r\n",
                                        False)
                print(file=ecb)
                print("<H3>Traceback (most recent call last):</H3>", file=ecb)
                # Renamed from `list`, which shadowed the builtin.
                lines = traceback.format_tb(exc_tb, limit) + \
                    traceback.format_exception_only(exc_typ, exc_val)
                print("<PRE>%s<B>%s</B></PRE>" % (
                    html.escape("".join(lines[:-1]), quote=False),
                    html.escape(lines[-1], quote=False),), file=ecb)
            except ExtensionError:
                # The client disconnected without reading the error body -
                # its probably not a real browser at the other end, ignore it.
                pass
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # still propagate.
                print("FAILED to render the error message!")
                traceback.print_exc()
                print("ORIGINAL extension error:")
                traceback.print_exception(exc_typ, exc_val, exc_tb)
        finally:
            # holding tracebacks in a local of a frame that may itself be
            # part of a traceback used to be evil and cause leaks!
            exc_tb = None
        ecb.DoneWithSession()
| {
"repo_name": "ArcherSys/ArcherSys",
"path": "Lib/site-packages/isapi/threaded_extension.py",
"copies": "7",
"size": "7213",
"license": "mit",
"hash": -8243819027181860000,
"line_mean": 41.1812865497,
"line_max": 85,
"alpha_frac": 0.6241508388,
"autogenerated": false,
"ratio": 4.360943168077388,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005065417053114052,
"num_lines": 171
} |
"""An ISAPI extension base class implemented using a thread-pool."""
# $Id: threaded_extension.py,v 1.4 2006/02/17 04:04:24 mhammond Exp $
import sys
from isapi import isapicon, ExtensionError
import isapi.simple
from win32file import GetQueuedCompletionStatus, CreateIoCompletionPort, \
PostQueuedCompletionStatus, CloseHandle
from win32security import SetThreadToken
from win32event import INFINITE
from pywintypes import OVERLAPPED
# Python 2.3 and earlier insists on "C" locale - if it isn't, subtle things
# break, such as floating point constants loaded from .pyc files.
# The threading module uses such floating-points as an argument to sleep(),
# resulting in extremely long sleeps when tiny intervals are specified.
# We can work around this by resetting the C locale before the import.
if sys.hexversion < 0x02040000:
import locale
locale.setlocale(locale.LC_NUMERIC, "C")
import threading
import traceback
ISAPI_REQUEST = 1
ISAPI_SHUTDOWN = 2
class WorkerThread(threading.Thread):
    # Pool worker: blocks on the extension's IO completion port and hands
    # each completion packet to the handler registered in dispatch_map.
    # (Legacy Python 2 code -- note the `raise RuntimeError, ...` syntax.)
    def __init__(self, extension, io_req_port):
        self.running = False
        self.io_req_port = io_req_port
        self.extension = extension
        threading.Thread.__init__(self)
    def run(self):
        # Main worker loop: runs until TerminateExtension posts a shutdown
        # packet (key == ISAPI_SHUTDOWN with no OVERLAPPED attached).
        self.running = True
        while self.running:
            errCode, bytes, key, overlapped = \
                GetQueuedCompletionStatus(self.io_req_port, INFINITE)
            if key == ISAPI_SHUTDOWN and overlapped is None:
                break
            # Let the parent extension handle the command.
            dispatcher = self.extension.dispatch_map.get(key)
            if dispatcher is None:
                raise RuntimeError, "Bad request '%s'" % (key,)
            dispatcher(errCode, bytes, key, overlapped)
    def call_handler(self, cblock):
        # Convenience hook: hand a control block straight to Dispatch.
        self.extension.Dispatch(cblock)
# A generic thread-pool based extension, using IO Completion Ports.
# Sub-classes can override one method to implement a simple extension, or
# may leverage the CompletionPort to queue their own requests, and implement a
# fully asynch extension.
class ThreadPoolExtension(isapi.simple.SimpleExtension):
    "Base class for an ISAPI extension based around a thread-pool"
    # Number of worker threads servicing the completion port.
    max_workers = 20
    worker_shutdown_wait = 15000 # 15 seconds for workers to quit. XXX - per thread!!! Fix me!
    def __init__(self):
        self.workers = []
        # extensible dispatch map, for sub-classes that need to post their
        # own requests to the completion port.
        # Each of these functions is called with the result of
        # GetQueuedCompletionStatus for our port.
        self.dispatch_map = {
            ISAPI_REQUEST: self.DispatchConnection,
        }
    def GetExtensionVersion(self, vi):
        # Create the completion port and spin up the worker threads.
        isapi.simple.SimpleExtension.GetExtensionVersion(self, vi)
        # As per Q192800, the CompletionPort should be created with the number
        # of processors, even if the number of worker threads is much larger.
        # Passing 0 means the system picks the number.
        self.io_req_port = CreateIoCompletionPort(-1, None, 0, 0)
        # start up the workers
        self.workers = []
        for i in range(self.max_workers):
            worker = WorkerThread(self, self.io_req_port)
            worker.start()
            self.workers.append(worker)
    def HttpExtensionProc(self, control_block):
        # Queue the request to the pool; IIS is told the request is pending
        # and a worker completes it asynchronously.
        overlapped = OVERLAPPED()
        overlapped.object = control_block
        PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_REQUEST, overlapped)
        return isapicon.HSE_STATUS_PENDING
    def TerminateExtension(self, status):
        # Ask each worker to stop, post one shutdown packet per worker,
        # then join them before closing the port.
        for worker in self.workers:
            worker.running = False
        for worker in self.workers:
            PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_SHUTDOWN, None)
        for worker in self.workers:
            # NOTE(review): Thread.join takes *seconds*, so 15000 here waits
            # up to ~4 hours per thread -- see the XXX on worker_shutdown_wait.
            worker.join(self.worker_shutdown_wait)
        self.dispatch_map = {} # break circles
        CloseHandle(self.io_req_port)
    # This is the one operation the base class supports - a simple
    # Connection request. We setup the thread-token, and dispatch to the
    # sub-class's 'Dispatch' method.
    def DispatchConnection(self, errCode, bytes, key, overlapped):
        # Impersonate the request's user for the duration of the dispatch.
        control_block = overlapped.object
        # setup the correct user for this request
        hRequestToken = control_block.GetImpersonationToken()
        SetThreadToken(None, hRequestToken)
        try:
            try:
                self.Dispatch(control_block)
            except:
                self.HandleDispatchError(control_block)
        finally:
            # reset the security context
            SetThreadToken(None, None)
    def Dispatch(self, ecb):
        """Overridden by the sub-class to handle connection requests.

        This class creates a thread-pool using a Windows completion port,
        and dispatches requests via this port. Sub-classes can generally
        implement each connection request using blocking reads and writes, and
        the thread-pool will still provide decent response to the end user.

        The sub-class can set a max_workers attribute (default is 20). Note
        that this generally does *not* mean 20 threads will all be concurrently
        running, via the magic of Windows completion ports.

        There is no default implementation - sub-classes must implement this.
        """
        raise NotImplementedError, "sub-classes should override Dispatch"
    def HandleDispatchError(self, ecb):
        """Handles errors in the Dispatch method.

        When a Dispatch method call fails, this method is called to handle
        the exception. The default implementation formats the traceback
        in the browser.
        """
        ecb.HttpStatusCode = isapicon.HSE_STATUS_ERROR
        #control_block.LogData = "we failed!"
        exc_typ, exc_val, exc_tb = sys.exc_info()
        limit = None
        try:
            try:
                import cgi
                ecb.SendResponseHeaders("200 OK", "Content-type: text/html\r\n\r\n",
                                        False)
                # Legacy Python 2 `print >> file` syntax writes the HTML
                # error page directly into the extension control block.
                print >> ecb
                print >> ecb, "<H3>Traceback (most recent call last):</H3>"
                list = traceback.format_tb(exc_tb, limit) + \
                       traceback.format_exception_only(exc_typ, exc_val)
                print >> ecb, "<PRE>%s<B>%s</B></PRE>" % (
                    cgi.escape("".join(list[:-1])), cgi.escape(list[-1]),)
            except ExtensionError:
                # The client disconnected without reading the error body -
                # its probably not a real browser at the other end, ignore it.
                pass
            except:
                print "FAILED to render the error message!"
                traceback.print_exc()
                print "ORIGINAL extension error:"
                traceback.print_exception(exc_typ, exc_val, exc_tb)
        finally:
            # holding tracebacks in a local of a frame that may itself be
            # part of a traceback used to be evil and cause leaks!
            exc_tb = None
        ecb.DoneWithSession()
| {
"repo_name": "windyuuy/opera",
"path": "chromium/src/third_party/python_26/Lib/site-packages/isapi/threaded_extension.py",
"copies": "17",
"size": "7180",
"license": "bsd-3-clause",
"hash": -3612584868695917600,
"line_mean": 41.7380952381,
"line_max": 94,
"alpha_frac": 0.6378830084,
"autogenerated": false,
"ratio": 4.299401197604791,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""An ISAPI extension base class implemented using a thread-pool."""
# $Id: threaded_extension.py,v 1.5 2008/11/26 08:39:33 mhammond Exp $
import sys
from isapi import isapicon, ExtensionError
import isapi.simple
from win32file import GetQueuedCompletionStatus, CreateIoCompletionPort, \
PostQueuedCompletionStatus, CloseHandle
from win32security import SetThreadToken
from win32event import INFINITE
from pywintypes import OVERLAPPED
# Python 2.3 and earlier insists on "C" locale - if it isn't, subtle things
# break, such as floating point constants loaded from .pyc files.
# The threading module uses such floating-points as an argument to sleep(),
# resulting in extremely long sleeps when tiny intervals are specified.
# We can work around this by resetting the C locale before the import.
if sys.hexversion < 0x02040000:
import locale
locale.setlocale(locale.LC_NUMERIC, "C")
import threading
import traceback
ISAPI_REQUEST = 1
ISAPI_SHUTDOWN = 2
class WorkerThread(threading.Thread):
    # Pool worker: blocks on the extension's IO completion port and hands
    # each completion packet to the handler registered in dispatch_map.
    def __init__(self, extension, io_req_port):
        self.running = False
        self.io_req_port = io_req_port
        self.extension = extension
        threading.Thread.__init__(self)
    def run(self):
        # Main worker loop: runs until TerminateExtension posts a shutdown
        # packet (key == ISAPI_SHUTDOWN with no OVERLAPPED attached).
        self.running = True
        while self.running:
            errCode, bytes, key, overlapped = \
                GetQueuedCompletionStatus(self.io_req_port, INFINITE)
            if key == ISAPI_SHUTDOWN and overlapped is None:
                break
            # Let the parent extension handle the command.
            dispatcher = self.extension.dispatch_map.get(key)
            if dispatcher is None:
                raise RuntimeError("Bad request '%s'" % (key,))
            dispatcher(errCode, bytes, key, overlapped)
    def call_handler(self, cblock):
        # Convenience hook: hand a control block straight to Dispatch.
        self.extension.Dispatch(cblock)
# A generic thread-pool based extension, using IO Completion Ports.
# Sub-classes can override one method to implement a simple extension, or
# may leverage the CompletionPort to queue their own requests, and implement a
# fully asynch extension.
class ThreadPoolExtension(isapi.simple.SimpleExtension):
    "Base class for an ISAPI extension based around a thread-pool"
    # Number of worker threads servicing the completion port.
    max_workers = 20
    worker_shutdown_wait = 15000 # 15 seconds for workers to quit. XXX - per thread!!! Fix me!
    def __init__(self):
        self.workers = []
        # extensible dispatch map, for sub-classes that need to post their
        # own requests to the completion port.
        # Each of these functions is called with the result of
        # GetQueuedCompletionStatus for our port.
        self.dispatch_map = {
            ISAPI_REQUEST: self.DispatchConnection,
        }
    def GetExtensionVersion(self, vi):
        # Create the completion port and spin up the worker threads.
        isapi.simple.SimpleExtension.GetExtensionVersion(self, vi)
        # As per Q192800, the CompletionPort should be created with the number
        # of processors, even if the number of worker threads is much larger.
        # Passing 0 means the system picks the number.
        self.io_req_port = CreateIoCompletionPort(-1, None, 0, 0)
        # start up the workers
        self.workers = []
        for i in range(self.max_workers):
            worker = WorkerThread(self, self.io_req_port)
            worker.start()
            self.workers.append(worker)
    def HttpExtensionProc(self, control_block):
        # Queue the request to the pool; IIS is told the request is pending
        # and a worker completes it asynchronously.
        overlapped = OVERLAPPED()
        overlapped.object = control_block
        PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_REQUEST, overlapped)
        return isapicon.HSE_STATUS_PENDING
    def TerminateExtension(self, status):
        # Ask each worker to stop, post one shutdown packet per worker,
        # then join them before closing the port.
        for worker in self.workers:
            worker.running = False
        for worker in self.workers:
            PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_SHUTDOWN, None)
        for worker in self.workers:
            # NOTE(review): Thread.join takes *seconds*, so 15000 here waits
            # up to ~4 hours per thread -- see the XXX on worker_shutdown_wait.
            worker.join(self.worker_shutdown_wait)
        self.dispatch_map = {} # break circles
        CloseHandle(self.io_req_port)
    # This is the one operation the base class supports - a simple
    # Connection request. We setup the thread-token, and dispatch to the
    # sub-class's 'Dispatch' method.
    def DispatchConnection(self, errCode, bytes, key, overlapped):
        # Impersonate the request's user for the duration of the dispatch.
        control_block = overlapped.object
        # setup the correct user for this request
        hRequestToken = control_block.GetImpersonationToken()
        SetThreadToken(None, hRequestToken)
        try:
            try:
                self.Dispatch(control_block)
            except:
                self.HandleDispatchError(control_block)
        finally:
            # reset the security context
            SetThreadToken(None, None)
    def Dispatch(self, ecb):
        """Overridden by the sub-class to handle connection requests.

        This class creates a thread-pool using a Windows completion port,
        and dispatches requests via this port. Sub-classes can generally
        implement each connection request using blocking reads and writes, and
        the thread-pool will still provide decent response to the end user.

        The sub-class can set a max_workers attribute (default is 20). Note
        that this generally does *not* mean 20 threads will all be concurrently
        running, via the magic of Windows completion ports.

        There is no default implementation - sub-classes must implement this.
        """
        raise NotImplementedError("sub-classes should override Dispatch")
    def HandleDispatchError(self, ecb):
        """Handles errors in the Dispatch method.

        When a Dispatch method call fails, this method is called to handle
        the exception. The default implementation formats the traceback
        in the browser.
        """
        ecb.HttpStatusCode = isapicon.HSE_STATUS_ERROR
        #control_block.LogData = "we failed!"
        exc_typ, exc_val, exc_tb = sys.exc_info()
        limit = None
        try:
            try:
                import cgi
                ecb.SendResponseHeaders("200 OK", "Content-type: text/html\r\n\r\n",
                                        False)
                # Legacy Python 2 `print >> file` syntax writes the HTML
                # error page directly into the extension control block.
                print >> ecb
                print >> ecb, "<H3>Traceback (most recent call last):</H3>"
                list = traceback.format_tb(exc_tb, limit) + \
                       traceback.format_exception_only(exc_typ, exc_val)
                print >> ecb, "<PRE>%s<B>%s</B></PRE>" % (
                    cgi.escape("".join(list[:-1])), cgi.escape(list[-1]),)
            except ExtensionError:
                # The client disconnected without reading the error body -
                # its probably not a real browser at the other end, ignore it.
                pass
            except:
                print "FAILED to render the error message!"
                traceback.print_exc()
                print "ORIGINAL extension error:"
                traceback.print_exception(exc_typ, exc_val, exc_tb)
        finally:
            # holding tracebacks in a local of a frame that may itself be
            # part of a traceback used to be evil and cause leaks!
            exc_tb = None
        ecb.DoneWithSession()
| {
"repo_name": "slozier/ironpython2",
"path": "Src/StdLib/Lib/site-packages/isapi/threaded_extension.py",
"copies": "2",
"size": "7180",
"license": "apache-2.0",
"hash": -1416467206210230300,
"line_mean": 41.7380952381,
"line_max": 94,
"alpha_frac": 0.6378830084,
"autogenerated": false,
"ratio": 4.304556354916067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5942439363316067,
"avg_score": null,
"num_lines": null
} |
"""An ISAPI extension base class implemented using a thread-pool."""
# $Id: threaded_extension.py,v 1.6 2009/03/02 04:41:10 mhammond Exp $
import sys
import time
from isapi import isapicon, ExtensionError
import isapi.simple
from win32file import GetQueuedCompletionStatus, CreateIoCompletionPort, \
PostQueuedCompletionStatus, CloseHandle
from win32security import SetThreadToken
from win32event import INFINITE
from pywintypes import OVERLAPPED
# Python 2.3 and earlier insists on "C" locale - if it isn't, subtle things
# break, such as floating point constants loaded from .pyc files.
# The threading module uses such floating-points as an argument to sleep(),
# resulting in extremely long sleeps when tiny intervals are specified.
# We can work around this by resetting the C locale before the import.
if sys.hexversion < 0x02040000:
import locale
locale.setlocale(locale.LC_NUMERIC, "C")
import threading
import traceback
# Completion-port packet keys: tag each queued packet with its purpose.
ISAPI_REQUEST = 1  # a client request waiting to be dispatched
ISAPI_SHUTDOWN = 2  # sentinel telling a worker thread to exit
class WorkerThread(threading.Thread):
    """Pool thread that services work items queued on the IO completion port."""

    def __init__(self, extension, io_req_port):
        self.running = False
        self.io_req_port = io_req_port
        self.extension = extension
        threading.Thread.__init__(self)
        # Daemonize: shutdown only waits ~15 seconds for a worker, and a
        # stuck worker must not keep the process alive at exit.
        self.daemon = True

    def run(self):
        """Pull packets off the completion port until told to shut down."""
        self.running = True
        while self.running:
            packet = GetQueuedCompletionStatus(self.io_req_port, INFINITE)
            err, nbytes, key, ovl = packet
            # A shutdown packet carries no overlapped data - time to leave.
            if key == ISAPI_SHUTDOWN and ovl is None:
                break
            # Route the packet to whatever the extension registered for it.
            handler = self.extension.dispatch_map.get(key)
            if handler is None:
                raise RuntimeError("Bad request '%s'" % (key,))
            handler(err, nbytes, key, ovl)

    def call_handler(self, cblock):
        """Forward a control block to the extension's Dispatch method."""
        self.extension.Dispatch(cblock)
# A generic thread-pool based extension, using IO Completion Ports.
# Sub-classes can override one method to implement a simple extension, or
# may leverage the CompletionPort to queue their own requests, and implement a
# fully asynch extension.
class ThreadPoolExtension(isapi.simple.SimpleExtension):
    "Base class for an ISAPI extension based around a thread-pool"
    # Number of WorkerThread objects servicing the completion port.
    max_workers = 20
    # Total milliseconds allowed for all workers to exit at shutdown.
    worker_shutdown_wait = 15000 # 15 seconds for workers to quit...
    def __init__(self):
        self.workers = []
        # extensible dispatch map, for sub-classes that need to post their
        # own requests to the completion port.
        # Each of these functions is called with the result of
        # GetQueuedCompletionStatus for our port.
        self.dispatch_map = {
            ISAPI_REQUEST: self.DispatchConnection,
        }
    def GetExtensionVersion(self, vi):
        # Called by IIS at startup: create the port and the worker pool.
        isapi.simple.SimpleExtension.GetExtensionVersion(self, vi)
        # As per Q192800, the CompletionPort should be created with the number
        # of processors, even if the number of worker threads is much larger.
        # Passing 0 means the system picks the number.
        self.io_req_port = CreateIoCompletionPort(-1, None, 0, 0)
        # start up the workers
        self.workers = []
        for i in range(self.max_workers):
            worker = WorkerThread(self, self.io_req_port)
            worker.start()
            self.workers.append(worker)
    def HttpExtensionProc(self, control_block):
        # Queue the request to the completion port and return immediately;
        # a WorkerThread will pick it up and run DispatchConnection.
        overlapped = OVERLAPPED()
        overlapped.object = control_block
        PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_REQUEST, overlapped)
        return isapicon.HSE_STATUS_PENDING
    def TerminateExtension(self, status):
        # Flag every worker as stopping, then post one shutdown packet per
        # worker so each blocked GetQueuedCompletionStatus call wakes up.
        for worker in self.workers:
            worker.running = False
        for worker in self.workers:
            PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_SHUTDOWN, None)
        # wait for them to terminate - pity we aren't using 'native' threads
        # as then we could do a smart wait - but now we need to poll....
        end_time = time.time() + self.worker_shutdown_wait/1000
        alive = self.workers
        while alive:
            if time.time() > end_time:
                # xxx - might be nice to log something here.
                break
            time.sleep(0.2)
            alive = [w for w in alive if w.isAlive()]
        self.dispatch_map = {} # break circles
        CloseHandle(self.io_req_port)
    # This is the one operation the base class supports - a simple
    # Connection request. We setup the thread-token, and dispatch to the
    # sub-class's 'Dispatch' method.
    def DispatchConnection(self, errCode, bytes, key, overlapped):
        control_block = overlapped.object
        # setup the correct user for this request
        hRequestToken = control_block.GetImpersonationToken()
        SetThreadToken(None, hRequestToken)
        try:
            try:
                self.Dispatch(control_block)
            except:
                # Render the failure to the client rather than letting the
                # exception escape the worker thread.
                self.HandleDispatchError(control_block)
        finally:
            # reset the security context
            SetThreadToken(None, None)
    def Dispatch(self, ecb):
        """Overridden by the sub-class to handle connection requests.
        This class creates a thread-pool using a Windows completion port,
        and dispatches requests via this port. Sub-classes can generally
        implement each connection request using blocking reads and writes, and
        the thread-pool will still provide decent response to the end user.
        The sub-class can set a max_workers attribute (default is 20). Note
        that this generally does *not* mean 20 threads will all be concurrently
        running, via the magic of Windows completion ports.
        There is no default implementation - sub-classes must implement this.
        """
        raise NotImplementedError("sub-classes should override Dispatch")
    def HandleDispatchError(self, ecb):
        """Handles errors in the Dispatch method.
        When a Dispatch method call fails, this method is called to handle
        the exception. The default implementation formats the traceback
        in the browser.
        """
        ecb.HttpStatusCode = isapicon.HSE_STATUS_ERROR
        #control_block.LogData = "we failed!"
        exc_typ, exc_val, exc_tb = sys.exc_info()
        limit = None  # render the full traceback, no frame limit
        try:
            try:
                import cgi
                # "200 OK" so the browser renders our HTML error body; the
                # failure itself is recorded above via HttpStatusCode.
                ecb.SendResponseHeaders("200 OK", "Content-type: text/html\r\n\r\n",
                                        False)
                print >> ecb
                print >> ecb, "<H3>Traceback (most recent call last):</H3>"
                list = traceback.format_tb(exc_tb, limit) + \
                       traceback.format_exception_only(exc_typ, exc_val)
                print >> ecb, "<PRE>%s<B>%s</B></PRE>" % (
                    cgi.escape("".join(list[:-1])), cgi.escape(list[-1]),)
            except ExtensionError:
                # The client disconnected without reading the error body -
                # its probably not a real browser at the other end, ignore it.
                pass
            except:
                # Rendering itself failed - fall back to the server console.
                print "FAILED to render the error message!"
                traceback.print_exc()
                print "ORIGINAL extension error:"
                traceback.print_exception(exc_typ, exc_val, exc_tb)
        finally:
            # holding tracebacks in a local of a frame that may itself be
            # part of a traceback used to be evil and cause leaks!
            exc_tb = None
            ecb.DoneWithSession()
| {
"repo_name": "igemsoftware/SYSU-Software2013",
"path": "project/Python27/Lib/site-packages/isapi/threaded_extension.py",
"copies": "2",
"size": "7722",
"license": "mit",
"hash": -1441987978785695700,
"line_mean": 41.9,
"line_max": 85,
"alpha_frac": 0.6323491323,
"autogenerated": false,
"ratio": 4.294771968854283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5927121101154282,
"avg_score": null,
"num_lines": null
} |
"""An ISAPI extension base class implemented using a thread-pool."""
# $Id$
import sys
import time
from isapi import isapicon, ExtensionError
import isapi.simple
from win32file import GetQueuedCompletionStatus, CreateIoCompletionPort, \
PostQueuedCompletionStatus, CloseHandle
from win32security import SetThreadToken
from win32event import INFINITE
from pywintypes import OVERLAPPED
import threading
import traceback
# Completion-port packet keys: tag each queued packet with its purpose.
ISAPI_REQUEST = 1  # a client request waiting to be dispatched
ISAPI_SHUTDOWN = 2  # sentinel telling a worker thread to exit
class WorkerThread(threading.Thread):
    """A single worker in the completion-port thread pool."""

    def __init__(self, extension, io_req_port):
        self.running = False
        self.io_req_port = io_req_port
        self.extension = extension
        threading.Thread.__init__(self)
        # Shutdown waits ~15 seconds for workers; daemonize so a stuck
        # worker cannot keep the whole process alive at exit.
        self.daemon = True

    def run(self):
        """Loop pulling packets from the port until shut down."""
        self.running = True
        while self.running:
            status = GetQueuedCompletionStatus(self.io_req_port, INFINITE)
            errCode, nbytes, key, overlapped = status
            # Shutdown packets carry no overlapped data.
            if key == ISAPI_SHUTDOWN and overlapped is None:
                break
            # Hand the packet to whatever the extension registered for it.
            dispatch = self.extension.dispatch_map.get(key)
            if dispatch is None:
                raise RuntimeError("Bad request '%s'" % (key,))
            dispatch(errCode, nbytes, key, overlapped)

    def call_handler(self, cblock):
        """Forward a control block to the extension's Dispatch method."""
        self.extension.Dispatch(cblock)
# A generic thread-pool based extension, using IO Completion Ports.
# Sub-classes can override one method to implement a simple extension, or
# may leverage the CompletionPort to queue their own requests, and implement a
# fully asynch extension.
class ThreadPoolExtension(isapi.simple.SimpleExtension):
    "Base class for an ISAPI extension based around a thread-pool"
    # Number of WorkerThread objects servicing the completion port.
    max_workers = 20
    # Total milliseconds allowed for all workers to exit at shutdown.
    worker_shutdown_wait = 15000 # 15 seconds for workers to quit...
    def __init__(self):
        self.workers = []
        # extensible dispatch map, for sub-classes that need to post their
        # own requests to the completion port.
        # Each of these functions is called with the result of
        # GetQueuedCompletionStatus for our port.
        self.dispatch_map = {
            ISAPI_REQUEST: self.DispatchConnection,
        }
    def GetExtensionVersion(self, vi):
        # Called by IIS at startup: create the port and the worker pool.
        isapi.simple.SimpleExtension.GetExtensionVersion(self, vi)
        # As per Q192800, the CompletionPort should be created with the number
        # of processors, even if the number of worker threads is much larger.
        # Passing 0 means the system picks the number.
        self.io_req_port = CreateIoCompletionPort(-1, None, 0, 0)
        # start up the workers
        self.workers = []
        for i in range(self.max_workers):
            worker = WorkerThread(self, self.io_req_port)
            worker.start()
            self.workers.append(worker)
    def HttpExtensionProc(self, control_block):
        # Queue the request to the completion port and return immediately;
        # a WorkerThread will pick it up and run DispatchConnection.
        overlapped = OVERLAPPED()
        overlapped.object = control_block
        PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_REQUEST, overlapped)
        return isapicon.HSE_STATUS_PENDING
    def TerminateExtension(self, status):
        # Flag every worker as stopping, then post one shutdown packet per
        # worker so each blocked GetQueuedCompletionStatus call wakes up.
        for worker in self.workers:
            worker.running = False
        for worker in self.workers:
            PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_SHUTDOWN, None)
        # wait for them to terminate - pity we aren't using 'native' threads
        # as then we could do a smart wait - but now we need to poll....
        end_time = time.time() + self.worker_shutdown_wait/1000
        alive = self.workers
        while alive:
            if time.time() > end_time:
                # xxx - might be nice to log something here.
                break
            time.sleep(0.2)
            alive = [w for w in alive if w.isAlive()]
        self.dispatch_map = {} # break circles
        CloseHandle(self.io_req_port)
    # This is the one operation the base class supports - a simple
    # Connection request. We setup the thread-token, and dispatch to the
    # sub-class's 'Dispatch' method.
    def DispatchConnection(self, errCode, bytes, key, overlapped):
        control_block = overlapped.object
        # setup the correct user for this request
        hRequestToken = control_block.GetImpersonationToken()
        SetThreadToken(None, hRequestToken)
        try:
            try:
                self.Dispatch(control_block)
            except:
                # Render the failure to the client rather than letting the
                # exception escape the worker thread.
                self.HandleDispatchError(control_block)
        finally:
            # reset the security context
            SetThreadToken(None, None)
    def Dispatch(self, ecb):
        """Overridden by the sub-class to handle connection requests.
        This class creates a thread-pool using a Windows completion port,
        and dispatches requests via this port. Sub-classes can generally
        implement each connection request using blocking reads and writes, and
        the thread-pool will still provide decent response to the end user.
        The sub-class can set a max_workers attribute (default is 20). Note
        that this generally does *not* mean 20 threads will all be concurrently
        running, via the magic of Windows completion ports.
        There is no default implementation - sub-classes must implement this.
        """
        raise NotImplementedError("sub-classes should override Dispatch")
    def HandleDispatchError(self, ecb):
        """Handles errors in the Dispatch method.
        When a Dispatch method call fails, this method is called to handle
        the exception. The default implementation formats the traceback
        in the browser.
        """
        ecb.HttpStatusCode = isapicon.HSE_STATUS_ERROR
        #control_block.LogData = "we failed!"
        exc_typ, exc_val, exc_tb = sys.exc_info()
        limit = None  # render the full traceback, no frame limit
        try:
            try:
                import cgi
                # "200 OK" so the browser renders our HTML error body; the
                # failure itself is recorded above via HttpStatusCode.
                ecb.SendResponseHeaders("200 OK", "Content-type: text/html\r\n\r\n",
                                        False)
                print >> ecb
                print >> ecb, "<H3>Traceback (most recent call last):</H3>"
                list = traceback.format_tb(exc_tb, limit) + \
                       traceback.format_exception_only(exc_typ, exc_val)
                print >> ecb, "<PRE>%s<B>%s</B></PRE>" % (
                    cgi.escape("".join(list[:-1])), cgi.escape(list[-1]),)
            except ExtensionError:
                # The client disconnected without reading the error body -
                # its probably not a real browser at the other end, ignore it.
                pass
            except:
                # Rendering itself failed - fall back to the server console.
                print "FAILED to render the error message!"
                traceback.print_exc()
                print "ORIGINAL extension error:"
                traceback.print_exception(exc_typ, exc_val, exc_tb)
        finally:
            # holding tracebacks in a local of a frame that may itself be
            # part of a traceback used to be evil and cause leaks!
            exc_tb = None
            ecb.DoneWithSession()
| {
"repo_name": "ntuecon/server",
"path": "pyenv/Lib/site-packages/isapi/threaded_extension.py",
"copies": "1",
"size": "7373",
"license": "bsd-3-clause",
"hash": 1530243079445874200,
"line_mean": 41.1169590643,
"line_max": 85,
"alpha_frac": 0.6089787061,
"autogenerated": false,
"ratio": 4.41497005988024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.552394876598024,
"avg_score": null,
"num_lines": null
} |
"""An ISAPI extension base class implemented using a thread-pool."""
# $Id: threaded_extension.py,v 1.4 2006/02/17 04:04:24 mhammond Exp $
import sys
from isapi import isapicon, ExtensionError
import isapi.simple
from win32file import GetQueuedCompletionStatus, CreateIoCompletionPort, \
PostQueuedCompletionStatus, CloseHandle
from win32security import SetThreadToken
from win32event import INFINITE
from pywintypes import OVERLAPPED
# Python 2.3 and earlier insists on "C" locale - if it isn't, subtle things
# break, such as floating point constants loaded from .pyc files.
# The threading module uses such floating-points as an argument to sleep(),
# resulting in extremely long sleeps when tiny intervals are specified.
# We can work around this by resetting the C locale before the import.
if sys.hexversion < 0x02040000:
import locale
locale.setlocale(locale.LC_NUMERIC, "C")
import threading
import traceback
# Completion-port packet keys: tag each queued packet with its purpose.
ISAPI_REQUEST = 1  # a client request waiting to be dispatched
ISAPI_SHUTDOWN = 2  # sentinel telling a worker thread to exit
class WorkerThread(threading.Thread):
    """Worker servicing requests posted to the IO completion port."""

    def __init__(self, extension, io_req_port):
        self.running = False
        self.io_req_port = io_req_port
        self.extension = extension
        threading.Thread.__init__(self)

    def run(self):
        """Pull packets off the completion port until shut down."""
        self.running = True
        while self.running:
            errCode, bytes, key, overlapped = \
                GetQueuedCompletionStatus(self.io_req_port, INFINITE)
            # A shutdown packet carries no overlapped data - time to leave.
            if key == ISAPI_SHUTDOWN and overlapped is None:
                break
            # Let the parent extension handle the command.
            dispatcher = self.extension.dispatch_map.get(key)
            if dispatcher is None:
                # Call form of raise (valid on Python 2 and 3) instead of
                # the Python-2-only "raise E, msg" statement form.
                raise RuntimeError("Bad request '%s'" % (key,))
            dispatcher(errCode, bytes, key, overlapped)

    def call_handler(self, cblock):
        """Invoke the extension's Dispatch with the control block."""
        self.extension.Dispatch(cblock)
# A generic thread-pool based extension, using IO Completion Ports.
# Sub-classes can override one method to implement a simple extension, or
# may leverage the CompletionPort to queue their own requests, and implement a
# fully asynch extension.
class ThreadPoolExtension(isapi.simple.SimpleExtension):
"Base class for an ISAPI extension based around a thread-pool"
max_workers = 20
worker_shutdown_wait = 15000 # 15 seconds for workers to quit. XXX - per thread!!! Fix me!
def __init__(self):
self.workers = []
# extensible dispatch map, for sub-classes that need to post their
# own requests to the completion port.
# Each of these functions is called with the result of
# GetQueuedCompletionStatus for our port.
self.dispatch_map = {
ISAPI_REQUEST: self.DispatchConnection,
}
def GetExtensionVersion(self, vi):
isapi.simple.SimpleExtension.GetExtensionVersion(self, vi)
# As per Q192800, the CompletionPort should be created with the number
# of processors, even if the number of worker threads is much larger.
# Passing 0 means the system picks the number.
self.io_req_port = CreateIoCompletionPort(-1, None, 0, 0)
# start up the workers
self.workers = []
for i in range(self.max_workers):
worker = WorkerThread(self, self.io_req_port)
worker.start()
self.workers.append(worker)
def HttpExtensionProc(self, control_block):
overlapped = OVERLAPPED()
overlapped.object = control_block
PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_REQUEST, overlapped)
return isapicon.HSE_STATUS_PENDING
def TerminateExtension(self, status):
for worker in self.workers:
worker.running = False
for worker in self.workers:
PostQueuedCompletionStatus(self.io_req_port, 0, ISAPI_SHUTDOWN, None)
for worker in self.workers:
worker.join(self.worker_shutdown_wait)
self.dispatch_map = {} # break circles
CloseHandle(self.io_req_port)
# This is the one operation the base class supports - a simple
# Connection request. We setup the thread-token, and dispatch to the
# sub-class's 'Dispatch' method.
def DispatchConnection(self, errCode, bytes, key, overlapped):
control_block = overlapped.object
# setup the correct user for this request
hRequestToken = control_block.GetImpersonationToken()
SetThreadToken(None, hRequestToken)
try:
try:
self.Dispatch(control_block)
except:
self.HandleDispatchError(control_block)
finally:
# reset the security context
SetThreadToken(None, None)
def Dispatch(self, ecb):
"""Overridden by the sub-class to handle connection requests.
This class creates a thread-pool using a Windows completion port,
and dispatches requests via this port. Sub-classes can generally
implement each connection request using blocking reads and writes, and
the thread-pool will still provide decent response to the end user.
The sub-class can set a max_workers attribute (default is 20). Note
that this generally does *not* mean 20 threads will all be concurrently
running, via the magic of Windows completion ports.
There is no default implementation - sub-classes must implement this.
"""
raise NotImplementedError, "sub-classes should override Dispatch"
def HandleDispatchError(self, ecb):
"""Handles errors in the Dispatch method.
When a Dispatch method call fails, this method is called to handle
the exception. The default implementation formats the traceback
in the browser.
"""
ecb.HttpStatusCode = isapicon.HSE_STATUS_ERROR
#control_block.LogData = "we failed!"
exc_typ, exc_val, exc_tb = sys.exc_info()
limit = None
try:
try:
import cgi
ecb.SendResponseHeaders("200 OK", "Content-type: text/html\r\n\r\n",
False)
print >> ecb
print >> ecb, "<H3>Traceback (most recent call last):</H3>"
list = traceback.format_tb(exc_tb, limit) + \
traceback.format_exception_only(exc_typ, exc_val)
print >> ecb, "<PRE>%s<B>%s</B></PRE>" % (
cgi.escape("".join(list[:-1])), cgi.escape(list[-1]),)
except ExtensionError:
# The client disconnected without reading the error body -
# its probably not a real browser at the other end, ignore it.
pass
except:
print "FAILED to render the error message!"
traceback.print_exc()
print "ORIGINAL extension error:"
traceback.print_exception(exc_typ, exc_val, exc_tb)
finally:
# holding tracebacks in a local of a frame that may itself be
# part of a traceback used to be evil and cause leaks!
exc_tb = None
ecb.DoneWithSession()
| {
"repo_name": "Southpaw-TACTIC/Team",
"path": "src/python/Lib/site-packages/isapi/threaded_extension.py",
"copies": "1",
"size": "7348",
"license": "epl-1.0",
"hash": 6838582442933431000,
"line_mean": 41.7380952381,
"line_max": 94,
"alpha_frac": 0.6232988568,
"autogenerated": false,
"ratio": 4.342789598108747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5466088454908747,
"avg_score": null,
"num_lines": null
} |
# anis_coefficients.py
import healpy as hp
import numpy as np
import scipy.special as ss
"""
Script to compute the correlation basis-functions for various anisotropic
configurations of the GW background energy-density
-- Rutger van Haasteren (June 2014)
-- Stephen Taylor (modifications, February 2016)
"""
def real_sph_harm(mm, ll, phi, theta):
    """Real-valued spherical harmonic Y_{ll,mm}(phi, theta).

    Built from scipy's complex harmonics with the standard unitary
    combination so the result is purely real.
    """
    if mm == 0:
        harm = ss.sph_harm(0, ll, phi, theta)
    elif mm > 0:
        harm = (ss.sph_harm(mm, ll, phi, theta)
                + ((-1) ** mm) * ss.sph_harm(-mm, ll, phi, theta)) / np.sqrt(2)
    else:
        harm = (ss.sph_harm(-mm, ll, phi, theta)
                - ((-1) ** mm) * ss.sph_harm(mm, ll, phi, theta)) / (np.sqrt(2) * 1j)
    return harm.real
def signalResponse_fast(ptheta_a, pphi_a, gwtheta_a, gwphi_a):
    """Build the signal response matrix from 1-D pulsar and GW-pixel angles.

    The pulsar and GW-direction angle arrays are expanded onto a common
    (pulsar, pixel) grid and handed to createSignalResponse.
    """
    # Expand both angle pairs onto matching 2-D (pulsar x pixel) grids.
    gwphi_grid, pphi_grid = np.meshgrid(gwphi_a, pphi_a)
    gwtheta_grid, ptheta_grid = np.meshgrid(gwtheta_a, ptheta_a)
    return createSignalResponse(pphi_grid, ptheta_grid, gwphi_grid, gwtheta_grid)
def createSignalResponse(pphi, ptheta, gwphi, gwtheta):
    """Assemble the Earth-term signal-response matrix for both polarizations.

    All angle arrays must share the same (pulsar, pixel) shape.

    @param pphi:    Phi of the pulsars
    @param ptheta:  Theta of the pulsars
    @param gwphi:   Phi of GW propagation direction
    @param gwtheta: Theta of GW propagation direction

    @return: Response matrix with plus/cross pixel columns interleaved
    """
    f_plus = createSignalResponse_pol(pphi, ptheta, gwphi, gwtheta, plus=True)
    f_cross = createSignalResponse_pol(pphi, ptheta, gwphi, gwtheta, plus=False)
    # Interleave polarizations: even columns are plus, odd columns cross.
    response = np.zeros((f_plus.shape[0], 2 * f_plus.shape[1]))
    response[:, 0::2] = f_plus
    response[:, 1::2] = f_cross
    return response
def createSignalResponse_pol(pphi, ptheta, gwphi, gwtheta, plus=True, norm=True):
    """Antenna-pattern response matrix for a single GW polarization.

    All angle arrays must share the same (pulsar, pixel) shape.

    @param pphi:    Phi of the pulsars
    @param ptheta:  Theta of the pulsars
    @param gwphi:   Phi of GW propagation direction
    @param gwtheta: Theta of GW propagation direction
    @param plus:    True for the plus polarization, False for cross
    @param norm:    Normalise correlations to match Jenet et al. (2005)

    @return: Signal response matrix of the Earth term
    """
    sin_gt = np.sin(gwtheta)
    cos_gt = np.cos(gwtheta)
    sin_gp = np.sin(gwphi)
    cos_gp = np.cos(gwphi)
    # Unit propagation direction and the two transverse polarization basis
    # vectors, following the sign convention of Gair et al. (2014).
    omega = np.array([-sin_gt * cos_gp, -sin_gt * sin_gp, -cos_gt])
    mhat = np.array([-sin_gp, cos_gp, np.zeros(gwphi.shape)])
    nhat = np.array([-cos_gp * cos_gt, -cos_gt * sin_gp, sin_gt])
    # Unit vectors pointing at each pulsar.
    phat = np.array([np.cos(pphi) * np.sin(ptheta),
                     np.sin(pphi) * np.sin(ptheta),
                     np.cos(ptheta)])
    npixels = omega.shape[2]
    # sqrt(3/2) reconciles the Hellings & Downs integral with the
    # normalisation of Jenet et al. (2005) / Gair et al. (2014).
    if norm:
        scale = np.sqrt(1.5) / np.sqrt(npixels)
    else:
        scale = 1.0 / np.sqrt(npixels)
    # Inner products along the leading (xyz) axis.
    m_dot_p = np.sum(mhat * phat, axis=0)
    n_dot_p = np.sum(nhat * phat, axis=0)
    omega_dot_p = np.sum(omega * phat, axis=0)
    if plus:
        return 0.5 * scale * (n_dot_p ** 2 - m_dot_p ** 2) / (1 - omega_dot_p)
    return scale * m_dot_p * n_dot_p / (1 - omega_dot_p)
def almFromClm(clm):
    """Convert real-valued clm coefficients into healpy's complex alm array.

    Note: there is a bug in healpy for negative m values, so the
    negative-m information is folded into the imaginary part of the
    abs(m) alm entry.
    """
    maxl = int(np.sqrt(len(clm))) - 1
    alm = np.zeros(hp.Alm.getsize(maxl), dtype=np.complex128)
    idx = 0
    for ll in range(maxl + 1):
        for mm in range(-ll, ll + 1):
            target = hp.Alm.getidx(maxl, ll, abs(mm))
            if mm == 0:
                alm[target] += clm[idx]
            elif mm < 0:
                alm[target] -= 1j * clm[idx] / np.sqrt(2)
            else:
                alm[target] += clm[idx] / np.sqrt(2)
            idx += 1
    return alm
def clmFromAlm(alm):
    """Convert a complex healpy alm array back into real-valued clm values.

    Inverse of almFromClm: the negative-m coefficients are recovered from
    the imaginary parts of the abs(m) entries (healpy negative-m bug).
    """
    nalm = len(alm)
    maxl = int(np.sqrt(9.0 - 4.0 * (2.0 - 2.0 * nalm)) * 0.5 - 1.5)  # Really?
    nclm = (maxl + 1) ** 2
    # Sanity-check the quadratic solution for maxl above.
    if nalm != int(0.5 * (maxl + 1) * (maxl + 2)):
        raise ValueError("Check numerical precision. This should not happen")
    clm = np.zeros(nclm)
    idx = 0
    for ll in range(maxl + 1):
        for mm in range(-ll, ll + 1):
            source = hp.Alm.getidx(maxl, ll, abs(mm))
            if mm == 0:
                clm[idx] = alm[source].real
            elif mm < 0:
                clm[idx] = -alm[source].imag * np.sqrt(2)
            else:
                clm[idx] = alm[source].real * np.sqrt(2)
            idx += 1
    return clm
def mapFromClm_fast(clm, nside):
    """Convert C_{lm} values into a (RING-ordered) healpix power map.

    Uses healpy's spherical-harmonic transforms for speed.

    @param clm:   Array of C_{lm} values (including the 0,0 element)
    @param nside: Nside of the healpix pixelation

    @return: Healpix pixel values
    """
    maxl = int(np.sqrt(len(clm))) - 1
    return hp.alm2map(almFromClm(clm), nside, maxl, verbose=False)
def mapFromClm(clm, nside):
    """Convert C_{lm} values into a healpix power map via real_sph_harm.

    Slower, pure-python counterpart of mapFromClm_fast.

    @param clm:   Array of C_{lm} values (including the 0,0 element)
    @param nside: Nside of the healpix pixelation

    @return: Healpix pixel values
    """
    npixels = hp.nside2npix(nside)
    theta, phi = hp.pix2ang(nside, np.arange(npixels), nest=False)
    maxl = int(np.sqrt(len(clm))) - 1
    hmap = np.zeros(npixels)
    idx = 0
    for ll in range(maxl + 1):
        for mm in range(-ll, ll + 1):
            hmap += clm[idx] * real_sph_harm(mm, ll, phi, theta)
            idx += 1
    return hmap
def clmFromMap_fast(h, lmax):
    """Expand a sky power map into C_{lm} values using healpy transforms.

    @param h:    Sky power map
    @param lmax: Highest l to expand to

    @return: clm values
    """
    alm = hp.sphtfunc.map2alm(h, lmax=lmax)
    # Overwrite the monopole so its normalisation matches our clm convention.
    alm[0] = np.sum(h) * np.sqrt(4 * np.pi) / len(h)
    return clmFromAlm(alm)
def clmFromMap(h, lmax):
    """Expand a sky power map into C_{lm} values using real_sph_harm.

    Slower, pure-python counterpart of clmFromMap_fast.

    @param h:    Sky power map
    @param lmax: Highest l to expand to

    @return: clm values
    """
    npixels = len(h)
    nside = hp.npix2nside(npixels)
    theta, phi = hp.pix2ang(nside, np.arange(npixels), nest=False)
    clm = np.zeros((lmax + 1) ** 2)
    idx = 0
    for ll in range(lmax + 1):
        for mm in range(-ll, ll + 1):
            clm[idx] += np.sum(h * real_sph_harm(mm, ll, phi, theta))
            idx += 1
    return clm * 4 * np.pi / npixels
def getCov(clm, nside, F_e):
    """Cross-pulsar covariance implied by an array of clm values.

    @param clm:   Array with Clm values
    @param nside: Healpix nside resolution
    @param F_e:   Signal response matrix

    @return: Cross-pulsar correlation matrix
    """
    # Power sky-map; mapFromClm would give the slow reference answer,
    # mapFromClm_fast uses Healpix.
    power = mapFromClm_fast(clm, nside)
    # Duplicate each pixel so both polarizations carry the same power.
    sky = np.array([power, power]).T.flatten()
    cov = np.dot(F_e * sky, F_e.T)
    # The pulsar term contributes only on the diagonal (uncorrelated).
    return cov + np.diag(np.diag(cov))
def anis_basis(psr_locs, lmax, nside=32):
    """Correlation basis matrices via pixel-space transformations.

    @param psr_locs: Pulsar locations as [phi, theta]
    @param lmax:     Maximum l to expand to
    @param nside:    Healpix nside for the pixelation [32]

    Note: GW directions are in the direction of GW propagation.
    """
    pphi = psr_locs[:, 0]
    ptheta = psr_locs[:, 1]
    # GW-direction pixel centres on the sphere.
    npixels = hp.nside2npix(nside)
    gwtheta, gwphi = hp.pix2ang(nside, np.arange(npixels), nest=False)
    # Signal response matrix shared by every basis element.
    F_e = signalResponse_fast(ptheta, pphi, gwtheta, gwphi)
    # One covariance matrix per (l, m): unit power in that mode alone.
    nclm = (lmax + 1) ** 2
    basis = []
    for idx in range(nclm):
        clm = np.zeros(nclm)
        clm[idx] = 1.0
        basis.append(getCov(clm, nside, F_e))
    return np.array(basis)
def orfFromMap_fast(psr_locs, usermap, response=None):
    """Overlap reduction function from a user-defined sky map.

    @param psr_locs: Pulsar locations as [phi, theta]
    @param usermap:  Healpix map of GW power
    @param response: Optional precomputed signal-response matrix

    Note: GW directions are in the direction of GW propagation.
    """
    if response is not None:
        F_e = response
    else:
        pphi = psr_locs[:, 0]
        ptheta = psr_locs[:, 1]
        # Derive the pixelation from the supplied map.
        nside = hp.npix2nside(len(usermap))
        npixels = hp.nside2npix(nside)
        gwtheta, gwphi = hp.pix2ang(nside, np.arange(npixels), nest=False)
        F_e = signalResponse_fast(ptheta, pphi, gwtheta, gwphi)
    # Duplicate each pixel so both polarizations carry the same power.
    sky = np.array([usermap, usermap]).T.flatten()
    cov = np.dot(F_e * sky, F_e.T)
    # The pulsar term contributes only on the diagonal (uncorrelated).
    return cov + np.diag(np.diag(cov))
| {
"repo_name": "jellis18/enterprise",
"path": "enterprise/signals/anis_coefficients.py",
"copies": "2",
"size": "10948",
"license": "mit",
"hash": -6896222370989619000,
"line_mean": 28.912568306,
"line_max": 119,
"alpha_frac": 0.6084216295,
"autogenerated": false,
"ratio": 3.038578961976131,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46470005914761314,
"avg_score": null,
"num_lines": null
} |
# An island in matrix is a group of linked areas, all having the same value.
# This code counts number of islands in a given matrix, with including diagonal
# connections.
class matrix:  # Public class to implement a graph
    """Count islands (8-connected groups of 1-cells) in a binary matrix."""

    def __init__(self, row: int, col: int, graph: list):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list) -> bool:
        """Return truthy if (i, j) is inside the grid, unvisited, and land."""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list) -> None:
        """Depth-first flood fill over the 8 neighbours of (i, j)."""
        # Offsets of the 8 surrounding cells, paired (row, col).
        row_off = (-1, -1, -1, 0, 0, 1, 1, 1)
        col_off = (-1, 0, 1, -1, 1, -1, 0, 1)
        visited[i][j] = True  # Mark the current cell before recursing
        for dr, dc in zip(row_off, col_off):
            if self.is_safe(i + dr, j + dc, visited):
                self.diffs(i + dr, j + dc, visited)

    def count_islands(self) -> int:
        """Return the number of 8-connected islands of 1s in the matrix."""
        visited = [[False] * self.COL for _ in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                # `not visited[...]` replaces the `is False` identity
                # comparison; identical result for bool entries.
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
| {
"repo_name": "TheAlgorithms/Python",
"path": "matrix/count_islands_in_matrix.py",
"copies": "1",
"size": "1383",
"license": "mit",
"hash": 7517766221948299000,
"line_mean": 37.4166666667,
"line_max": 86,
"alpha_frac": 0.5372378886,
"autogenerated": false,
"ratio": 3.4064039408866993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9442045405476482,
"avg_score": 0.00031928480204342275,
"num_lines": 36
} |
"""An IsoSurface module that allows the user to make contours of input
point data.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Bool
# Local imports
from mayavi.core.module import Module
from mayavi.components.contour import Contour
from mayavi.components.poly_data_normals import PolyDataNormals
from mayavi.components.actor import Actor
from mayavi.core.pipeline_info import PipelineInfo
######################################################################
# `IsoSurface` class.
######################################################################
class IsoSurface(Module):
    """Display iso-contours of the input point scalars.

    The module chains three components: a ``Contour`` that extracts the
    iso-surface, an optional ``PolyDataNormals`` that smooths it, and an
    ``Actor`` that renders it.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0
    # The contour component.
    contour = Instance(Contour, record=True)
    # Specify if normals are to be computed to make a smoother surface.
    compute_normals = Bool(True, desc='if normals are to be computed '\
                           'to make the iso-surface smoother')
    # The component that computes the normals.
    normals = Instance(PolyDataNormals, record=True)
    # The actor component that represents the iso-surface.
    actor = Instance(Actor, record=True)
    # Accepts any dataset as long as it carries point scalars.
    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['scalars'])
    ########################################
    # The view of this object.
    # Commented out, since we are now using the iso_surface_view.py version.
    #view = View([Group(
    #                   Item( name  = 'contour',
    #                         style = 'custom' ),
    #                   show_labels = False,
    #                   show_border = True,
    #                   label       = 'Contours' ),
    #             Group(
    #                   Item( name = 'compute_normals' ),
    #                   '_',
    #                   Item( name         = 'normals',
    #                         style        = 'custom',
    #                         show_label   = False,
    #                         enabled_when = 'compute_normals' ),
    #                   show_border = True,
    #                   label       = 'Normals' ),
    #             Group(
    #                   Item( name  = 'actor',
    #                         style = 'custom' ),
    #                   show_labels = False )
    #             ]
    #            )
    ######################################################################
    # `Module` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`.  Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup.  So upstream data
        will not be available.  The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.  You should also
        set the `actors` attribute up at this point.
        """
        # Create the components
        self.contour = Contour(show_filled_contours=False)
        self.normals = PolyDataNormals()
        self.actor = Actor()
        # Setup the actor suitably for this module.
        self.actor.mapper.scalar_visibility = 1

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        mm = self.module_manager
        if mm is None:
            return
        # Data is available, so set the input for the grid plane.
        self.contour.inputs = [mm.source]
        # Force the normals setting to be noted.
        self._compute_normals_changed(self.compute_normals)
        # Set the LUT for the mapper.
        self.actor.set_lut(mm.scalar_lut_manager.lut)
        self.pipeline_changed = True

    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Just set data_changed, the component should do the rest.
        self.data_changed = True

    ######################################################################
    # Non-public interface.
    ######################################################################
    def _compute_normals_changed(self, value):
        """Re-wire the actor's input: normals output when smoothing is on,
        raw contour output when it is off."""
        if self.module_manager is None:
            return
        actor = self.actor
        if value:
            if actor:
                actor.inputs = [self.normals]
        else:
            if actor:
                actor.inputs = [self.contour]
        self.render()

    def _contour_changed(self, old, new):
        """Feed the (new) contour into the normals component."""
        normals = self.normals
        if normals is not None:
            normals.inputs = [new]
        self._change_components(old, new)

    def _normals_changed(self, old, new):
        """Feed the contour output into the (new) normals component."""
        contour = self.contour
        if contour is not None:
            new.inputs = [contour]
        self._change_components(old, new)

    def _actor_changed(self, old, new):
        # Here we set the inputs in any case to avoid VTK pipeline
        # errors.  The pipeline is corrected when update_pipeline is
        # called anyway.
        contour = self.contour
        if contour is not None:
            new.inputs = [contour]
        self._change_components(old, new)
| {
"repo_name": "dmsurti/mayavi",
"path": "mayavi/modules/iso_surface.py",
"copies": "5",
"size": "5755",
"license": "bsd-3-clause",
"hash": 4028035085261461500,
"line_mean": 35.4240506329,
"line_max": 76,
"alpha_frac": 0.5259774109,
"autogenerated": false,
"ratio": 4.648626817447496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7674604228347496,
"avg_score": null,
"num_lines": null
} |
''' Anisotropic to isotropic voxel conversion '''
from __future__ import division, print_function, absolute_import
from dipy.align.reslice import reslice
from warnings import warn
def resample(data, affine, zooms, new_zooms, order=1, mode='constant', cval=0):
    """Deprecated: reslice ``data`` to the voxel size ``new_zooms``.

    This is a thin backward-compatibility wrapper; all work is delegated to
    :func:`dipy.align.reslice.reslice`, which new code should call directly.

    Parameters
    ----------
    data : array, shape (I,J,K) or (I,J,K,N)
        3d volume or 4d volume with datasets
    affine : array, shape (4,4)
        mapping from voxel coordinates to world coordinates
    zooms : tuple, shape (3,)
        voxel size for (i,j,k) dimensions
    new_zooms : tuple, shape (3,)
        new voxel size for (i,j,k) after resampling
    order : int, from 0 to 5
        order of interpolation for resampling/reslicing,
        0 nearest interpolation, 1 trilinear etc..
        if you don't want any smoothing 0 is the option you need.
    mode : string ('constant', 'nearest', 'reflect' or 'wrap')
        Points outside the boundaries of the input are filled according
        to the given mode.
    cval : float
        Value used for points outside the boundaries of the input if
        mode='constant'.

    Returns
    -------
    data2 : array, shape (I,J,K) or (I,J,K,N)
        datasets resampled into isotropic voxel size
    affine2 : array, shape (4,4)
        new affine for the resampled image
    """
    msg = "This function is deprecated please use dipy.align.reslice.reslice"
    msg += " instead."
    # Use the DeprecationWarning category (the bare warn() default is
    # UserWarning, which cannot be filtered as a deprecation), and point the
    # warning at the caller's line with stacklevel=2.
    warn(msg, DeprecationWarning, stacklevel=2)
    return reslice(data, affine, zooms, new_zooms, order, mode, cval)
| {
"repo_name": "demianw/dipy",
"path": "dipy/align/aniso2iso.py",
"copies": "9",
"size": "2138",
"license": "bsd-3-clause",
"hash": -4638538385889578000,
"line_mean": 32.40625,
"line_max": 79,
"alpha_frac": 0.6122544434,
"autogenerated": false,
"ratio": 3.415335463258786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015625,
"num_lines": 64
} |
''' Anisotropic to isotropic voxel conversion '''
import numpy as np
from scipy.ndimage import affine_transform
def resample(data, affine, zooms, new_zooms, order=1, mode='constant', cval=0):
    '''Resample data from anisotropic to isotropic voxel size

    Parameters
    ----------
    data : array, shape (I,J,K) or (I,J,K,N)
        3d volume or 4d volume with datasets
    affine : array, shape (4,4)
        mapping from voxel coordinates to world coordinates
    zooms : tuple, shape (3,)
        voxel size for (i,j,k) dimensions
    new_zooms : tuple, shape (3,)
        new voxel size for (i,j,k) after resampling
    order : int, from 0 to 5
        order of interpolation for resampling/reslicing,
        0 nearest interpolation, 1 trilinear etc..
        if you don't want any smoothing 0 is the option you need.
    mode : string ('constant', 'nearest', 'reflect' or 'wrap')
        Points outside the boundaries of the input are filled according
        to the given mode.  Defaults to 'constant', which matches the
        previous behaviour of this function.
    cval : float
        Value used for points outside the boundaries of the input if
        mode='constant'.

    Returns
    -------
    data2 : array, shape (I,J,K) or (I,J,K,N)
        datasets resampled into isotropic voxel size
    affine2 : array, shape (4,4)
        new affine for the resampled image

    Notes
    -----
    It is also possible with this function to resample/reslice from isotropic
    voxel size to anisotropic or from isotropic to isotropic or even from
    anisotropic to anisotropic, as long as you provide the correct zooms
    (voxel sizes) and new_zooms (new voxel sizes). It is fairly easy to get
    the correct zooms using nibabel's ``img.get_header().get_zooms()[:3]``.
    '''
    # Voxel-to-voxel transform: the ratio of new to old voxel sizes.
    R = np.diag(np.array(new_zooms) / np.array(zooms))
    # Output grid size: scale the old shape by the inverse zoom ratio.
    new_shape = np.array(zooms) / np.array(new_zooms) * np.array(data.shape[:3])
    new_shape = np.round(new_shape).astype('i8')
    if data.ndim == 3:
        data2 = affine_transform(input=data, matrix=R, offset=np.zeros(3,),
                                 output_shape=tuple(new_shape), order=order,
                                 mode=mode, cval=cval)
    if data.ndim == 4:
        # Resample each 3D volume of the 4D stack independently.
        data2l = []
        for i in range(data.shape[-1]):
            tmp = affine_transform(input=data[..., i], matrix=R,
                                   offset=np.zeros(3,),
                                   output_shape=tuple(new_shape), order=order,
                                   mode=mode, cval=cval)
            data2l.append(tmp)
        data2 = np.zeros(tmp.shape + (data.shape[-1],), data.dtype)
        for i in range(data.shape[-1]):
            data2[..., i] = data2l[i]
    # Compose the voxel scaling into the world affine.
    Rx = np.eye(4)
    Rx[:3, :3] = R
    affine2 = np.dot(affine, Rx)
    return data2, affine2
| {
"repo_name": "maurozucchelli/dipy",
"path": "dipy/align/aniso2iso.py",
"copies": "1",
"size": "2788",
"license": "bsd-3-clause",
"hash": 8148681448779873000,
"line_mean": 35.6842105263,
"line_max": 122,
"alpha_frac": 0.6076040172,
"autogenerated": false,
"ratio": 3.1361079865016874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42437120037016873,
"avg_score": null,
"num_lines": null
} |
''' Anisotropic to isotropic voxel conversion '''
import numpy as np
from scipy.ndimage import affine_transform
def resample(data, affine, zooms, new_zooms, order=1, mode='constant', cval=0):
    """Resample data from anisotropic to isotropic voxel size

    Parameters
    ----------
    data : array, shape (I,J,K) or (I,J,K,N)
        3d volume or 4d volume with datasets
    affine : array, shape (4,4)
        mapping from voxel coordinates to world coordinates
    zooms : tuple, shape (3,)
        voxel size for (i,j,k) dimensions
    new_zooms : tuple, shape (3,)
        new voxel size for (i,j,k) after resampling
    order : int, from 0 to 5
        order of interpolation for resampling/reslicing,
        0 nearest interpolation, 1 trilinear etc..
        if you don't want any smoothing 0 is the option you need.
    mode : string ('constant', 'nearest', 'reflect' or 'wrap')
        Points outside the boundaries of the input are filled according
        to the given mode.
    cval : float
        Value used for points outside the boundaries of the input if
        mode='constant'.

    Returns
    -------
    data2 : array, shape (I,J,K) or (I,J,K,N)
        datasets resampled into isotropic voxel size
    affine2 : array, shape (4,4)
        new affine for the resampled image

    Notes
    -----
    Any combination of old and new voxel sizes works (aniso->iso, iso->aniso,
    etc.) provided ``zooms`` and ``new_zooms`` are correct; nibabel's
    ``img.get_header().get_zooms()[:3]`` gives the current zooms.
    """
    # Voxel-to-voxel matrix (ratio of new to old voxel sizes) and the
    # resulting output grid size.
    scale = np.diag(np.array(new_zooms) / np.array(zooms))
    target_shape = np.round(
        np.array(zooms) / np.array(new_zooms) * np.array(data.shape[:3])
    ).astype('i8')

    def _reslice_volume(vol):
        # Resample a single 3D volume onto the new grid.
        return affine_transform(input=vol, matrix=scale, offset=np.zeros(3,),
                                output_shape=tuple(target_shape), order=order,
                                mode=mode, cval=cval)

    if data.ndim == 3:
        data2 = _reslice_volume(data)
    if data.ndim == 4:
        # Handle each trailing-axis volume independently, then restack.
        volumes = [_reslice_volume(data[..., idx])
                   for idx in range(data.shape[-1])]
        data2 = np.zeros(volumes[0].shape + (data.shape[-1],), data.dtype)
        for idx, vol in enumerate(volumes):
            data2[..., idx] = vol
    # Fold the voxel scaling into the world affine.
    voxel_map = np.eye(4)
    voxel_map[:3, :3] = scale
    affine2 = np.dot(affine, voxel_map)
    return data2, affine2
| {
"repo_name": "samuelstjean/dipy",
"path": "dipy/align/aniso2iso.py",
"copies": "1",
"size": "3299",
"license": "bsd-3-clause",
"hash": -6086811539444071000,
"line_mean": 36.0674157303,
"line_max": 79,
"alpha_frac": 0.5819945438,
"autogenerated": false,
"ratio": 3.4045407636738907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44865353074738906,
"avg_score": null,
"num_lines": null
} |
""" An item in a Perspective contents list. """
# Enthought library imports.
from enthought.traits.api import Enum, Float, HasTraits, Str, implements
# Local imports.
from i_perspective_item import IPerspectiveItem
class PerspectiveItem(HasTraits):
    """ An item in a Perspective contents list. """

    # Declares that this class implements the IPerspectiveItem interface
    # (old-style Traits interface declaration).
    implements(IPerspectiveItem)

    # The Id of the view to display in the perspective.
    id = Str

    # The position of the view relative to the item specified in the
    # 'relative_to' trait.
    #
    # 'top'    puts the view above the 'relative_to' item.
    # 'bottom' puts the view below the 'relative_to' item.
    # 'left'   puts the view to the left of  the 'relative_to' item.
    # 'right'  puts the view to the right of the 'relative_to' item.
    # 'with'   puts the view in the same region as the 'relative_to' item.
    #
    # If the position is specified as 'with' you must specify a 'relative_to'
    # item other than the editor area (i.e., you cannot position a view 'with'
    # the editor area).
    position = Enum('left', 'top', 'bottom', 'right', 'with')

    # The Id of the view to position relative to. If this is not specified
    # (or if no view exists with this Id) then the view will be placed relative
    # to the editor area.
    relative_to = Str

    # The width of the item (as a fraction of the window width).
    #
    # e.g. 0.5 == half the window width.
    #
    # Note that this is treated as a suggestion, and it may not be possible
    # for the workbench to allocate the space requested.
    # NOTE(review): -1 appears to act as "no explicit width requested" —
    # confirm against the consuming layout code.
    width = Float(-1)

    # The height of the item (as a fraction of the window height).
    #
    # e.g. 0.5 == half the window height.
    #
    # Note that this is treated as a suggestion, and it may not be possible
    # for the workbench to allocate the space requested.
    # NOTE(review): -1 appears to act as "no explicit height requested" —
    # confirm against the consuming layout code.
    height = Float(-1)

    # The style of the dock control created.
    style_hint = Enum('tab', 'vertical', 'horizontal', 'fixed')
#### EOF ######################################################################
| {
"repo_name": "enthought/traitsgui",
"path": "enthought/pyface/workbench/perspective_item.py",
"copies": "1",
"size": "2040",
"license": "bsd-3-clause",
"hash": -1413222829714755000,
"line_mean": 34.7894736842,
"line_max": 79,
"alpha_frac": 0.6357843137,
"autogenerated": false,
"ratio": 3.8345864661654137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9969274288637344,
"avg_score": 0.0002192982456140351,
"num_lines": 57
} |
""" An item in a Perspective contents list. """
# Enthought library imports.
from traits.api import Enum, Float, HasTraits, provides, Str
# Local imports.
from i_perspective_item import IPerspectiveItem
@provides(IPerspectiveItem)
class PerspectiveItem(HasTraits):
    """ An item in a Perspective contents list. """

    # The Id of the view to display in the perspective.
    id = Str

    # The position of the view relative to the item specified in the
    # 'relative_to' trait.
    #
    # 'top'    puts the view above the 'relative_to' item.
    # 'bottom' puts the view below the 'relative_to' item.
    # 'left'   puts the view to the left of  the 'relative_to' item.
    # 'right'  puts the view to the right of the 'relative_to' item.
    # 'with'   puts the view in the same region as the 'relative_to' item.
    #
    # If the position is specified as 'with' you must specify a 'relative_to'
    # item other than the editor area (i.e., you cannot position a view 'with'
    # the editor area).
    position = Enum('left', 'top', 'bottom', 'right', 'with')

    # The Id of the view to position relative to. If this is not specified
    # (or if no view exists with this Id) then the view will be placed relative
    # to the editor area.
    relative_to = Str

    # The width of the item (as a fraction of the window width).
    #
    # e.g. 0.5 == half the window width.
    #
    # Note that this is treated as a suggestion, and it may not be possible
    # for the workbench to allocate the space requested.
    # NOTE(review): -1 appears to act as "no explicit width requested" —
    # confirm against the consuming layout code.
    width = Float(-1)

    # The height of the item (as a fraction of the window height).
    #
    # e.g. 0.5 == half the window height.
    #
    # Note that this is treated as a suggestion, and it may not be possible
    # for the workbench to allocate the space requested.
    # NOTE(review): -1 appears to act as "no explicit height requested" —
    # confirm against the consuming layout code.
    height = Float(-1)

    # The style of the dock control created.
    style_hint = Enum('tab', 'vertical', 'horizontal', 'fixed')
#### EOF ######################################################################
| {
"repo_name": "brett-patterson/pyface",
"path": "pyface/workbench/perspective_item.py",
"copies": "2",
"size": "2021",
"license": "bsd-3-clause",
"hash": -4456797159542711300,
"line_mean": 35.7454545455,
"line_max": 79,
"alpha_frac": 0.635329045,
"autogenerated": false,
"ratio": 3.820415879017013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5455744924017013,
"avg_score": null,
"num_lines": null
} |
""" An item in a Perspective contents list. """
# Enthought library imports.
from traits.api import Enum, Float, HasTraits, provides, Str
# Local imports.
from .i_perspective_item import IPerspectiveItem
@provides(IPerspectiveItem)
class PerspectiveItem(HasTraits):
    """ An item in a Perspective contents list. """

    # The Id of the view to display in the perspective.
    id = Str

    # The position of the view relative to the item specified in the
    # 'relative_to' trait.
    #
    # 'top'    puts the view above the 'relative_to' item.
    # 'bottom' puts the view below the 'relative_to' item.
    # 'left'   puts the view to the left of  the 'relative_to' item.
    # 'right'  puts the view to the right of the 'relative_to' item.
    # 'with'   puts the view in the same region as the 'relative_to' item.
    #
    # If the position is specified as 'with' you must specify a 'relative_to'
    # item other than the editor area (i.e., you cannot position a view 'with'
    # the editor area).
    position = Enum('left', 'top', 'bottom', 'right', 'with')

    # The Id of the view to position relative to. If this is not specified
    # (or if no view exists with this Id) then the view will be placed relative
    # to the editor area.
    relative_to = Str

    # The width of the item (as a fraction of the window width).
    #
    # e.g. 0.5 == half the window width.
    #
    # Note that this is treated as a suggestion, and it may not be possible
    # for the workbench to allocate the space requested.
    # NOTE(review): -1 appears to act as "no explicit width requested" —
    # confirm against the consuming layout code.
    width = Float(-1)

    # The height of the item (as a fraction of the window height).
    #
    # e.g. 0.5 == half the window height.
    #
    # Note that this is treated as a suggestion, and it may not be possible
    # for the workbench to allocate the space requested.
    # NOTE(review): -1 appears to act as "no explicit height requested" —
    # confirm against the consuming layout code.
    height = Float(-1)

    # The style of the dock control created.
    style_hint = Enum('tab', 'vertical', 'horizontal', 'fixed')
#### EOF ######################################################################
| {
"repo_name": "geggo/pyface",
"path": "pyface/workbench/perspective_item.py",
"copies": "3",
"size": "2022",
"license": "bsd-3-clause",
"hash": 5774729741490628000,
"line_mean": 35.7636363636,
"line_max": 79,
"alpha_frac": 0.6350148368,
"autogenerated": false,
"ratio": 3.8150943396226413,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5950109176422641,
"avg_score": null,
"num_lines": null
} |
# An iterator for stepping through invoice emails via imap.
import os
try:
address = os.environ['WIRECARD_EMAIL']
password = os.environ['WIRECARD_EMAIL_PASSWORD']
except:
print "Set your email and password in environment variables WIRECARD_EMAIL and WIRECARD_EMAIL_PASSWORD."
print
print "For example"
print " export WIRECARD_EMAIL=me@bemmu.com"
print " export WIRECARD_PASSWORD=veryverysecret"
exit()
def iterate_wirecard_invoices():
    """Yield one dict per Wirecard invoice email found in the Gmail account.

    Connects to Gmail over IMAP (using the module-level `address`/`password`
    credentials), searches "[Gmail]/All Mail" for messages from
    invoice@wirecard.com, and yields {"Subject": ..., "pdf": <raw PDF bytes>}.

    NOTE(review): assumes every matching message is multipart with the
    base64-encoded PDF as the second payload part — verify for other
    mail layouts.
    """
    import imaplib
    import email
    import base64
    mail = imaplib.IMAP4_SSL('imap.gmail.com')
    mail.login(address, password)
    mail.select("[Gmail]/All Mail")
    # UID search keeps identifiers stable across the later per-message fetches.
    result, data = mail.uid('search', '(HEADER From "invoice@wirecard.com")')
    for uid in data[0].split():
        result, data = mail.uid('fetch', uid, '(RFC822)')
        raw_email = data[0][1]
        email_message = email.message_from_string(raw_email)
        assert(email_message.is_multipart())
        # Second MIME part is expected to hold the base64-encoded PDF.
        pdf = email_message.get_payload(1)
        pdf = base64.b64decode(pdf.get_payload())
        # Sanity check: decoded bytes start with the "%PDF" magic.
        assert(pdf[1:4] == "PDF")
        yield {
            "Subject" : email_message['Subject'],
            "pdf" : pdf
        }
if __name__ == "__main__":
    # Smoke test: print the subject line of every Wirecard invoice email.
    for invoice in iterate_wirecard_invoices():
        print invoice["Subject"]
| {
"repo_name": "Bemmu/wirecard_email_parser",
"path": "iterators/iterate_all_wirecard_invoices.py",
"copies": "1",
"size": "1158",
"license": "mit",
"hash": -536077513841945100,
"line_mean": 26.5714285714,
"line_max": 105,
"alpha_frac": 0.7029360967,
"autogenerated": false,
"ratio": 2.9242424242424243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4127178520942424,
"avg_score": null,
"num_lines": null
} |
# An iterator is an interface like construct that Python supports.
# Many things are iterable, as we saw above.
# There are some other useful tools in the itertools module.
# In general, don't import * on a library.
from itertools import *
# assuming we still have string from earlier.
# Make an iterator that cycles through a ROT13 alphabet.
def rot13():
    """Return an iterator over the lowercase alphabet shifted by ROT13."""
    pivot = ord('a') + 13
    # Nested helper: letters in the second half of the alphabet wrap
    # backwards by 13; letters in the first half move forward by 13.
    def shift(ch):
        code = ord(ch)
        return chr(code - 13) if code >= pivot else chr(code + 13)
    # imap returns a lazy iterator, unlike the eager built-in map (Py2).
    return imap(shift, string.ascii_lowercase)
# rot13() hands back an iterator object, not a list.
print rot13()
# Simple use: enumerate pairs each letter with a 1-based position.
print "The ROT13 alphabet."
for i, letter in enumerate(rot13()):
	print i+1, letter
# Get the combinations of a poker hand.
card_values = list(string.digits[2:])+["10"]+list("JQKA")
card_suits = ["c", "d", "h", "s"]
# Generate a cartesian product of values+suits, then create sequence.
cards = [card+suit for card, suit in product(card_values, card_suits)]
print cards
# Generate all the 5 hand combinations we can.
# There are a lot (C(52,5) = 2,598,960) — you will probably want to ctrl-c this.
for fivecards in combinations(cards, 5):
	print fivecards
| {
"repo_name": "jeremyosborne/python",
"path": "iters/itools.py",
"copies": "1",
"size": "1301",
"license": "mit",
"hash": -6827493893068646000,
"line_mean": 32.358974359,
"line_max": 70,
"alpha_frac": 0.6894696387,
"autogenerated": false,
"ratio": 3.5546448087431695,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4744114447443169,
"avg_score": null,
"num_lines": null
} |
# Matrix storage, filled by fillA(): A[i] collects the i-th entry of every row.
A=[]
# Sentinel "infinity" for the minimum searches below.
limit = 10 ** 6
def fillA() :
B = []
inp = file('matrix.txt')
t = inp.readline()
while t!="":
# K = t.strip().split()
K = t.strip().split(',')
B.append(K)
t = inp.readline()
# print B
for i in range(0,len(B)):
A.append([])
for b in B:
for i in range(0,len(B)):
A[i].append(int(b[i]))
# print A
print len(A)
# Load the matrix and record its side length for the DP below.
fillA()
L= len(A)
print 'L=',L
def distance(m,i,n,j):
	'''return the distance from (m,i) to (n,j)
	note that m,n is the column number and i,j are row number

	"Distance" is the inclusive sum of matrix cells along the cheapest
	path; both endpoints' values are included in the total.'''
	# Paths only move towards higher column indices; -1 flags a bad call.
	if m > n: return -1
	elif m == n:
		#A[m][i] -> A[m][j]
		# Same column: the cost is the inclusive run of cells between
		# rows i and j within column m.
		s = 0
		if i < j:
			for k in range(i,j+1): s+= A[m][k]
		else:
			for k in range(j,i+1): s+= A[m][k]
		return s
	elif m + 1 == n:
		# Adjacent columns: either step right first then move vertically,
		# or move vertically first then step right; take the cheaper.
		t1= A[m][i] + distance(n,i,n,j)
		t2= distance(m,i,m,j) + A[n][j]
		if t1< t2: return t1
		else: return t2
	else:
		# Divide and conquer on a middle column: every valid path crosses
		# it at some row k. The crossing cell A[middle][k] is counted by
		# both halves, so subtract it once.
		# NOTE(review): no memoization — this recursion is exponential in
		# the column span; fillColumn() below avoids the worst of it by
		# only asking for adjacent-column distances.
		minimum = limit
		middle = int((m+n)/2)
		for k in range(0,len(A[m])) :
			s = distance(m,i,middle,k) + distance(middle,k,n,j) - A[middle][k]
			if s < minimum:
				minimum = s
		return minimum
def fillColumn(n):
	'''corresponding to each case in n th column, return the distance
	from the first column

	Returns a list l where l[k] is the minimal path sum from any cell of
	column 0 to cell (n, k), computed by recursing on column n-1.'''
	l = []
	if n == 0:
		# Base case: a path ending where it starts costs just that cell.
		for i in range(0,L):
			l.append(A[0][i])
		return l
	else:
		l1 = fillColumn(n-1)
		for k in range(0,L) :
			minimum = limit
			for i in range(0,L) :
				# l1[i] already includes A[n-1][i] and distance() counts it
				# again as its starting cell, hence the subtraction.
				t= l1[i] + distance (n-1,i,n,k) - A[n-1][i]
				if t < minimum: minimum = t
			l.append(minimum)
		return l
# Project Euler 82: minimal path sums ending in the last (index 79 = 80th)
# column; the answer is the smallest of them.
X= fillColumn(79)
print X, min(X)
| {
"repo_name": "nguyenkims/projecteuler-python",
"path": "src/p82.py",
"copies": "1",
"size": "1603",
"license": "mit",
"hash": -7209763780339564000,
"line_mean": 22.231884058,
"line_max": 77,
"alpha_frac": 0.5651902682,
"autogenerated": false,
"ratio": 2.1989026063100137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7817135655727168,
"avg_score": 0.08939144375656902,
"num_lines": 69
} |
"""An LRU bounded set.
This module implements a Least Recently Used bounded set.
"""
class _Node(object):
__slots__ = ('value', 'previous', 'next')
# For use as a doubly linked list.
def __init__(self, value, previous_node=None, next_node=None):
self.value = value
self.previous = previous_node
self.next = next_node
class LRUSet(object):
"""Implements a bounded lru set with O(1) behavior.
"""
def __init__(self, max_size):
self.max_size = max_size
self.current_size = 0
self._lookup_table = {}
self._head = None
self._tail = None
def __contains__(self, item):
if item in self._lookup_table:
# Remove it from its current position and
# add it to the end of the list.
node = self._lookup_table.get(item)
if node != self._head and node != self._tail:
# We can shortcut the add/remove step
# by just moving the node to the end of the
# linked list.
node.previous.next = node.next
node.next.previous = node.previous
self._tail.next = node
self._tail = node
else:
self.remove(item)
self.add(item)
return True
else:
return False
def add(self, item):
# Interestingly, this next line is faster than
# 'item in self'
if self.__contains__(item):
return
node = _Node(item, previous_node=self._tail)
self._lookup_table[item] = node
self.current_size += 1
# If this is the first time an item is being
# added, the head reference needs to be set.
if self._head is None:
self._head = node
node.previous = None
# If this is the second time an item is being
# added, the tail reference needs to be set.
elif self._tail is None:
self._head.next = node
node.previous = self._head
self._tail = node
# From the third item and onwards, simply
# add the item to the end of the list.
else:
self._tail.next = node
self._tail = node
if self.current_size > self.max_size:
# The head reference is always the least
# recently used. This operation is inlined
# instead of just calling remove() for faster
# performance (~8-9% improvement).
del self._lookup_table[self._head.value]
self._head = self._head.next
self.current_size -= 1
def remove(self, item):
node = self._lookup_table[item]
# Again, there is the need to special case if its
# either the head or tail reference that is going to
# be removed.
if node is self._head:
self._head = node.next
elif node is self._tail:
self._tail = node.previous
else:
node.previous.next = node.next
node.next.previous = node.previous
del self._lookup_table[item]
self.current_size -= 1
def __len__(self):
return self.current_size
def __iter__(self):
current = self._head
while current is not None:
yield current.value
current = current.next
| {
"repo_name": "jamesls/lruset",
"path": "lruset.py",
"copies": "1",
"size": "3394",
"license": "bsd-3-clause",
"hash": 674682309699385900,
"line_mean": 32.2745098039,
"line_max": 66,
"alpha_frac": 0.5421331762,
"autogenerated": false,
"ratio": 4.2531328320802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001463271875914545,
"num_lines": 102
} |
''' anlyzClassifTree: annotate dot trees produced by weka's graphviz plugin
Created on Apr 28, 2019
@version 0.1 191020
@author: rik
'''
import math
import re
import pygraphviz as pgv
import pygraphml
class DTNode():
	"""One node of a Weka J48 decision tree parsed from a dot graph.

	A node is either a leaf (class prediction plus possibly-fractional
	sample counts) or an internal decision node (the attribute tested).
	Edges to neighbours are DTLink objects kept in outNbr/inNbr.
	"""

	def __init__(self,id):
		self.id = id
		self.name = ''
		# None until useName() classifies the node as leaf / internal.
		self.leaf = None
		# Leaf-only fields (see useName()).
		self.predicate = None
		self.nsample = 0
		self.nneg = 0
		self.npos = 0
		self.nsuccess = 0
		self.misclass = 0
		# Internal-node-only field: the attribute tested here.
		self.attrib = None
		# Subtree aggregates, filled by accummSample()/accummSuccess().
		self.cummPos = 0
		self.cummNeg = 0
		self.totSample = 0
		self.cummSucc = 0
		self.cummFail = 0
		self.outNbr = []
		self.inNbr = []
		self.depth = None

	def __str__(self):
		return '%s' % (self.id)

	def addToNbr(self,nbr,lbl):
		"""Create a DTLink self -> nbr labelled lbl, register it on both
		endpoints and in the module-level allEdges table; return it."""
		l = DTLink(self,nbr,lbl)
		self.outNbr.append(l)
		nbr.inNbr.append(l)
		global allEdges
		allEdges[(self.id,nbr.id)] = l
		return l

	def useName(self,name):
		"""Classify this node from its dot label.

		Internal nodes are labelled with just the attribute name; leaf
		labels match LeafNamePat, e.g. "pred (nsample[/misclass])".
		"""
		self.name = name
		if name.find('(') == -1:
			# No parenthesised counts: an internal decision node.
			self.leaf = False
			self.attrib = name
		else:
			self.leaf = True
			m = re.match(LeafNamePat,name)
			if m == None:
				print('addName: unmatched name?!', self, name)
				return
			mdict = m.groupdict()
			self.predicate = (True if mdict['pred'] == BoolTrueString else False)
			# https://waikato.github.io/weka-wiki/not_so_faq/j48_numbers/
			# First number = instances (weight) reaching the leaf; second =
			# misclassified. Missing attribute values yield fractional counts.
			self.nsample = float(mdict['nsample'])
			if mdict['misclass'] == None:
				self.misclass = 0
			else:
				self.misclass = float(mdict['misclass'])
			self.nsuccess = self.nsample - self.misclass
			# Split the sample into pos/neg according to the predicted class.
			if self.predicate:
				self.npos = self.nsample - self.misclass
				self.nneg = self.nsample - self.npos
			else:
				self.nneg = self.nsample - self.misclass
				self.npos = self.nsample - self.nneg

	def info(self):
		"""Return a short summary: '.'-prefixed for leaves, '?' for tests."""
		if self.leaf:
			istr = '. %s %d/%d' % (self.predicate,self.npos,self.nneg)
		else:
			istr = '? %s' % (self.attrib)
		return istr

	def accummSample(self):
		"""Recursively total (pos, neg, all) sample counts over the subtree.

		NOTE: accumulates onto the initial zeros — call at most once per node.
		(Removed a dead local `tot = 0` that was never used.)
		"""
		if self.leaf:
			assert len(self.outNbr) == 0, 'leaves have no outNbr?!'
			self.cummPos = self.npos
			self.cummNeg = self.nneg
			self.totSample = self.nsample
			return self.npos, self.nneg, self.nsample
		for edge in self.outNbr:
			onbr = edge.target
			onpos,onneg,otot = onbr.accummSample()
			self.cummPos += onpos
			self.cummNeg += onneg
			self.totSample += otot
		return self.cummPos, self.cummNeg, self.totSample

	def accummSuccess(self):
		"""Recursively total (correct, misclassified) counts over the subtree."""
		nsucc = 0
		nfail = 0
		if self.leaf:
			self.cummSucc = self.nsuccess
			self.cummFail = self.misclass
			return self.nsuccess, self.misclass
		for edge in self.outNbr:
			onbr = edge.target
			onnsucc, onnfail = onbr.accummSuccess()
			nsucc += onnsucc
			nfail += onnfail
		self.cummSucc = nsucc
		self.cummFail = nfail
		return nsucc,nfail

	def cummCounts(self):
		"""Return the subtree totals as 'total = pos + neg'."""
		istr = '%d = %d + %d' % (self.totSample,self.cummPos,self.cummNeg)
		return istr
class DTLink():
	"""A labelled, directed edge of the decision tree (src -> target).

	The dot edge label is parsed with EdgePat into a relation (e.g. '<=')
	and a threshold value.
	"""

	def __init__(self,src,target,lbl):
		self.src = src
		self.target = target
		self.label = lbl
		parsed = EdgePat.match(lbl)
		if parsed is None:
			print('DTLink: unmatched lbl?!', self, lbl)
			return
		fields = parsed.groupdict()
		self.reln = fields['reln']
		self.val = fields['val']

	def __str__(self):
		return '%s->%s: %s %s' % (self.src.id, self.target.id, self.reln, self.val)
# https://waikato.github.io/weka-wiki/not_so_faq/j48_numbers/
# What do those numbers mean in a J48 tree?
# The first number is the total number of instances (weight of instances) reaching the leaf.
# The second number is the number (weight) of those instances that are misclassified.
# Leaf labels look like "pred (nsample)" or "pred (nsample/misclass)";
# the misclass group is optional.
LeafNameRE = r'(?P<pred>.+) \((?P<nsample>[0-9.]+)(/(?P<misclass>[0-9.]+))?\)'
LeafNamePat = re.compile(LeafNameRE)
# Edge labels look like "<relation> <value>", e.g. "<= 0.6".
EdgeRE = r'(?P<reln>.+) (?P<val>.+)'
EdgePat = re.compile(EdgeRE)
def loadGraphML(inf):
	"""Parse the GraphML file `inf` and return the resulting pygraphml graph."""
	return pygraphml.GraphMLParser().parse(inf)
def bldDT(dotGraph):
	'''Build a DTNode decision tree from a pygraphviz dot graph.

	Resets the module-level `visited` (id -> DTNode) and `allEdges`
	((srcID, tgtID) -> DTLink) tables, then walks the dot graph
	depth-first from its first node, mirroring each dot node/edge as a
	DTNode/DTLink. Returns the root DTNode. Asserts the tree is binary
	(every node has 0 or 2 successors).
	'''
	global visited
	global allEdges
	def _dfs(g,momDT,kidNd,kidID,depth,elbl):
		'NB: _dfs passed mom=DTNode, kid=dotGraph.Node'
		global visited
		# The DTNode was pre-created by the caller; now its label is known.
		name = kidNd.attr['label']
		kidDT = visited[kidID]
		kidDT.useName(name)
		edge = momDT.addToNbr(kidDT,elbl)
		succ = dotGraph.successors(kidID)
		assert len(succ) == 0 or len(succ) == 2, 'ASSUME binary tree?! %s' % kidDT
		for succIdx,toNd in enumerate(succ):
			dedge = dotGraph.get_edge(kidID,toNd)
			tondID = str(toNd)
			# NB: create DTNode, but don't yet know it's label attribute
			toDT = DTNode(tondID)
			toDT.depth = depth+1
			visited[tondID] = toDT
			elbl = dedge.attr['label']
			_dfs(dotGraph,kidDT,toNd,tondID,depth+1,elbl)
	# The first node of the dot graph is taken to be the tree root.
	root = dotGraph.nodes()[0]
	rootname = root.attr['label']
	visited = {}
	allEdges = {}
	rootID = str(root)
	# NB: dotGraph IDs also used for DTNodes, to facilitate forward-linking by DTNode.addToNbr()
	dt = DTNode(rootID)
	dt.useName(rootname)
	dt.depth = 0
	visited[rootID] = dt
	succ = dotGraph.successors(root)
	assert len(succ) == 0 or len(succ) == 2, 'ASSUME binary tree?! %s' % dt
	for succIdx,toNd in enumerate(succ):
		dedge = dotGraph.get_edge(root,toNd)
		elbl = dedge.attr['label']
		toID = str(toNd)
		toDT = DTNode(toID)
		toDT.depth = 1
		visited[toID] = toDT
		# NB: _bfs passed dt=DTNode, toNd=dotGraph.Node()
		_dfs(dotGraph,dt,toNd,toID,1,elbl)
	return dt
def rptDT(allDTID):
    """Print one indented report line per node ID in allDTID: the node,
    its info string, and its cumulative counts (reads module-level
    `visited`)."""
    for dtID in allDTID:
        dt = visited[dtID]
        # (dropped an unused local that duplicated the dt.info() call)
        print('%s %s %s %s' % (dt.depth*' ', dt, dt.info(), dt.cummCounts()))
def dtAttrib2dot(gname,maxLeafSample,newdotf):
    '''produce dot graph with attributes appropriate to trained decision tree
    - LEAF node size proportional to nsample
    - decision reln,val incorporated into node label
    - also cummPos,cummNeg,nsuccess,nfail
    - different shapes for TRUE/FALSE
    - 2do: fill indicating npos/nneg

    Reads the module-level `visited` and `allEdges` built by bldDT();
    writes the annotated dot file to newdotf.
    '''
    minRadius = 0.125
    maxRadius = 2.
    def _scale(nsample):
        # Diameter grows with sqrt of the sample fraction, so node AREA
        # tracks nsample.
        s = float(nsample)/maxLeafSample
        r = math.sqrt(s)
        d = 2 * (maxRadius * r + minRadius)
        return d
    # Attribute groups, used only to pick a fill color for decision nodes.
    piAttr = {'fval': 1, 'nedge': 1}
    ggenAttr = {'degree','ggen','q'}
    global visited
    global allEdges
    allDTID = list(visited.keys())
    allDTID.sort(key=lambda k: int(k[1:])) # drop 'N' prefix for node, treat as int
    # Context manager guarantees the file is closed even on error
    # (was a manual open/close pair); also removed two unused locals.
    with open(newdotf,'w') as dots:
        dots.write('digraph %s {\n' % (gname))
        for dtID in allDTID:
            dt = visited[dtID]
            if dt.leaf:
                shape = 'circle' if dt.predicate else 'square'
                size = _scale(dt.totSample)
                sizeStr = ('%6.3f' % (size)).strip()
                lbl = '%d / %d' % (dt.npos,dt.nneg)
                lineType = 'solid'
                ndLine = '%s [label="%s" shape="%s" size="%s" style="%s" ]\n' % \
                    (dt.id,lbl,shape,sizeStr,lineType)
            else:
                shape = 'diamond'
                outlink = dt.outNbr[0]
                if dt.attrib in piAttr:
                    color = 'aquamarine'
                elif dt.attrib in ggenAttr:
                    color = 'mistyrose'
                else:
                    color = 'cornsilk'
                lbl = '%s %s %s\n(%d/%d)\n[%d/%d]' % (dt.attrib,outlink.reln,outlink.val, \
                    dt.cummPos,dt.cummNeg,dt.cummSucc,dt.cummFail)
                ndLine = '%s [label="%s" shape="%s" size="1" style="filled" fillcolor="%s" ]\n' % \
                    (dt.id,lbl,shape,color)
            dots.write(ndLine)
        # Emit edges sorted by (source, target) numeric node IDs.
        allEdgePairs = list(allEdges.keys())
        allEdgePairs.sort(key=lambda k: (int(k[0][1:]),int(k[1][1:])))
        for epair in allEdgePairs:
            dots.write('%s->%s\n' % (epair[0],epair[1]))
        dots.write('}\n')
if __name__ == '__main__':
    gname = 'graphName'
    dataDir = '<pathToFile>'  # placeholder: directory containing the .dot file
    dotfile = dataDir + gname + '.dot'
    dotGraph = pgv.AGraph(dotfile)  # pygraphviz graph parsed from the dot file
    # NOTE(review): "global" at module level is a no-op; these statements
    # only document intent.
    global BoolTrueString
    BoolTrueString = 'TRUE' # 't'
    assert dotGraph.directed == True, 'ASSUME DIRECTED graph?!'
    dt = bldDT(dotGraph)
    # Accumulate sample and success counts bottom-up over the tree.
    npos,nneg,totsample = dt.accummSample()
    nsucc,nfail = dt.accummSuccess()
    global visited
    allDTID = list(visited.keys())
    allDTID.sort(key=lambda k: int(k[1:])) # drop 'N' prefix for node, treat as int
    # Find the leaf with the largest sample weight; used to scale node sizes.
    maxLeaf = None
    maxLeafSample = 0
    for dtID in allDTID:
        dt = visited[dtID]
        if dt.leaf and dt.totSample > maxLeafSample:
            maxLeafSample = dt.totSample
            maxLeaf = dt
    print('done TotSample=%d NPos=%d NNeg=%d MaxLeafSample=%d (%s)' % \
        (totsample,npos,nneg,maxLeafSample,maxLeaf.id))
    newdotf = dataDir + '%s-dtattrib.dot' % (gname)
    dtAttrib2dot(gname,maxLeafSample,newdotf)
print('done') | {
"repo_name": "rbelew/rikHak",
"path": "anlyzClassifTree.py",
"copies": "1",
"size": "8355",
"license": "apache-2.0",
"hash": -2588295067356979000,
"line_mean": 24.2447129909,
"line_max": 108,
"alpha_frac": 0.6488330341,
"autogenerated": false,
"ratio": 2.562883435582822,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3711716469682822,
"avg_score": null,
"num_lines": null
} |
"""An LZW-based string packer.
LzwLib stores a set of strings as compressed binary data. It can also
generate Pico-8 Lua code capable of accessing a string given a string's ID (
returned by the encoder method). The goal is to make it easy to write Pico-8
games that use a large quantity of English text without storing that text in
the code region of the cart.
All strings added to the data structure contribute to a single decoder
dictionary. The Lua client generates this dictionary from the complete LZW
data the first time a string is accessed.
The ID of a string is a 16-bit value equal to the address of the compressed
data for the string in memory. The compressed string is stored as two bytes
representing its compressed size (LSB first) followed by the codes. The
ID is returned encoded as a string of 6-bit characters codes in the "pscii"
character set.
This is meant to be a drop-in replacement for TextLib, which was a silly idea
that never compressed very well. Unlike TextLib, LzwLib does not distinguish
between word and non-word characters, and preserves all characters including
spaces, with the exception of newlines which are converted to single spaces.
"""
from collections import OrderedDict
import re
__all__ = ['LzwLib', 'encode_pscii']
# A character set, which I'm going to call "pscii", consisting of all of the
# characters supported by TextLib. This corresponds to all of the characters
# supported by Pico-8 v0.1.3. Notable missing chars include: $ \ @ ` (I
# believe 0.1.4 will add support for "\".)
CHAR_TABLE = ' !"#%\'()*+,-./0123456789:;<=>?abcdefghijklmnopqrstuvwxyz[]^_{~}'
# For string-encoding IDs, add a character to make it an even 64 chars.
CHAR_TABLE_FOR_SID = CHAR_TABLE + '@'
# The Lua code for a string unpacker.
#
# _t(sid) unpacks and returns one or more strings. sid is a string containing
# one or more string-encoded string IDs, three chars each. If sid contains
# multiple three-char IDs, the return value is each of those strings
# concatenated. This allows for the original Lua source to concatenate string
# IDs as if they are the original string, then call _t() at the last moment
# to unpack the aggregate result.
#
# _c(o) converts a character code to a single-character string.
# _o(c) converts a single-character string to its character code (or nil).
# _tlinit is true after the first call to _t().
# _tladdr is the starting address of the compressed data.
# _ct is the character table.
#
# (Note: Unlike TextLib's P8ADVENT_LUA_PAT, this is not a format pattern.)
# BUGFIX: the dictionary-growth guard referenced the (nil) Lua global
# `tl.mt` instead of `_tl.mt`, which would raise an index-nil error the
# first time _t() built the decoder dictionary.
P8ADVENT_LUA = """
_tl={a=nil,t=nil,d=nil,w=nil}
function _tl:c(o) return sub(_tl.t,o+1,o+1) end
function _tl:o(c)
local i
for i=1,#self.t do
if sub(self.t,i,i)==c then return i-1 end
end
return 63
end
function _t(s)
local p,r,c,n,i,a,l,bp,w,b
local tid
if _tl.d == nil then
_tl.d={}
n=bor(peek(_tl.a),shl(peek(_tl.a+1),8))
a=_tl.a+2
while n>0 do
p=nil
i=bor(peek(a),shl(peek(a+1),8))
a+=5
bp=0
while i>0 do
c=0
w=_tl.w
for bi=1,w do
b=band(shr(peek(a),bp),1)
c=bor(c,shl(b,bi-1))
bp+=1
if bp==8 then
a+=1
bp=0
end
end
r=nil
if c<=(#_tl.t-1) then
r=_tl:c(c)
elseif _tl.d[c-#_tl.t+1]~=nil then
r=_tl.d[c-#_tl.t+1]
end
if p~=nil and #_tl.d+#_tl.t<_tl.mt then
if r~=nil then
_tl.d[#_tl.d+1]=p..sub(r,1,1)
else
_tl.d[c-#_tl.t+1]=p..sub(p,1,1)
r=_tl.d[c-#_tl.t+1]
end
if #_tl.d+#_tl.t==(2^_tl.w-1) then
_tl.w+=1
end
end
p=r
i-=1
end
n-=1
if bp~=0 then
a+=1
bp=0
end
end
end
r=''
for i=1,#s,3 do
a=bor(bor(_tl:o(sub(s,i,i)),
shl(_tl:o(sub(s,i+1,i+1)),6)),
shl(_tl:o(sub(s,i+2,i+2)),12))
l=bor(peek(a),shl(peek(a+1),8))
a+=2
tid=bor(peek(a),shl(peek(a+1),8))
a+=2
_tl.w=peek(a)
a+=1
bp=0
while l>0 do
c=0
w=_tl.w
for bi=1,w do
b=band(shr(peek(a),bp),1)
c=bor(c,shl(b,bi-1))
bp+=1
if bp==8 then
a+=1
bp=0
end
end
l-=1
if c<=(#_tl.t-1) then
r=r.._tl:c(c)
else
r=r.._tl.d[c-#_tl.t+1]
end
tid+=1
if tid==(2^_tl.w) then
_tl.w+=1
end
end
end
return r
end
"""
LZW_STARTING_WIDTH = 7
MAX_TABLE_ENTRY_COUNT = 4096
def _generate_lua(start_addr):
    """Generate the Lua code for the string unpacker.

    Args:
      start_addr: The starting address of the data region.

    Returns:
      The Lua code, as a string.
    """
    # Strip leading indentation to shrink the code's character footprint.
    unpacker = re.sub(r'\n +', '\n', P8ADVENT_LUA)
    # Escape double quotes so the char table embeds in a Lua string literal.
    char_table_lua = re.sub(r'"', '"..\'"\'.."', CHAR_TABLE)
    settings = '_tl.t="{}"\n_tl.a={}\n_tl.w={}\n_tl.mt={}\n'.format(
        char_table_lua, start_addr, LZW_STARTING_WIDTH,
        MAX_TABLE_ENTRY_COUNT)
    return unpacker + '\n' + settings
class Error(Exception):
    """A base class for errors.

    All exceptions raised by this module derive from this class, so
    callers can catch everything with a single `except Error` clause.
    """
    pass
class CharOutOfRange(Error):
"""A character was in a string that is not supported by pscii."""
def __init__(self, *args, **kwargs):
self.char = kwargs.get('char')
self.pos = kwargs.get('pos')
super().__init__(*args, **kwargs)
def __str__(self):
return ('Character out of range: {}, pos:{}'.format(
repr(self.char),
self.pos))
class TooMuchDataError(Error):
"""The compressed data does not fit in the given cart data range.
"""
def __init__(self, msg):
self._msg = msg
def __str__(self):
return 'Too much data: {}'.format(self._msg)
def encode_pscii(s):
"""Encode an ASCII string as a bytestring in terms of the character table.
Args:
s: The Python string to encode.
Returns:
The bytestring of indexes into CHAR_TABLE.
Raises:
ValueError: The string contains a character not in CHAR_TABLE.
"""
result = bytearray()
lower_s = s.lower()
i = c = None
try:
for i, c in enumerate(lower_s):
result.append(CHAR_TABLE.index(c))
except ValueError as e:
raise CharOutOfRange(c, i)
return bytes(result)
class LzwLib:
    """Accumulates strings into one LZW-compressed blob plus per-string
    records, sharing a single decoder dictionary across all strings."""
    def __init__(self, start_addr=0, end_addr=0x4300):
        """Initializer.

        You can use arguments to customize the addresses and maximum memory
        ranges for the compressed data in the cart and for the lookup
        dictionary in RAM.

        Args:
          start_addr: The Pico-8 cart data starting address for the data.
          end_addr: The Pico-8 cart data ending address for the data + 1.
        """
        self._start_addr = start_addr
        self._end_addr = end_addr
        # Maps normalized source string -> 3-char encoded string ID.
        self._string_id_map = dict()
        # Concatenated compressed records for all strings added so far.
        self._data = bytearray()
        # LZW dictionary, seeded with every single pscii character.
        self._dict = OrderedDict(
            (CHAR_TABLE[i], i) for i in range(len(CHAR_TABLE)))
        self._code_width = LZW_STARTING_WIDTH
        self._code_bit_pos = 0
        self._code_buffer = None
    def id_for_string(self, s):
        """Add *s* to the library (if new) and return its 3-char string ID.

        Whitespace runs are collapsed to single spaces and the string is
        lowercased first, so equivalent strings share one record.
        """
        s = re.sub(r'\s+', ' ', s.lower())
        if s not in self._string_id_map:
            # Dictionary/width state at the start of this record, stored in
            # the record header so the decoder can resume mid-stream.
            # TODO: dict length - 1?
            expected_table_id = len(self._dict)
            expected_code_width = self._code_width
            self._code_buffer = bytearray()
            self._code_bit_pos = 0
            # The string ID is the record's absolute address: start address
            # + 2-byte string-count header + data written so far.
            sid = self._start_addr + 2 + len(self._data)
            start_i = 0
            code_count = 0
            while start_i < len(s):
                # Grow the match until it falls out of the dictionary.
                end_i = start_i + 1
                while end_i < len(s) and s[start_i:end_i] in self._dict:
                    end_i += 1
                created_new_entry = None
                if s[start_i:end_i] not in self._dict:
                    # (Condition may or may not be false at the end of the
                    # string, so we check.)
                    if len(self._dict) < MAX_TABLE_ENTRY_COUNT:
                        self._dict[s[start_i:end_i]] = len(self._dict)
                        created_new_entry = self._dict[s[start_i:end_i]]
                    end_i -= 1
                code = self._dict[s[start_i:end_i]]
                # Emit `code` LSB-first into the bit-packed buffer.
                for i in range(self._code_width):
                    if self._code_bit_pos == 0:
                        self._code_buffer.append(0)
                    self._code_buffer[-1] |= (code & 1) << self._code_bit_pos
                    code >>= 1
                    self._code_bit_pos = (self._code_bit_pos + 1) % 8
                # Widen codes once the newest entry saturates the width.
                if (created_new_entry is not None and
                        created_new_entry == (2**self._code_width - 1)):
                    self._code_width += 1
                code_count += 1
                start_i = end_i
            # Record: code count (LSB first), starting table id, starting
            # code width, then the packed codes.
            self._data.append(code_count & 255)
            self._data.append(code_count >> 8)
            self._data.append(expected_table_id & 255)
            self._data.append(expected_table_id >> 8)
            self._data.append(expected_code_width)
            self._data.extend(self._code_buffer)
            # Encode the 16-bit address as three 6-bit pscii characters.
            encoded_sid = (CHAR_TABLE_FOR_SID[sid & 63] +
                           CHAR_TABLE_FOR_SID[(sid >> 6) & 63] +
                           CHAR_TABLE_FOR_SID[(sid >> 12) & 63])
            self._string_id_map[s] = encoded_sid
        return self._string_id_map[s]
    def as_bytes(self):
        """Get the binary data for the packed text.

        Returns:
          The data, as a bytearray.

        Raises:
          TooMuchDataError: The given strings do not fit into the memory
            ranges given to __init__.
        """
        # Blob is prefixed with a 2-byte (LSB first) string count.
        string_count = len(self._string_id_map)
        data = (bytearray([string_count & 255, string_count >> 8]) +
                self._data)
        # Debug statistics comparing raw vs. compressed footprint.
        total_string_size = sum(len(k) for k in self._string_id_map.keys())
        compressed_data_size = len(data)
        lookup_table_count = len(self._dict)
        lookup_table_size = sum(len(k) for k in self._dict.keys())
        print(
            'DEBUG: unique string count: {string_count}\n'
            'DEBUG: total unique string size: {total_string_size}\n'
            'DEBUG: lookup table entry count: {lookup_table_count}\n'
            'DEBUG: compressed data size: {compressed_data_size}\n'
            'DEBUG: lookup table size: {lookup_table_size}\n'
            .format(**locals()))
        if len(data) > (self._end_addr - self._start_addr):
            raise TooMuchDataError(
                'compressed data is too large: {} bytes do not fit between '
                'addresses {} and {}'.format(
                    len(data), self._start_addr, self._end_addr))
        return data
    def generate_lua(self):
        """Generate the Lua code for accessing this LzwLib.

        Returns:
          The Lua code.
        """
        return _generate_lua(self._start_addr)
| {
"repo_name": "dansanderson/p8advent",
"path": "p8advent/lzwlib.py",
"copies": "1",
"size": "10643",
"license": "mit",
"hash": 7100040366224698000,
"line_mean": 29.3219373219,
"line_max": 79,
"alpha_frac": 0.5711735413,
"autogenerated": false,
"ratio": 3.294026617146394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4365200158446394,
"avg_score": null,
"num_lines": null
} |
""" An (master) roi experiment for the catreward project. """
from roi.base import Roi, Mean
class Catreward(Roi):
    """ A Roi analysis class, customized for the catreward project. """
    def __init__(self, TR, roi_name, trials, durations, data):
        Roi.__init__(self, TR, roi_name, trials, durations, data)
        # Use this ROI's preprocessed bold signal with a double-gamma HRF.
        self.data['meta']['bold'] = self.roi_name
        self.create_bold(preprocess=True)
        self.create_hrf(function_name='double_gamma')
    def _run_model(self, data_to_use, box=True):
        """ Shared worker for every model_* method: record the
        design-matrix metadata, build the parametric design matrix
        (with a leading boxcar regressor unless box is False), then fit
        with zscore normalization. """
        if box:
            self.data['meta']['dm'] = ["box",] + data_to_use
            self.create_dm_param(names=data_to_use)
        else:
            self.data['meta']['dm'] = data_to_use
            self.create_dm_param(names=data_to_use, box=False)
        self.fit(norm='zscore')
    # --
    # Responses
    def model_020(self):
        """ Behavioral/category responses as separate regressors. """
        self._run_model(['resp1', 'resp6'], box=False)
    def model_021(self):
        """ The correct category responses as separate regressors. """
        self._run_model(['cresp1', 'cresp6'], box=False)
    # --
    # Reaction times
    def model_03(self):
        """ Reaction times. """
        self._run_model(['rt'])
    # --
    # Distances
    def model_040(self):
        """ Outcome parameter distances. """
        self._run_model(['distance'])
    def model_041(self):
        """ Contra-outcome parameter distances. """
        self._run_model(['distance_opp'])
    # --
    # Simlarity metrics
    def model_042(self):
        """ Outcome similarity (euclidian). """
        self._run_model(['rdis'])
    def model_043(self):
        """ Outcome similarity (exponential). """
        self._run_model(['exp'])
    def model_044(self):
        """ Outcome similarity (gaussian). """
        self._run_model(['gauss'])
    def model_045(self):
        """ Contra-outcome similarity (exponential). """
        self._run_model(['exp_opp'])
    def model_046(self):
        """ Contra-outcome similarity (gaussian). """
        self._run_model(['gauss_opp'])
    def model_047(self):
        """ Outcome and contra-outcome similarities (exponential),
        as separate regressors. """
        self._run_model(['exp', 'exp_opp'])
    def model_048(self):
        """ Outcome and contra-outcome similarities (gaussian),
        as separate regressors. """
        self._run_model(['gauss', 'gauss_opp'])
    # --
    # Gains and losses
    def model_050(self):
        """ Gains and losses. """
        self._run_model(['gl'])
    def model_051(self):
        """ Gains and losses, diminished by (exponential) similarity. """
        self._run_model(['gl_exp'])
    def model_052(self):
        """ Gains and losses, diminished by (gaussian) similarity. """
        self._run_model(['gl_gauss'])
    def model_053(self):
        """ Gains and losses, diminished by (euclidian) similarity. """
        self._run_model(['gl_rdis'])
    # --
    # Accuracy
    def model_060(self):
        """ Behavioral accuracy. """
        self._run_model(['acc'])
    def model_061(self):
        """ Behavioral accuracy, diminished by (exponential) similarity. """
        self._run_model(['acc_exp'])
    def model_062(self):
        """ Behavioral accuracy, diminished by (gaussian) similarity. """
        self._run_model(['acc_gauss'])
    def model_063(self):
        """ Behavioral accuracy, diminished by (euclidian) similarity. """
        self._run_model(['acc_rdis'])
    # --
    # RL - normal
    def model_070(self):
        """ RPE - derived from accuracy. """
        self._run_model(['rpe_acc'])
    def model_071(self):
        """ Value - derived from accuracy. """
        self._run_model(['value_acc'])
    def model_072(self):
        """ RPE - derived from gains and losses. """
        self._run_model(['rpe_gl'])
    def model_073(self):
        """ Value - derived from gains and losses. """
        self._run_model(['value_gl'])
    # --
    # RL - similarity diminished
    # Accuracy
    def model_080(self):
        """ RPE - derived from accuracy diminished by (exponential)
        similarity. """
        self._run_model(['rpe_acc_exp'])
    def model_081(self):
        """ Value - derived from accuracy diminished by (exponential)
        similarity. """
        self._run_model(['value_acc_exp'])
    def model_090(self):
        """ RPE - derived from accuracy diminished by (gaussian)
        similarity. """
        self._run_model(['rpe_acc_gauss'])
    def model_091(self):
        """ Value - derived from accuracy diminished by (gaussian)
        similarity. """
        self._run_model(['value_acc_gauss'])
    def model_100(self):
        """ RPE - derived from accuracy diminished by (euclidian)
        similarity. """
        self._run_model(['rpe_acc_rdis'])
    def model_101(self):
        """ Value - derived from accuracy diminished by (euclidian)
        similarity. """
        self._run_model(['value_acc_rdis'])
    # Gains and losses
    def model_110(self):
        """ RPE - derived from gains and losses diminished by (exponential)
        similarity. """
        self._run_model(['rpe_gl_exp'])
    def model_111(self):
        """ Value - derived from gains and losses diminished by (exponential)
        similarity. """
        self._run_model(['value_gl_exp'])
    def model_120(self):
        """ RPE - derived from gains and losses diminished by (gaussian)
        similarity. """
        self._run_model(['rpe_gl_gauss'])
    def model_121(self):
        """ Value - derived from gains and losses diminished by (gaussian)
        similarity. """
        self._run_model(['value_gl_gauss'])
    def model_130(self):
        """ RPE - derived from gains and losses diminished by (euclidian)
        similarity. """
        self._run_model(['rpe_gl_rdis'])
    def model_131(self):
        """ Value - derived from gains and losses diminished by (euclidian)
        similarity. """
        self._run_model(['value_gl_rdis'])
class CatMean(Mean, Catreward):
    """ A Roi analysis class, customized for the catreward project.

    Unlike Catreward, this reads in the average bold data from a
    text file. """
    def __init__(self, TR, roi_name, trials, durations, data):
        # NOTE(review): both parent initializers run explicitly, so
        # Catreward.__init__ already sets bold metadata and creates
        # bold/hrf once before the recreation below -- confirm this
        # repeated initialization is intended.
        Catreward.__init__(self, TR, roi_name, trials, durations, data)
        Mean.__init__(self, TR, roi_name, trials, durations, data)
        self.data['meta']['bold'] = self.roi_name
        self.create_bold(preprocess=True)
        self.create_hrf(function_name='double_gamma')
| {
"repo_name": "parenthetical-e/fmri",
"path": "catreward/roi/archive/base.py",
"copies": "1",
"size": "11286",
"license": "bsd-2-clause",
"hash": 136113654652819020,
"line_mean": 25.6808510638,
"line_max": 78,
"alpha_frac": 0.5436824384,
"autogenerated": false,
"ratio": 3.310648283954239,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4354330722354239,
"avg_score": null,
"num_lines": null
} |
# Anna and Brian order n items at a restaurant, but Anna declines to eat any of
# the kth item (where 0 <= k < n) due to an allergy. When the check comes, they
# decide to split the cost of all the items they shared; however, Brian may
# have forgotten that they didn't split the kth item and accidentally charged
# Anna for it.
#
# You are given n, k, the cost of each of the n items, and the total amount of
# money that Brian charged Anna for her portion of the bill. If the bill is
# fairly split, print Bon Appetit; otherwise, print the amount of money that
# Brian must refund to Anna.
#
# Input Format:
# The first line contains two space-separated integers denoting the respective
# values of n (the number of items ordered) and k (the 0-based index of the
# item that Anna did not eat).
#
# The second line contains n space-separated integers where each integer i
# denotes the cost, c[i], of item i (where 0 <= i < n).
#
# The third line contains an integer, b, denoting the amount of money that
# Brian charged Anna for her share of the bill.
#
# Constraints:
# 2 <= n <= 10^5
# 0 <= k < n
# 0 <= c[i] <= 10^4
# 0 <= b <= sum(c[i])
#
# Output Format:
# If Brian did not overcharge Anna, print Bon Appetit on a new line; otherwise,
# print the difference (i.e., charged - actual) that Brian must refund to Anna
# (it is guaranteed that this will always be an integer).
# Read n (item count) and k (0-based index of the item Anna skipped).
n, k = map(int, raw_input().split())
# Per-item costs.
costs = map(int, raw_input().split())
# Amount Brian actually charged Anna.
total = int(raw_input())
# Anna's fair share is half the bill excluding item k; any excess of the
# charge over that share is the refund (guaranteed integral; Python 2
# integer division).
refund = total - ((sum(costs) - costs[k]) / 2)
print "Bon Appetit" if refund == 0 else refund
| {
"repo_name": "chinhtle/python_fun",
"path": "hacker_rank/algorithms/implementation/bonappetit.py",
"copies": "1",
"size": "1588",
"license": "mit",
"hash": -3780393130072891400,
"line_mean": 38.7,
"line_max": 79,
"alpha_frac": 0.6958438287,
"autogenerated": false,
"ratio": 3.1445544554455447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43403982841455446,
"avg_score": null,
"num_lines": null
} |
""" Annabel Droste
program to combine the datasets with the countrynames and the coordinates with their observations, consequently the data is put in the correct format
"""
## [{"country": "countryname",
## "observation": [{"genus": "genusname",
## "details": [{"species": "speciesname",
## "number": "number"},
## {"species": etc.}...],
## {"genus": etc.}...]},
## {"country": etc.}...]
from collections import Counter
from collections import defaultdict
import json
import csv
# Paired inputs: each row of a country file names the country for the
# record at the same position in the corresponding JSON dump.
# NOTE(review): delimiter='\n' makes each full line a single field --
# confirm this is the intended way to read the one-column files.
input_file = csv.DictReader(open("C:\Users\Annabel\Documents\GitHub\Project\data\countriescoodump1small.txt"), delimiter='\n')
input_file2 = csv.DictReader(open("C:\Users\Annabel\Documents\GitHub\Project\data\countriescoodump21small.txt"), delimiter='\n')
input_data = open("C:\Users\Annabel\Documents\GitHub\Project\data\datadump11genussmall.json", "r")
input_data2 = open("C:\Users\Annabel\Documents\GitHub\Project\data\datadump21genussmall.json", "r")
data1 = json.load(input_data)
data2 = json.load(input_data2)
new_data1 = []
new_data2 = []
""" replaces the coordinates from the data with the countrynames """
def assignCountries(countryfile, datajson, new_data):
    # Country rows are keyed by a BOM-prefixed 'country' header (the
    # utf-8 BOM '\xef\xbb\xbf' is left in the field name by DictReader).
    # Rows are assumed to align 1:1, in order, with datajson records.
    i = 0
    for row in countryfile:
        datajson[i]["country"]= row["\xef\xbb\xbfcountry"]
        new_data.append(datajson[i])
        i+= 1
""" builds a nested structure, if the country has not been seen before a new dictionary is added, otherwise the data are added to the country key in collect_dict """
def processData(collect_dict, datajson):
    # collect_dict shape: {country: {genus: [vernacularName, ...]}}.
    # The genus is taken as the first word of the scientific name.
    for entry in datajson:
        collect_dict.setdefault(entry["country"], {}).setdefault(entry["scientificName"].split(" ")[0], []).append(entry["vernacularName"])
assignCountries(input_file2, data2, new_data2)
assignCountries(input_file, data1, new_data1)
print "countrynames done"
# Build {country: {genus: [names...]}} from both annotated datasets.
final_form = {}
land = {}
processData(land, new_data1)
processData(land, new_data2)
print "basis data structure"
# Collapse each genus's name list into per-species observation counts.
for el in land:
    i = 0
    final_form[el] = []
    for it in land[el]:
        result = dict(Counter(land[el][it]))
        final_form[el].append({"genus": it, "details": []})
        for species in result:
            final_form[el][i]["details"].append({"species": species, "number": result[species]})
        i += 1
# Reshape into the list-of-countries format documented at the top of
# this file, then dump to disk.
final = []
for i in final_form:
    final.append({"country": i, "observation":final_form[i]})
print "compression datastructure"
with open("landdeelfinal.json", "w") as d:
    json.dump(final,d,encoding="cp1252")
print "done"
| {
"repo_name": "AnnabelD/Project",
"path": "code/scriptspython/final_datastructure.py",
"copies": "1",
"size": "2577",
"license": "mit",
"hash": 3778327865455890400,
"line_mean": 37.4626865672,
"line_max": 165,
"alpha_frac": 0.65774156,
"autogenerated": false,
"ratio": 3.4223107569721116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9497877192704485,
"avg_score": 0.016435024853525475,
"num_lines": 67
} |
# Annabel Droste
# removes all the superfluous information from the dataset and creates a json file with the necessary info.
import csv
import json
# First dataset: tab-separated dump in, trimmed JSON out.
input_file1 = csv.DictReader(open("C:\Users\Annabel\Documents\GitHub\Project\data\dataset_eu2_1.txt"), delimiter='\t')
output_file1 = "C:\Users\Annabel\Documents\GitHub\Project\data\datadump11genussmall.json"
info = []
print "start"
""" remove all the superfluous informations from the dataset """
def processData(file_input, container, file_output):
    # Keep only rows with a nonzero individualCount, strip the columns
    # not needed downstream, and fold lat/lon into a single "country"
    # field (replaced by a country name in a later script).
    for row in file_input:
        if row["individualCount"] != "0":
            map(row.pop,["institutionCode", "collectionCode", "datasetName", "ownerInstitutionCode", \
                "basisOfRecord", "occurrenceID", "catalogNumber", "occurrenceStatus", "samplingEffort", "startDayOfYear",\
                "endDayOfYear", "continent", "kingdom", "phylum", "class", "\xef\xbb\xbfid", "year", "geodeticDatum", "footprintWKT", "taxonID"])
            lat = row.pop("decimalLatitude")
            lon = row.pop("decimalLongitude")
            row["country"] = [lat,lon]
            container.append(row)
    # NOTE(review): file_output is already an absolute path, so prefixing
    # it with the project directory again looks wrong -- confirm the
    # intended output location.
    with open("C:\Users\Annabel\Documents\GitHub\Project\\" + file_output, "w") as o:
        json.dump(container,o, encoding="cp1252")
processData(input_file1,info,output_file1)
# Second dataset, processed the same way with a fresh container.
input_file2 = csv.DictReader(open("C:\Users\Annabel\Documents\GitHub\Project\data\dataset_eu2.txt"), delimiter='\t')
output_file2 = "C:\Users\Annabel\Documents\GitHub\Project\data\datadump21genussmall.json"
info = []
print "start2"
processData(input_file2,info,output_file2)
print "done"
| {
"repo_name": "AnnabelD/Project",
"path": "code/scriptspython/tsv_json_dump_small.py",
"copies": "1",
"size": "1598",
"license": "mit",
"hash": 5443658930186019000,
"line_mean": 42.1891891892,
"line_max": 154,
"alpha_frac": 0.6852315394,
"autogenerated": false,
"ratio": 3.2612244897959184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9233871664260365,
"avg_score": 0.04251687298711091,
"num_lines": 37
} |
# Annales Xantenses
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
# Case 1: Sections split by numbers (Roman or not) followed by a period, or bracketed. Subsections split by <p> tags
def parsecase1(ptags, c, colltitle, title, author, date, URL):
    """Walk the <p> tags of a Latin Library page and INSERT one row per
    passage via the open sqlite cursor `c`.

    All-uppercase paragraphs become chapter headings; other paragraphs
    are verses numbered within the current chapter.
    """
    chapter = '-1'
    verse = 1
    for p in ptags:
        # make sure it's not a paragraph without the main text
        try:
            if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                         'internal_navigation']:  # these are not part of the main text
                continue
        except (KeyError, IndexError, TypeError, AttributeError):
            # Was a bare `except:`; only class-lookup failures are
            # expected here and simply mean the tag has no class filter.
            pass
        text = p.get_text().strip()
        # Skip empty paragraphs. and skip the last part with the collection link.
        if len(text) <= 0 or text.startswith('Medieval\n'):
            continue
        if text.isupper():
            chapter = text
            verse = 0
            continue
        passage = text
        verse += 1
        # check for that last line with the author name that doesn't need to be here
        if passage.startswith('Medieval'):
            continue
        c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
                  (None, colltitle, title, 'Latin', author, date, chapter,
                   verse, passage.strip(), URL, 'prose'))
        if passage.endswith('SIMSON.'):
            chapter = 'ANNALES XANTENSES QUI DICUNTUR.'
            verse = 0
# main code
def main():
    """Scrape the Annales Xantenses from the Latin Library and store the
    parsed passages in the local `texts.db` SQLite database."""
    coll_url = 'http://www.thelatinlibrary.com/xanten.html'
    coll_soup = BeautifulSoup(urllib.request.urlopen(coll_url), 'html5lib')
    author = 'anonymous'
    colltitle = coll_soup.title.string.strip()
    date = '-'
    with sqlite3.connect('texts.db') as db:
        cursor = db.cursor()
        cursor.execute(
            'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
            ' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
            ' link TEXT, documentType TEXT)')
        # Make the run idempotent: drop any rows from a previous run first.
        cursor.execute("DELETE FROM texts WHERE title='Annales qui dicuntur Xantenses'")
        for url in [coll_url]:
            page_soup = BeautifulSoup(urllib.request.urlopen(url), 'html5lib')
            paragraphs = page_soup.find_all('p')
            parsecase1(paragraphs, cursor, colltitle, colltitle, author, date, url)
    logger.info("Program runs successfully.")


if __name__ == '__main__':
    main()
| {
"repo_name": "oudalab/phyllo",
"path": "phyllo/extractors/xanten.py",
"copies": "1",
"size": "2765",
"license": "apache-2.0",
"hash": 5289074177057845000,
"line_mean": 35.3815789474,
"line_max": 116,
"alpha_frac": 0.5967450271,
"autogenerated": false,
"ratio": 3.8998589562764456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9979129841966339,
"avg_score": 0.003494828282021261,
"num_lines": 76
} |
# Anna Mokrushina & RomanDubinin drawer
from bokeh.plotting import figure, output_file, show
from math import e
import numpy as np
from lab5 import L, f, real_func
from methods import runge_kutta, explicit_euler, recount_euler, get_next_m, progon
def main():
    """Plot the exact solution, the 'progon' solution, and three shooting
    methods (explicit Euler, Euler with recount, Runge-Kutta) on one figure."""
    n = 30  # number of grid intervals
    xs = np.linspace(0, 1, n + 1, endpoint=True)
    y_0 = 0
    right_boundary = e - 1.0 / e + L
    h = 1.0 / n

    colors = ["blue", "purple", "orange", "green"]
    methods = [
        {"f": explicit_euler, "name": "euler"},
        {"f": recount_euler, "name": "euler with recount"},
        {"f": runge_kutta, "name": "Runge-Kutta"},
    ]

    plot = figure(title="", plot_width=1100, plot_height=600)
    plot.line(x=xs, y=list(map(real_func, xs)), color="black",
              legend="real", line_width=2)
    plot.line(x=xs, y=progon(xs, right_boundary, h, L), color="red",
              legend="progon", line_width=2)

    # zip truncates to the three methods; the spare color is unused.
    for color, method in zip(colors, methods):
        solver = method["f"]
        last_m = 0
        solutions = [solver(xs, h, y_0, last_m, f)]
        next_m = get_next_m(xs, f, solver, y_0, right_boundary, last_m, h)
        # Shooting: refine the slope parameter m until successive values agree.
        while abs(next_m - last_m) > 0.01:
            solutions.append(solver(xs, h, y_0, next_m, f))
            last_m = next_m
            next_m = get_next_m(xs, f, solver, y_0, right_boundary, last_m, h)
        plot.line(x=xs, y=solutions[-1], color=color, legend=method["name"])

    output_file("{0}_points.html".format(n))
    plot.legend.orientation = "top_left"
    show(plot)


if __name__ == '__main__':
    main()
| {
"repo_name": "neseleznev/urfu-CompExp",
"path": "Lab5/lab5bokeh.py",
"copies": "1",
"size": "1684",
"license": "mit",
"hash": 2153719290687806500,
"line_mean": 32.68,
"line_max": 93,
"alpha_frac": 0.5789786223,
"autogenerated": false,
"ratio": 2.707395498392283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8778141231525769,
"avg_score": 0.0016465778333027226,
"num_lines": 50
} |
"""An NNTP client class based on:
- RFC 977: Network News Transfer Protocol
- RFC 2980: Common NNTP Extensions
- RFC 3977: Network News Transfer Protocol (version 2)
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Incompatible changes from the 2.x nntplib:
# - all commands are encoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (POST, IHAVE)
# - all responses are decoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (ARTICLE, HEAD, BODY)
# - the `file` argument to various methods is keyword-only
#
# - NNTP.date() returns a datetime object
# - NNTP.newgroups() and NNTP.newnews() take a datetime (or date) object,
# rather than a pair of (date, time) strings.
# - NNTP.newgroups() and NNTP.list() return a list of GroupInfo named tuples
# - NNTP.descriptions() returns a dict mapping group names to descriptions
# - NNTP.xover() returns a list of dicts mapping field names (header or metadata)
# to field values; each dict representing a message overview.
# - NNTP.article(), NNTP.head() and NNTP.body() return a (response, ArticleInfo)
# tuple.
# - the "internal" methods have been marked private (they now start with
# an underscore)
# Other changes from the 2.x/3.1 nntplib:
# - automatic querying of capabilities at connect
# - New method NNTP.getcapabilities()
# - New method NNTP.over()
# - New helper function decode_header()
# - NNTP.post() and NNTP.ihave() accept file objects, bytes-like objects and
# arbitrary iterables yielding lines.
# - An extensive test suite :-)
# TODO:
# - return structured data (GroupInfo etc.) everywhere
# - support HDR
# Imports
import re
import socket
import collections
import datetime
import warnings
import sys
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from email.header import decode_header as _email_decode_header
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["NNTP",
"NNTPError", "NNTPReplyError", "NNTPTemporaryError",
"NNTPPermanentError", "NNTPProtocolError", "NNTPDataError",
"decode_header",
]
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 3977 limits NNTP line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
    """Base class for all nntplib exceptions.

    The first positional argument, if given, is remembered as the server
    response line in ``self.response``.
    """
    def __init__(self, *args):
        Exception.__init__(self, *args)
        self.response = args[0] if args else 'No response given'
class NNTPReplyError(NNTPError):
    """Unexpected [123]xx reply"""
    # Raised when a response code is well-formed but not the one the caller
    # expected for the command it just sent.
    pass
class NNTPTemporaryError(NNTPError):
    """4xx errors"""
    # Transient failure: the command may succeed if retried later.
    pass
class NNTPPermanentError(NNTPError):
    """5xx errors"""
    # Permanent failure: retrying the same command will not help.
    pass
class NNTPProtocolError(NNTPError):
    """Response does not begin with [1-5]"""
    # The server broke the NNTP framing itself (see _getresp()).
    pass
class NNTPDataError(NNTPError):
    """Error in response data"""
    # The response was accepted but its payload could not be parsed
    # (over-long line, malformed OVER/XOVER data, bad DATE format, ...).
    pass
# Standard port used by NNTP servers
NNTP_PORT = 119
NNTP_SSL_PORT = 563
# Response numbers that are followed by additional text (e.g. article)
_LONGRESP = {
'100', # HELP
'101', # CAPABILITIES
'211', # LISTGROUP (also not multi-line with GROUP)
'215', # LIST
'220', # ARTICLE
'221', # HEAD, XHDR
'222', # BODY
'224', # OVER, XOVER
'225', # HDR
'230', # NEWNEWS
'231', # NEWGROUPS
'282', # XGTITLE
}
# Default decoded value for LIST OVERVIEW.FMT if not supported
_DEFAULT_OVERVIEW_FMT = [
"subject", "from", "date", "message-id", "references", ":bytes", ":lines"]
# Alternative names allowed in LIST OVERVIEW.FMT response
_OVERVIEW_FMT_ALTERNATIVES = {
'bytes': ':bytes',
'lines': ':lines',
}
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
_CRLF = b'\r\n'
GroupInfo = collections.namedtuple('GroupInfo',
['group', 'last', 'first', 'flag'])
ArticleInfo = collections.namedtuple('ArticleInfo',
['number', 'message_id', 'lines'])
# Helper function(s)
def decode_header(header_str):
    """Takes a unicode string representing a munged header value
    and decodes it as a (possibly non-ASCII) readable value."""
    decoded = []
    for fragment, charset in _email_decode_header(header_str):
        # email.header yields either str (kept as-is) or bytes plus the
        # charset the encoded-word declared (None means plain ASCII).
        if isinstance(fragment, bytes):
            fragment = fragment.decode(charset or 'ascii')
        decoded.append(fragment)
    return ''.join(decoded)
def _parse_overview_fmt(lines):
    """Parse a list of string representing the response to LIST OVERVIEW.FMT
    and return a list of header/metadata names.
    Raises NNTPDataError if the response is not compliant
    (cf. RFC 3977, section 8.4)."""
    names = []
    for line in lines:
        if line[0] == ':':
            # Metadata name (e.g. ":bytes"): keep the leading colon.
            name, _, suffix = line[1:].partition(':')
            name = ':' + name
        else:
            # Header name (e.g. "Subject:" or "Xref:full"): lowercase and
            # map the alternative spellings onto the metadata names.
            name, _, suffix = line.partition(':')
            name = _OVERVIEW_FMT_ALTERNATIVES.get(name.lower(), name.lower())
        # The suffix (e.g. "full") is currently ignored.
        names.append(name)
    defaults = _DEFAULT_OVERVIEW_FMT
    if len(names) < len(defaults):
        raise NNTPDataError("LIST OVERVIEW.FMT response too short")
    if names[:len(defaults)] != defaults:
        raise NNTPDataError("LIST OVERVIEW.FMT redefines default fields")
    return names
def _parse_overview(lines, fmt, data_process_func=None):
    """Parse the response to an OVER or XOVER command according to the
    overview format `fmt`."""
    # NOTE(review): data_process_func is accepted but never used in this body.
    n_defaults = len(_DEFAULT_OVERVIEW_FMT)
    overview = []
    for line in lines:
        fields = {}
        # Each overview line is "number<TAB>field<TAB>field...".
        article_number, *tokens = line.split('\t')
        article_number = int(article_number)
        for i, token in enumerate(tokens):
            if i >= len(fmt):
                # XXX should we raise an error? Some servers might not
                # support LIST OVERVIEW.FMT and still return additional
                # headers.
                continue
            field_name = fmt[i]
            is_metadata = field_name.startswith(':')
            if i >= n_defaults and not is_metadata:
                # Non-default header names are included in full in the response
                # (unless the field is totally empty)
                h = field_name + ": "
                if token and token[:len(h)].lower() != h:
                    raise NNTPDataError("OVER/XOVER response doesn't include "
                                        "names of additional headers")
                # Strip the "Name: " prefix; an empty field becomes None.
                token = token[len(h):] if token else None
            fields[fmt[i]] = token
        overview.append((article_number, fields))
    return overview
def _parse_datetime(date_str, time_str=None):
"""Parse a pair of (date, time) strings, and return a datetime object.
If only the date is given, it is assumed to be date and time
concatenated together (e.g. response to the DATE command).
"""
if time_str is None:
time_str = date_str[-6:]
date_str = date_str[:-6]
hours = int(time_str[:2])
minutes = int(time_str[2:4])
seconds = int(time_str[4:])
year = int(date_str[:-4])
month = int(date_str[-4:-2])
day = int(date_str[-2:])
# RFC 3977 doesn't say how to interpret 2-char years. Assume that
# there are no dates before 1970 on Usenet.
if year < 70:
year += 2000
elif year < 100:
year += 1900
return datetime.datetime(year, month, day, hours, minutes, seconds)
def _unparse_datetime(dt, legacy=False):
"""Format a date or datetime object as a pair of (date, time) strings
in the format required by the NEWNEWS and NEWGROUPS commands. If a
date object is passed, the time is assumed to be midnight (00h00).
The returned representation depends on the legacy flag:
* if legacy is False (the default):
date has the YYYYMMDD format and time the HHMMSS format
* if legacy is True:
date has the YYMMDD format and time the HHMMSS format.
RFC 3977 compliant servers should understand both formats; therefore,
legacy is only needed when talking to old servers.
"""
if not isinstance(dt, datetime.datetime):
time_str = "000000"
else:
time_str = "{0.hour:02d}{0.minute:02d}{0.second:02d}".format(dt)
y = dt.year
if legacy:
y = y % 100
date_str = "{0:02d}{1.month:02d}{1.day:02d}".format(y, dt)
else:
date_str = "{0:04d}{1.month:02d}{1.day:02d}".format(y, dt)
return date_str, time_str
if _have_ssl:
    def _encrypt_on(sock, context, hostname):
        """Wrap a socket in SSL/TLS. Arguments:
        - sock: Socket to wrap
        - context: SSL context to use for the encrypted connection
        - hostname: hostname passed as server_hostname (used for SNI and,
          with a verifying context, certificate hostname matching)
        Returns:
        - sock: New, encrypted socket.
        """
        # Generate a default SSL context if none was passed.
        if context is None:
            # _create_stdlib_context is a private ssl helper; it mirrors the
            # defaults used elsewhere in the standard library.
            context = ssl._create_stdlib_context()
        return context.wrap_socket(sock, server_hostname=hostname)
# The classes themselves
class _NNTPBase:
# UTF-8 is the character set for all NNTP commands and responses: they
# are automatically encoded (when sending) and decoded (and receiving)
# by this class.
# However, some multi-line data blocks can contain arbitrary bytes (for
# example, latin-1 or utf-16 data in the body of a message). Commands
# taking (POST, IHAVE) or returning (HEAD, BODY, ARTICLE) raw message
# data will therefore only accept and produce bytes objects.
# Furthermore, since there could be non-compliant servers out there,
# we use 'surrogateescape' as the error handler for fault tolerance
# and easy round-tripping. This could be useful for some applications
# (e.g. NNTP gateways).
encoding = 'utf-8'
errors = 'surrogateescape'
    def __init__(self, file, host,
                 readermode=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
        """Initialize an instance. Arguments:
        - file: file-like object (open for read/write in binary mode)
        - host: hostname of the server
        - readermode: if true, send 'mode reader' command after
          connecting.
        - timeout: timeout (in seconds) used for socket connections
        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'. If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        # NOTE(review): `timeout` is accepted but not used in this body;
        # presumably the socket-owning subclass applies it — confirm there.
        self.host = host
        self.file = file
        self.debugging = 0
        # Consume the server greeting so later reads start at a response.
        self.welcome = self._getresp()
        # Inquire about capabilities (RFC 3977).
        self._caps = None
        self.getcapabilities()
        # 'MODE READER' is sometimes necessary to enable 'reader' mode.
        # However, the order in which 'MODE READER' and 'AUTHINFO' need to
        # arrive differs between some NNTP servers. If _setreadermode() fails
        # with an authorization failed error, it will set this to True;
        # the login() routine will interpret that as a request to try again
        # after performing its normal function.
        # Enable only if we're not already in READER mode anyway.
        self.readermode_afterauth = False
        if readermode and 'READER' not in self._caps:
            self._setreadermode()
            if not self.readermode_afterauth:
                # Capabilities might have changed after MODE READER
                self._caps = None
                self.getcapabilities()
        # RFC 4642 2.2.2: Both the client and the server MUST know if there is
        # a TLS session active. A client MUST NOT attempt to start a TLS
        # session if a TLS session is already active.
        self.tls_on = False
        # Log in and encryption setup order is left to subclasses.
        self.authenticated = False
    def __enter__(self):
        # Context-manager support: "with NNTP(...) as s:".  Cleanup happens
        # in __exit__.
        return self
    def __exit__(self, *args):
        # Quit politely if still connected, then make sure the underlying
        # file is closed even when QUIT fails.  _close() deletes self.file,
        # which is why connectedness is tested with hasattr.
        is_connected = lambda: hasattr(self, "file")
        if is_connected():
            try:
                self.quit()
            except (OSError, EOFError):
                pass
            finally:
                if is_connected():
                    self._close()
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
if it 201, posting is not allowed."""
if self.debugging: print('*welcome*', repr(self.welcome))
return self.welcome
    def getcapabilities(self):
        """Get the server capabilities, as read by __init__().
        If the CAPABILITIES command is not supported, an empty dict is
        returned."""
        # Cached: the server is only queried while self._caps is None.
        if self._caps is None:
            self.nntp_version = 1
            self.nntp_implementation = None
            try:
                resp, caps = self.capabilities()
            except (NNTPPermanentError, NNTPTemporaryError):
                # Server doesn't support capabilities
                self._caps = {}
            else:
                self._caps = caps
                if 'VERSION' in caps:
                    # The server can advertise several supported versions,
                    # choose the highest.
                    self.nntp_version = max(map(int, caps['VERSION']))
                if 'IMPLEMENTATION' in caps:
                    self.nntp_implementation = ' '.join(caps['IMPLEMENTATION'])
        return self._caps
    def set_debuglevel(self, level):
        """Set the debugging level. Argument 'level' means:
        0: no debugging output (default)
        1: print commands and responses but not body text etc.
        2: also print raw lines read and sent before stripping CR/LF"""
        # Stored on the instance; consulted by the _put*/_get* helpers.
        self.debugging = level
debug = set_debuglevel
    def _putline(self, line):
        """Internal: send one line to the server, appending CRLF.
        The `line` must be a bytes-like object."""
        sys.audit("nntplib.putline", self, line)  # auditing hook (Python 3.8+)
        line = line + _CRLF
        if self.debugging > 1: print('*put*', repr(line))
        self.file.write(line)
        self.file.flush()
    def _putcmd(self, line):
        """Internal: send one command to the server (through _putline()).
        The `line` must be a unicode string."""
        if self.debugging: print('*cmd*', repr(line))
        # Commands are UTF-8 with surrogateescape (class-level policy).
        line = line.encode(self.encoding, self.errors)
        self._putline(line)
def _getline(self, strip_crlf=True):
"""Internal: return one line from the server, stripping _CRLF.
Raise EOFError if the connection is closed.
Returns a bytes object."""
line = self.file.readline(_MAXLINE +1)
if len(line) > _MAXLINE:
raise NNTPDataError('line too long')
if self.debugging > 1:
print('*get*', repr(line))
if not line: raise EOFError
if strip_crlf:
if line[-2:] == _CRLF:
line = line[:-2]
elif line[-1:] in _CRLF:
line = line[:-1]
return line
    def _getresp(self):
        """Internal: get a response from the server.
        Raise various errors if the response indicates an error.
        Returns a unicode string."""
        resp = self._getline()
        if self.debugging: print('*resp*', repr(resp))
        resp = resp.decode(self.encoding, self.errors)
        # Dispatch on the first digit of the status code: 4xx temporary,
        # 5xx permanent, anything not starting 1-3 is a protocol violation.
        c = resp[:1]
        if c == '4':
            raise NNTPTemporaryError(resp)
        if c == '5':
            raise NNTPPermanentError(resp)
        if c not in '123':
            raise NNTPProtocolError(resp)
        return resp
    def _getlongresp(self, file=None):
        """Internal: get a response plus following text from the server.
        Raise various errors if the response indicates an error.
        Returns a (response, lines) tuple where `response` is a unicode
        string and `lines` is a list of bytes objects.
        If `file` is a file-like object, it must be open in binary mode.
        """
        openedFile = None
        try:
            # If a string was passed then open a file with that name
            if isinstance(file, (str, bytes)):
                openedFile = file = open(file, "wb")
            resp = self._getresp()
            if resp[:3] not in _LONGRESP:
                raise NNTPReplyError(resp)
            lines = []
            if file is not None:
                # When writing to a file the raw line endings are kept, so
                # both CRLF- and bare-LF-terminated dots end the block.
                # XXX lines = None instead?
                terminators = (b'.' + _CRLF, b'.\n')
                while 1:
                    line = self._getline(False)
                    if line in terminators:
                        break
                    # Undo RFC 3977 dot-stuffing ("..x" -> ".x").
                    if line.startswith(b'..'):
                        line = line[1:]
                    file.write(line)
            else:
                terminator = b'.'
                while 1:
                    line = self._getline()
                    if line == terminator:
                        break
                    # Undo RFC 3977 dot-stuffing ("..x" -> ".x").
                    if line.startswith(b'..'):
                        line = line[1:]
                    lines.append(line)
        finally:
            # If this method created the file, then it must close it
            if openedFile:
                openedFile.close()
        return resp, lines
    def _shortcmd(self, line):
        """Internal: send a command and get the response.
        Same return value as _getresp()."""
        self._putcmd(line)
        return self._getresp()
    def _longcmd(self, line, file=None):
        """Internal: send a command and get the response plus following text.
        Same return value as _getlongresp()."""
        self._putcmd(line)
        return self._getlongresp(file)
def _longcmdstring(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same as _longcmd() and _getlongresp(), except that the returned `lines`
are unicode strings rather than bytes objects.
"""
self._putcmd(line)
resp, list = self._getlongresp(file)
return resp, [line.decode(self.encoding, self.errors)
for line in list]
    def _getoverviewfmt(self):
        """Internal: get the overview format. Queries the server if not
        already done, else returns the cached value."""
        # EAFP cache: the first successful call stores the format on the
        # instance; later calls return it without touching the server.
        try:
            return self._cachedoverviewfmt
        except AttributeError:
            pass
        try:
            resp, lines = self._longcmdstring("LIST OVERVIEW.FMT")
        except NNTPPermanentError:
            # Not supported by server?
            fmt = _DEFAULT_OVERVIEW_FMT[:]
        else:
            fmt = _parse_overview_fmt(lines)
        self._cachedoverviewfmt = fmt
        return fmt
def _grouplist(self, lines):
# Parse lines into "group last first flag"
return [GroupInfo(*line.split()) for line in lines]
    def capabilities(self):
        """Process a CAPABILITIES command. Not supported by all servers.
        Return:
        - resp: server response if successful
        - caps: a dictionary mapping capability names to lists of tokens
        (for example {'VERSION': ['2'], 'OVER': [], LIST: ['ACTIVE', 'HEADERS'] })
        """
        caps = {}
        resp, lines = self._longcmdstring("CAPABILITIES")
        # Each line has the form "NAME [token token ...]".
        for line in lines:
            name, *tokens = line.split()
            caps[name] = tokens
        return resp, caps
def newgroups(self, date, *, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of newsgroup names
"""
if not isinstance(date, (datetime.date, datetime.date)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWGROUPS {0} {1}'.format(date_str, time_str)
resp, lines = self._longcmdstring(cmd, file)
return resp, self._grouplist(lines)
def newnews(self, group, date, *, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of message ids
"""
if not isinstance(date, (datetime.date, datetime.date)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWNEWS {0} {1} {2}'.format(group, date_str, time_str)
return self._longcmdstring(cmd, file)
    def list(self, group_pattern=None, *, file=None):
        """Process a LIST or LIST ACTIVE command. Arguments:
        - group_pattern: a pattern indicating which groups to query
        - file: Filename string or file object to store the result in
        Returns:
        - resp: server response if successful
        - list: list of (group, last, first, flag) (strings)
        """
        # (Method name shadows the builtin `list`; kept for API compatibility.)
        if group_pattern is not None:
            command = 'LIST ACTIVE ' + group_pattern
        else:
            command = 'LIST'
        resp, lines = self._longcmdstring(command, file)
        return resp, self._grouplist(lines)
    def _getdescriptions(self, group_pattern, return_all):
        """Internal: fetch group descriptions.  With return_all true,
        return (resp, {name: description}); otherwise return the first
        matching description string, or '' when nothing matched."""
        line_pat = re.compile('^(?P<group>[^ \t]+)[ \t]+(.*)$')
        # Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
        resp, lines = self._longcmdstring('LIST NEWSGROUPS ' + group_pattern)
        if not resp.startswith('215'):
            # Now the deprecated XGTITLE. This either raises an error
            # or succeeds with the same output structure as LIST
            # NEWSGROUPS.
            resp, lines = self._longcmdstring('XGTITLE ' + group_pattern)
        groups = {}
        for raw_line in lines:
            match = line_pat.search(raw_line.strip())
            if match:
                name, desc = match.group(1, 2)
                if not return_all:
                    return desc
                groups[name] = desc
        if return_all:
            return resp, groups
        else:
            # Nothing found
            return ''
    def description(self, group):
        """Get a description for a single group. If more than one
        group matches ('group' is a pattern), return the first. If no
        group matches, return an empty string.
        This elides the response code from the server, since it can
        only be '215' or '285' (for xgtitle) anyway. If the response
        code is needed, use the 'descriptions' method.
        NOTE: This neither checks for a wildcard in 'group' nor does
        it check whether the group actually exists."""
        return self._getdescriptions(group, False)
    def descriptions(self, group_pattern):
        """Get descriptions for a range of groups.
        Returns (resp, {group_name: description})."""
        return self._getdescriptions(group_pattern, True)
    def group(self, name):
        """Process a GROUP command. Argument:
        - group: the group name
        Returns:
        - resp: server response if successful
        - count: number of articles
        - first: first article number
        - last: last article number
        - name: the group name
        """
        resp = self._shortcmd('GROUP ' + name)
        if not resp.startswith('211'):
            raise NNTPReplyError(resp)
        # Expected response form: "211 count first last group"; each field
        # is optional beyond the code, hence the cascading index checks.
        words = resp.split()
        count = first = last = 0
        n = len(words)
        if n > 1:
            count = words[1]
        if n > 2:
            first = words[2]
        if n > 3:
            last = words[3]
        if n > 4:
            name = words[4].lower()
        return resp, int(count), int(first), int(last), name
    def help(self, *, file=None):
        """Process a HELP command. Argument:
        - file: Filename string or file object to store the result in
        Returns:
        - resp: server response if successful
        - list: list of strings returned by the server in response to the
          HELP command
        """
        # HELP has a multi-line (dot-terminated) body.
        return self._longcmdstring('HELP', file)
def _statparse(self, resp):
"""Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command."""
if not resp.startswith('22'):
raise NNTPReplyError(resp)
words = resp.split()
art_num = int(words[1])
message_id = words[2]
return resp, art_num, message_id
    def _statcmd(self, line):
        """Internal: process a STAT, NEXT or LAST command."""
        resp = self._shortcmd(line)
        return self._statparse(resp)
def stat(self, message_spec=None):
"""Process a STAT command. Argument:
- message_spec: article number or message id (if not specified,
the current article is selected)
Returns:
- resp: server response if successful
- art_num: the article number
- message_id: the message id
"""
if message_spec:
return self._statcmd('STAT {0}'.format(message_spec))
else:
return self._statcmd('STAT')
    def next(self):
        """Process a NEXT command. No arguments. Return as for STAT."""
        return self._statcmd('NEXT')
    def last(self):
        """Process a LAST command. No arguments. Return as for STAT."""
        return self._statcmd('LAST')
    def _artcmd(self, line, file=None):
        """Internal: process a HEAD, BODY or ARTICLE command.
        Returns (resp, ArticleInfo(number, message_id, lines))."""
        resp, lines = self._longcmd(line, file)
        resp, art_num, message_id = self._statparse(resp)
        return resp, ArticleInfo(art_num, message_id, lines)
def head(self, message_spec=None, *, file=None):
"""Process a HEAD command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the headers in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of header lines)
"""
if message_spec is not None:
cmd = 'HEAD {0}'.format(message_spec)
else:
cmd = 'HEAD'
return self._artcmd(cmd, file)
def body(self, message_spec=None, *, file=None):
"""Process a BODY command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the body in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of body lines)
"""
if message_spec is not None:
cmd = 'BODY {0}'.format(message_spec)
else:
cmd = 'BODY'
return self._artcmd(cmd, file)
def article(self, message_spec=None, *, file=None):
"""Process an ARTICLE command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the article in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of article lines)
"""
if message_spec is not None:
cmd = 'ARTICLE {0}'.format(message_spec)
else:
cmd = 'ARTICLE'
return self._artcmd(cmd, file)
    def slave(self):
        """Process a SLAVE command. Returns:
        - resp: server response if successful
        """
        return self._shortcmd('SLAVE')
    def xhdr(self, hdr, str, *, file=None):
        """Process an XHDR command (optional server extension). Arguments:
        - hdr: the header type (e.g. 'subject')
        - str: an article nr, a message id, or a range nr1-nr2
        - file: Filename string or file object to store the result in
        Returns:
        - resp: server response if successful
        - list: list of (nr, value) strings
        """
        # (Parameter `str` shadows the builtin; kept for API compatibility.)
        # The pattern tolerates an optional trailing newline in each line.
        pat = re.compile('^([0-9]+) ?(.*)\n?')
        resp, lines = self._longcmdstring('XHDR {0} {1}'.format(hdr, str), file)
        def remove_number(line):
            # Split "nr value" into a pair; leave unparseable lines as-is.
            m = pat.match(line)
            return m.group(1, 2) if m else line
        return resp, [remove_number(line) for line in lines]
    def xover(self, start, end, *, file=None):
        """Process an XOVER command (optional server extension) Arguments:
        - start: start of range
        - end: end of range
        - file: Filename string or file object to store the result in
        Returns:
        - resp: server response if successful
        - list: list of dicts containing the response fields
        """
        resp, lines = self._longcmdstring('XOVER {0}-{1}'.format(start, end),
                                          file)
        # Field names come from the (cached) LIST OVERVIEW.FMT response.
        fmt = self._getoverviewfmt()
        return resp, _parse_overview(lines, fmt)
    def over(self, message_spec, *, file=None):
        """Process an OVER command. If the command isn't supported, fall
        back to XOVER. Arguments:
        - message_spec:
        - either a message id, indicating the article to fetch
          information about
        - or a (start, end) tuple, indicating a range of article numbers;
          if end is None, information up to the newest message will be
          retrieved
        - or None, indicating the current article number must be used
        - file: Filename string or file object to store the result in
        Returns:
        - resp: server response if successful
        - list: list of dicts containing the response fields
        NOTE: the "message id" form isn't supported by XOVER
        """
        # Prefer OVER when the server advertised it; otherwise use XOVER.
        cmd = 'OVER' if 'OVER' in self._caps else 'XOVER'
        if isinstance(message_spec, (tuple, list)):
            # An open-ended range "start-" is produced when end is None.
            start, end = message_spec
            cmd += ' {0}-{1}'.format(start, end or '')
        elif message_spec is not None:
            cmd = cmd + ' ' + message_spec
        resp, lines = self._longcmdstring(cmd, file)
        fmt = self._getoverviewfmt()
        return resp, _parse_overview(lines, fmt)
    def xgtitle(self, group, *, file=None):
        """Process an XGTITLE command (optional server extension) Arguments:
        - group: group name wildcard (i.e. news.*)
        Returns:
        - resp: server response if successful
        - list: list of (name,title) strings"""
        warnings.warn("The XGTITLE extension is not actively used, "
                      "use descriptions() instead",
                      DeprecationWarning, 2)
        # "name<whitespace>title" per line; unmatched lines are dropped.
        line_pat = re.compile('^([^ \t]+)[ \t]+(.*)$')
        resp, raw_lines = self._longcmdstring('XGTITLE ' + group, file)
        lines = []
        for raw_line in raw_lines:
            match = line_pat.search(raw_line.strip())
            if match:
                lines.append(match.group(1, 2))
        return resp, lines
    def xpath(self, id):
        """Process an XPATH command (optional server extension) Arguments:
        - id: Message id of article
        Returns:
        resp: server response if successful
        path: directory path to article
        """
        # (Parameter `id` shadows the builtin; kept for API compatibility.)
        warnings.warn("The XPATH extension is not actively used",
                      DeprecationWarning, 2)
        resp = self._shortcmd('XPATH {0}'.format(id))
        if not resp.startswith('223'):
            raise NNTPReplyError(resp)
        # Expected form: "223 path" — anything else is a malformed reply.
        try:
            [resp_num, path] = resp.split()
        except ValueError:
            raise NNTPReplyError(resp) from None
        else:
            return resp, path
    def date(self):
        """Process the DATE command.
        Returns:
        - resp: server response if successful
        - date: datetime object
        """
        resp = self._shortcmd("DATE")
        if not resp.startswith('111'):
            raise NNTPReplyError(resp)
        # Expected form: "111 yyyymmddhhmmss" (exactly 14 digits).
        elem = resp.split()
        if len(elem) != 2:
            raise NNTPDataError(resp)
        date = elem[1]
        if len(date) != 14:
            raise NNTPDataError(resp)
        return resp, _parse_datetime(date, None)
    def _post(self, command, f):
        """Internal: send `command` (POST or IHAVE ...), then transmit `f`
        as dot-stuffed message data terminated by a lone '.' line.
        `f` may be a bytes-like object, a binary file, or an iterable of
        bytes lines.  Returns the server's final response."""
        resp = self._shortcmd(command)
        # Raises a specific exception if posting is not allowed
        if not resp.startswith('3'):
            raise NNTPReplyError(resp)
        if isinstance(f, (bytes, bytearray)):
            f = f.splitlines()
        # We don't use _putline() because:
        # - we don't want additional CRLF if the file or iterable is already
        # in the right format
        # - we don't want a spurious flush() after each line is written
        for line in f:
            if not line.endswith(_CRLF):
                line = line.rstrip(b"\r\n") + _CRLF
            # RFC 3977 dot-stuffing: lines starting with '.' get doubled.
            if line.startswith(b'.'):
                line = b'.' + line
            self.file.write(line)
        self.file.write(b".\r\n")
        self.file.flush()
        return self._getresp()
    def post(self, data):
        """Process a POST command. Arguments:
        - data: bytes object, iterable or file containing the article
        Returns:
        - resp: server response if successful"""
        return self._post('POST', data)
    def ihave(self, message_id, data):
        """Process an IHAVE command. Arguments:
        - message_id: message-id of the article
        - data: file containing the article
        Returns:
        - resp: server response if successful
        Note that if the server refuses the article an exception is raised."""
        return self._post('IHAVE {0}'.format(message_id), data)
    def _close(self):
        # Close the connection file and drop the attribute, so that
        # __exit__'s hasattr(self, "file") check sees us as disconnected.
        self.file.close()
        del self.file
    def quit(self):
        """Process a QUIT command and close the socket. Returns:
        - resp: server response if successful"""
        # The connection is torn down even when QUIT itself fails.
        try:
            resp = self._shortcmd('QUIT')
        finally:
            self._close()
        return resp
    def login(self, user=None, password=None, usenetrc=True):
        """Authenticate with AUTHINFO USER/PASS.  If `user` is not given
        and `usenetrc` is true, credentials are looked up in ~/.netrc.
        Raises ValueError when already authenticated or when neither a
        user nor netrc lookup was requested; silently returns if no
        credentials could be found."""
        if self.authenticated:
            raise ValueError("Already logged in.")
        if not user and not usenetrc:
            raise ValueError(
                "At least one of `user` and `usenetrc` must be specified")
        # If no login/password was specified but netrc was requested,
        # try to get them from ~/.netrc
        # Presume that if .netrc has an entry, NNRP authentication is required.
        try:
            if usenetrc and not user:
                import netrc
                credentials = netrc.netrc()
                auth = credentials.authenticators(self.host)
                if auth:
                    user = auth[0]
                    password = auth[2]
        except OSError:
            pass
        # Perform NNTP authentication if needed.
        if not user:
            return
        resp = self._shortcmd('authinfo user ' + user)
        # 381 means the server wants the password next.
        if resp.startswith('381'):
            if not password:
                raise NNTPReplyError(resp)
            else:
                resp = self._shortcmd('authinfo pass ' + password)
                if not resp.startswith('281'):
                    raise NNTPPermanentError(resp)
        # Capabilities might have changed after login
        self._caps = None
        self.getcapabilities()
        # Attempt to send mode reader if it was requested after login.
        # Only do so if we're not in reader mode already.
        if self.readermode_afterauth and 'READER' not in self._caps:
            self._setreadermode()
            # Capabilities might have changed after MODE READER
            self._caps = None
            self.getcapabilities()
    def _setreadermode(self):
        """Internal: ask the server to switch into 'reader' mode.

        A 5xx refusal is ignored (the command is optional); a 480 reply
        defers the switch until after authentication.
        """
        try:
            self.welcome = self._shortcmd('mode reader')
        except NNTPPermanentError:
            # Error 5xx, probably 'not implemented'
            pass
        except NNTPTemporaryError as e:
            if e.response.startswith('480'):
                # Need authorization before 'mode reader'
                self.readermode_afterauth = True
            else:
                raise
    # starttls() is only defined when the ssl module was importable.
    if _have_ssl:
        def starttls(self, context=None):
            """Process a STARTTLS command. Arguments:
            - context: SSL context to use for the encrypted connection
            """
            # Per RFC 4642, STARTTLS MUST NOT be sent after authentication or if
            # a TLS session already exists.
            if self.tls_on:
                raise ValueError("TLS is already enabled.")
            if self.authenticated:
                raise ValueError("TLS cannot be started after authentication.")
            resp = self._shortcmd('STARTTLS')
            if resp.startswith('382'):
                # Wrap the existing socket in TLS and rebuild the buffered
                # file object on top of the encrypted socket.
                self.file.close()
                self.sock = _encrypt_on(self.sock, context, self.host)
                self.file = self.sock.makefile("rwb")
                self.tls_on = True
                # Capabilities may change after TLS starts up, so ask for them
                # again.
                self._caps = None
                self.getcapabilities()
            else:
                raise NNTPError("TLS failed to start.")
class NNTP(_NNTPBase):
    """NNTP client speaking over a plain (unencrypted) TCP socket."""

    def __init__(self, host, port=NNTP_PORT, user=None, password=None,
                 readermode=None, usenetrc=False,
                 timeout=_GLOBAL_DEFAULT_TIMEOUT):
        """Initialize an instance.  Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
                      connecting.
        - usenetrc: allow loading username and password from ~/.netrc file
                    if not specified explicitly
        - timeout: timeout (in seconds) used for socket connections

        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'.  If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.port = port
        sys.audit("nntplib.connect", self, host, port)
        self.sock = socket.create_connection((host, port), timeout)
        file = None
        try:
            file = self.sock.makefile("rwb")
            _NNTPBase.__init__(self, file, host,
                               readermode, timeout)
            if user or usenetrc:
                self.login(user, password, usenetrc)
        except:
            # Ensure a partially-constructed connection does not leak the
            # socket or its buffered file wrapper.
            if file:
                file.close()
            self.sock.close()
            raise

    def _close(self):
        # Close the buffered file first, then the socket — the socket is
        # closed even if closing the file raises.
        try:
            _NNTPBase._close(self)
        finally:
            self.sock.close()
# NNTP over an implicitly TLS-wrapped socket; only defined when the ssl
# module was importable.
if _have_ssl:
    class NNTP_SSL(_NNTPBase):

        def __init__(self, host, port=NNTP_SSL_PORT,
                     user=None, password=None, ssl_context=None,
                     readermode=None, usenetrc=False,
                     timeout=_GLOBAL_DEFAULT_TIMEOUT):
            """This works identically to NNTP.__init__, except for the change
            in default port and the `ssl_context` argument for SSL connections.
            """
            sys.audit("nntplib.connect", self, host, port)
            self.sock = socket.create_connection((host, port), timeout)
            file = None
            try:
                # Wrap the socket in TLS before any protocol traffic.
                self.sock = _encrypt_on(self.sock, ssl_context, host)
                file = self.sock.makefile("rwb")
                _NNTPBase.__init__(self, file, host,
                                   readermode=readermode, timeout=timeout)
                if user or usenetrc:
                    self.login(user, password, usenetrc)
            except:
                # Avoid leaking the socket/file if construction fails.
                if file:
                    file.close()
                self.sock.close()
                raise

        def _close(self):
            # Close the buffered file first, then the socket, even on error.
            try:
                _NNTPBase._close(self)
            finally:
                self.sock.close()

    __all__.append("NNTP_SSL")
# Test retrieval when run as a script.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description="""\
        nntplib built-in demo - display the latest articles in a newsgroup""")
    parser.add_argument('-g', '--group', default='gmane.comp.python.general',
                        help='group to fetch messages from (default: %(default)s)')
    parser.add_argument('-s', '--server', default='news.gmane.org',
                        help='NNTP server hostname (default: %(default)s)')
    parser.add_argument('-p', '--port', default=-1, type=int,
                        help='NNTP port number (default: %s / %s)' % (NNTP_PORT, NNTP_SSL_PORT))
    parser.add_argument('-n', '--nb-articles', default=10, type=int,
                        help='number of articles to fetch (default: %(default)s)')
    parser.add_argument('-S', '--ssl', action='store_true', default=False,
                        help='use NNTP over SSL')
    args = parser.parse_args()
    port = args.port
    # -1 means "no port given": fall back to the scheme's standard port.
    if not args.ssl:
        if port == -1:
            port = NNTP_PORT
        s = NNTP(host=args.server, port=port)
    else:
        if port == -1:
            port = NNTP_SSL_PORT
        s = NNTP_SSL(host=args.server, port=port)
    # Opportunistically upgrade a plain connection when the server offers it.
    caps = s.getcapabilities()
    if 'STARTTLS' in caps:
        s.starttls()
    resp, count, first, last, name = s.group(args.group)
    print('Group', name, 'has', count, 'articles, range', first, 'to', last)

    def cut(s, lim):
        # Truncate s to at most lim characters, marking the cut with "...".
        if len(s) > lim:
            s = s[:lim - 4] + "..."
        return s

    # Fetch overviews for the last nb_articles articles of the group.
    first = str(int(last) - args.nb_articles + 1)
    resp, overviews = s.xover(first, last)
    for artnum, over in overviews:
        author = decode_header(over['from']).split('<', 1)[0]
        subject = decode_header(over['subject'])
        lines = int(over[':lines'])
        print("{:7} {:20} {:42} ({})".format(
            artnum, cut(author, 20), cut(subject, 42), lines)
        )

    s.quit()
| {
"repo_name": "batermj/algorithm-challenger",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/nntplib.py",
"copies": "2",
"size": "43262",
"license": "apache-2.0",
"hash": -1032273884095721,
"line_mean": 36.5864465682,
"line_max": 96,
"alpha_frac": 0.5792843604,
"autogenerated": false,
"ratio": 4.145060841237903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005792186650475387,
"num_lines": 1151
} |
"""An NNTP client class based on RFC 977: Network News Transfer Protocol.
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Imports
import re
import socket
__all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
"NNTPPermanentError","NNTPProtocolError","NNTPDataError",
"error_reply","error_temp","error_perm","error_proto",
"error_data",]
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
    """Base class for all nntplib exceptions."""

    def __init__(self, *args):
        super().__init__(*args)
        # Expose the raw server response (the first positional argument)
        # for callers that want to inspect it; fall back to a placeholder
        # when the exception was raised without arguments.
        self.response = args[0] if args else 'No response given'
class NNTPReplyError(NNTPError):
    """Unexpected [123]xx reply"""
    # Raised when a success-range response arrives where it is not expected.
    pass
class NNTPTemporaryError(NNTPError):
    """4xx errors"""
    # Transient failures: the command may succeed if retried later.
    pass
class NNTPPermanentError(NNTPError):
    """5xx errors"""
    # Permanent failures: retrying the same command will not help.
    pass
class NNTPProtocolError(NNTPError):
    """Response does not begin with [1-5]"""
    # The server produced a line that is not a valid NNTP status response.
    pass
class NNTPDataError(NNTPError):
    """Error in response data"""
    # The status line was valid but its payload could not be parsed.
    pass
# for backwards compatibility
# Older client code referred to nntplib exceptions through these error_*
# names; keep them as aliases of the exception classes above.
error_reply = NNTPReplyError
error_temp = NNTPTemporaryError
error_perm = NNTPPermanentError
error_proto = NNTPProtocolError
error_data = NNTPDataError
# Standard port used by NNTP servers
NNTP_PORT = 119
# Response numbers that are followed by additional text (e.g. article)
LONGRESP = [b'100', b'215', b'220', b'221', b'222', b'224', b'230', b'231', b'282']
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = b'\r\n'
# The class itself
class NNTP:
    def __init__(self, host, port=NNTP_PORT, user=None, password=None,
                 readermode=None, usenetrc=True):
        """Initialize an instance.  Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
                      connecting.
        - usenetrc: allow loading username and password from ~/.netrc
                    if not specified explicitly

        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'.  If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.port = port
        self.sock = socket.create_connection((host, port))
        self.file = self.sock.makefile('rb')
        self.debugging = 0
        # The server greets us immediately; squirrel the greeting away.
        self.welcome = self.getresp()

        # 'mode reader' is sometimes necessary to enable 'reader' mode.
        # However, the order in which 'mode reader' and 'authinfo' need to
        # arrive differs between some NNTP servers. Try to send
        # 'mode reader', and if it fails with an authorization failed
        # error, try again after sending authinfo.
        readermode_afterauth = 0
        if readermode:
            try:
                self.welcome = self.shortcmd('mode reader')
            except NNTPPermanentError:
                # error 500, probably 'not implemented'
                pass
            except NNTPTemporaryError as e:
                if user and e.response.startswith(b'480'):
                    # Need authorization before 'mode reader'
                    readermode_afterauth = 1
                else:
                    raise

        # If no login/password was specified, try to get them from ~/.netrc
        # Presume that if .netrc has an entry, NNRP authentication is required.
        try:
            if usenetrc and not user:
                import netrc
                credentials = netrc.netrc()
                auth = credentials.authenticators(host)
                if auth:
                    user = auth[0]
                    password = auth[2]
        except IOError:
            # No readable ~/.netrc; continue unauthenticated.
            pass

        # Perform NNRP authentication if needed.
        if user:
            resp = self.shortcmd('authinfo user '+user)
            if resp.startswith(b'381'):
                # 381: server wants a password to complete authentication.
                if not password:
                    raise NNTPReplyError(resp)
                else:
                    resp = self.shortcmd(
                            'authinfo pass '+password)
                    if not resp.startswith(b'281'):
                        raise NNTPPermanentError(resp)
            # Retry the deferred 'mode reader' now that we are logged in.
            if readermode_afterauth:
                try:
                    self.welcome = self.shortcmd('mode reader')
                except NNTPPermanentError:
                    # error 500, probably 'not implemented'
                    pass
# Get the welcome message from the server
# (this is read and squirreled away by __init__()).
# If the response code is 200, posting is allowed;
# if it 201, posting is not allowed
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
if it 201, posting is not allowed."""
if self.debugging: print('*welcome*', repr(self.welcome))
return self.welcome
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def putline(self, line):
"""Internal: send one line to the server, appending CRLF."""
line = line + CRLF
if self.debugging > 1: print('*put*', repr(line))
self.sock.sendall(line)
def putcmd(self, line):
"""Internal: send one command to the server (through putline())."""
if self.debugging: print('*cmd*', repr(line))
line = bytes(line, "ASCII")
self.putline(line)
def getline(self):
"""Internal: return one line from the server, stripping CRLF.
Raise EOFError if the connection is closed."""
line = self.file.readline()
if self.debugging > 1:
print('*get*', repr(line))
if not line: raise EOFError
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
return line
def getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error."""
resp = self.getline()
if self.debugging: print('*resp*', repr(resp))
c = resp[:1]
if c == b'4':
raise NNTPTemporaryError(resp)
if c == b'5':
raise NNTPPermanentError(resp)
if c not in b'123':
raise NNTPProtocolError(resp)
return resp
def getlongresp(self, file=None):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error."""
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, str):
openedFile = file = open(file, "w")
resp = self.getresp()
if resp[:3] not in LONGRESP:
raise NNTPReplyError(resp)
list = []
while 1:
line = self.getline()
if line == b'.':
break
if line.startswith(b'..'):
line = line[1:]
if file:
file.write(line + b'\n')
else:
list.append(line)
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, list
def shortcmd(self, line):
"""Internal: send a command and get the response."""
self.putcmd(line)
return self.getresp()
def longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text."""
self.putcmd(line)
return self.getlongresp(file)
def newgroups(self, date, time, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of newsgroup names"""
return self.longcmd('NEWGROUPS ' + date + ' ' + time, file)
def newnews(self, group, date, time, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of message ids"""
cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
return self.longcmd(cmd, file)
def list(self, file=None):
"""Process a LIST command. Return:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)"""
resp, list = self.longcmd('LIST', file)
for i in range(len(list)):
# Parse lines into "group last first flag"
list[i] = tuple(list[i].split())
return resp, list
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
resp, lines = self.descriptions(group)
if len(lines) == 0:
return b''
else:
return lines[0][1]
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
line_pat = re.compile(b'^(?P<group>[^ \t]+)[ \t]+(.*)$')
# Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
resp, raw_lines = self.longcmd('LIST NEWSGROUPS ' + group_pattern)
if not resp.startswith(b'215'):
# Now the deprecated XGTITLE. This either raises an error
# or succeeds with the same output structure as LIST
# NEWSGROUPS.
resp, raw_lines = self.longcmd('XGTITLE ' + group_pattern)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles (string)
- first: first article number (string)
- last: last article number (string)
- name: the group name"""
resp = self.shortcmd('GROUP ' + name)
if not resp.startswith(b'211'):
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, count, first, last, name
def help(self, file=None):
"""Process a HELP command. Returns:
- resp: server response if successful
- list: list of strings"""
return self.longcmd('HELP',file)
def statparse(self, resp):
"""Internal: parse the response of a STAT, NEXT or LAST command."""
if not resp.startswith(b'22'):
raise NNTPReplyError(resp)
words = resp.split()
nr = 0
id = b''
n = len(words)
if n > 1:
nr = words[1]
if n > 2:
id = words[2]
return resp, nr, id
def statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self.shortcmd(line)
return self.statparse(resp)
def stat(self, id):
"""Process a STAT command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: the article number
- id: the message id"""
return self.statcmd('STAT {0}'.format(id))
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self.statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self.statcmd('LAST')
def artcmd(self, line, file=None):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, list = self.longcmd(line, file)
resp, nr, id = self.statparse(resp)
return resp, nr, id, list
def head(self, id):
"""Process a HEAD command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's header"""
return self.artcmd('HEAD {0}'.format(id))
def body(self, id, file=None):
"""Process a BODY command. Argument:
- id: article number or message id
- file: Filename string or file object to store the article in
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's body or an empty list
if file was used"""
return self.artcmd('BODY {0}'.format(id), file)
def article(self, id):
"""Process an ARTICLE command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article"""
return self.artcmd('ARTICLE {0}'.format(id))
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful"""
return self.shortcmd('SLAVE')
def xhdr(self, hdr, str, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
Returns:
- resp: server response if successful
- list: list of (nr, value) strings"""
pat = re.compile(b'^([0-9]+) ?(.*)\n?')
resp, lines = self.longcmd('XHDR {0} {1}'.format(hdr, str), file)
for i in range(len(lines)):
line = lines[i]
m = pat.match(line)
if m:
lines[i] = m.group(1, 2)
return resp, lines
def xover(self, start, end, file=None):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
Returns:
- resp: server response if successful
- list: list of (art-nr, subject, poster, date,
id, references, size, lines)"""
resp, lines = self.longcmd('XOVER {0}-{1}'.format(start, end), file)
xover_lines = []
for line in lines:
elem = line.split(b'\t')
try:
xover_lines.append((elem[0],
elem[1],
elem[2],
elem[3],
elem[4],
elem[5].split(),
elem[6],
elem[7]))
except IndexError:
raise NNTPDataError(line)
return resp,xover_lines
def xgtitle(self, group, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
line_pat = re.compile(b'^([^ \t]+)[ \t]+(.*)$')
resp, raw_lines = self.longcmd('XGTITLE ' + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self,id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article"""
resp = self.shortcmd('XPATH {0}'.format(id))
if not resp.startswith(b'223'):
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date (self):
"""Process the DATE command. Arguments:
None
Returns:
resp: server response if successful
date: Date suitable for newnews/newgroups commands etc.
time: Time suitable for newnews/newgroups commands etc."""
resp = self.shortcmd("DATE")
if not resp.startswith(b'111'):
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1][2:8]
time = elem[1][-6:]
if len(date) != 6 or len(time) != 6:
raise NNTPDataError(resp)
return resp, date, time
def _post(self, command, f):
resp = self.shortcmd(command)
# Raises error_??? if posting is not allowed
if not resp.startswith(b'3'):
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line.endswith(b'\n'):
line = line[:-1]
if line.startswith(b'.'):
line = b'.' + line
self.putline(line)
self.putline(b'.')
return self.getresp()
def post(self, f):
"""Process a POST command. Arguments:
- f: file containing the article
Returns:
- resp: server response if successful"""
return self._post('POST', f)
def ihave(self, id, f):
"""Process an IHAVE command. Arguments:
- id: message-id of the article
- f: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
return self._post('IHAVE {0}'.format(id), f)
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
resp = self.shortcmd('QUIT')
self.file.close()
self.sock.close()
del self.file, self.sock
return resp
# Test retrieval when run as a script.
# Assumption: if there's a local news server, it's called 'news'.
# Assumption: if user queries a remote news server, it's named
# in the environment variable NNTPSERVER (used by slrn and kin)
# and we want readermode off.
if __name__ == '__main__':
    import os
    # Bug fix: the previous expression `'news' and os.environ["NNTPSERVER"]`
    # always evaluated the environment lookup (the literal 'news' is truthy),
    # so it raised KeyError when NNTPSERVER was unset and the documented
    # 'news' fallback was dead code.  Use the env var when set, else 'news'.
    newshost = os.environ.get("NNTPSERVER", "news")
    # A dotless hostname is assumed to be the local server -> reader mode on.
    if newshost.find('.') == -1:
        mode = 'readermode'
    else:
        mode = None
    s = NNTP(newshost, readermode=mode)
    resp, count, first, last, name = s.group('comp.lang.python')
    print(resp)
    print('Group', name, 'has', count, 'articles, range', first, 'to', last)
    resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
    print(resp)
    for item in subs:
        print("%7s %s" % item)
    resp = s.quit()
    print(resp)
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-3.1/Lib/nntplib.py",
"copies": "1",
"size": "21051",
"license": "mit",
"hash": -3656283571190994400,
"line_mean": 33.1737012987,
"line_max": 83,
"alpha_frac": 0.5614935157,
"autogenerated": false,
"ratio": 4.107512195121951,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013811870668503717,
"num_lines": 616
} |
"""An NNTP client class based on RFC 977: Network News Transfer Protocol.
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', first + '-' + last)
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'r') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Imports
import re
import socket
__all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
"NNTPPermanentError","NNTPProtocolError","NNTPDataError",
"error_reply","error_temp","error_perm","error_proto",
"error_data",]
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 3977 limits NNTP line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
"""Base class for all nntplib exceptions"""
def __init__(self, *args):
Exception.__init__(self, *args)
try:
self.response = args[0]
except IndexError:
self.response = 'No response given'
class NNTPReplyError(NNTPError):
"""Unexpected [123]xx reply"""
pass
class NNTPTemporaryError(NNTPError):
"""4xx errors"""
pass
class NNTPPermanentError(NNTPError):
"""5xx errors"""
pass
class NNTPProtocolError(NNTPError):
"""Response does not begin with [1-5]"""
pass
class NNTPDataError(NNTPError):
"""Error in response data"""
pass
# for backwards compatibility
error_reply = NNTPReplyError
error_temp = NNTPTemporaryError
error_perm = NNTPPermanentError
error_proto = NNTPProtocolError
error_data = NNTPDataError
# Standard port used by NNTP servers
NNTP_PORT = 119
# Response numbers that are followed by additional text (e.g. article)
LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282']
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class NNTP:
def __init__(self, host, port=NNTP_PORT, user=None, password=None,
readermode=None, usenetrc=True):
"""Initialize an instance. Arguments:
- host: hostname to connect to
- port: port to connect to (default the standard NNTP port)
- user: username to authenticate with
- password: password to use with username
- readermode: if true, send 'mode reader' command after
connecting.
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.port = port
self.sock = socket.create_connection((host, port))
self.file = self.sock.makefile('rb')
self.debugging = 0
self.welcome = self.getresp()
# 'mode reader' is sometimes necessary to enable 'reader' mode.
# However, the order in which 'mode reader' and 'authinfo' need to
# arrive differs between some NNTP servers. Try to send
# 'mode reader', and if it fails with an authorization failed
# error, try again after sending authinfo.
readermode_afterauth = 0
if readermode:
try:
self.welcome = self.shortcmd('mode reader')
except NNTPPermanentError:
# error 500, probably 'not implemented'
pass
except NNTPTemporaryError, e:
if user and e.response[:3] == '480':
# Need authorization before 'mode reader'
readermode_afterauth = 1
else:
raise
# If no login/password was specified, try to get them from ~/.netrc
# Presume that if .netc has an entry, NNRP authentication is required.
try:
if usenetrc and not user:
import netrc
credentials = netrc.netrc()
auth = credentials.authenticators(host)
if auth:
user = auth[0]
password = auth[2]
except IOError:
pass
# Perform NNRP authentication if needed.
if user:
resp = self.shortcmd('authinfo user '+user)
if resp[:3] == '381':
if not password:
raise NNTPReplyError(resp)
else:
resp = self.shortcmd(
'authinfo pass '+password)
if resp[:3] != '281':
raise NNTPPermanentError(resp)
if readermode_afterauth:
try:
self.welcome = self.shortcmd('mode reader')
except NNTPPermanentError:
# error 500, probably 'not implemented'
pass
# Get the welcome message from the server
# (this is read and squirreled away by __init__()).
# If the response code is 200, posting is allowed;
# if it 201, posting is not allowed
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
if it 201, posting is not allowed."""
if self.debugging: print '*welcome*', repr(self.welcome)
return self.welcome
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def putline(self, line):
"""Internal: send one line to the server, appending CRLF."""
line = line + CRLF
if self.debugging > 1: print '*put*', repr(line)
self.sock.sendall(line)
def putcmd(self, line):
"""Internal: send one command to the server (through putline())."""
if self.debugging: print '*cmd*', repr(line)
self.putline(line)
def getline(self):
"""Internal: return one line from the server, stripping CRLF.
Raise EOFError if the connection is closed."""
line = self.file.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise NNTPDataError('line too long')
if self.debugging > 1:
print '*get*', repr(line)
if not line: raise EOFError
if line[-2:] == CRLF: line = line[:-2]
elif line[-1:] in CRLF: line = line[:-1]
return line
def getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error."""
resp = self.getline()
if self.debugging: print '*resp*', repr(resp)
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
def getlongresp(self, file=None):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error."""
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, str):
openedFile = file = open(file, "w")
resp = self.getresp()
if resp[:3] not in LONGRESP:
raise NNTPReplyError(resp)
list = []
while 1:
line = self.getline()
if line == '.':
break
if line[:2] == '..':
line = line[1:]
if file:
file.write(line + "\n")
else:
list.append(line)
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, list
def shortcmd(self, line):
"""Internal: send a command and get the response."""
self.putcmd(line)
return self.getresp()
def longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text."""
self.putcmd(line)
return self.getlongresp(file)
def newgroups(self, date, time, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of newsgroup names"""
return self.longcmd('NEWGROUPS ' + date + ' ' + time, file)
def newnews(self, group, date, time, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of message ids"""
cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
return self.longcmd(cmd, file)
def list(self, file=None):
"""Process a LIST command. Return:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)"""
resp, list = self.longcmd('LIST', file)
for i in range(len(list)):
# Parse lines into "group last first flag"
list[i] = tuple(list[i].split())
return resp, list
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
resp, lines = self.descriptions(group)
if len(lines) == 0:
return ""
else:
return lines[0][1]
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
line_pat = re.compile("^(?P<group>[^ \t]+)[ \t]+(.*)$")
# Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
resp, raw_lines = self.longcmd('LIST NEWSGROUPS ' + group_pattern)
if resp[:3] != "215":
# Now the deprecated XGTITLE. This either raises an error
# or succeeds with the same output structure as LIST
# NEWSGROUPS.
resp, raw_lines = self.longcmd('XGTITLE ' + group_pattern)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles (string)
- first: first article number (string)
- last: last article number (string)
- name: the group name"""
resp = self.shortcmd('GROUP ' + name)
if resp[:3] != '211':
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, count, first, last, name
def help(self, file=None):
"""Process a HELP command. Returns:
- resp: server response if successful
- list: list of strings"""
return self.longcmd('HELP',file)
def statparse(self, resp):
"""Internal: parse the response of a STAT, NEXT or LAST command."""
if resp[:2] != '22':
raise NNTPReplyError(resp)
words = resp.split()
nr = 0
id = ''
n = len(words)
if n > 1:
nr = words[1]
if n > 2:
id = words[2]
return resp, nr, id
def statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self.shortcmd(line)
return self.statparse(resp)
def stat(self, id):
"""Process a STAT command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: the article number
- id: the message id"""
return self.statcmd('STAT ' + id)
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self.statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self.statcmd('LAST')
def artcmd(self, line, file=None):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, list = self.longcmd(line, file)
resp, nr, id = self.statparse(resp)
return resp, nr, id, list
def head(self, id):
"""Process a HEAD command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's header"""
return self.artcmd('HEAD ' + id)
def body(self, id, file=None):
"""Process a BODY command. Argument:
- id: article number or message id
- file: Filename string or file object to store the article in
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's body or an empty list
if file was used"""
return self.artcmd('BODY ' + id, file)
def article(self, id):
"""Process an ARTICLE command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article"""
return self.artcmd('ARTICLE ' + id)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful"""
return self.shortcmd('SLAVE')
def xhdr(self, hdr, str, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
Returns:
- resp: server response if successful
- list: list of (nr, value) strings"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str, file)
for i in range(len(lines)):
line = lines[i]
m = pat.match(line)
if m:
lines[i] = m.group(1, 2)
return resp, lines
def xover(self, start, end, file=None):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
Returns:
- resp: server response if successful
- list: list of (art-nr, subject, poster, date,
id, references, size, lines)"""
resp, lines = self.longcmd('XOVER ' + start + '-' + end, file)
xover_lines = []
for line in lines:
elem = line.split("\t")
try:
xover_lines.append((elem[0],
elem[1],
elem[2],
elem[3],
elem[4],
elem[5].split(),
elem[6],
elem[7]))
except IndexError:
raise NNTPDataError(line)
return resp,xover_lines
def xgtitle(self, group, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$")
resp, raw_lines = self.longcmd('XGTITLE ' + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self,id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article"""
resp = self.shortcmd("XPATH " + id)
if resp[:3] != '223':
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date (self):
"""Process the DATE command. Arguments:
None
Returns:
resp: server response if successful
date: Date suitable for newnews/newgroups commands etc.
time: Time suitable for newnews/newgroups commands etc."""
resp = self.shortcmd("DATE")
if resp[:3] != '111':
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1][2:8]
time = elem[1][-6:]
if len(date) != 6 or len(time) != 6:
raise NNTPDataError(resp)
return resp, date, time
def post(self, f):
"""Process a POST command. Arguments:
- f: file containing the article
Returns:
- resp: server response if successful"""
resp = self.shortcmd('POST')
# Raises error_??? if posting is not allowed
if resp[0] != '3':
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line[-1] == '\n':
line = line[:-1]
if line[:1] == '.':
line = '.' + line
self.putline(line)
self.putline('.')
return self.getresp()
def ihave(self, id, f):
"""Process an IHAVE command. Arguments:
- id: message-id of the article
- f: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
resp = self.shortcmd('IHAVE ' + id)
# Raises error_??? if the server already has it
if resp[0] != '3':
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line[-1] == '\n':
line = line[:-1]
if line[:1] == '.':
line = '.' + line
self.putline(line)
self.putline('.')
return self.getresp()
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
resp = self.shortcmd('QUIT')
self.file.close()
self.sock.close()
del self.file, self.sock
return resp
# Test retrieval when run as a script.
# Assumption: if there's a local news server, it's called 'news'.
# Assumption: if user queries a remote news server, it's named
# in the environment variable NNTPSERVER (used by slrn and kin)
# and we want readermode off.
if __name__ == '__main__':
import os
newshost = 'news' and os.environ["NNTPSERVER"]
if newshost.find('.') == -1:
mode = 'readermode'
else:
mode = None
s = NNTP(newshost, readermode=mode)
resp, count, first, last, name = s.group('comp.lang.python')
print resp
print 'Group', name, 'has', count, 'articles, range', first, 'to', last
resp, subs = s.xhdr('subject', first + '-' + last)
print resp
for item in subs:
print "%7s %s" % item
resp = s.quit()
print resp
| {
"repo_name": "nmercier/linux-cross-gcc",
"path": "win32/bin/Lib/nntplib.py",
"copies": "4",
"size": "22106",
"license": "bsd-3-clause",
"hash": 4298727355898352000,
"line_mean": 32.7578616352,
"line_max": 80,
"alpha_frac": 0.5397629603,
"autogenerated": false,
"ratio": 4.217092712705075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6756855673005074,
"avg_score": null,
"num_lines": null
} |
"""anno2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
from . import views
# DRF router without trailing slashes (annotator-style endpoints).
router = routers.SimpleRouter(trailing_slash=False)
router.register(r'users', views.UserViewSet)
# Third positional argument is the basename for the generated URL names
# (required for viewsets without a default queryset).
router.register(r'annotations', views.AnnotationViewSet, 'Annotation')
router.register(r'search', views.SearchViewSet, 'Search')
urlpatterns = [
    # django-registration account flows (signup, activation, ...).
    url(r'^accounts/', include('registration.backends.default.urls')),
    url(r'^admin/', admin.site.urls),
    # Token endpoint handled by views.token.
    url(r'^auth/token$', views.token, name='token'),
    # Storage API: users/annotations/search routed through the router above.
    url(r'^store/', include(router.urls)),
    url(
        r'^api-auth/',
        include('rest_framework.urls', namespace='rest_framework')
    ),
    # Serves a JavaScript file; presumably the annotator client bundle.
    url(r'^anno2.js$', views.jsfile, name='jsfile'),
    # url(r'', views.root, name='root')
]
| {
"repo_name": "grvsmth/anno2-storage",
"path": "anno2/urls.py",
"copies": "1",
"size": "1424",
"license": "apache-2.0",
"hash": 3955696940711991000,
"line_mean": 36.4736842105,
"line_max": 79,
"alpha_frac": 0.6917134831,
"autogenerated": false,
"ratio": 3.5073891625615765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9699102645661577,
"avg_score": 0,
"num_lines": 38
} |
"""annodomini: utilities for Anno Domini dates and ranges"""
def to_ad(year):
    """Takes an integer year and returns an AD/BC formatted string.
    No calendar adjustments of any kind are made.
    Example:
    >>> print to_ad(1900)
    AD 1900
    >>> print to_ad(-100)
    100 BC
    """
    if year == 0:
        raise ValueError("There is no year 0 in the AD system")
    if year > 0:
        return u"AD %d" % year
    return u"%d BC" % -year
def to_ad_range(start, stop):
    """Takes two integer years and returns an AD/BC formatted string.
    The result is consistent with Wikipedia policy:
    http://en.wikipedia.org/wiki/Wikipedia:Manual_of_Style/Dates_and_numbers
    Example:
    >>> to_ad_range(-100, 100)
    u'100 BC \u2013 AD 100'
    >>> to_ad_range(100, 200)
    u'AD 100\u2013200'
    """
    if start == 0 or stop == 0:
        raise ValueError("There is no year 0 in the AD system")
    lo, hi = min(start, stop), max(start, stop)
    if hi < 0:
        # Both BC: larger absolute year (earlier) comes first.
        return u"%s\u2013%s BC" % (-lo, -hi)
    if lo > 0:
        # Both AD: chronological order, abbreviated en-dash range.
        return u"AD %s\u2013%s" % (lo, hi)
    # Era crossing: spaced en-dash with full era markers on both ends.
    return u"%s \u2013 %s" % (to_ad(lo), to_ad(hi))
| {
"repo_name": "isawnyu/pleiades-gane",
"path": "annodomini.py",
"copies": "1",
"size": "1493",
"license": "bsd-3-clause",
"hash": 63982091504369110,
"line_mean": 28.2745098039,
"line_max": 76,
"alpha_frac": 0.5693235097,
"autogenerated": false,
"ratio": 3.1901709401709404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42594944498709403,
"avg_score": null,
"num_lines": null
} |
"""Annotated computation graph management."""
import logging
from collections import OrderedDict
from itertools import chain
import numpy
import theano
from picklable_itertools.extras import equizip
from theano import Variable
from theano.gof import graph
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.scan_module.scan_op import Scan
from toolz import unique
from blocks.config import config
from blocks.roles import (add_role, has_roles, AUXILIARY, PARAMETER, DROPOUT,
COLLECTED, COLLECTOR)
from blocks.utils import (is_graph_input, is_shared_variable, dict_union,
shared_floatx_zeros, shared_like)
import warnings
logger = logging.getLogger(__name__)
class ComputationGraph(object):
    r"""Encapsulates a managed Theano computation graph.
    This implies that it not only contains the variables required to
    compute the given outputs, but also all the auxiliary variables and
    updates that were attached to these variables through the annotation
    system.
    All variables are presented in topologically sorted order according to
    the apply nodes that they are an input to.
    Parameters
    ----------
    outputs : (list of) :class:`~tensor.TensorVariable`
        The output(s) of the computation graph.
    Attributes
    ----------
    inputs : list of :class:`~tensor.TensorVariable`
        The inputs of the computation graph. This does not include shared
        variables and constants.
    shared_variables : list of :class:`~tensor.TensorSharedVariable`
        All the shared variables in the graph.
    parameters : list of :class:`~tensor.TensorSharedVariable`
        All the shared variables which have the :const:`.PARAMETER` role.
    outputs : list of :class:`~tensor.TensorVariable`
        The outputs of the computations graph (as passed to the
        constructor).
    auxiliary_variables : list of :class:`~tensor.TensorVariable`
        All variables which have the :const:`.AUXILIARY` role.
    intermediary_variables : list of :class:`~tensor.TensorVariable`
        Any variable that is not part of :attr:`inputs` or :attr:`outputs`.
    variables : list of :class:`~tensor.TensorVariable`
        All variables (including auxiliary) in the managed graph.
    scans : list of :class:`~theano.scan_module.scan_op.Scan`
        All Scan ops used in this computation graph.
    scan_variables : list of :class:`~tensor.TensorVariable`
        All variables of the inner graphs of Scan ops.
    updates : :class:`~tensor.TensorSharedVariable` updates
        All the updates found attached to the annotations.
    """
    def __init__(self, outputs):
        # Accept a single variable or a list of them.
        if isinstance(outputs, Variable):
            outputs = [outputs]
        self.outputs = outputs
        self._get_variables()
        # Memoization cache for has_inputs().
        self._has_inputs = {}
    def __iter__(self):
        # Iterating a graph iterates its (topologically sorted) variables.
        return iter(self.variables)
    @property
    def inputs(self):
        """Inputs to the graph, excluding constants and shared variables."""
        return [var for var in self.variables if is_graph_input(var)]
    @property
    def intermediary_variables(self):
        # Everything that is neither a graph input nor a declared output.
        return [var for var in self.variables if
                var not in self.inputs and
                var not in self.outputs]
    @property
    def shared_variables(self):
        return [var for var in self.variables if is_shared_variable(var)]
    @property
    def parameters(self):
        # Shared variables tagged with the PARAMETER role.
        return [var for var in self.shared_variables
                if has_roles(var, [PARAMETER])]
    @property
    def auxiliary_variables(self):
        return [var for var in self.variables if has_roles(var, [AUXILIARY])]
    @property
    def scan_variables(self):
        """Variables of Scan ops."""
        # NOTE(review): self._scan_graphs is only assigned inside
        # _get_variables when there is at least one non-shared output;
        # for a graph of only shared outputs this raises AttributeError.
        return list(chain(*[g.variables for g in self._scan_graphs]))
    def _get_variables(self):
        """Collect variables, updates and auxiliary variables.
        In addition collects all :class:`.Scan` ops and recurses in the
        respective inner Theano graphs.
        """
        updates = OrderedDict()
        # Shared variables passed directly as outputs would not show up in
        # the apply-node traversal below, so handle them separately.
        shared_outputs = [o for o in self.outputs if is_shared_variable(o)]
        usual_outputs = [o for o in self.outputs if not is_shared_variable(o)]
        variables = shared_outputs
        if usual_outputs:
            # Sort apply nodes topologically, get variables and remove
            # duplicates
            inputs = graph.inputs(self.outputs)
            sorted_apply_nodes = graph.io_toposort(inputs, usual_outputs)
            self.scans = list(unique([node.op for node in sorted_apply_nodes
                                     if isinstance(node.op, Scan)]))
            self._scan_graphs = [ComputationGraph(scan.outputs)
                                 for scan in self.scans]
            seen = set()
            # Order-preserving dedup: `seen.add` returns None (falsy), so
            # the condition keeps only first occurrences.
            main_vars = (
                [var for var in list(chain(
                    *[apply_node.inputs for apply_node in sorted_apply_nodes]))
                 if not (var in seen or seen.add(var))] +
                [var for var in self.outputs if var not in seen])
            # While preserving order add auxiliary variables, and collect
            # updates
            seen = set()
            # Intermediate variables could be auxiliary
            seen_avs = set(main_vars)
            variables = []
            for var in main_vars:
                variables.append(var)
                # Each annotation is processed once; its auxiliary
                # variables are appended right after the variable that
                # carries it, again deduplicated in order.
                for annotation in getattr(var.tag, 'annotations', []):
                    if annotation not in seen:
                        seen.add(annotation)
                        new_avs = [
                            av for av in annotation.auxiliary_variables
                            if not (av in seen_avs or seen_avs.add(av))]
                        variables.extend(new_avs)
                        updates = dict_union(updates, annotation.updates)
        self.variables = variables
        self.updates = updates
    def dict_of_inputs(self):
        """Return a mapping from an input name to the input."""
        return {var.name: var for var in self.inputs}
    def replace(self, replacements):
        """Replace certain variables in the computation graph.
        Parameters
        ----------
        replacements : dict
            The mapping from variables to be replaced to the corresponding
            substitutes.
        Examples
        --------
        >>> import theano
        >>> from theano import tensor, function
        >>> x = tensor.scalar('x')
        >>> y = x + 2
        >>> z = y + 3
        >>> a = z + 5
        Let's suppose we have dependent replacements like
        >>> replacements = {y: x * 2, z: y * 3}
        >>> cg = ComputationGraph([a])
        >>> theano.pprint(a)  # doctest: +NORMALIZE_WHITESPACE
        '(((x + TensorConstant{2}) + TensorConstant{3}) +
        TensorConstant{5})'
        >>> cg_new = cg.replace(replacements)
        >>> theano.pprint(
        ...     cg_new.outputs[0]) # doctest: +NORMALIZE_WHITESPACE
        '(((x * TensorConstant{2}) * TensorConstant{3}) +
        TensorConstant{5})'
        First two sums turned into multiplications
        >>> float(function(cg_new.inputs, cg_new.outputs)(3.)[0])
        23.0
        """
        # Due to theano specifics we have to make one replacement in time
        replacements = OrderedDict(replacements)
        outputs_cur = self.outputs
        # `replacements` with previous replacements applied. We have to track
        # variables in the new graph corresponding to original replacements.
        replacement_keys_cur = []
        replacement_vals_cur = []
        # Sort `replacements` in topological order
        # variables in self.variables are in topological order
        remaining_replacements = replacements.copy()
        for variable in self.variables:
            if variable in replacements:
                if has_roles(variable, [AUXILIARY]):
                    warnings.warn(
                        "replace method was asked to replace a variable ({}) "
                        "that is an auxiliary variable.".format(variable))
                replacement_keys_cur.append(variable)
                # self.variables should not contain duplicates,
                # otherwise pop() may fail.
                replacement_vals_cur.append(
                    remaining_replacements.pop(variable))
        # if remaining_replacements is not empty
        if remaining_replacements:
            warnings.warn(
                "replace method was asked to replace a variable(s) ({}) "
                "that is not a part of the computational "
                "graph.".format(str(remaining_replacements.keys())))
        # Replace step-by-step in topological order
        while replacement_keys_cur:
            replace_what = replacement_keys_cur[0]
            replace_by = replacement_vals_cur[0]
            # We also want to make changes in future replacements
            outputs_new = theano.clone(
                outputs_cur + replacement_keys_cur[1:] +
                replacement_vals_cur[1:],
                replace={replace_what: replace_by})
            # Reconstruct outputs, keys, and values
            outputs_cur = outputs_new[:len(outputs_cur)]
            # NOTE: replacement_keys_cur is reassigned first, so the
            # offset used for replacement_vals_cur below already accounts
            # for the shortened key list -- the slices do not overlap.
            replacement_keys_cur = outputs_new[len(outputs_cur):
                                               len(outputs_cur) +
                                               len(replacement_keys_cur) - 1]
            replacement_vals_cur = outputs_new[len(outputs_cur) +
                                               len(replacement_keys_cur):]
        return ComputationGraph(outputs_cur)
    def get_theano_function(self, additional_updates=None):
        """Create Theano function from the graph contained."""
        updates = self.updates
        if additional_updates:
            updates = dict_union(updates, OrderedDict(additional_updates))
        return theano.function(self.inputs, self.outputs, updates=updates)
    def get_snapshot(self, data):
        """Evaluate all role-carrying Theano variables on given data.
        Parameters
        ----------
        data : dict of (data source, data) pairs
            Data for input variables. The sources should match with the
            names of the input variables.
        Returns
        -------
        Dictionary of (variable, variable value on given data) pairs.
        """
        role_variables = [var for var in self.variables
                          if hasattr(var.tag, "roles") and
                          not is_shared_variable(var)]
        # Capture values by updating shared "value holders" as a side
        # effect of one function call, then read them back.
        value_holders = [shared_like(var) for var in role_variables]
        function = self.get_theano_function(equizip(value_holders,
                                                    role_variables))
        function(*(data[input_.name] for input_ in self.inputs))
        return OrderedDict([(var, value_holder.get_value(borrow=True))
                            for var, value_holder in equizip(role_variables,
                                                             value_holders)])
    def has_inputs(self, variable):
        """Check if a variable depends on input variables.
        Returns
        -------
        bool
            ``True`` if the given variable depends on input variables,
            ``False`` otherwise.
        """
        # Memoized recursive walk over the owning apply nodes.
        if variable not in self._has_inputs:
            self._has_inputs[variable] = False
            if is_graph_input(variable):
                self._has_inputs[variable] = True
            elif getattr(variable, 'owner', None):
                for dependancy in variable.owner.inputs:
                    if self.has_inputs(dependancy):
                        self._has_inputs[variable] = True
        return self._has_inputs[variable]
def add_annotation(var, annotation):
    """Attach an annotation object to a variable's ``tag``.

    Parameters
    ----------
    var : Theano variable (anything with a ``tag`` attribute)
        The variable to annotate.
    annotation : object
        The annotation to append to ``var.tag.annotations``.

    Raises
    ------
    ValueError
        If an annotation of the same class is already attached to the
        variable (at most one annotation per class is allowed).
    """
    annotations = getattr(var.tag, 'annotations', [])
    if any(old_annotation.__class__ == annotation.__class__
           for old_annotation in annotations):
        # Previously this raised a bare ValueError with no message,
        # which made the failure hard to diagnose.
        raise ValueError(
            "variable {} already has an annotation of class {}".format(
                var, annotation.__class__.__name__))
    else:
        var.tag.annotations = annotations + [annotation]
class Annotation(object):
    """Annotations on Theano variables in a graph.
    In Blocks annotations are automatically attached to variables created
    using bricks. One form of annotation is that many variables are
    assigned a role (see :class:`.VariableRole`). A second form of
    annotation comes in the form of attaching a :class:`Annotation`
    instance to the variable's ``tag`` attribute, with auxiliary variables
    and/or updates.
    For example, we might be interested in the mean activation of certain
    application of a :class:`.Linear` brick. The variable representing the
    mean activation is attached as an auxiliary variable to the annotations
    of the input and output variables of this brick. Using the
    :class:`ComputationGraph` class (the
    :attr:`~ComputationGraph.variables`,
    :attr:`~ComputationGraph.auxiliary_variables`, etc. attributes in
    particular) we can retrieve these Theano variables to pass on to the
    monitor, use as a regularizer, etc.
    In most cases, annotations are added on a brick level (e.g. each brick
    will assign the weight norm of its weights as an auxiliary value) or on
    an application level (e.g. each time a brick is applied, its mean
    activation will become an auxiliary variable). However, you can also
    add annotations manually, by setting the ``annotation`` value of a
    variable's ``tag`` field.
    Examples
    --------
    >>> from theano import tensor
    >>> x = tensor.vector()
    >>> annotation = Annotation()
    >>> annotation.add_auxiliary_variable(x + 1, name='x_plus_1')
    >>> add_annotation(x, annotation)
    >>> y = x ** 2
    >>> from blocks.graph import ComputationGraph
    >>> cg = ComputationGraph([y])
    >>> cg.auxiliary_variables
    [x_plus_1]
    """
    def __init__(self):
        # Auxiliary variables and updates contributed by this annotation.
        self.auxiliary_variables = []
        self.updates = OrderedDict()
    def add_auxiliary_variable(self, variable, roles=None, name=None):
        """Attach an auxiliary variable to the graph.
        Auxiliary variables are Theano variables that are not part of a
        brick's output, but can be useful nonetheless e.g. as a regularizer
        or to monitor during training progress.
        Parameters
        ----------
        variable : :class:`~tensor.TensorVariable`
            The variable you want to add.
        roles : list of :class:`.VariableRole` instances, optional
            The roles of this variable. The :const:`.AUXILIARY`
            role will automatically be added. Other options are
            :const:`.COST`, :const:`.WEIGHT`, etc.
        name : str, optional
            Name to give to the variable. If the variable already has a
            name it will be overwritten.
        Examples
        --------
        >>> from blocks.bricks.base import application, Brick
        >>> from blocks.roles import COST
        >>> from blocks.utils import shared_floatx_nans
        >>> class Foo(Brick):
        ...     def _allocate(self):
        ...         W = shared_floatx_nans((10, 10))
        ...         self.add_auxiliary_variable(W.mean(), name='mean_W')
        ...     @application
        ...     def apply(self, x, application_call):
        ...         application_call.add_auxiliary_variable(
        ...             x - 1, name='x_minus_1')
        ...         application_call.add_auxiliary_variable(
        ...             x.mean(), roles=[COST], name='mean_x')
        ...         return x + 1
        >>> from theano import tensor
        >>> x = tensor.vector()
        >>> y = Foo().apply(x)
        >>> from blocks.filter import VariableFilter
        >>> cg = ComputationGraph([y])
        >>> var_filter = VariableFilter(roles=[AUXILIARY])
        >>> var_filter(cg.variables)  # doctest: +SKIP
        {x_minus_1, mean_W, mean_x}
        >>> var_filter = VariableFilter(roles=[COST])
        >>> var_filter(cg.variables)  # doctest: +SKIP
        {mean_x}
        """
        add_annotation(variable, self)
        if name is not None:
            variable.name = name
            variable.tag.name = name
        # AUXILIARY is always assigned first, then any caller-given roles.
        for role in chain([AUXILIARY], roles or []):
            add_role(variable, role)
        self.auxiliary_variables.append(variable)
def apply_noise(computation_graph, variables, level, seed=None):
    """Add Gaussian noise to certain variable of a computation graph.
    Parameters
    ----------
    computation_graph : instance of :class:`ComputationGraph`
        The computation graph.
    variables : :class:`~tensor.TensorVariable`
        Variables to add noise to.
    level : float
        Noise level.
    seed : int, optional
        The seed with which
        :class:`~theano.sandbox.rng_mrg.MRG_RandomStreams` is initialized,
        is set to 1 by default.
    """
    if not seed:
        seed = config.default_seed
    rng = MRG_RandomStreams(seed)
    # Map every requested variable to itself plus fresh Gaussian noise.
    replace = dict((variable,
                    variable + rng.normal(variable.shape, std=level))
                   for variable in variables)
    return computation_graph.replace(replace)
def collect_parameters(computation_graph, parameters):
    """Replace parameters with a single shared variable.
    This can be useful if you need to calculate the full Hessian of a
    computational graph. It replaces parameters with slices of a single
    large vectors like
    >>> from blocks.utils import shared_floatx
    >>> W1 = shared_floatx(numpy.random.rand(10, 10))
    >>> W2 = shared_floatx(numpy.random.rand(10, 10))
    >>> all_parameters = shared_floatx(numpy.concatenate(
    ...     [W1.get_value().flatten(), W2.get_value().flatten()]))
    >>> W1 = all_parameters[:W1.size]
    >>> W2 = all_parameters[W1.size:]
    Parameters
    ----------
    computation_graph : :class:`ComputationGraph` instance
        The managed Theano graph in which to collect parameters.
    parameters : list of Theano shared variables
        The parameters whose values should be collected.
    Returns
    -------
    ComputationGraph instance
        A new Theano graph which has all the given parameters collected
        into a single large shared variable.
    Notes
    -----
    Note that this replacement makes the training of the model
    significantly slower because of the large amount of Theano's
    ``set_subtensor`` calls needed to train the model.
    Examples
    --------
    >>> from blocks.bricks import MLP, Logistic
    >>> from blocks.bricks.cost import SquaredError
    >>> from theano import tensor
    >>> x = tensor.matrix()
    >>> mlp = MLP(activations=[Logistic(), Logistic()],
    ...           dims=[784, 100, 784])
    >>> cost = SquaredError().apply(x, mlp.apply(x))
    >>> cg = ComputationGraph(cost)
    >>> new_cg = collect_parameters(cg, cg.shared_variables)
    The new graph only has a single shared variable. This variable receives
    the :const:`COLLECTOR` role.
    >>> new_cg.shared_variables
    [collected_parameters]
    The bricks' variables have been replaced with reshaped segments of this
    single shared variable. These replacements are given the
    :const:`.COLLECTED` role.
    >>> from blocks.filter import VariableFilter
    >>> from blocks.roles import PARAMETER
    >>> var_filter = VariableFilter(roles=[COLLECTED])
    >>> var_filter(new_cg.variables)  # doctest: +SKIP
    [Reshape{1}.0, Reshape{1}.0, Reshape{2}.0, Reshape{2}.0]
    """
    # Snapshot current values plus their sizes and shapes.
    parameter_values = [p.get_value(borrow=True) for p in parameters]
    parameter_sizes = [value.size for value in parameter_values]
    parameter_shapes = [value.shape for value in parameter_values]
    # One flat vector holding every parameter, in the given order.
    new_parameters = shared_floatx_zeros(sum(parameter_sizes))
    new_parameters.set_value(numpy.concatenate([value.flatten()
                             for value in parameter_values]))
    new_parameters.name = 'collected_parameters'
    add_role(new_parameters, COLLECTOR)
    # Slice the flat vector back into pieces shaped like the originals.
    starts = numpy.cumsum([0] + parameter_sizes[:-1])
    stops = numpy.cumsum(parameter_sizes)
    replacements = {}
    for parameter, shape, start, stop in zip(parameters, parameter_shapes,
                                             starts, stops):
        replacement = new_parameters[start:stop].reshape(shape)
        replacement.replacement_of = parameter
        add_role(replacement, COLLECTED)
        replacements[parameter] = replacement
    return computation_graph.replace(replacements)
def apply_dropout(computation_graph, variables, drop_prob, rng=None,
                  seed=None):
    """Returns a graph to variables in a computational graph.
    Parameters
    ----------
    computation_graph : instance of :class:`ComputationGraph`
        The computation graph.
    variables : list of :class:`~tensor.TensorVariable`
        Variables to be dropped out.
    drop_prob : float
        Probability of dropping out. If you want to apply the dropout
        with different probabilities for different layers, call it
        several times.
    rng : :class:`~theano.sandbox.rng_mrg.MRG_RandomStreams`
        Random number generator.
    seed : int
        Random seed to be used if `rng` was not specified.
    Notes
    -----
    For more information, see [DROPOUT]_.
    .. [DROPOUT] Hinton et al. *Improving neural networks by preventing
       co-adaptation of feature detectors*, arXiv:1207.0580.
    Examples
    --------
    >>> import numpy
    >>> from theano import tensor, function
    >>> from blocks.bricks import MLP, Identity
    >>> from blocks.filter import VariableFilter
    >>> from blocks.initialization import Constant
    >>> from blocks.roles import INPUT
    >>> linear = MLP([Identity(), Identity()], [2, 10, 2],
    ...              weights_init=Constant(1), biases_init=Constant(2))
    >>> x = tensor.matrix('x')
    >>> y = linear.apply(x)
    >>> cg = ComputationGraph(y)
    We are going to drop out all the input variables
    >>> inputs = VariableFilter(roles=[INPUT])(cg.variables)
    Here we apply dropout with default setting to our computation graph
    >>> cg_dropout = apply_dropout(cg, inputs, 0.5)
    Dropped out variables have role `DROPOUT` and are tagged with
    `replacement_of` tag. Let's filter these variables and check if they
    have the links to original ones.
    >>> dropped_out = VariableFilter(roles=[DROPOUT])(cg_dropout.variables)
    >>> inputs_referenced = [var.tag.replacement_of for var in dropped_out]
    >>> set(inputs) == set(inputs_referenced)
    True
    Compiling theano functions to forward propagate in original and dropped
    out graphs
    >>> fprop = function(cg.inputs, cg.outputs[0])
    >>> fprop_dropout = function(cg_dropout.inputs, cg_dropout.outputs[0])
    Initialize an MLP and apply these functions
    >>> linear.initialize()
    >>> fprop(numpy.ones((3, 2),
    ...       dtype=theano.config.floatX))  # doctest:+ELLIPSIS
    array([[ 42.,  42.],
           [ 42.,  42.],
           [ 42.,  42.]]...
    >>> fprop_dropout(numpy.ones((3, 2),
    ...               dtype=theano.config.floatX))  # doctest:+ELLIPSIS
    array([[ 0.,  0.],
           [ 0.,  0.],
           [ 0.,  0.]]...
    And after the second run answer is different
    >>> fprop_dropout(numpy.ones((3, 2),
    ...               dtype=theano.config.floatX))  # doctest:+ELLIPSIS
    array([[   0.,   52.],
           [ 100.,    0.],
           [   0.,    0.]]...
    """
    if not rng:
        # Only consult the seed (or its default) when no RNG was supplied.
        rng = MRG_RandomStreams(seed or config.default_seed)
    replacements = []
    for variable in variables:
        # Inverted dropout: mask and rescale so the expectation is kept.
        mask = rng.binomial(variable.shape, p=1 - drop_prob,
                            dtype=theano.config.floatX)
        dropped = variable * mask / (1 - drop_prob)
        add_role(dropped, DROPOUT)
        dropped.tag.replacement_of = variable
        replacements.append((variable, dropped))
    return computation_graph.replace(replacements)
| {
"repo_name": "nke001/attention-lvcsr",
"path": "libs/blocks/blocks/graph.py",
"copies": "1",
"size": "24047",
"license": "mit",
"hash": -1561981364276055300,
"line_mean": 37.4137380192,
"line_max": 79,
"alpha_frac": 0.6063958082,
"autogenerated": false,
"ratio": 4.416345270890726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 626
} |
"""Annotated variant VCF files with additional information.
- GATK variant annotation with snpEff predicted effects.
"""
import os
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.variation import vcfutils
def get_gatk_annotations(config, include_depth=True):
    """Retrieve annotations to use for GATK VariantAnnotator.
    If include_depth is false, we'll skip annotating DP. Since GATK downsamples
    this will undercount on high depth sequencing and the standard outputs
    from the original callers may be preferable.
    """
    broad_runner = broad.runner_from_config(config)
    annotations = [
        "BaseQualityRankSumTest", "FisherStrand",
        "GCContent", "HaplotypeScore", "HomopolymerRun",
        "MappingQualityRankSumTest", "MappingQualityZero",
        "QualByDepth", "ReadPosRankSumTest", "RMSMappingQuality"]
    if include_depth:
        annotations.append("DepthPerAlleleBySample")
    # Restricted (newer) GATK renamed DepthOfCoverage to Coverage.
    coverage_annotation = ("Coverage"
                           if broad_runner.gatk_type() == "restricted"
                           else "DepthOfCoverage")
    annotations.append(coverage_annotation)
    return annotations
def add_dbsnp(orig_file, dbsnp_file, config):
    """Annotate a VCF file with dbSNP identifiers via bcftools.

    Returns a bgzipped, indexed "<base>-wdbsnp.vcf.gz" file.
    """
    orig_file = vcfutils.bgzip_and_index(orig_file, config)
    base = utils.splitext_plus(orig_file)[0]
    out_file = "%s-wdbsnp.vcf.gz" % base
    if not utils.file_uptodate(out_file, orig_file):
        with file_transaction(config, out_file) as tx_out_file:
            cmd = ("bcftools annotate -c ID -a {dbsnp_file} "
                   "-o {tx_out_file} -O z {orig_file}")
            cmd = cmd.format(dbsnp_file=dbsnp_file, tx_out_file=tx_out_file,
                             orig_file=orig_file)
            do.run(cmd, "Annotate with dbSNP")
    return vcfutils.bgzip_and_index(out_file, config)
def annotate_nongatk_vcf(orig_file, bam_files, dbsnp_file, ref_file, config):
    """Annotate a VCF file with dbSNP and standard GATK called annotations.

    Runs GATK VariantAnnotator over the bgzipped input VCF, writing a
    "<base>-gatkann<ext>" output next to the input. Returns the input
    unchanged when no GATK runner is available.
    """
    orig_file = vcfutils.bgzip_and_index(orig_file, config)
    broad_runner = broad.runner_from_config_safe(config)
    if not broad_runner or not broad_runner.has_gatk():
        # No GATK available: return the (bgzipped) input untouched.
        return orig_file
    else:
        out_file = "%s-gatkann%s" % utils.splitext_plus(orig_file)
        if not utils.file_exists(out_file):
            with file_transaction(config, out_file) as tx_out_file:
                # Avoid issues with incorrectly created empty GATK index files.
                # Occurs when GATK cannot lock shared dbSNP database on previous run
                idx_file = orig_file + ".idx"
                if os.path.exists(idx_file) and not utils.file_exists(idx_file):
                    os.remove(idx_file)
                # Depth annotations skipped: see get_gatk_annotations for why.
                annotations = get_gatk_annotations(config, include_depth=False)
                params = ["-T", "VariantAnnotator",
                          "-R", ref_file,
                          "--variant", orig_file,
                          "--out", tx_out_file,
                          "-L", orig_file]
                if dbsnp_file:
                    params += ["--dbsnp", dbsnp_file]
                for bam_file in bam_files:
                    params += ["-I", bam_file]
                for x in annotations:
                    params += ["-A", x]
                if ("--allow_potentially_misencoded_quality_scores" not in params
                        and "-allowPotentiallyMisencodedQuals" not in params):
                    params += ["--allow_potentially_misencoded_quality_scores"]
                # be less stringent about BAM and VCF files (esp. N in CIGAR for RNA-seq)
                # start by removing existing -U or --unsafe opts
                # (if another option is added to Gatk that starts with -U... this may create a bug)
                unsafe_options = [x for x in params if x.startswith(("-U", "--unsafe"))]
                for my_opt in unsafe_options:
                    ind_to_rem = params.index(my_opt)
                    # are the options given as separate strings or in one?
                    if my_opt.strip() == "-U" or my_opt.strip() == "--unsafe":
                        # Bare flag form: also drop the flag's separate value.
                        params.pop(ind_to_rem + 1)
                    params.pop(ind_to_rem)
                params.extend(["-U", "ALL"])
                broad_runner = broad.runner_from_config(config)
                broad_runner.run_gatk(params)
        vcfutils.bgzip_and_index(out_file, config)
        return out_file
| {
"repo_name": "Cyberbio-Lab/bcbio-nextgen",
"path": "bcbio/variation/annotation.py",
"copies": "1",
"size": "4309",
"license": "mit",
"hash": 5816760880851995000,
"line_mean": 47.9659090909,
"line_max": 99,
"alpha_frac": 0.591320492,
"autogenerated": false,
"ratio": 3.6455160744500845,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47368365664500844,
"avg_score": null,
"num_lines": null
} |
"""annotate fusion outputs from STAR and Tophat
Supported:
oncofuse: http://www.unav.es/genetica/oncofuse.html
github: https://github.com/mikessh/oncofuse
"""
from __future__ import print_function
import os
import pysam
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.provenance import do
import bcbio.pipeline.datadict as dd
from bcbio.log import logger
# ## oncofuse fusion trancript detection
def run(data):
    """Run oncofuse fusion detection on STAR/Tophat junction output.

    Returns the path to the oncofuse output table, or None when the
    aligner or genome build is unsupported or no junction input exists.
    """
    if not aligner_supports_fusion(data):
        aligner = dd.get_aligner(data)
        logger.warning("Oncofuse is not supported for the %s aligner, "
                       "skipping. " % aligner)
        return None
    config = data["config"]
    genome_build = data.get("genome_build", "")
    input_type, input_dir, input_file = _get_input_para(data)
    if genome_build == "GRCh37":  # assume genome_build is hg19 otherwise
        # Oncofuse expects hg19-style ("chr" prefixed) names, so rewrite
        # GRCh37 junction files before running.
        if config["algorithm"].get("aligner") in ["star"]:
            input_file = _fix_star_junction_output(input_file)
        if config["algorithm"].get("aligner") in ["tophat", "tophat2"]:
            input_file = _fix_tophat_junction_output(input_file)
    elif "hg19" not in genome_build:
        return None
    # handle cases when fusion file doesn't exist
    if not file_exists(input_file):
        return None
    out_file = os.path.join(input_dir, "oncofuse_out.txt")
    if file_exists(out_file):
        return out_file
    oncofuse = config_utils.get_program("oncofuse", config)
    tissue_type = _oncofuse_tissue_arg_from_config(data)
    resources = config_utils.get_resources("oncofuse", config)
    # out_file cannot exist here (early return above), so run unconditionally.
    cl = [oncofuse]
    cl += resources.get("jvm_opts", ["-Xms750m", "-Xmx5g"])
    with file_transaction(data, out_file) as tx_out_file:
        cl += [input_file, input_type, tissue_type, tx_out_file]
        cmd = " ".join(cl)
        try:
            do.run(cmd, "oncofuse fusion detection", data)
        except Exception:
            # Best effort: leave a marker file so downstream steps see the
            # run happened but failed, rather than retrying forever.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            do.run("touch %s && echo '# failed' >> %s" % (tx_out_file, tx_out_file), "oncofuse failed", data)
    return out_file
def is_non_zero_file(fpath):
    """Return True if fpath is an existing regular file with non-zero size.

    Replaces the redundant `True if ... else False` ternary; the boolean
    expression already evaluates to a bool.
    """
    return os.path.isfile(fpath) and os.path.getsize(fpath) > 0
def aligner_supports_fusion(data):
    """Check whether the configured aligner emits fusion junction output.

    Returns the falsy aligner value itself when no aligner is configured
    (preserving the original truthy/falsy contract for callers).
    """
    supported = ("tophat2", "tophat", "star")
    aligner = dd.get_aligner(data)
    return aligner and aligner.lower() in supported
def _get_input_para(data):
    """Work out the oncofuse input triple for this sample.

    Returns (input_type, align_dir, junction_file), where input_type encodes
    the aligner plus the N/M hard filters (e.g. "tophat-2-4"), or None when
    the configured aligner is neither tophat nor STAR.
    """
    TOPHAT_FUSION_OUTFILE = "fusions.out"
    STAR_FUSION_OUTFILE = "Chimeric.out.junction"
    config = data["config"]
    # Disambiguation (mixed-species runs) nests outputs one directory deeper
    # and triggers junction filtering against the contamination alignments.
    is_disambiguate = len(config["algorithm"].get("disambiguate", [])) > 0
    aligner = config["algorithm"].get("aligner")
    if aligner == "tophat2":
        aligner = "tophat"
    names = data["rgnames"]
    # set some default hard filters:
    N = 2 # min. spanning reads
    M = 4 # min. supporting reads (spanning + encompassing)
    align_dir_parts = os.path.join(data["dirs"]["work"], "align", names["sample"])
    align_dir_parts = os.path.join(align_dir_parts, data["genome_build"]) if is_disambiguate else align_dir_parts
    if aligner in ["tophat", "tophat2"]:
        # NOTE(review): aligner was normalized to "tophat" above, so the
        # "tophat2" membership here can never match -- harmless but dead.
        align_dir_parts = os.path.join(data["dirs"]["work"], "align", names["sample"], names["lane"]+"_%s" % aligner)
        return "tophat-%d-%d" % (N,M), align_dir_parts, os.path.join(align_dir_parts, TOPHAT_FUSION_OUTFILE)
    if aligner in ["star"]:
        star_junction_file = os.path.join(align_dir_parts, names["lane"]+STAR_FUSION_OUTFILE)
        if is_disambiguate:
            contamination_bam = data["disambiguate"][ config["algorithm"]["disambiguate"][0] ]
            disambig_out_file = star_junction_file + "_disambiguated"
            if file_exists(disambig_out_file):
                # Previous run already produced the disambiguated junctions.
                star_junction_file = disambig_out_file
            elif file_exists(star_junction_file) and file_exists(contamination_bam):
                star_junction_file = _disambiguate_star_fusion_junctions(star_junction_file, contamination_bam,
                                                                         disambig_out_file, data)
        return "rnastar-%d-%d" % (N,M), align_dir_parts, star_junction_file
    return None
def _fix_tophat_junction_output(chimeric_out_junction_file):
    """Convert GRCh37 chromosome names in Tophat fusions.out to hg19 naming.

    Writes a sibling file with a ".hg19" suffix; rows on chromosomes with no
    hg19 equivalent (per _h37tohg19) are dropped.
    """
    # for fusion.out
    out_file = chimeric_out_junction_file + ".hg19"
    with open(out_file, "w") as out_handle:
        with open(chimeric_out_junction_file, "r") as in_handle:
            for line in in_handle:
                parts = line.split("\t")
                left, right = parts[0].split("-")
                leftchr = _h37tohg19(left)
                rightchr = _h37tohg19(right)
                if not leftchr or not rightchr:
                    continue
                # Reuse the converted names instead of re-running the
                # mapping a second time for each side.
                parts[0] = "%s-%s" % (leftchr, rightchr)
                out_handle.write("\t".join(parts))
    return out_file
def _fix_star_junction_output(chimeric_out_junction_file):
    """Rewrite a STAR Chimeric.out.junction file with hg19 chromosome names.

    Rows whose donor or acceptor chromosome has no hg19 equivalent are
    skipped; output goes to a sibling file with a ".hg19" suffix.
    """
    out_file = chimeric_out_junction_file + ".hg19"
    with open(out_file, "w") as out_handle, \
            open(chimeric_out_junction_file, "r") as in_handle:
        for record in in_handle:
            fields = record.split("\t")
            # Columns 0 and 3 hold the two chromosome names.
            fields[0] = _h37tohg19(fields[0])
            fields[3] = _h37tohg19(fields[3])
            if fields[0] and fields[3]:
                out_handle.write("\t".join(fields))
    return out_file
def _h37tohg19(chromosome):
MAX_CHROMOSOMES = 23
if chromosome in [str(x) for x in range(1, MAX_CHROMOSOMES)] + ["X", "Y"]:
new_chrom = "chr%s" % chromosome
elif chromosome == "MT":
new_chrom = "chrM"
# not a supported chromosome
else:
return None
return new_chrom
def _oncofuse_tissue_arg_from_config(data):
"""Retrieve oncofuse arguments supplied through input configuration.
tissue_type is the library argument, which tells Oncofuse to use its
own pre-built gene expression libraries. There are four pre-built
libraries, corresponding to the four supported tissue types:
EPI (epithelial origin),
HEM (hematological origin),
MES (mesenchymal origin) and
AVG (average expression, if tissue source is unknown).
"""
SUPPORTED_TISSUE_TYPE = ["EPI", "HEM", "MES", "AVG"]
if data.get("metadata", {}).get("tissue") in SUPPORTED_TISSUE_TYPE:
return data.get("metadata", {}).get("tissue")
else:
return "AVG"
def _disambiguate_star_fusion_junctions(star_junction_file, contamination_bam, disambig_out_file, data):
    """ Disambiguate detected fusions based on alignments to another species.

    Keeps only junction records whose read name does not have a primary,
    mapped alignment in the contamination BAM; writes the survivors to
    disambig_out_file and returns that path.
    """
    out_file = disambig_out_file
    # Junction records keyed by read name (column 10 of the STAR output).
    fusiondict = {}
    with open(star_junction_file, "r") as in_handle:
        for my_line in in_handle:
            my_line_split = my_line.strip().split("\t")
            if len(my_line_split) < 10:
                # Not a full junction record; skip.
                continue
            fusiondict[my_line_split[9]] = my_line.strip("\n")
    # Drop any junction whose read also aligns to the other species.
    with pysam.Samfile(contamination_bam, "rb") as samfile:
        for my_read in samfile:
            if my_read.is_unmapped or my_read.is_secondary:
                continue
            if my_read.qname in fusiondict:
                fusiondict.pop(my_read.qname)
    with file_transaction(data, out_file) as tx_out_file:
        with open(tx_out_file, 'w') as myhandle:
            for my_key in fusiondict:
                print(fusiondict[my_key], file=myhandle)
    return out_file
| {
"repo_name": "biocyberman/bcbio-nextgen",
"path": "bcbio/rnaseq/oncofuse.py",
"copies": "1",
"size": "7546",
"license": "mit",
"hash": -340276373305822660,
"line_mean": 41.156424581,
"line_max": 117,
"alpha_frac": 0.6172806785,
"autogenerated": false,
"ratio": 3.324229074889868,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4441509753389868,
"avg_score": null,
"num_lines": null
} |
'''annotate_rnp - add information to the output from RNPxl
=======================================================
:Author: Tom Smith
:Release: $Id$
:Date: |today|
:Tags: Python RNP Proteomics
Purpose
-------
This script takes the xlsx output from RNPxl and annotates the table with useful information for downstream analyses.
The following columns are added:
- master_protein(s): The master protein(s) for the peptide. See below
for how this is derived
- master_uniprot_id(s): The uniprot id(s) for the master protein(s)
- protein_description(s): Description(s) for the master protein(s)
- protein_length(s): The length(s) of the master protein(s)
- position_in_peptide: The most-likely position of the cross-link in the peptide
- position_in_protein(s): The most-likely position of the cross-link in the protein
- window_13-17: 13, 15 & 17 amino acid windows around the cross-link
position. See below for further notes on these windows
- crap_protein: Is the protein in the cRAP database of common
proteomics proteins, e.g keratin
If a log file is requested (--log), basic statistics are collected and
written to the log file
Fasta description format
------------------------
The source of the protein (SwissProt or TrEMBL) is derived from the
protein fasta description, with SwissProt proteins starting 'sp' and
TrEMBL 'tr'. Furthermore, the description column is derived from the
fasta description too. For this reason the fasta databases must be
correctly formatted as in the examples below. This is the standard
format for fasta files from uniprot.
format:
Three-level identifier followed by protein description:
>[sp|tr]|[Uniprot id]|[Protein name] [Description]
examples:
>sp|P27361|MK03_HUMAN Mitogen-activated protein kinase 3 OS=Homo sapiens GN=MAPK3 PE=1 SV=4
>tr|F8W1T5|F8W1T5_HUMAN GTPase RhebL1 (Fragment) OS=Homo sapiens GN=RHEBL1 PE=4 SV=1
Deriving the master proteins
----------------------------
Matching peptides to their source proteins (protein inference) is a
common task in proteomics and there are many possible
approaches. Ultimately, the aim is usually to identify the most likely
source protein since taking all possible sources makes downstream
analyses very complex. Here we use the parsimonious approach to
identify a minimal set of proteins which explains all peptides
observed. In essense, the approach is as follows:
- start with list of all peptides
- sort proteins by the number of peptides observed
- take the protein(s) with the most peptides and remove these from the peptides list
- continue through the sorted proteins, removing peptides, until the
peptides list is empty
Additionally, we prioritise matches to SwissProt proteins over TrEMBL
proteins. SwissProt proteins have been manually curated and should not
contain any redundant proteins, truncated sequences etc. On the other
hand, the set of TrEMBL proteins will ceratainly contain proteins
which are redundant with respect to the SwissProt proteins as well as
truncated proteins. It is useful to include the TrEMBL proteins to
catch peptides which are from a protein or isoform which has not been
curated into SwissProt yet. However, where a SwissProt match is found,
any TrEMBL match can safely be ignored. Here, for all peptides with
matched to both SwissProt and TrEMBL proteins, we remove all the
TrEMBL matches.
In some instances, it is not possible to assign a single protein to a
peptide. In these cases, the proteins names, uniprot ids, descriptions
and lengths are ';' separated in the outfile.
13, 15 & 17 amino acid windows
------------------------------
For motif analysis, 13, 15 & 17 amino acid windows around the most
likely cross-linked protein are provided. Where the cross-link
posistion is too close to the start or end of the protein for the
window, e.g cross link is at position 6 - not possible to extract a
window from -1 -> 12, the value "protein_too_short_for_window" is
given. Where there is more than one master protein, the windows is
provided only where the amino acid sequence is the same for all master
proteins. Where the sequences diverge, the value
"different_sequences_for_the_proteins" is given. Care must be taken
with any motif analysis since the cross-link site is hard to identify
so the windows may not be well centered. Furthermore, since MS is bias
towards particular peptides, care must be taken to provide a suitable
background set of windows. For example, random windows from the fasta
could simply yield motifs which are enriched in MS analyses
Usage
-----
By default, the outfile will be created in the same directory with the
suffix annotated.xlsx. You can change the outfile name by specifying
the option --outfile
python annotate_rnp.py --infile=RNP.xlsx --fasta-db=Human_201701.fasta
--fasta-crap-db=RAP_FullIdentifiers.fasta --outfile=RNP_annotated.xlsx
--logfile=RNP_annotation.log
Command line options
--------------------
'''
import argparse
import collections
import copy
import os
import re
import sys
import pandas as pd
import proteomics.fasta as fasta
def getMotifWindow(positions, proteins, length):
    '''Extract amino acid windows of odd size (length) from (proteins),
    centered at (positions).

    Returns the window sequence when all proteins agree,
    "protein_too_short_for_window" when any protein cannot supply a full
    window, or "different_sequences_for_the_proteins" when the master
    proteins disagree around the cross-link site.
    '''
    assert length % 2 == 1, 'motif window must be an odd length'
    assert len(positions) == len(proteins), "must have as many positions as proteins"
    # Integer division replaces the original float division + int() casts.
    buffer = (length - 1) // 2  # residues flanking the centre on each side
    windows = set()
    for position, protein_seq in zip(positions, proteins):
        start = position - buffer
        if start < 0:
            # The window would run off the start of the protein. A negative
            # slice index would silently wrap to the sequence end, so bail
            # out explicitly (the original relied on the wrapped slice being
            # empty and failing the length check below).
            return "protein_too_short_for_window"
        windows.add(protein_seq[start: position + buffer + 1])
    if min(len(x) for x in windows) != length:
        return "protein_too_short_for_window"
    if len(windows) == 1:
        return list(windows)[0]
    else:
        return "different_sequences_for_the_proteins"
def writeSectionHeader(logfile, section_header):
    """Write *section_header* to *logfile*, framed by a '=' blocker line
    above and a '-' underline below. Returns the blocker string so the
    caller can close the section with the same line."""
    section_blocker = ("======================================="
                       "=======================================")
    underliner1 = ("----------------------------------------"
                   "----------------------------------------")
    # Single write producing the same byte stream as the original pair.
    logfile.write("\n%s\n%s\n%s\n" % (section_blocker, section_header,
                                      underliner1))
    return section_blocker
def main(argv=sys.argv):
    """Annotate an RNPxl xlsx table with master proteins, cross-link
    positions and motif windows; writes a tab-separated outfile and an
    optional statistics logfile (see module docstring for details)."""
    # NOTE(review): ArgumentParser's first positional parameter is `prog`,
    # so argv here becomes the program name rather than input to
    # parse_args() -- presumably unintended; confirm before changing.
    parser = argparse.ArgumentParser(
        argv, usage=__doc__)
    optional = parser.add_argument_group('optional arguments')
    required = parser.add_argument_group('required arguments')
    required.add_argument('-i', '--infile', dest="infile", required=True,
                          help="")
    required.add_argument('-f', '--fasta-db', dest="fasta_db", required=True,
                          help="")
    required.add_argument('-fc', '--fasta-crap-db', dest="fasta_crap_db",
                          required=True, help="")
    optional.add_argument('-o', '--outfile', dest="outfile", default=None,
                          help="")
    optional.add_argument('-l', '--logfile', dest="logfile", default=os.devnull,
                          help="")
    args = vars(parser.parse_args())
    if args['outfile'] is None:
        args['outfile'] = args['infile'].replace(".xlsx", "_annotated.tsv")
    # NOTE(review): logfile is opened but never closed; relies on process
    # exit to flush.
    logfile = open(args['logfile'], 'w')
    logfile.write("Logfile for annotate_rnp.py\n\n")
    section_blocker = writeSectionHeader(logfile, "Script arguments:")
    for key, value in args.items():
        logfile.write("%s: %s\n" % (key, value))
    logfile.write("%s\n\n" % section_blocker)
    # read the data into a dataframe
    rnp_df = pd.read_excel(args['infile'])
    # add some basic annotations
    rnp_df['tr_only'] = [x.count("sp|") == 0 for x in rnp_df['Proteins']]
    rnp_df['matches'] = [len(x.split(",")) for x in rnp_df['Proteins']]
    #(1) Get the mappings between peptide and proteins
    # pep2pro: peptide -> {1: SwissProt matches, 2: TrEMBL matches}
    pep2pro = collections.defaultdict(lambda: collections.defaultdict(set))
    pep2allpro = collections.defaultdict(set)
    pro2pep = collections.defaultdict(set)
    top_level_proteins = set()
    initial_proteins = set()
    # (1.1) extract the initial mappings between proteins and peptides
    for row_ix, row_values in rnp_df[['Proteins', 'Peptide']].iterrows():
        proteins = row_values['Proteins'].split(",")
        peptide = row_values['Peptide']
        if peptide in pep2pro:
            assert pep2allpro[peptide] == proteins, (
                "The same peptide is observed more than once with different proteins!")
        pep2allpro[peptide] = proteins
        for protein in proteins:
            initial_proteins.add(protein)
            pro2pep[protein].add(peptide)
            if protein.split("|")[0] == "sp":
                protein_level = 1
                top_level_proteins.add(protein)
            elif protein.split("|")[0] == "tr":
                protein_level = 2
            else:
                raise ValueError("Protein does not appear to be either"
                                 "SwissProt(sp) or TrEMBL(tr)")
            pep2pro[peptide][protein_level].add(protein)
    section_blocker = writeSectionHeader(logfile, "Initial file stats")
    logfile.write("# initial peptides: %i\n" % len(pep2pro))
    logfile.write("# initial proteins: %i\n" % len(pro2pep))
    logfile.write("# initial SwissProt proteins: %i\n" % len(top_level_proteins))
    logfile.write("# initial TrEMBL proteins: %i\n" % (
        len(pro2pep)-len(top_level_proteins)))
    logfile.write("%s\n\n" % section_blocker)
    # (1.2) find the peptides with only TrEMBL protein matches and
    # 'upgrade' these TrEMBL proteins to being equivalent to SwissProt
    tr_only_peptides = set([x for x in pep2pro.keys() if len(pep2pro[x][1])==0])
    set_upgraded = set()
    for peptide in tr_only_peptides:
        upgraded = pep2pro[peptide][2]
        set_upgraded.update(upgraded)
        top_level_proteins.update(upgraded)
        # Move the upgraded proteins from level 2 (TrEMBL) to level 1.
        pep2pro[peptide][2] = pep2pro[peptide][2].difference(set(upgraded))
        pep2pro[peptide][1] = pep2pro[peptide][1].union(set(upgraded))
    section_blocker = writeSectionHeader(
        logfile, "Deciding which TrEMBL proteins to retain:")
    logfile.write("# peptides with only TrEMBL matches: %i\n" % (
        len(tr_only_peptides)))
    logfile.write("# TrEMBL proteins retained as no SwissProt matches for "
                  "peptide: %i\n" % (len(set_upgraded)))
    logfile.write("%s\n\n" % section_blocker)
    # (1.3) Use a parsimonious approach to identifty the minimum number
    # of proteins required to cover all the peptides:
    # Start from the protein(s) with the most peptides and mark these as covered.
    # Continue with remaining proteins in order of peptides per protein
    # until all peptides are covered
    retained_proteins = []
    peptides = copy.deepcopy(set(pep2pro.keys()))
    # NOTE(review): peptide_counts is assigned but never used below.
    peptide_counts = {}
    tmppro2pep = copy.deepcopy(pro2pep)
    new_top_level_proteins = copy.deepcopy(top_level_proteins)
    new_pep2pro = collections.defaultdict(set)
    peptide_count = max(map(len, tmppro2pep.values()))
    section_blocker = writeSectionHeader(
        logfile, ("Parsimonious method to identify minimal set of proteins"
                  " to account for all peptides"))
    while True:
        # (1.3.1) If all peptides covered or the maximum peptides per
        # protein = 0, break.
        if len(peptides) == 0 or peptide_count == 0:
            logfile.write("All peptides are now accounted for\n")
            break
        peptide_count -= 1
        top_proteins = set()
        top_score = 0
        #(1.3.2) Find the proteins with the highest number of peptide matches
        for protein in new_top_level_proteins:
            if len(tmppro2pep[protein]) == top_score:
                top_proteins.add(protein)
            elif len(tmppro2pep[protein]) > top_score:
                top_score = len(tmppro2pep[protein])
                top_proteins = set((protein,))
        logfile.write("%i remaining protein(s) with %i peptides\n" % (
            len(top_proteins), top_score))
        # (1.3.3) Remove the top proteins and the associated peptides
        for top_protein in top_proteins:
            new_top_level_proteins.remove(top_protein)
            retained_proteins.append(top_protein)
            for peptide in pro2pep[top_protein]:
                new_pep2pro[peptide].add(top_protein)
                if peptide in peptides:
                    peptides.remove(peptide)
                # A covered peptide no longer counts towards other proteins.
                for protein in pep2pro[peptide][1]:
                    if protein == top_protein:
                        continue
                    if peptide in tmppro2pep[protein]:
                        tmppro2pep[protein].remove(peptide)
    logfile.write("\n%i proteins retained\n" % len(retained_proteins))
    #logfile.write("\n".join([",".join(map(str, (x, len(tmppro2pep[x]), len(pro2pep[x]))))
    #              for x in new_top_level_proteins]))
    logfile.write("%i SwissProt proteins retained\n" % len(
        [x for x in retained_proteins if x.split("|")[0] == 'sp']))
    logfile.write("%i TrEMBL proteins retained\n" % len(
        [x for x in retained_proteins if x.split("|")[0] == 'tr']))
    logfile.write("\nNote: If not all SwissProt proteins were retained, this means\n"
                  "these proteins only included peptides which were observed\n"
                  "in other proteins which had a greater number of peptides\n")
    logfile.write("%s\n\n" % section_blocker)
    section_blocker = writeSectionHeader(logfile, "proteins per peptide:")
    counts = collections.Counter([len(x) for x in new_pep2pro.values()])
    sum_counts = sum(counts.values())
    for k, v in counts.items():
        logfile.write("%i peptide(s) (%.2f %%) have %i master protein(s)\n" % (
            v, (100 * v)/sum_counts, k))
    logfile.write("%s\n\n" % section_blocker)
    # Check all the peptides are covered
    assert set(pep2pro.keys()).difference(set(new_pep2pro.keys())) == set()
    # add the top protein and uniprot id annotations
    rnp_df['master_protein(s)'] = [";".join(new_pep2pro[protein]) for protein in rnp_df['Peptide']]
    rnp_df['master_uniprot_id(s)'] = [";".join([protein_id.split("|")[1] for protein_id in new_pep2pro[protein]])
                                      for protein in rnp_df['Peptide']]
    # (1.4) Build dictionaries to map from protein id to protein
    # sequence and description using the fasta database
    crap_proteins = set()
    protein2description = {
        entry.title.split(" ")[0]: " ".join(entry.title.split(" ")[1:])
        for entry in fasta.FastaIterator(open(args['fasta_db']))}
    protein2seq = {
        entry.title.split(" ")[0]:entry.sequence
        for entry in fasta.FastaIterator(open(args['fasta_db']))}
    # cRAP entries overwrite/extend the main database mappings.
    for entry in fasta.FastaIterator(open(args['fasta_crap_db'])):
        protein2seq[entry.title.split(" ")[0]] = entry.sequence
        protein2description[entry.title.split(" ")[0]] = entry.title.split(" ")[0]
        crap_proteins.add(entry.title.split(" ")[0])
    # (1.5) derive further annotations
    protein_lengths = []
    protein_descriptions = []
    crap_protein = []
    position_in_peptide = []
    position_in_protein = []
    motif_13 = []
    motif_15 = []
    motif_17 = []
    for ix, row in rnp_df.iterrows():
        peptide = row['Best localization(s)']
        proteins = row['master_protein(s)'].split(";")
        protein_lengths.append(";".join(map(str, [len(protein2seq[x]) for x in proteins])))
        protein_descriptions.append(";".join([protein2description[x] for x in proteins]))
        # (1.5.1) does peptide match a cRAP protein?
        crap = 0
        for protein in proteins:
            if protein in crap_proteins:
                crap = 1
                break
        crap_protein.append(crap)
        # (1.5.1) Find crosslink position in protein and extract 13,
        # 15 & 17 aa windows around the crosslink position
        if row['Best localization(s)']!='nan' and row['Best loc score']>0:
            peptide_positions = [re.search(peptide.upper(), protein2seq[x]).start() for
                                 x in proteins]
            crosslink_position = None
            # The cross-linked residue is the lowercase letter in the
            # peptide. NOTE(review): this inner `ix` shadows the row index
            # from the enclosing loop, and if several residues are
            # lowercase the last one wins -- confirm both are intended.
            for ix, aa in enumerate(peptide):
                if aa == aa.lower():
                    crosslink_position = ix
            assert crosslink_position is not None, (
                "no crosslinked position was found(!): %s" % peptide)
            position_in_peptide.append(crosslink_position + 1)
            protein_positions = [crosslink_position + x for x in peptide_positions]
            position_in_protein.append(
                ";".join(map(str, [x + 1 for x in protein_positions])))
            motif_13.append(
                getMotifWindow(protein_positions, [protein2seq[x] for x in proteins], 13))
            motif_15.append(
                getMotifWindow(protein_positions, [protein2seq[x] for x in proteins], 15))
            motif_17.append(
                getMotifWindow(protein_positions, [protein2seq[x] for x in proteins], 17))
        else:
            position_in_peptide.append("no_crosslink")
            position_in_protein.append("no_crosslink")
            motif_13.append("no_crosslink")
            motif_15.append("no_crosslink")
            motif_17.append("no_crosslink")
    rnp_df['protein_length(s)'] = protein_lengths
    rnp_df['protein_description(s)'] = protein_descriptions
    rnp_df['crap_protein'] = crap_protein
    rnp_df['position_in_peptide'] = position_in_peptide
    rnp_df['position_in_protein(s)'] = position_in_protein
    rnp_df['window_13'] = motif_13
    rnp_df['window_15'] = motif_15
    rnp_df['window_17'] = motif_17
    # Put the annotation columns first, then any remaining input columns.
    new_column_order = [
        "Best localization(s)",
        "RNA",
        "master_protein(s)",
        "master_uniprot_id(s)",
        'protein_description(s)',
        'protein_length(s)',
        'position_in_peptide',
        'position_in_protein(s)',
        'window_13', 'window_15', 'window_17',
        'crap_protein',
        "Peptide",
        "Proteins"]
    new_column_order.extend([x for x in rnp_df.columns if x not in new_column_order])
    final_rnp_df = rnp_df[new_column_order]
    final_rnp_df.to_csv(args['outfile'], index=False, sep="\t")
    os.chmod(args['outfile'], 0o666)
if __name__ == "__main__":
    # main() has no explicit return value, so sys.exit(None) reports
    # success (exit status 0) to shell callers.
    sys.exit(main(sys.argv))
| {
"repo_name": "TomSmithCGAT/CamProt",
"path": "camprot/scripts/annotate_rnp.py",
"copies": "1",
"size": "18349",
"license": "mit",
"hash": 6903634075899758000,
"line_mean": 39.7755555556,
"line_max": 117,
"alpha_frac": 0.6380729195,
"autogenerated": false,
"ratio": 3.433570359281437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9561044308149544,
"avg_score": 0.0021197941263786505,
"num_lines": 450
} |
# Annotate stacktrace
# Use `print-stack 100` in Bochs
# Use `x /100gx $esp` in Qemu
import re
import sys
from pathlib import Path
from subprocess import Popen, PIPE
# Resolve the ELF image given on the command line; fails if it is missing.
elf_file = Path(sys.argv[1]).resolve(strict=True)
return_addr_names = {} # return_addr -> function_name
# Read objdump address to assembly mappings
with Popen(["objdump", "-d", "-M", "intel", str(elf_file)], stdout=PIPE) as p:
    # When set, the next instruction's address is the return address of a
    # call; it is recorded under the called function's name.
    store_next_addr_as = None
    for line in p.stdout:
        line = line.strip(b" \t")
        columns = line.count(b"\t")
        if columns == 0: # Skip useless lines
            continue
        if columns == 1: # Skip second (overflown) line of raw bytes
            continue
        addr, _bytes, code = line.split(b"\t")
        # Address column ends with a trailing character (objdump's ':');
        # drop it before parsing as hex.
        addr = int(addr[:-1], 16)
        if name := store_next_addr_as:
            return_addr_names[addr] = name
            store_next_addr_as = False
        if m := re.match(br"call\s+[0-9a-f]+\s+<(.+)>", code):
            store_next_addr_as = m.group(1).decode()
addrs = []
# Read the raw stack dump from stdin until the first blank line,
# accepting either the Qemu or the Bochs dump format.
for line in sys.stdin:
    line = line.strip()
    if not line:
        break
    # Qemu
    if m := re.match(r"[0-9a-f]{16}: 0x([0-9a-f]{16}) 0x([0-9a-f]{16})", line):
        addrs.append(int(m.group(1), 16))
        addrs.append(int(m.group(2), 16))
    # Bochs
    elif m := re.match(
        r"\| STACK 0x[0-9a-f]{16} \[0x([0-9a-f]{8}):0x([0-9a-f]{8})\]", line
    ):
        addrs.append(int(m.group(1) + m.group(2), 16))
if not addrs:
    exit("No addresses read")
# Print only the stack words that match a known call return address.
for addr in addrs:
    if name := return_addr_names.get(addr):
        print(f"{hex(addr)[2:]:16} {name}")
| {
"repo_name": "Dentosal/rust_os",
"path": "tools/stack.py",
"copies": "1",
"size": "1601",
"license": "mit",
"hash": -1540108680090027500,
"line_mean": 25.6833333333,
"line_max": 79,
"alpha_frac": 0.5596502186,
"autogenerated": false,
"ratio": 2.9215328467153285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8979639855438786,
"avg_score": 0.00030864197530864197,
"num_lines": 60
} |
"""Annotate structural variant calls with associated genes.
"""
import os
import sys
from bcbio import utils
from bcbio.bam import ref
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.structural import regions
from bcbio.variation import bedutils
from bcbio.pipeline import config_utils
import pybedtools
def add_genes(in_file, data, max_distance=10000, work_dir=None):
    """Add gene annotations to a BED file from pre-prepared RNA-seq data.

    max_distance -- only keep annotations within this distance of event
    """
    gene_file = regions.get_sv_bed(data, "exons", out_dir=os.path.dirname(in_file))
    if not (gene_file and utils.file_exists(in_file)):
        # Nothing to annotate with (or no input): hand back the input BED.
        return in_file
    out_file = "%s-annotated.bed" % utils.splitext_plus(in_file)[0]
    if work_dir:
        out_file = os.path.join(work_dir, os.path.basename(out_file))
    if not utils.file_uptodate(out_file, in_file):
        fai_file = ref.fasta_idx(dd.get_ref_file(data))
        with file_transaction(data, out_file) as tx_out_file:
            _add_genes_to_bed(in_file, gene_file, fai_file, tx_out_file, data, max_distance)
    return out_file
def _add_genes_to_bed(in_file, gene_file, fai_file, out_file, data, max_distance=10000):
    """Re-usable subcomponent that annotates BED file genes from another BED

    Builds and runs a shell pipeline (bedtools closest + merge) that appends
    the nearest gene name (within max_distance) as an extra column of
    out_file. Empty inputs are copied through unchanged.
    """
    try:
        input_rec = next(iter(pybedtools.BedTool(in_file)))
    except StopIteration: # empty file
        utils.copy_plus(in_file, out_file)
        return
    # keep everything after standard chrom/start/end, 1-based
    extra_fields = list(range(4, len(input_rec.fields) + 1))
    # keep the new gene annotation
    gene_index = len(input_rec.fields) + 4
    extra_fields.append(gene_index)
    columns = ",".join([str(x) for x in extra_fields])
    max_column = max(extra_fields) + 1
    ops = ",".join(["distinct"] * len(extra_fields))
    # swap over gene name to '.' if beyond maximum distance
    # cut removes the last distance column which can cause issues
    # with bedtools merge: 'ERROR: illegal character '.' found in integer conversion of string'
    distance_filter = (r"""awk -F$'\t' -v OFS='\t' '{if ($NF > %s || $NF < -%s) $%s = "."} {print}'""" %
                       (max_distance, max_distance, gene_index))
    # NOTE(review): sort_cmd is computed but not used in the pipeline below.
    sort_cmd = bedutils.get_sort_cmd(os.path.dirname(out_file))
    cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
    # Ensure gene transcripts match reference genome
    ready_gene_file = os.path.join(os.path.dirname(out_file), "%s-genomeonly.bed" %
                                   (utils.splitext_plus(os.path.basename(gene_file))[0]))
    ready_gene_file = bedutils.subset_to_genome(gene_file, ready_gene_file, data)
    exports = "export TMPDIR=%s && %s" % (os.path.dirname(out_file), utils.local_path_export())
    bcbio_py = sys.executable
    gsort = config_utils.get_program("gsort", data)
    # The command is formatted from locals(), so the local variable names
    # above must match the {placeholders} in the template.
    cmd = ("{exports}{cat_cmd} {in_file} | grep -v ^track | grep -v ^browser | grep -v ^# | "
           "{bcbio_py} -c 'from bcbio.variation import bedutils; bedutils.remove_bad()' | "
           "{gsort} - {fai_file} | "
           "bedtools closest -g {fai_file} "
           "-D ref -t first -a - -b <({gsort} {ready_gene_file} {fai_file}) | "
           "{distance_filter} | cut -f 1-{max_column} | "
           "bedtools merge -i - -c {columns} -o {ops} -delim ',' -d -10 > {out_file}")
    do.run(cmd.format(**locals()), "Annotate BED file with gene info")
def gene_one_per_line(in_file, data):
    """Split comma-separated gene annotations (after add_genes). Leads to duplicated records.

    Input:
    chr1 100 200 F1,F2
    Output:
    chr1 100 200 F1
    chr1 100 200 F2
    """
    if not in_file:
        return None
    # Report all duplicated annotations one-per-line
    out_bed = "%s-opl.bed" % utils.splitext_plus(in_file)[0]
    if not utils.file_uptodate(out_bed, in_file):
        with file_transaction(data, out_bed) as tx_out_file:
            with open(tx_out_file, 'w') as out:
                for rec in pybedtools.BedTool(in_file):
                    for gene in rec.name.split(','):
                        fields = [rec.chrom, rec.start, rec.end, gene]
                        out.write('\t'.join(map(str, fields)) + '\n')
    return out_bed
def count_genes(in_file, data):
    """Count distinct annotated gene names in a BED file, annotating first if needed."""
    if pybedtools.BedTool(in_file).field_count() <= 3:
        # Plain chrom/start/end input: annotate with genes, one gene per row.
        ann_bed = gene_one_per_line(add_genes(in_file, data), data)
    else:
        ann_bed = in_file
    if ann_bed:
        unique_names = {r.name for r in pybedtools.BedTool(ann_bed)
                        if r.name and r.name != "."}
        return len(unique_names)
| {
"repo_name": "lbeltrame/bcbio-nextgen",
"path": "bcbio/structural/annotate.py",
"copies": "4",
"size": "4732",
"license": "mit",
"hash": 5022151497180512000,
"line_mean": 45.8514851485,
"line_max": 104,
"alpha_frac": 0.6181318681,
"autogenerated": false,
"ratio": 3.203791469194313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5821923337294314,
"avg_score": null,
"num_lines": null
} |
"""Annotate structural variant calls with associated genes.
"""
import os
from bcbio import utils
from bcbio.bam import ref
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.structural import regions
from bcbio.variation import bedutils
import pybedtools
def add_genes(in_file, data, max_distance=10000):
    """Add gene annotations to a BED file from pre-prepared RNA-seq data.
    max_distance -- only keep annotations within this distance of event

    Returns the annotated output file, or the input file unchanged when no
    gene/exon BED is available for this genome build.
    """
    gene_file = regions.get_sv_bed(data, "exons", out_dir=os.path.dirname(in_file))
    if gene_file and utils.file_exists(in_file):
        out_file = "%s-annotated.bed" % utils.splitext_plus(in_file)[0]
        if not utils.file_uptodate(out_file, in_file):
            # Peek at the first record to learn the input column layout
            # (Python 2 iterator protocol).
            input_rec = iter(pybedtools.BedTool(in_file)).next()
            # keep everything after standard chrom/start/end, 1-based
            extra_fields = range(4, len(input_rec.fields) + 1)
            # keep the new gene annotation
            gene_index = len(input_rec.fields) + 4
            extra_fields.append(gene_index)
            columns = ",".join([str(x) for x in extra_fields])
            max_column = max(extra_fields) + 1
            ops = ",".join(["distinct"] * len(extra_fields))
            fai_file = ref.fasta_idx(dd.get_ref_file(data))
            with file_transaction(data, out_file) as tx_out_file:
                # swap over gene name to '.' if beyond maximum distance
                # cut removes the last distance column which can cause issues
                # with bedtools merge: 'ERROR: illegal character '.' found in integer conversion of string'
                distance_filter = (r"""awk -F$'\t' -v OFS='\t' '{if ($NF > %s) $%s = "."} {print}'""" %
                                   (max_distance, gene_index))
                sort_cmd = bedutils.get_sort_cmd()
                # NOTE: cmd.format(**locals()) ties behavior to the local
                # variable names above; do not rename them.
                cmd = ("{sort_cmd} -k1,1 -k2,2n {in_file} | "
                       "bedtools closest -g <(cut -f1,2 {fai_file} | {sort_cmd} -k1,1 -k2,2n) "
                       "-d -t all -a - -b <({sort_cmd} -k1,1 -k2,2n {gene_file}) | "
                       "{distance_filter} | cut -f 1-{max_column} | "
                       "bedtools merge -i - -c {columns} -o {ops} -delim ',' > {tx_out_file}")
                do.run(cmd.format(**locals()), "Annotate BED file with gene info")
        return out_file
    else:
        return in_file
| {
"repo_name": "gifford-lab/bcbio-nextgen",
"path": "bcbio/structural/annotate.py",
"copies": "4",
"size": "2474",
"license": "mit",
"hash": -3524383843515285000,
"line_mean": 49.4897959184,
"line_max": 107,
"alpha_frac": 0.5840743735,
"autogenerated": false,
"ratio": 3.46013986013986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6044214233639861,
"avg_score": null,
"num_lines": null
} |
"""Annotate structural variant calls with associated genes.
"""
import os
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.structural import regions
import pybedtools
def add_genes(in_file, data, max_distance=10000):
    """Add gene annotations to a BED file from pre-prepared RNA-seq data.
    max_distance -- only keep annotations within this distance of event

    Returns the annotated output file, or the input file unchanged when no
    gene/exon BED is available for this genome build.
    """
    gene_file = regions.get_sv_bed(data, "exons", out_dir=os.path.dirname(in_file))
    if gene_file and utils.file_exists(in_file):
        out_file = "%s-annotated.bed" % utils.splitext_plus(in_file)[0]
        if not utils.file_uptodate(out_file, in_file):
            # Peek at the first record to learn the input column layout
            # (Python 2 iterator protocol).
            input_rec = iter(pybedtools.BedTool(in_file)).next()
            # keep everything after standard chrom/start/end, 1-based
            extra_fields = range(4, len(input_rec.fields) + 1)
            # keep the new gene annotation
            gene_index = len(input_rec.fields) + 4
            extra_fields.append(gene_index)
            columns = ",".join([str(x) for x in extra_fields])
            max_column = max(extra_fields) + 1
            ops = ",".join(["distinct"] * len(extra_fields))
            with file_transaction(data, out_file) as tx_out_file:
                # swap over gene name to '.' if beyond maximum distance
                # cut removes the last distance column which can cause issues
                # with bedtools merge: 'ERROR: illegal character '.' found in integer conversion of string'
                distance_filter = (r"""awk -F$'\t' -v OFS='\t' '{if ($NF > %s) $%s = "."} {print}'""" %
                                   (max_distance, gene_index))
                # NOTE: cmd.format(**locals()) ties behavior to the local
                # variable names above; do not rename them.
                cmd = ("sort -k1,1 -k2,2n {in_file} | "
                       "bedtools closest -d -t all -a - -b {gene_file} | "
                       "{distance_filter} | cut -f 1-{max_column} | "
                       "bedtools merge -i - -c {columns} -o {ops} -delim ',' > {tx_out_file}")
                do.run(cmd.format(**locals()), "Annotate BED file with gene info")
        return out_file
    else:
        return in_file
| {
"repo_name": "elkingtonmcb/bcbio-nextgen",
"path": "bcbio/structural/annotate.py",
"copies": "4",
"size": "2146",
"license": "mit",
"hash": 8552635327902342000,
"line_mean": 48.9069767442,
"line_max": 107,
"alpha_frac": 0.5838769804,
"autogenerated": false,
"ratio": 3.6372881355932205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001705904984650321,
"num_lines": 43
} |
"""Annotate the segmented tissue."""
import skimage.draw
from jicbioimage.core.transform import transformation
from jicbioimage.illustrate import AnnotatedImage
from jicbioimage.core.util.color import pretty_color
from util import argparse_get_image
from segment import segment
from transform import rotate
def annotate_segmentation(image, segmentation):
    """Return annotated segmentation.

    image -- grayscale image used as the annotation backdrop
    segmentation -- labelled segmentation exposing .identifiers and
                    .region_by_identifier(); also handed to regionprops
    """
    # Bug fix: this function uses skimage.measure, but the module only
    # imports skimage.draw. Depending on the skimage version the measure
    # submodule is not implicitly importable via the bare package, so make
    # the dependency explicit here.
    import skimage.measure

    annotation = AnnotatedImage.from_grayscale(image)
    # Outline each segmented region with its own color.
    for i in segmentation.identifiers:
        region = segmentation.region_by_identifier(i)
        color = pretty_color()
        annotation.mask_region(region.border.dilate(), color)
    # Draw a vertical green line through each region's centroid column,
    # spanning the region's bounding box rows.
    props = skimage.measure.regionprops(segmentation)
    for p in props:
        try:
            minr, minc, maxr, maxc = p.bbox
            cval = int(p.centroid[1])
            line = skimage.draw.line(minr, cval, maxr, cval)
            annotation.mask_region(line, (0, 255, 0))
        except IndexError:
            # Don't draw line if it falls outside of the image.
            pass
    return annotation
@transformation
def annotate(image):
    """Return annotated image."""
    # Segment first, then rotate the source image to the detected angle so
    # the annotation lines up with the segmentation.
    segmentation, angle = segment(image)
    rotated = rotate(image, angle)
    return annotate_segmentation(rotated, segmentation)
def main():
    """Annotate the command-line image and write it out as annotated.png."""
    annotated = annotate(argparse_get_image())
    with open("annotated.png", "wb") as out_handle:
        out_handle.write(annotated.png())


if __name__ == "__main__":
    main()
| {
"repo_name": "JIC-CSB/wheat-leaf-segmentation",
"path": "scripts/annotate.py",
"copies": "1",
"size": "1442",
"license": "mit",
"hash": -2509996194728047000,
"line_mean": 25.2181818182,
"line_max": 63,
"alpha_frac": 0.6671289875,
"autogenerated": false,
"ratio": 3.9291553133514987,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5096284300851499,
"avg_score": null,
"num_lines": null
} |
"""Annotate tweets with stance markers"""
from twit.models import Tweet
from twit.util import identify_stance, queryset_iterator
from twit.tqdm import tqdm
from . import DataProcessingCommand
class Command(DataProcessingCommand):
    """Annotate tweets with stance markers"""
    help = __doc__

    def process(self, istream, count=None):
        """
        Each element of istream is a tweet.
        Use the text as a marker.
        Return (id, text, score).
        Client must return (header, ostream)
        """
        if count is not None:
            # Show progress when the total number of tweets is known.
            istream = tqdm(istream, total=count)
        for tweet in istream:
            stance_by_candidate = identify_stance(tweet.text.lower())
            for candidate, value in stance_by_candidate.items():
                yield [tweet.id, candidate, value, 1.0]

    def db_to_istream(self):
        # Stream every tweet from the database.
        return queryset_iterator(Tweet.objects.all())

    def db_istream_count(self):
        # Total number of tweets that will be processed.
        return Tweet.objects.count()

    def output_header(self):
        # Column names for the rows yielded by process().
        return ["tweet_id", "entity", "stance", "score"]

    def ostream_to_db(self, ostream):
        raise NotImplementedError("Must define fn to load database from output stream")
| {
"repo_name": "arunchaganty/aeschines",
"path": "django/twit/management/commands/annotate_stance.py",
"copies": "2",
"size": "1168",
"license": "mit",
"hash": -8944245264627378000,
"line_mean": 28.9487179487,
"line_max": 87,
"alpha_frac": 0.636130137,
"autogenerated": false,
"ratio": 3.8933333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5529463470333333,
"avg_score": null,
"num_lines": null
} |
"""Annotate with potential DNA damage artifacts by examining strand/read bias.
Uses DKFZBiasFilter to identify strand and PCR bias and converts these into
INFO level annotations of low frequency variants:
https://github.com/bcbio/bcbio.github.io/blob/master/_posts/2017-01-31-damage-filters.md
"""
import io
import os
import shutil
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import vcfutils
def run_filter(vrn_file, align_bam, ref_file, data, items):
    """Filter and annotate somatic VCFs with damage/bias artifacts on low frequency variants.

    Moves damage estimation to INFO field, instead of leaving in FILTER.
    Returns `data` with `vrn_file` replaced by the annotated, bgzipped VCF and
    `damage_plots` set to the bias plots that were produced (when filtering applies).
    """
    if not should_filter(items) or not vcfutils.vcf_has_variants(vrn_file):
        # Not a paired/damage_filter run, or nothing to filter.
        return data
    else:
        raw_file = "%s-damage.vcf" % utils.splitext_plus(vrn_file)[0]
        out_plot_files = ["%s%s" % (utils.splitext_plus(raw_file)[0], ext)
                          for ext in ["_seq_bias_simplified.pdf", "_pcr_bias_simplified.pdf"]]
        if not utils.file_uptodate(raw_file, vrn_file) and not utils.file_uptodate(raw_file + ".gz", vrn_file):
            with file_transaction(items[0], raw_file) as tx_out_file:
                # Does not apply --qcSummary plotting due to slow runtimes
                dkfzbiasfilter = utils.which(config_utils.get_program("dkfzbiasfilter.py", data))
                cmd = [dkfzbiasfilter, "--filterCycles", "1", "--passOnly",
                       "--tempFolder", os.path.dirname(tx_out_file),
                       vrn_file, align_bam, ref_file, tx_out_file]
                do.run(cmd, "Filter low frequency variants for DNA damage and strand bias")
                # Salvage the simplified bias plots from the tool's QC output
                # directory before the transaction directory goes away.
                for out_plot in out_plot_files:
                    tx_plot_file = os.path.join("%s_qcSummary" % utils.splitext_plus(tx_out_file)[0], "plots",
                                                os.path.basename(out_plot))
                    if utils.file_exists(tx_plot_file):
                        shutil.move(tx_plot_file, out_plot)
        raw_file = vcfutils.bgzip_and_index(raw_file, items[0]["config"])
        # Convert FILTER-level bias flags into INFO annotations.
        data["vrn_file"] = _filter_to_info(raw_file, items[0])
        out_plot_files = [x for x in out_plot_files if utils.file_exists(x)]
        data["damage_plots"] = out_plot_files
        return data
def _filter_to_info(in_file, data):
    """Move DKFZ filter information into INFO field.

    Returns the bgzipped, indexed annotated VCF.
    """
    header = ("""##INFO=<ID=DKFZBias,Number=.,Type=String,"""
              """Description="Bias estimation based on unequal read support from DKFZBiasFilterVariant Depth">\n""")
    out_file = "%s-ann.vcf" % utils.splitext_plus(in_file)[0]
    up_to_date = (utils.file_uptodate(out_file, in_file)
                  or utils.file_uptodate(out_file + ".gz", in_file))
    if not up_to_date:
        with file_transaction(data, out_file) as tx_out_file:
            with utils.open_gzipsafe(in_file) as in_handle, \
                 io.open(tx_out_file, "w", encoding="utf-8") as out_handle:
                for line in in_handle:
                    if line.startswith("#CHROM"):
                        # Inject the new INFO header right before #CHROM.
                        out_handle.write(header + line)
                    elif line.startswith("#"):
                        out_handle.write(line)
                    else:
                        out_handle.write(_rec_filter_to_info(line))
    return vcfutils.bgzip_and_index(out_file, data["config"])
def _rec_filter_to_info(line):
"""Move a DKFZBias filter to the INFO field, for a record.
"""
parts = line.rstrip().split("\t")
move_filters = {"bSeq": "strand", "bPcr": "damage"}
new_filters = []
bias_info = []
for f in parts[6].split(";"):
if f in move_filters:
bias_info.append(move_filters[f])
elif f not in ["."]:
new_filters.append(f)
if bias_info:
parts[7] += ";DKFZBias=%s" % ",".join(bias_info)
parts[6] = ";".join(new_filters or ["PASS"])
return "\t".join(parts) + "\n"
def should_filter(items):
    """Check if we should do damage filtering on somatic calling with low frequency events.

    True only for paired (tumor/normal) calls with damage_filter enabled.
    """
    if vcfutils.get_paired(items) is None:
        return False
    return any("damage_filter" in dd.get_tools_on(d) for d in items)
| {
"repo_name": "a113n/bcbio-nextgen",
"path": "bcbio/variation/damage.py",
"copies": "4",
"size": "4334",
"license": "mit",
"hash": 3467379691616597500,
"line_mean": 47.6966292135,
"line_max": 116,
"alpha_frac": 0.6054453161,
"autogenerated": false,
"ratio": 3.456140350877193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6061585666977193,
"avg_score": null,
"num_lines": null
} |
"""Annotation and rtyping support for the result of os.stat(), os.lstat()
and os.fstat(). In RPython like in plain Python the stat result can be
indexed like a tuple but also exposes the st_xxx attributes.
"""
import os
import sys
from rpython.annotator import model as annmodel
from rpython.rtyper.llannotation import lltype_to_annotation
from rpython.rlib import rposix
from rpython.rlib.rarithmetic import intmask
from rpython.rtyper import extregistry
from rpython.rtyper.annlowlevel import hlstr
from rpython.rtyper.extfunc import extdef
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rtyper.rtuple import TUPLE_TYPE
from rpython.rtyper.tool import rffi_platform as platform
from rpython.tool.pairtype import pairtype
from rpython.tool.sourcetools import func_renamer
from rpython.translator.tool.cbuild import ExternalCompilationInfo
# Support for float times is here.
# - ALL_STAT_FIELDS contains Float fields if the system can retrieve
#   sub-second timestamps.
# - TIMESPEC is defined when the "struct stat" contains st_atim field.
if sys.platform.startswith('linux') or sys.platform.startswith('openbsd'):
    # These platforms expose nanosecond timestamps via 'struct timespec'.
    TIMESPEC = platform.Struct('struct timespec',
                               [('tv_sec', rffi.TIME_T),
                                ('tv_nsec', rffi.LONG)])
else:
    TIMESPEC = None
# all possible fields - some of them are not available on all platforms
ALL_STAT_FIELDS = [
    ("st_mode", lltype.Signed),
    ("st_ino", lltype.SignedLongLong),
    ("st_dev", lltype.SignedLongLong),
    ("st_nlink", lltype.Signed),
    ("st_uid", lltype.Signed),
    ("st_gid", lltype.Signed),
    ("st_size", lltype.SignedLongLong),
    ("st_atime", lltype.Float),
    ("st_mtime", lltype.Float),
    ("st_ctime", lltype.Float),
    ("st_blksize", lltype.Signed),
    ("st_blocks", lltype.Signed),
    ("st_rdev", lltype.Signed),
    ("st_flags", lltype.Signed),
    #("st_gen", lltype.Signed),     -- new in CPy 2.5, not implemented
    #("st_birthtime", lltype.Float), -- new in CPy 2.5, not implemented
]

# Number of fields reachable by indexing the stat result (st[0] .. st[9]).
N_INDEXABLE_FIELDS = 10

# For OO backends, expose only the portable fields (the first 10).
PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS]

STATVFS_FIELDS = [
    ("f_bsize", lltype.Signed),
    ("f_frsize", lltype.Signed),
    ("f_blocks", lltype.Signed),
    ("f_bfree", lltype.Signed),
    ("f_bavail", lltype.Signed),
    ("f_files", lltype.Signed),
    ("f_ffree", lltype.Signed),
    ("f_favail", lltype.Signed),
    ("f_flag", lltype.Signed),
    ("f_namemax", lltype.Signed),
]
# ____________________________________________________________
#
# Annotation support
class SomeStatResult(annmodel.SomeObject):
    """Annotation for the result of os.stat()/os.lstat()/os.fstat()."""
    knowntype = os.stat_result

    def rtyper_makerepr(self, rtyper):
        from rpython.rtyper.module import r_os_stat
        return r_os_stat.StatResultRepr(rtyper)

    def rtyper_makekey(self):
        return self.__class__,

    def getattr(self, s_attr):
        # Attribute access st.st_xxx: only constant attribute names are
        # supported, and the field must exist on this platform.
        assert s_attr.is_constant(), "non-constant attr name in getattr()"
        attrname = s_attr.const
        TYPE = STAT_FIELD_TYPES[attrname]
        return lltype_to_annotation(TYPE)

    def _get_rmarshall_support_(self):  # for rlib.rmarshal
        # reduce and recreate stat_result objects from 10-tuples
        # (we ignore the extra values here for simplicity and portability)
        def stat_result_reduce(st):
            return (st[0], st[1], st[2], st[3], st[4],
                    st[5], st[6], st[7], st[8], st[9])

        def stat_result_recreate(tup):
            return make_stat_result(tup + extra_zeroes)

        s_reduced = annmodel.SomeTuple([lltype_to_annotation(TYPE)
                                        for name, TYPE in PORTABLE_STAT_FIELDS])
        extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS))
        return s_reduced, stat_result_reduce, stat_result_recreate
class SomeStatvfsResult(annmodel.SomeObject):
    """Annotation for the result of os.statvfs()/os.fstatvfs()."""
    if hasattr(os, 'statvfs_result'):
        knowntype = os.statvfs_result
    else:
        knowntype = None  # will not be used

    def rtyper_makerepr(self, rtyper):
        from rpython.rtyper.module import r_os_stat
        return r_os_stat.StatvfsResultRepr(rtyper)

    def rtyper_makekey(self):
        return self.__class__,

    def getattr(self, s_attr):
        # Only constant attribute names are supported.
        assert s_attr.is_constant()
        TYPE = STATVFS_FIELD_TYPES[s_attr.const]
        return lltype_to_annotation(TYPE)
class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)):
    # Indexing a stat result: only constant, in-range indexes are RPython-legal.
    def getitem((s_sta, s_int)):
        assert s_int.is_constant(), "os.stat()[index]: index must be constant"
        index = s_int.const
        assert 0 <= index < N_INDEXABLE_FIELDS, "os.stat()[index] out of range"
        name, TYPE = STAT_FIELDS[index]
        return lltype_to_annotation(TYPE)
class __extend__(pairtype(SomeStatvfsResult, annmodel.SomeInteger)):
    # Indexing a statvfs result: index must be a compile-time constant.
    def getitem((s_stat, s_int)):
        assert s_int.is_constant()
        name, TYPE = STATVFS_FIELDS[s_int.const]
        return lltype_to_annotation(TYPE)
# Singleton annotation instances shared by all call sites.
s_StatResult = SomeStatResult()
s_StatvfsResult = SomeStatvfsResult()
def make_stat_result(tup):
    """Turn a tuple into an os.stat_result object."""
    # First N_INDEXABLE_FIELDS entries are positional; the rest are passed
    # as keyword-style extras keyed by their platform field names.
    positional = tup[:N_INDEXABLE_FIELDS]
    extra_names = STAT_FIELD_NAMES[N_INDEXABLE_FIELDS:]
    kwds = {fld: tup[N_INDEXABLE_FIELDS + offset]
            for offset, fld in enumerate(extra_names)}
    return os.stat_result(positional, kwds)
def make_statvfs_result(tup):
    """Turn a tuple into an os.statvfs_result object."""
    return os.statvfs_result(tup)
class MakeStatResultEntry(extregistry.ExtRegistryEntry):
    """Teach annotator/rtyper how to handle calls to make_stat_result()."""
    _about_ = make_stat_result

    def compute_result_annotation(self, s_tup):
        return s_StatResult

    def specialize_call(self, hop):
        from rpython.rtyper.module import r_os_stat
        return r_os_stat.specialize_make_stat_result(hop)
class MakeStatvfsResultEntry(extregistry.ExtRegistryEntry):
    """Teach annotator/rtyper how to handle calls to make_statvfs_result()."""
    _about_ = make_statvfs_result

    def compute_result_annotation(self, s_tup):
        return s_StatvfsResult

    def specialize_call(self, hop):
        from rpython.rtyper.module import r_os_stat
        return r_os_stat.specialize_make_statvfs_result(hop)
# ____________________________________________________________
#
# RFFI support
if sys.platform.startswith('win'):
    # Windows: use the CRT's 64-bit stat struct.
    _name_struct_stat = '_stati64'
    INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h']
else:
    if sys.platform.startswith('linux'):
        # Linux: stat64 pairs with _FILE_OFFSET_BITS=64 for large files.
        _name_struct_stat = 'stat64'
    else:
        _name_struct_stat = 'stat'
    INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h', 'unistd.h']

compilation_info = ExternalCompilationInfo(
    # This must be set to 64 on some systems to enable large file support.
    #pre_include_bits = ['#define _FILE_OFFSET_BITS 64'],
    # ^^^ nowadays it's always set in all C files we produce.
    includes=INCLUDES
)

if TIMESPEC is not None:
    class CConfig_for_timespec:
        _compilation_info_ = compilation_info
        TIMESPEC = TIMESPEC

    # Resolve the actual C layout of 'struct timespec' on this platform.
    TIMESPEC = lltype.Ptr(
        platform.configure(CConfig_for_timespec)['TIMESPEC'])
def posix_declaration(try_to_add=None):
    """Configure the C layout of 'struct stat' / 'struct statvfs'.

    With try_to_add set, probe whether that optional stat field compiles on
    this platform; on success append it to the global STAT_FIELDS, on
    compilation failure silently give up.
    """
    global STAT_STRUCT, STATVFS_STRUCT

    LL_STAT_FIELDS = STAT_FIELDS[:]
    if try_to_add:
        LL_STAT_FIELDS.append(try_to_add)

    if TIMESPEC is not None:
        def _expand(lst, originalname, timespecname):
            for i, (_name, _TYPE) in enumerate(lst):
                if _name == originalname:
                    # replace the 'st_atime' field of type rffi.DOUBLE
                    # with a field 'st_atim' of type 'struct timespec'
                    lst[i] = (timespecname, TIMESPEC.TO)
                    break

        _expand(LL_STAT_FIELDS, 'st_atime', 'st_atim')
        _expand(LL_STAT_FIELDS, 'st_mtime', 'st_mtim')
        _expand(LL_STAT_FIELDS, 'st_ctime', 'st_ctim')

        del _expand
    else:
        # Replace float fields with integers
        for name in ('st_atime', 'st_mtime', 'st_ctime', 'st_birthtime'):
            for i, (_name, _TYPE) in enumerate(LL_STAT_FIELDS):
                if _name == name:
                    LL_STAT_FIELDS[i] = (_name, lltype.Signed)
                    break

    class CConfig:
        _compilation_info_ = compilation_info
        STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS)
        STATVFS_STRUCT = platform.Struct('struct statvfs', STATVFS_FIELDS)

    try:
        # Probing mode tolerates compile errors so optional fields can fail.
        config = platform.configure(CConfig, ignore_errors=try_to_add is not None)
    except platform.CompilationError:
        if try_to_add:
            return  # failed to add this field, give up
        raise

    STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT'])
    STATVFS_STRUCT = lltype.Ptr(config['STATVFS_STRUCT'])
    if try_to_add:
        STAT_FIELDS.append(try_to_add)
# This lists only the fields that have been found on the underlying platform.
# Initially only the PORTABLE_STAT_FIELDS, but more may be added by the
# following loop.
STAT_FIELDS = PORTABLE_STAT_FIELDS[:]

if sys.platform != 'win32':
    posix_declaration()
    # Probe each optional field; posix_declaration() appends the ones that
    # compile on this platform.
    for _i in range(len(PORTABLE_STAT_FIELDS), len(ALL_STAT_FIELDS)):
        posix_declaration(ALL_STAT_FIELDS[_i])
    del _i

# these two global vars only list the fields defined in the underlying platform
STAT_FIELD_TYPES = dict(STAT_FIELDS)  # {'st_xxx': TYPE}
STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS]
del _name, _TYPE

STATVFS_FIELD_TYPES = dict(STATVFS_FIELDS)
STATVFS_FIELD_NAMES = [name for name, tp in STATVFS_FIELDS]
def build_stat_result(st):
    """Convert a raw C 'struct stat' pointer into an os.stat_result."""
    # only for LL backends
    if TIMESPEC is not None:
        # Nanosecond resolution: combine tv_sec/tv_nsec into float seconds.
        atim = st.c_st_atim; atime = int(atim.c_tv_sec) + 1E-9 * int(atim.c_tv_nsec)
        mtim = st.c_st_mtim; mtime = int(mtim.c_tv_sec) + 1E-9 * int(mtim.c_tv_nsec)
        ctim = st.c_st_ctim; ctime = int(ctim.c_tv_sec) + 1E-9 * int(ctim.c_tv_nsec)
    else:
        atime = st.c_st_atime
        mtime = st.c_st_mtime
        ctime = st.c_st_ctime

    result = (st.c_st_mode,
              st.c_st_ino,
              st.c_st_dev,
              st.c_st_nlink,
              st.c_st_uid,
              st.c_st_gid,
              st.c_st_size,
              atime,
              mtime,
              ctime)

    # Append optional fields in STAT_FIELDS order, only if present on
    # this platform.
    if "st_blksize" in STAT_FIELD_TYPES: result += (st.c_st_blksize,)
    if "st_blocks" in STAT_FIELD_TYPES: result += (st.c_st_blocks,)
    if "st_rdev" in STAT_FIELD_TYPES: result += (st.c_st_rdev,)
    if "st_flags" in STAT_FIELD_TYPES: result += (st.c_st_flags,)

    return make_stat_result(result)
def build_statvfs_result(st):
    """Convert a raw C 'struct statvfs' pointer into an os.statvfs_result."""
    # Field order matches STATVFS_FIELDS.
    return make_statvfs_result((
        st.c_f_bsize,
        st.c_f_frsize,
        st.c_f_blocks,
        st.c_f_bfree,
        st.c_f_bavail,
        st.c_f_files,
        st.c_f_ffree,
        st.c_f_favail,
        st.c_f_flag,
        st.c_f_namemax
    ))
def register_stat_variant(name, traits):
    """Build the extdef for one of os.stat / os.lstat / os.fstat.

    name -- 'stat', 'lstat' or 'fstat'
    traits -- string traits (byte vs unicode paths) used for conversions
    """
    if name != 'fstat':
        # stat/lstat take a path argument...
        arg_is_path = True
        s_arg = traits.str0
        ARG1 = traits.CCHARP
    else:
        # ...fstat takes an integer file descriptor.
        arg_is_path = False
        s_arg = int
        ARG1 = rffi.INT

    if sys.platform == 'win32':
        # See Win32 implementation below
        posix_stat_llimpl = make_win32_stat_impl(name, traits)
        return extdef(
            [s_arg], s_StatResult, traits.ll_os_name(name),
            llimpl=posix_stat_llimpl)

    if sys.platform.startswith('linux'):
        # because we always use _FILE_OFFSET_BITS 64 - this helps things work that are not a c compiler
        _functions = {'stat': 'stat64',
                      'fstat': 'fstat64',
                      'lstat': 'lstat64'}
        c_func_name = _functions[name]
    else:
        c_func_name = name

    posix_mystat = rffi.llexternal(c_func_name,
                                   [ARG1, STAT_STRUCT], rffi.INT,
                                   compilation_info=compilation_info,
                                   save_err=rffi.RFFI_SAVE_ERRNO)

    @func_renamer('os_%s_llimpl' % (name,))
    def posix_stat_llimpl(arg):
        # Translated implementation: call the C function into a raw struct
        # and repack it into a stat result.
        stresult = lltype.malloc(STAT_STRUCT.TO, flavor='raw')
        try:
            if arg_is_path:
                arg = traits.str2charp(arg)
            error = rffi.cast(rffi.LONG, posix_mystat(arg, stresult))
            if arg_is_path:
                traits.free_charp(arg)
            if error != 0:
                raise OSError(rposix.get_saved_errno(), "os_?stat failed")
            return build_stat_result(stresult)
        finally:
            lltype.free(stresult, flavor='raw')

    @func_renamer('os_%s_fake' % (name,))
    def posix_fakeimpl(arg):
        # Untranslated fallback: call the host os.stat and repack the result
        # into a low-level tuple matching STAT_FIELDS.
        if s_arg == traits.str0:
            arg = hlstr(arg)
        st = getattr(os, name)(arg)
        fields = [TYPE for fieldname, TYPE in STAT_FIELDS]
        TP = TUPLE_TYPE(fields)
        ll_tup = lltype.malloc(TP.TO)
        for i, (fieldname, TYPE) in enumerate(STAT_FIELDS):
            val = getattr(st, fieldname)
            if isinstance(TYPE, lltype.Number):
                rffi.setintfield(ll_tup, 'item%d' % i, int(val))
            elif TYPE is lltype.Float:
                setattr(ll_tup, 'item%d' % i, float(val))
            else:
                setattr(ll_tup, 'item%d' % i, val)
        return ll_tup

    return extdef(
        [s_arg], s_StatResult, "ll_os.ll_os_%s" % (name,),
        llimpl=posix_stat_llimpl, llfakeimpl=posix_fakeimpl)
def register_statvfs_variant(name, traits):
    """Build the extdef for os.statvfs or os.fstatvfs.

    name -- 'statvfs' or 'fstatvfs'
    traits -- string traits (byte vs unicode paths) used for conversions
    """
    if name != 'fstatvfs':
        # statvfs takes a path argument...
        arg_is_path = True
        s_arg = traits.str0
        ARG1 = traits.CCHARP
    else:
        # ...fstatvfs takes an integer file descriptor.
        arg_is_path = False
        s_arg = int
        ARG1 = rffi.INT

    posix_mystatvfs = rffi.llexternal(name,
                                      [ARG1, STATVFS_STRUCT], rffi.INT,
                                      compilation_info=compilation_info,
                                      save_err=rffi.RFFI_SAVE_ERRNO)

    @func_renamer('os_%s_llimpl' % (name,))
    def posix_statvfs_llimpl(arg):
        # Translated implementation: call into a raw struct and repack.
        stresult = lltype.malloc(STATVFS_STRUCT.TO, flavor='raw')
        try:
            if arg_is_path:
                arg = traits.str2charp(arg)
            error = rffi.cast(rffi.LONG, posix_mystatvfs(arg, stresult))
            if arg_is_path:
                traits.free_charp(arg)
            if error != 0:
                raise OSError(rposix.get_saved_errno(), "os_?statvfs failed")
            return build_statvfs_result(stresult)
        finally:
            lltype.free(stresult, flavor='raw')

    @func_renamer('os_%s_fake' % (name,))
    def posix_fakeimpl(arg):
        # Untranslated fallback via the host os.statvfs.
        if s_arg == traits.str0:
            arg = hlstr(arg)
        st = getattr(os, name)(arg)
        fields = [TYPE for fieldname, TYPE in STATVFS_FIELDS]
        TP = TUPLE_TYPE(fields)
        ll_tup = lltype.malloc(TP.TO)
        for i, (fieldname, TYPE) in enumerate(STATVFS_FIELDS):
            val = getattr(st, fieldname)
            rffi.setintfield(ll_tup, 'item%d' % i, int(val))
        return ll_tup

    return extdef(
        [s_arg], s_StatvfsResult, "ll_os.ll_os_%s" % (name,),
        llimpl=posix_statvfs_llimpl, llfakeimpl=posix_fakeimpl
    )
def make_win32_stat_impl(name, traits):
    """Build the Win32 implementation of stat/lstat/fstat.

    Returns win32_fstat_llimpl for 'fstat', win32_stat_llimpl otherwise.
    """
    from rpython.rlib import rwin32
    from rpython.rtyper.module.ll_win32file import make_win32_traits
    win32traits = make_win32_traits(traits)

    # The CRT of Windows has a number of flaws wrt. its stat() implementation:
    # - time stamps are restricted to second resolution
    # - file modification times suffer from forth-and-back conversions between
    #   UTC and local time
    # Therefore, we implement our own stat, based on the Win32 API directly.
    from rpython.rtyper.tool import rffi_platform as platform
    from rpython.translator.tool.cbuild import ExternalCompilationInfo
    from rpython.rlib import rwin32

    assert len(STAT_FIELDS) == 10  # no extra fields on Windows

    def attributes_to_mode(attributes):
        # Map Win32 file attribute bits onto a POSIX-style st_mode.
        m = 0
        attributes = intmask(attributes)
        if attributes & win32traits.FILE_ATTRIBUTE_DIRECTORY:
            m |= win32traits._S_IFDIR | 0111  # IFEXEC for user,group,other
        else:
            m |= win32traits._S_IFREG
        if attributes & win32traits.FILE_ATTRIBUTE_READONLY:
            m |= 0444
        else:
            m |= 0666
        return m

    def attribute_data_to_stat(info):
        # Build a stat result from WIN32_FILE_ATTRIBUTE_DATA (path-based stat).
        st_mode = attributes_to_mode(info.c_dwFileAttributes)
        st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow)
        ctime = FILE_TIME_to_time_t_float(info.c_ftCreationTime)
        mtime = FILE_TIME_to_time_t_float(info.c_ftLastWriteTime)
        atime = FILE_TIME_to_time_t_float(info.c_ftLastAccessTime)
        result = (st_mode,
                  0, 0, 0, 0, 0,
                  st_size,
                  atime, mtime, ctime)
        return make_stat_result(result)

    def by_handle_info_to_stat(info):
        # similar to the one above
        st_mode = attributes_to_mode(info.c_dwFileAttributes)
        st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow)
        ctime = FILE_TIME_to_time_t_float(info.c_ftCreationTime)
        mtime = FILE_TIME_to_time_t_float(info.c_ftLastWriteTime)
        atime = FILE_TIME_to_time_t_float(info.c_ftLastAccessTime)
        # specific to fstat()
        st_ino = make_longlong(info.c_nFileIndexHigh, info.c_nFileIndexLow)
        st_nlink = info.c_nNumberOfLinks
        result = (st_mode,
                  st_ino, 0, st_nlink, 0, 0,
                  st_size,
                  atime, mtime, ctime)
        return make_stat_result(result)

    def attributes_from_dir(l_path, data):
        # Fallback for files locked by another process (sharing violation):
        # read the metadata via FindFirstFile instead.
        filedata = lltype.malloc(win32traits.WIN32_FIND_DATA, flavor='raw')
        try:
            hFindFile = win32traits.FindFirstFile(l_path, filedata)
            if hFindFile == rwin32.INVALID_HANDLE_VALUE:
                return 0
            win32traits.FindClose(hFindFile)
            data.c_dwFileAttributes = filedata.c_dwFileAttributes
            rffi.structcopy(data.c_ftCreationTime, filedata.c_ftCreationTime)
            rffi.structcopy(data.c_ftLastAccessTime, filedata.c_ftLastAccessTime)
            rffi.structcopy(data.c_ftLastWriteTime, filedata.c_ftLastWriteTime)
            data.c_nFileSizeHigh = filedata.c_nFileSizeHigh
            data.c_nFileSizeLow = filedata.c_nFileSizeLow
            return 1
        finally:
            lltype.free(filedata, flavor='raw')

    def win32_stat_llimpl(path):
        data = lltype.malloc(win32traits.WIN32_FILE_ATTRIBUTE_DATA, flavor='raw')
        try:
            l_path = traits.str2charp(path)
            res = win32traits.GetFileAttributesEx(l_path, win32traits.GetFileExInfoStandard, data)
            errcode = rwin32.GetLastError_saved()
            if res == 0:
                if errcode == win32traits.ERROR_SHARING_VIOLATION:
                    res = attributes_from_dir(l_path, data)
                    errcode = rwin32.GetLastError_saved()
            traits.free_charp(l_path)
            if res == 0:
                raise WindowsError(errcode, "os_stat failed")
            return attribute_data_to_stat(data)
        finally:
            lltype.free(data, flavor='raw')

    def win32_fstat_llimpl(fd):
        handle = rwin32.get_osfhandle(fd)
        filetype = win32traits.GetFileType(handle)
        if filetype == win32traits.FILE_TYPE_CHAR:
            # console or LPT device
            return make_stat_result((win32traits._S_IFCHR,
                                     0, 0, 0, 0, 0,
                                     0, 0, 0, 0))
        elif filetype == win32traits.FILE_TYPE_PIPE:
            # socket or named pipe
            return make_stat_result((win32traits._S_IFIFO,
                                     0, 0, 0, 0, 0,
                                     0, 0, 0, 0))
        elif filetype == win32traits.FILE_TYPE_UNKNOWN:
            error = rwin32.GetLastError_saved()
            if error != 0:
                raise WindowsError(error, "os_fstat failed")
            # else: unknown but valid file

        # normal disk file (FILE_TYPE_DISK)
        info = lltype.malloc(win32traits.BY_HANDLE_FILE_INFORMATION,
                             flavor='raw', zero=True)
        try:
            res = win32traits.GetFileInformationByHandle(handle, info)
            if res == 0:
                raise WindowsError(rwin32.GetLastError_saved(),
                                   "os_fstat failed")
            return by_handle_info_to_stat(info)
        finally:
            lltype.free(info, flavor='raw')

    if name == 'fstat':
        return win32_fstat_llimpl
    else:
        return win32_stat_llimpl
#__________________________________________________
# Helper functions for win32
def make_longlong(high, low):
    """Combine two 32-bit halves into one signed 64-bit value."""
    return (rffi.r_longlong(high) << 32) + rffi.r_longlong(low)
# Seconds between 1.1.1601 (FILETIME epoch) and 1.1.1970 (Unix epoch)
secs_between_epochs = rffi.r_longlong(11644473600)
def FILE_TIME_to_time_t_float(filetime):
    """Convert a Win32 FILETIME to a POSIX timestamp as a float."""
    ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime)
    # FILETIME is in units of 100 nsec
    return float(ft) * (1.0 / 10000000.0) - secs_between_epochs
def time_t_to_FILE_TIME(time, filetime):
    """Fill a Win32 FILETIME structure from a POSIX timestamp."""
    ft = rffi.r_longlong((time + secs_between_epochs) * 10000000)
    filetime.c_dwHighDateTime = rffi.r_uint(ft >> 32)
    filetime.c_dwLowDateTime = rffi.r_uint(ft)  # masking off high bits
| {
"repo_name": "jptomo/rpython-lang-scheme",
"path": "rpython/rtyper/module/ll_os_stat.py",
"copies": "1",
"size": "21189",
"license": "mit",
"hash": 1755075832420172000,
"line_mean": 34.7922297297,
"line_max": 103,
"alpha_frac": 0.5988956534,
"autogenerated": false,
"ratio": 3.346864634338967,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44457602877389674,
"avg_score": null,
"num_lines": null
} |
"""Annotation Extension for relational expressions.
https://link.springer.com/protocol/10.1007/978-1-4939-3743-1_17
Correlated function associations between genes and GOs containing contextual information.
With contextual information identify gene products that perform a role:
- only under certain conditions
- in the presence of specific factors
Gene products can have different roles:
- in different cells
- in different tissues
"""
import collections as cx
__copyright__ = "Copyright (C) 2016-2019, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
# pylint: disable=too-few-public-methods
class AnnotationExtensions(object):
    """A collection of annotation extensions for one gene product."""

    def __init__(self, extensions):
        # List of extension groups; each group is a list of extension
        # objects exposing a .relation attribute.
        self.exts = extensions

    def __str__(self):
        group_sizes = ",".join("{}".format(len(grp)) for grp in self.exts)
        pieces = ["Ext({N}:{L})".format(N=len(self.exts), L=group_sizes)]
        pieces.extend("[{TXT}]".format(TXT=", ".join(str(ext) for ext in grp))
                      for grp in self.exts)
        return " ".join(pieces)

    def get_relations_cnt(self):
        """Get the set of all relations."""
        relations = (ext.relation for grp in self.exts for ext in grp)
        return cx.Counter(relations)
# Copyright (C) 2016-2019, DV Klopfenstein, H Tang. All rights reserved."
| {
"repo_name": "tanghaibao/goatools",
"path": "goatools/anno/extensions/extensions.py",
"copies": "1",
"size": "1426",
"license": "bsd-2-clause",
"hash": -4486748324802402300,
"line_mean": 30.6888888889,
"line_max": 92,
"alpha_frac": 0.6486676017,
"autogenerated": false,
"ratio": 3.6752577319587627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9821482151077849,
"avg_score": 0.0004886365161827286,
"num_lines": 45
} |
'''Annotation of ir.
Handles simple annotations, particularly marking results that are ignored.
'''
from __future__ import absolute_import
from ..runtime.multimethod import MultiMethod, defmethod
from . import ir as I
def annotate(ir):
    """Annotate *ir* in place: mark this node, then recurse into its children."""
    annotate_node(ir)
    annotate_children(ir)
# Per-node-type annotation, dispatched on the IR node class via MultiMethod.
annotate_node = MultiMethod('annotate_node')
@defmethod(annotate_node, [I.node])
def meth(node):
    # Default: nothing to annotate for a generic node.
    pass
annotate_children = MultiMethod('annotate_children')
@defmethod(annotate_children, [I.node])
def meth(node):
    # Default: recurse into every child node.
    for child in I.iter_children(node):
        annotate(child)
@defmethod(annotate_node, [I.unary_base])
def meth(ub):
    # The operand's result is needed only if the unary expression's is.
    ub.op.result_ignored = ub.result_ignored
@defmethod(annotate_node, [I.binary_base])
def meth(bb):
    # Both operands inherit the parent's ignored flag.
    bb.lop.result_ignored = bb.rop.result_ignored = bb.result_ignored
@defmethod(annotate_node, [I.buildslice])
def meth(bs):
    bs.start.result_ignored = bs.stop.result_ignored = bs.step.result_ignored = bs.result_ignored
@defmethod(annotate_node, [I.progn])
def meth(p):
    # Every expression before the last is evaluated for side effects only.
    # If the progn's own result is ignored, the last expression's is too
    # (return_index moves past the end, so every index < return_index).
    exprs = list(p.exprs)
    return_index = len(exprs) - 1
    if p.result_ignored:
        return_index += 1
    for index,expr in enumerate(exprs):
        expr.result_ignored = index < return_index
@defmethod(annotate_node, [I.if_])
def meth(c):
    # Both branches inherit the conditional's ignored flag.
    c.then.result_ignored = c.else_.result_ignored = c.result_ignored
@defmethod(annotate_node, [I.trycatch])
def meth(tc):
    # Body and catch results are marked ignored unconditionally.
    tc.body.result_ignored = True
    tc.catch.result_ignored = True
@defmethod(annotate_node, [I.tryfinally])
def meth(tf):
    # Both the protected body and the finally clause are marked as having
    # their results ignored unconditionally.
    tf.body.result_ignored = tf.finally_.result_ignored = True
@defmethod(annotate_node, [I.tag])
def meth(tg):
    # Both the tag node itself and its body are marked ignored unconditionally.
    tg.result_ignored = True
    tg.body.result_ignored = True
@defmethod(annotate_node, [I.preeval])
def meth(pre):
    # Pre-evaluated wrapper: propagate the flag to the wrapped expression.
    pre.expression.result_ignored = pre.result_ignored
@defmethod(annotate_node, [I.toplevel])
def meth(tp):
    # Top-level wrapper: propagate the flag to the wrapped expression.
    tp.expression.result_ignored = tp.result_ignored
| {
"repo_name": "matthagy/Jamenson",
"path": "jamenson/compiler/annotate.py",
"copies": "1",
"size": "1916",
"license": "apache-2.0",
"hash": 8430256325796180000,
"line_mean": 24.2105263158,
"line_max": 97,
"alpha_frac": 0.7030271399,
"autogenerated": false,
"ratio": 3.130718954248366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9303527352233135,
"avg_score": 0.006043748383046057,
"num_lines": 76
} |
"""Annotation parsing. For links inside annotations see test_inspect.TypeLinks."""
import sys
from typing import List, Tuple, Dict, Any, Union, Optional, Callable, TypeVar, Generic, Iterator
# NOTE(review): these definitions are introspection fixtures — presumably the
# documentation test suite checks their exact signatures and docstrings, so do
# not alter signatures, annotations, or docstring text; comments are safe.
class Foo:
    """A class with properties"""
    @property
    def a_property(self) -> List[bool]:
        """A property with a type annotation"""
        pass
class FooSlots:
    """A class with slots"""
    __slots__ = ['unannotated', 'annotated']
    annotated: List[str]
# NOTE(review): TypeVar name 'Tp' differs from the variable name _T — confirm
# this mismatch is intentional in the fixture.
_T = TypeVar('Tp')
# Triggers a corner case with _gorg on Py3.6 (the member has to be ignored).
# AContainer2 is not derived directly from Generic but has _gorg also.
# Additionally, on Py3.6 these classes will have a __next_in_mro__ member,
# which should be ignored as well
class AContainer(Generic[_T]):
    """A generic class. No parent class info extracted yet."""
class AContainer2(Iterator):
    """Another class derived from a typing thing."""
def annotation(param: List[int], another: bool, third: str = "hello") -> float:
    """An annotated function"""
    pass
def annotation_strings(param: 'List[int]', another: 'bool', third: 'str' = "hello") -> 'float':
    """Annotated using strings, should result in exactly the same as annotation()"""
    pass
def no_annotation(a, b, z):
    """Non-annotated function"""
    pass
def no_annotation_default_param(param, another, third = "hello"):
    """Non-annotated function with a default parameter"""
    pass
def partial_annotation(foo, param: Tuple[int, int], unannotated, cls: object):
    """Partially annotated function"""
    pass
def annotation_tuple_instead_of_tuple(a: (float, int)):
    """Annotation with a tuple instead of Tuple, ignored"""
def annotation_func_instead_of_type(a: open):
    """Annotation with a function instead of a type, ignored"""
def annotation_func_instead_of_type_nested(a: List[open], b: Callable[[open], str], c: Callable[[str], open]):
    """Annotations with nested problems, ignoring the whole thing"""
def annotation_any(a: Any):
    """Annotation with the Any type"""
def annotation_union(a: Union[float, int]):
    """Annotation with the Union type"""
def annotation_optional(a: Optional[float]):
    """Annotation with the Optional type"""
def annotation_union_second_bracketed(a: Union[float, List[int]]):
    """Annotation with the Union type and second type bracketed, where we can't use isinstance"""
def annotation_union_of_undefined(a: Union[int, 'something.Undefined']):
    """Annotation with an union that has an undefined type inside, where we can't use isinstance either"""
def annotation_list_noparam(a: List):
    """Annotation with the unparametrized List type. 3.7 adds an implicit TypeVar to it, 3.6 not, emulate that to make the test pass on older versions"""
if sys.version_info < (3, 7):
    annotation_list_noparam.__annotations__['a'] = List[TypeVar('T')]
def annotation_generic(a: List[_T]) -> _T:
    """Annotation with a generic type"""
def annotation_callable(a: Callable[[float, int], str]):
    """Annotation with the Callable type"""
def annotation_callable_no_args(a: Callable[[], Dict[int, float]]):
    """Annotation with the Callable type w/o arguments"""
def annotation_ellipsis(a: Callable[..., int], b: Tuple[str, ...]):
    """Annotation with ellipsis"""
# Only possible with native code now, https://www.python.org/dev/peps/pep-0570/
#def positionals_only(positional_only, /, positional_kw):
#"""Function with explicitly delimited positional args"""
#pass
def args_kwargs(a, b, *args, **kwargs):
    """Function with args and kwargs"""
    pass
def positional_keyword(positional_kw, *, kw_only):
    """Function with explicitly delimited keyword args"""
    pass
def annotated_positional_keyword(bar = False, *, foo: str, **kwargs):
    """Function with explicitly delimited keyword args and type annotations"""
    pass
def returns_none(a: Callable[[], None]) -> None:
    """In order to disambiguate between a missing return annotation and an
    annotated none, the None return annotation is kept, converted from NoneType
    to None"""
def returns_none_type(a: Callable[[], type(None)]) -> type(None):
    """And it should behave the same when using None or type(None)"""
# Module-level variable fixtures: one without and one with an annotation.
UNANNOTATED_VAR = 3.45
ANNOTATED_VAR: Tuple[bool, str] = (False, 'No.')
| {
"repo_name": "mosra/m.css",
"path": "documentation/test_python/inspect_annotations/inspect_annotations.py",
"copies": "1",
"size": "4294",
"license": "mit",
"hash": -2427413527489259500,
"line_mean": 34.4876033058,
"line_max": 153,
"alpha_frac": 0.688402422,
"autogenerated": false,
"ratio": 3.7241977450130097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.491260016701301,
"avg_score": null,
"num_lines": null
} |
"""annotations
Revision ID: ddd6ebdd853b
Revises: ca69c70ec99b
Create Date: 2017-09-13 16:36:39.144489
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ddd6ebdd853b'
down_revision = 'ca69c70ec99b'
def upgrade():
    """Create the annotation_layer and annotation tables plus a lookup index."""
    # ### commands auto generated by Alembic - please adjust! ###
    # annotation_layer must be created first: annotation's layer_id FK below
    # references it.
    op.create_table(
        'annotation_layer',
        sa.Column('created_on', sa.DateTime(), nullable=True),
        sa.Column('changed_on', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=250), nullable=True),
        sa.Column('descr', sa.Text(), nullable=True),
        sa.Column('changed_by_fk', sa.Integer(), nullable=True),
        sa.Column('created_by_fk', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
        sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table(
        'annotation',
        sa.Column('created_on', sa.DateTime(), nullable=True),
        sa.Column('changed_on', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('start_dttm', sa.DateTime(), nullable=True),
        sa.Column('end_dttm', sa.DateTime(), nullable=True),
        sa.Column('layer_id', sa.Integer(), nullable=True),
        sa.Column('short_descr', sa.String(length=500), nullable=True),
        sa.Column('long_descr', sa.Text(), nullable=True),
        sa.Column('changed_by_fk', sa.Integer(), nullable=True),
        sa.Column('created_by_fk', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
        sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
        sa.ForeignKeyConstraint(['layer_id'], [u'annotation_layer.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # NOTE(review): index name 'ti_dag_state' does not describe these columns
    # (looks copied from another migration) — confirm it is intended.
    op.create_index(
        'ti_dag_state',
        'annotation', ['layer_id', 'start_dttm', 'end_dttm'], unique=False)
def downgrade():
    """Drop the index and tables in reverse dependency order."""
    op.drop_index('ti_dag_state', table_name='annotation')
    op.drop_table('annotation')
    op.drop_table('annotation_layer')
| {
"repo_name": "alanmcruickshank/superset-dev",
"path": "superset/migrations/versions/ddd6ebdd853b_annotations.py",
"copies": "1",
"size": "2198",
"license": "apache-2.0",
"hash": -1824422606638072600,
"line_mean": 38.25,
"line_max": 75,
"alpha_frac": 0.6237488626,
"autogenerated": false,
"ratio": 3.4290171606864273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4552766023286427,
"avg_score": null,
"num_lines": null
} |
"""Announcement configuration.
The announcement configuration allows to adapt JSON objects for user data
administration, template adaptation, and changing the values to use for
substitutions into the templates, and global common settings for your
scenario.
This file needs adaptation work to match your surrounding environment first.
SMTP and HTML knowledge will be helpful.
MIT licensed, Copyright (c) 2015
@author Oliver Merkel, Merkel(dot)Oliver(at)web(dot)de.
All rights reserved.
"""
# Mail templates keyed by announcement type. Each entry defines recipient
# lists, a subject, and an HTML body (list of lines). ${...} placeholders are
# filled from the `current` dict below (and ${signature} from the user entry);
# the optional "attachments" value names a key in `current` holding file paths.
mail = {
    "Release Candidate Announcement" : {
        "To" : [ "Releases <releases@your-company.com>" ],
        "CC" : [ "Customer <poc@customer-company.com>", "Project Leader <projectlead@your-company.com>" ],
        "Subject" : "${productversion} ${producttype} - Release Candidate ${calendarmonth} available",
        "Body" : [
            "<html xmlns='http://www.w3.org/TR/REC-html40'><head>",
            "<META HTTP-EQUIV='Content-Type' CONTENT='text/html; charset=iso-8859-1'><style><!--",
            "@font-face {font-family:Arial;panose-1:2 15 5 2 2 2 4 3 2 4;}",
            "p {margin:0cm;margin-bottom:12.0pt;font-size:10.0pt;font-family:'Arial','sans-serif';}",
            "p.signature {margin-left:20.0pt;margin-bottom:8.0pt;font-size:8.0pt;color:black;}",
            "--></style></head>",
            "<body><p>Dear all,</p>",
            "<p>We are near to ${producttype} of ${productversion} again.</p>",
            "<p>Release Candidate of ${productversion} for ${calendarmonth} is available on<br>",
            "<a href='file:///${path}'>${path}</a></p>",
            "${signature}",
            "</body></html>"
        ],
        "attachments" : "releasenotesattachments"
    },
    "Final Delivery Build" : {
        "To" : [ "Releases <releases@your-company.com>" ],
        "CC" : [ "Integration <int@your-company.com>", "Project Leader <projectlead@your-company.com>" ],
        "Subject" : "${productversion} ${producttype} ${calendarmonth} - final delivery build started",
        "Body" : [
            "<html xmlns='http://www.w3.org/TR/REC-html40'><head>",
            "<META HTTP-EQUIV='Content-Type' CONTENT='text/html; charset=iso-8859-1'><style><!--",
            "@font-face {font-family:Arial;panose-1:2 15 5 2 2 2 4 3 2 4;}",
            "p {margin:0cm;margin-bottom:12.0pt;font-size:10.0pt;font-family:'Arial','sans-serif';}",
            "p.signature {margin-left:20.0pt;margin-bottom:8.0pt;font-size:8.0pt;color:black;}",
            "--></style></head>",
            "<body><p>Dear all,</p>",
            "<p>Please note that final delivery build of ${productversion} for ${calendarmonth} has been started on<br>",
            "<a href='${buildserver}'>${buildserver}</a></p>",
            "${signature}",
            "</body></html>"
        ]
    },
    "Weekly Brown Bag Meeting" : {
        "To" : [ "ProjectDevelopers <ProjectDevelopers@your-company.com>" ],
        "CC" : [ "Integration <int@your-company.com>", "Project Leader <projectlead@your-company.com>" ],
        "Subject" : "Reminder: ${productversion} Wednesday 11am - Weekly Brown Bag Meeting",
        "Body" : [
            "<html xmlns='http://www.w3.org/TR/REC-html40'><head>",
            "<META HTTP-EQUIV='Content-Type' CONTENT='text/html; charset=iso-8859-1'><style><!--",
            "@font-face {font-family:Arial;panose-1:2 15 5 2 2 2 4 3 2 4;}",
            "p {margin:0cm;margin-bottom:12.0pt;font-size:10.0pt;font-family:'Arial','sans-serif';}",
            "p.signature {margin-left:20.0pt;margin-bottom:8.0pt;font-size:8.0pt;color:black;}",
            "--></style></head>",
            "<body><p>Dear all,</p>",
            "<p>This is a kind reminder and invitation to our Weekly Wednesday Brown Bag Meeting. Feel free to join.</p>",
            "<p>The meeting is usually helpful to socialize, sync on project progress and get help on impediments as well.</p>",
            "<p>Attached you will find some suggestions to pick up a brown bag nearby in case you missed to bring your lunch.</p>",
            "<p>See ya in a bit... ;-)</p>",
            "${signature}",
            "</body></html>"
        ],
        "attachments" : "lunchsuggestions"
    }
}
# Per-sender data: HTML signature lines, display name, and sending address(es).
users = {
    "UserIDorName1" : {
        "Signature" : [
            "<p style='color:#1F497D;margin-bottom:0.1pt'>Kind Regards / Mit freundlichen Grüßen<br>",
            " FirstName1 LastName1</p>",
            "<hr />",
            "<p class=signature><span style='font-size:10.0pt; color:#1F497D'>FirstName1 LastName1, <span style='font-size:8.0pt'>Title</span></span><br>",
            "Position, OU</p>",
            "<p class=signature>Phone: +49 1234 5678 90<br>Fax: +49 1234 5678 09</p>" ],
        "Friendly Name" : "LastName1, FirstName1" ,
        "Mail" : [ "FirstName1.LastName1@your-company.com" ]
    }, "UserIDorName2" : {
        "Signature" : [
            "<p>Best regards, FirstName2 LastName2</p>" ],
        "Friendly Name" : "LastName2, FirstName2",
        "Mail" : [ "FirstName2.LastName2@your-company.com" ]
    }
}
# Values substituted into the ${...} placeholders of the templates above.
current = { "productversion" : "MyProduct1.1-abc-details",
    "producttype" : "Monthly Release",
    "calendarmonth" : "15.04",
    "path" : "\\\\someserver\\share\\path1\\path2\\directory",
    "buildserver" : "http://somebuildserver",
    "releasenotesattachments" : [
        "\\\\someserver\\share\\path1\\path2\\directory\\ReleaseNotes\\Release_Notes.pdf",
        "\\\\someserver\\share\\path1\\path2\\directory\\Testing\\TestReport.pdf",
        "\\\\someserver\\share\\path1\\path2\\directory\\CHANGELOG.txt"
    ],
    "lunchsuggestions" : [
        "\\\\someserver\\share\\path3\\italian\\pizza_and_pasta.pdf",
        "\\\\someserver\\share\\path4\\indian\\vegetarian_spicy.pdf",
        "\\\\someserver\\share\\path5\\german\\schnitzel_and_bratwurst.pdf"
    ]
}
# Settings shared by every announcement: Reply-To address and SMTP host.
common = { "replymailaddr" : "Release Group <releases@your-company.com>",
    "mailserver" : "mymailserver.your-company.com" }
| {
"repo_name": "OMerkel/Announcement",
"path": "announcementconfig.py",
"copies": "1",
"size": "5548",
"license": "mit",
"hash": -4968152670703105000,
"line_mean": 47.6666666667,
"line_max": 149,
"alpha_frac": 0.6387887527,
"autogenerated": false,
"ratio": 3.0416666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41804554193666665,
"avg_score": null,
"num_lines": null
} |
"""AnnouncementExternalFeeds API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class AnnouncementExternalFeedsAPI(BaseCanvasAPI):
    """AnnouncementExternalFeeds API Version 1.0."""
    # NOTE(review): per the module docstring this client is template-generated;
    # keep edits mechanical so the file can be regenerated without surprises.
    def __init__(self, *args, **kwargs):
        """Init method for AnnouncementExternalFeedsAPI."""
        super(AnnouncementExternalFeedsAPI, self).__init__(*args, **kwargs)
        self.logger = logging.getLogger("py3canvas.AnnouncementExternalFeedsAPI")
    # Each endpoint method below assembles path/data/params dicts and delegates
    # the HTTP call to self.generic_request (inherited from BaseCanvasAPI).
    # The bare """...""" string literals inside bodies are generator artifacts
    # documenting each request field.
    def list_external_feeds_courses(self, course_id):
        """
        List external feeds.
        Returns the list of External Feeds this course or group.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - course_id
        """ID"""
        path["course_id"] = course_id
        self.logger.debug("GET /api/v1/courses/{course_id}/external_feeds with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("GET", "/api/v1/courses/{course_id}/external_feeds".format(**path), data=data, params=params, all_pages=True)
    def list_external_feeds_groups(self, group_id):
        """
        List external feeds.
        Returns the list of External Feeds this course or group.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - group_id
        """ID"""
        path["group_id"] = group_id
        self.logger.debug("GET /api/v1/groups/{group_id}/external_feeds with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("GET", "/api/v1/groups/{group_id}/external_feeds".format(**path), data=data, params=params, all_pages=True)
    def create_external_feed_courses(self, url, course_id, header_match=None, verbosity=None):
        """
        Create an external feed.
        Create a new external feed for the course or group.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - course_id
        """ID"""
        path["course_id"] = course_id
        # REQUIRED - url
        """The url to the external rss or atom feed"""
        data["url"] = url
        # OPTIONAL - header_match
        """If given, only feed entries that contain this string in their title will be imported"""
        if header_match is not None:
            data["header_match"] = header_match
        # OPTIONAL - verbosity
        """Defaults to 'full'"""
        if verbosity is not None:
            self._validate_enum(verbosity, ["full", "truncate", "link_only"])
            data["verbosity"] = verbosity
        self.logger.debug("POST /api/v1/courses/{course_id}/external_feeds with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("POST", "/api/v1/courses/{course_id}/external_feeds".format(**path), data=data, params=params, single_item=True)
    def create_external_feed_groups(self, url, group_id, header_match=None, verbosity=None):
        """
        Create an external feed.
        Create a new external feed for the course or group.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - group_id
        """ID"""
        path["group_id"] = group_id
        # REQUIRED - url
        """The url to the external rss or atom feed"""
        data["url"] = url
        # OPTIONAL - header_match
        """If given, only feed entries that contain this string in their title will be imported"""
        if header_match is not None:
            data["header_match"] = header_match
        # OPTIONAL - verbosity
        """Defaults to 'full'"""
        if verbosity is not None:
            self._validate_enum(verbosity, ["full", "truncate", "link_only"])
            data["verbosity"] = verbosity
        self.logger.debug("POST /api/v1/groups/{group_id}/external_feeds with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("POST", "/api/v1/groups/{group_id}/external_feeds".format(**path), data=data, params=params, single_item=True)
    def delete_external_feed_courses(self, course_id, external_feed_id):
        """
        Delete an external feed.
        Deletes the external feed.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - course_id
        """ID"""
        path["course_id"] = course_id
        # REQUIRED - PATH - external_feed_id
        """ID"""
        path["external_feed_id"] = external_feed_id
        self.logger.debug("DELETE /api/v1/courses/{course_id}/external_feeds/{external_feed_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("DELETE", "/api/v1/courses/{course_id}/external_feeds/{external_feed_id}".format(**path), data=data, params=params, single_item=True)
    def delete_external_feed_groups(self, group_id, external_feed_id):
        """
        Delete an external feed.
        Deletes the external feed.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - group_id
        """ID"""
        path["group_id"] = group_id
        # REQUIRED - PATH - external_feed_id
        """ID"""
        path["external_feed_id"] = external_feed_id
        self.logger.debug("DELETE /api/v1/groups/{group_id}/external_feeds/{external_feed_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("DELETE", "/api/v1/groups/{group_id}/external_feeds/{external_feed_id}".format(**path), data=data, params=params, single_item=True)
class Externalfeed(BaseModel):
    """Externalfeed Model.
    Read-only snapshot of a Canvas external feed; setters mutate only the
    local object and log that the remote instance is NOT updated.
    """
    def __init__(self, display_name=None, url=None, created_at=None, id=None, header_match=None, verbosity=None):
        """Init method for Externalfeed class."""
        # NOTE(review): BaseModel.__init__ is not invoked here — confirm the
        # base class requires no initialization of its own.
        self._display_name = display_name
        self._url = url
        self._created_at = created_at
        self._id = id
        self._header_match = header_match
        self._verbosity = verbosity
        self.logger = logging.getLogger('py3canvas.Externalfeed')
    @property
    def display_name(self):
        """The title of the feed, pulled from the feed itself. If the feed hasn't yet been pulled, a temporary name will be synthesized based on the URL."""
        return self._display_name
    @display_name.setter
    def display_name(self, value):
        """Setter for display_name property."""
        # Logger.warn is a deprecated alias of Logger.warning; use the
        # canonical name (same output, no DeprecationWarning). Applied to
        # every setter below.
        self.logger.warning("Setting values on display_name will NOT update the remote Canvas instance.")
        self._display_name = value
    @property
    def url(self):
        """The HTTP/HTTPS URL to the feed."""
        return self._url
    @url.setter
    def url(self, value):
        """Setter for url property."""
        self.logger.warning("Setting values on url will NOT update the remote Canvas instance.")
        self._url = value
    @property
    def created_at(self):
        """When this external feed was added to Canvas."""
        return self._created_at
    @created_at.setter
    def created_at(self, value):
        """Setter for created_at property."""
        self.logger.warning("Setting values on created_at will NOT update the remote Canvas instance.")
        self._created_at = value
    @property
    def id(self):
        """The ID of the feed."""
        return self._id
    @id.setter
    def id(self, value):
        """Setter for id property."""
        self.logger.warning("Setting values on id will NOT update the remote Canvas instance.")
        self._id = value
    @property
    def header_match(self):
        """If not null, only feed entries whose title contains this string will trigger new posts in Canvas."""
        return self._header_match
    @header_match.setter
    def header_match(self, value):
        """Setter for header_match property."""
        self.logger.warning("Setting values on header_match will NOT update the remote Canvas instance.")
        self._header_match = value
    @property
    def verbosity(self):
        """The verbosity setting determines how much of the feed's content is imported into Canvas as part of the posting. 'link_only' means that only the title and a link to the item. 'truncate' means that a summary of the first portion of the item body will be used. 'full' means that the full item body will be used."""
        return self._verbosity
    @verbosity.setter
    def verbosity(self, value):
        """Setter for verbosity property."""
        self.logger.warning("Setting values on verbosity will NOT update the remote Canvas instance.")
        self._verbosity = value
| {
"repo_name": "tylerclair/py3canvas",
"path": "py3canvas/apis/announcement_external_feeds.py",
"copies": "1",
"size": "8916",
"license": "mit",
"hash": -6139625142390800000,
"line_mean": 36.3054393305,
"line_max": 322,
"alpha_frac": 0.6166442351,
"autogenerated": false,
"ratio": 4.050885960926851,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005129690917200797,
"num_lines": 239
} |
"""AnnouncementExternalFeeds API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from base import BaseCanvasAPI
from base import BaseModel
class AnnouncementExternalFeedsAPI(BaseCanvasAPI):
    """AnnouncementExternalFeeds API Version 1.0.
    Each endpoint method assembles path/data/params dicts and delegates the
    HTTP call to self.generic_request (inherited from BaseCanvasAPI). The bare
    string literals inside method bodies are generator artifacts documenting
    each request field.
    """
    def __init__(self, *args, **kwargs):
        """Init method for AnnouncementExternalFeedsAPI."""
        super(AnnouncementExternalFeedsAPI, self).__init__(*args, **kwargs)
        self.logger = logging.getLogger("pycanvas.AnnouncementExternalFeedsAPI")
    def list_external_feeds_courses(self, course_id):
        """
        List external feeds.
        Returns the list of External Feeds this course or group.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - course_id
        """ID"""
        path["course_id"] = course_id
        self.logger.debug("GET /api/v1/courses/{course_id}/external_feeds with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("GET", "/api/v1/courses/{course_id}/external_feeds".format(**path), data=data, params=params, all_pages=True)
    def list_external_feeds_groups(self, group_id):
        """
        List external feeds.
        Returns the list of External Feeds this course or group.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - group_id
        """ID"""
        path["group_id"] = group_id
        self.logger.debug("GET /api/v1/groups/{group_id}/external_feeds with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("GET", "/api/v1/groups/{group_id}/external_feeds".format(**path), data=data, params=params, all_pages=True)
    def create_external_feed_courses(self, url, course_id, header_match=None, verbosity=None):
        """
        Create an external feed.
        Create a new external feed for the course or group.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - course_id
        """ID"""
        path["course_id"] = course_id
        # REQUIRED - url
        """The url to the external rss or atom feed"""
        data["url"] = url
        # OPTIONAL - header_match
        """If given, only feed entries that contain this string in their title will be imported"""
        if header_match is not None:
            data["header_match"] = header_match
        # OPTIONAL - verbosity
        # BUGFIX: was '"""Defaults to "full""""' — an unterminated string
        # literal (the first triple quote after 'full' closes the string,
        # leaving a lone '"'), which made the module a SyntaxError.
        """Defaults to 'full'"""
        if verbosity is not None:
            self._validate_enum(verbosity, ["full", "truncate", "link_only"])
            data["verbosity"] = verbosity
        self.logger.debug("POST /api/v1/courses/{course_id}/external_feeds with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("POST", "/api/v1/courses/{course_id}/external_feeds".format(**path), data=data, params=params, single_item=True)
    def create_external_feed_groups(self, url, group_id, header_match=None, verbosity=None):
        """
        Create an external feed.
        Create a new external feed for the course or group.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - group_id
        """ID"""
        path["group_id"] = group_id
        # REQUIRED - url
        """The url to the external rss or atom feed"""
        data["url"] = url
        # OPTIONAL - header_match
        """If given, only feed entries that contain this string in their title will be imported"""
        if header_match is not None:
            data["header_match"] = header_match
        # OPTIONAL - verbosity
        # BUGFIX: same unterminated-string fix as in create_external_feed_courses.
        """Defaults to 'full'"""
        if verbosity is not None:
            self._validate_enum(verbosity, ["full", "truncate", "link_only"])
            data["verbosity"] = verbosity
        self.logger.debug("POST /api/v1/groups/{group_id}/external_feeds with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("POST", "/api/v1/groups/{group_id}/external_feeds".format(**path), data=data, params=params, single_item=True)
    def delete_external_feed_courses(self, course_id, external_feed_id):
        """
        Delete an external feed.
        Deletes the external feed.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - course_id
        """ID"""
        path["course_id"] = course_id
        # REQUIRED - PATH - external_feed_id
        """ID"""
        path["external_feed_id"] = external_feed_id
        self.logger.debug("DELETE /api/v1/courses/{course_id}/external_feeds/{external_feed_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("DELETE", "/api/v1/courses/{course_id}/external_feeds/{external_feed_id}".format(**path), data=data, params=params, single_item=True)
    def delete_external_feed_groups(self, group_id, external_feed_id):
        """
        Delete an external feed.
        Deletes the external feed.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - group_id
        """ID"""
        path["group_id"] = group_id
        # REQUIRED - PATH - external_feed_id
        """ID"""
        path["external_feed_id"] = external_feed_id
        self.logger.debug("DELETE /api/v1/groups/{group_id}/external_feeds/{external_feed_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("DELETE", "/api/v1/groups/{group_id}/external_feeds/{external_feed_id}".format(**path), data=data, params=params, single_item=True)
class Externalfeed(BaseModel):
    """Externalfeed Model.
    Read-only snapshot of a Canvas external feed; setters mutate only the
    local object and log that the remote instance is NOT updated.
    """
    def __init__(self, display_name=None, url=None, created_at=None, id=None, header_match=None, verbosity=None):
        """Init method for Externalfeed class."""
        # NOTE(review): BaseModel.__init__ is not invoked here — confirm the
        # base class requires no initialization of its own.
        self._display_name = display_name
        self._url = url
        self._created_at = created_at
        self._id = id
        self._header_match = header_match
        self._verbosity = verbosity
        self.logger = logging.getLogger('pycanvas.Externalfeed')
    @property
    def display_name(self):
        """The title of the feed, pulled from the feed itself. If the feed hasn't yet been pulled, a temporary name will be synthesized based on the URL."""
        return self._display_name
    @display_name.setter
    def display_name(self, value):
        """Setter for display_name property."""
        # Logger.warn is a deprecated alias of Logger.warning; use the
        # canonical name (same output, no DeprecationWarning). Applied to
        # every setter below.
        self.logger.warning("Setting values on display_name will NOT update the remote Canvas instance.")
        self._display_name = value
    @property
    def url(self):
        """The HTTP/HTTPS URL to the feed."""
        return self._url
    @url.setter
    def url(self, value):
        """Setter for url property."""
        self.logger.warning("Setting values on url will NOT update the remote Canvas instance.")
        self._url = value
    @property
    def created_at(self):
        """When this external feed was added to Canvas."""
        return self._created_at
    @created_at.setter
    def created_at(self, value):
        """Setter for created_at property."""
        self.logger.warning("Setting values on created_at will NOT update the remote Canvas instance.")
        self._created_at = value
    @property
    def id(self):
        """The ID of the feed."""
        return self._id
    @id.setter
    def id(self, value):
        """Setter for id property."""
        self.logger.warning("Setting values on id will NOT update the remote Canvas instance.")
        self._id = value
    @property
    def header_match(self):
        """If not null, only feed entries whose title contains this string will trigger new posts in Canvas."""
        return self._header_match
    @header_match.setter
    def header_match(self, value):
        """Setter for header_match property."""
        self.logger.warning("Setting values on header_match will NOT update the remote Canvas instance.")
        self._header_match = value
    @property
    def verbosity(self):
        """The verbosity setting determines how much of the feed's content is imported into Canvas as part of the posting. 'link_only' means that only the title and a link to the item. 'truncate' means that a summary of the first portion of the item body will be used. 'full' means that the full item body will be used."""
        return self._verbosity
    @verbosity.setter
    def verbosity(self, value):
        """Setter for verbosity property."""
        self.logger.warning("Setting values on verbosity will NOT update the remote Canvas instance.")
        self._verbosity = value
| {
"repo_name": "PGower/PyCanvas",
"path": "pycanvas/apis/announcement_external_feeds.py",
"copies": "1",
"size": "9151",
"license": "mit",
"hash": 4290146231184314400,
"line_mean": 36.2887029289,
"line_max": 322,
"alpha_frac": 0.6005900994,
"autogenerated": false,
"ratio": 4.151996370235935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0030300804071674444,
"num_lines": 239
} |
"""Announcement SMTP based library.
The announcement SMTP based library with a simplified command line startup.
MIT licensed, Copyright (c) 2015
@author Oliver Merkel, Merkel(dot)Oliver(at)web(dot)de.
All rights reserved.
"""
from announcementconfig import *
from smtplib import SMTP
from string import Template
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.utils import formataddr
from email.utils import COMMASPACE
from base64 import encodebytes
import os
import getpass
import sys
from email.base64mime import body_encode as encode_base64
class Announcement:
    # Key-name constants used to index the config dicts (mail/users from
    # announcementconfig, star-imported) and the MIME header fields.
    FROM = 'From'
    TO = 'To'
    CC = 'CC'
    REPLYTO = 'Reply-To'
    SUBJECT = 'Subject'
    BODY = 'Body'
    SIGNATURE = 'Signature'
    FRIENDLYNAME = 'Friendly Name'
    PATH = 'path'
    MAIL = 'Mail'
    ATTACHMENTS = 'attachments'
    # Class-level defaults so update() can detect "not yet configured".
    user = None
    mailType = None
    def __init__(self, user, mailType):
        """Bind a sender (key into `users`) and a template (key into `mail`)."""
        self.setUser(user)
        self.setMailType(mailType)
    def setUser(self, user):
        """Select the sending user and re-render if a mail type is already set."""
        self.user = user
        self.userData = users[user]
        self.update()
    def setMailType(self, mailType):
        """Select the mail template and re-render if a user is already set."""
        self.mailType = mailType
        self.update()
def update(self):
if self.user and self.mailType:
self.renderHeader()
self.renderBody()
self.renderMessage()
    def getShortAddressList(self):
        """Return the sender's bare mail addresses from the user config entry."""
        return self.userData[self.MAIL]
def getFullAddressList(self):
return [ '"%s" <%s>' % (self.userData[self.FRIENDLYNAME], x) \
for x in self.getShortAddressList() ]
def renderBody(self):
signature = '\n'.join(self.userData[self.SIGNATURE])
body = '\n'.join(mail[self.mailType][self.BODY])
body = Template(body).safe_substitute(current)
self.body = Template(body).safe_substitute(signature=signature)
def getSubject(self):
return Template(mail[self.mailType][self.SUBJECT]).safe_substitute(current)
def renderHeader(self):
result = {}
result[self.SUBJECT] = self.getSubject()
result[self.TO] = mail[self.mailType][self.TO]
result[self.CC] = mail[self.mailType][self.CC]
self.header = result
def renderMessage(self):
msg = MIMEMultipart('alternative')
msg[self.SUBJECT] = self.header[self.SUBJECT]
msg[self.FROM] = self.getFullAddressList()[0]
msg[self.TO] = COMMASPACE.join(self.header[self.TO])
msg[self.CC] = COMMASPACE.join(self.header[self.CC])
msg[self.REPLYTO] = common['replymailaddr']
partHtml = MIMEText(self.body, 'html')
msg.attach(partHtml)
self.message = msg
def sendMail( self, password ):
with SMTP( common['mailserver'], 587 ) as smtp:
smtp.set_debuglevel(1)
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
if not (hex(sys.hexversion) == '0x30500f0'):
smtp.login( self.getShortAddressList()[0], password )
else:
# Application side workaround for http://bugs.python.org/issue25446
# smtp.login using AUTH LOGIN mechanism is broken in released Python 3.5.0
#
# prefer AUTH LOGIN over other mechanism if available
if not "LOGIN" in smtp.esmtp_features["auth"].split():
# best effort approach: allow any other mechanism
smtp.login( self.getShortAddressList()[0], password )
else:
(code, resp) = smtp.docmd("AUTH", "LOGIN " +
encode_base64(self.getShortAddressList()[0].encode('ascii'), eol=''))
if not code == 334:
raise SMTPException("Authentication not possible with this login.")
(code, resp) = smtp.docmd(encode_base64(password.encode('ascii'), eol=''))
# 235 : 'Authentication successful'
# 503 : 'Error: already authenticated'
if not code in (235, 503):
raise SMTPException("Authentication unsuccessful.")
smtp.sendmail( self.message[self.FROM],
self.header[self.TO] + self.header[self.CC], self.message.as_string() )
def sendTestMail(self):
to = COMMASPACE.join( self.userData[self.MAIL] )
with SMTP( common['mailserver'] ) as smtp:
smtp.sendmail( self.message[self.FROM], to, self.message.as_string() )
def getAttachmentKey( self ):
return mail[self.mailType][self.ATTACHMENTS] if self.ATTACHMENTS in mail[self.mailType].keys() else None
def attachmentsMissing( self ):
result = False
key = self.getAttachmentKey()
if not key is None:
for filePath in current[key]:
fileName = filePath.split('\\')[-1]
if not os.path.exists(filePath) or not os.path.isfile(filePath):
print('Error: Missing %s ( %s )' % (fileName, filePath))
result = True
return result
def attach(self):
result = True
key = self.getAttachmentKey()
if not key is None:
for filePath in current[key]:
fileName = filePath.split('\\')[-1]
if os.path.exists(filePath) and os.path.isfile(filePath):
fh = open(filePath,'rb')
part = MIMEBase('application', "octet-stream")
part.set_payload(encodebytes(fh.read()).decode())
fh.close()
part.add_header('Content-Transfer-Encoding', 'base64')
part.add_header('Content-Disposition', 'attachment; filename="%s"' % fileName)
self.message.attach(part)
else:
print('Error: Missing %s ( %s )' % (fileName, filePath))
result = False
return result
if '__main__' == __name__:
    import argparse

    # The CLI choices come straight from the configuration dictionaries.
    knownUsers = sorted(list(users.keys()))
    knownTypes = sorted(list(mail.keys()))

    parser = argparse.ArgumentParser(description='Announcement Mailer.')
    parser.add_argument('-u', '--user', nargs=1, type=str,
                        choices=knownUsers, default=[knownUsers[0]],
                        help='User to send the announcement. ' +
                        'Default is to use ' + knownUsers[0])
    parser.add_argument('-t', '--type', nargs=1, type=str,
                        choices=knownTypes, default=[knownTypes[0]],
                        help='Mail type of announcement. ' +
                        ('Default is to use "%s"' % knownTypes[0]))
    parser.add_argument('-s', '--send', nargs=1, type=str,
                        choices=['console', 'test', 'serious'],
                        default=['console'], help="""How and where to send output.
    "console" shows the mail on console only.
    "test" fakes mail by sending to own mail address only.
    "serious" sends out real mail.
    Default is to use console""" )
    options = parser.parse_args()

    chosenUser = options.user[0]
    outputMode = options.send[0]
    chosenType = options.type[0]

    announcement = Announcement(chosenUser, chosenType)

    if outputMode == 'console':
        # Dry run: dump the rendered message instead of sending anything.
        print(announcement.message.as_string())
    elif not announcement.attach():
        print("Error: could not include attachments")
    elif outputMode == 'test':
        announcement.sendTestMail()
    else:
        password = getpass.getpass('Hello %s. Please enter your password: ' % chosenUser).strip()
        announcement.sendMail(password)
| {
"repo_name": "OMerkel/Announcement",
"path": "announcement.py",
"copies": "1",
"size": "6834",
"license": "mit",
"hash": 5222722947725391000,
"line_mean": 33.6903553299,
"line_max": 108,
"alpha_frac": 0.655253146,
"autogenerated": false,
"ratio": 3.5612298071912454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9604075187789753,
"avg_score": 0.022481553080298262,
"num_lines": 197
} |
# Announces data from flight and science files as merged
# JSON documents to ZeroMQ
#
# ZMQ JSON Messages:
# * set_start: Announces the start time and glider
# - Use start time and glider to differentiate sets if necessary
# * set_data: Announces a row of data
# * set_end: Announces the end of a glider data set
#
# By: Michael Lindemuth
# University of South Florida
# College of Marine Science
# Ocean Technology Group
from pyinotify import(
ProcessEvent
)
import logging
logger = logging.getLogger("GSPS")
import zmq
import time
from datetime import datetime
from glider_binary_data_reader import (
GliderBDReader,
MergedGliderBDReader
)
# Glider flight/science file extension pairs: each tuple matches a flight
# recorder extension with its science-computer counterpart.
FLIGHT_SCIENCE_PAIRS = [('dbd', 'ebd'), ('sbd', 'tbd'), ('mbd', 'nbd')]
class GliderFileProcessor(ProcessEvent):
    """pyinotify handler that pairs glider flight/science files and publishes
    their merged rows as JSON messages over a ZeroMQ PUB socket.

    :param port: TCP port the PUB socket binds to (default 8008).
    """

    def __init__(self, port=8008):
        ProcessEvent.__init__(self)
        # glider name -> {'path': watched directory, 'files': unpaired names}
        self.glider_data = {}
        self.port = port

    def publish_segment_pair(self, glider, path, file_base, pair):
        """Merge one flight/science pair and publish set_start, per-row
        set_data messages and a closing set_end message."""
        # Create ZMQ context and socket for publishing files
        context = zmq.Context()
        socket = context.socket(zmq.PUB)
        socket.bind("tcp://*:%d" % self.port)

        # Segment id is the number between the last '-' and the '.'.
        segment_id = int(file_base[file_base.rfind('-')+1:file_base.find('.')])

        logger.info(
            "Publishing glider %s segment %d data in %s named %s pair %s"
            % (glider, segment_id, path, file_base, pair)
        )

        # start timestamp differentiates data sets downstream
        set_timestamp = datetime.utcnow()

        flight_file = file_base + pair[0]
        science_file = file_base + pair[1]
        flight_reader = GliderBDReader(
            path,
            pair[0],
            [flight_file]
        )
        science_reader = GliderBDReader(
            path,
            pair[1],
            [science_file]
        )
        merged_reader = MergedGliderBDReader(
            flight_reader, science_reader
        )

        socket.send_json({
            'message_type': 'set_start',
            'start': set_timestamp.isoformat(),
            'flight_type': pair[0],
            'flight_file': flight_file,
            'science_file': science_file,
            'science_type': pair[1],
            'glider': glider,
            'segment': segment_id,
            'headers': merged_reader.headers
        })

        for value in merged_reader:
            socket.send_json({
                'message_type': 'set_data',
                'glider': glider,
                'start': set_timestamp.isoformat(),
                'data': value
            })
            # brief pause so subscribers are not flooded
            time.sleep(0.01)

        socket.send_json({
            'message_type': 'set_end',
            'glider': glider,
            'start': set_timestamp.isoformat(),
        })

        # Pair has been published; forget both halves.
        self.glider_data[glider]['files'].remove(flight_file)
        self.glider_data[glider]['files'].remove(science_file)

    def check_for_pair(self, event):
        """Record *event*'s file and publish once its partner has been seen."""
        # Fix: compare the character with != (the original used `is not`,
        # an identity test against a string literal).
        if len(event.name) > 0 and event.name[0] != '.':
            # Add full path to glider data queue
            glider_name = event.path[event.path.rfind('/')+1:]
            if glider_name not in self.glider_data:
                self.glider_data[glider_name] = {}
                self.glider_data[glider_name]['path'] = event.path
                self.glider_data[glider_name]['files'] = []
            self.glider_data[glider_name]['files'].append(event.name)

            fileType = event.name[-3:]

            # Check for matching pair
            for pair in FLIGHT_SCIENCE_PAIRS:
                checkFile = None
                if fileType == pair[0]:
                    checkFile = event.name[:-3] + pair[1]
                elif fileType == pair[1]:
                    checkFile = event.name[:-3] + pair[0]

                if checkFile in self.glider_data[glider_name]['files']:
                    try:
                        self.publish_segment_pair(
                            glider_name, event.path, event.name[:-3], pair
                        )
                    # Fix: modern `as` syntax (works on Python 2.6+ and 3).
                    except Exception as e:
                        logger.error("Error processing pair %s: %s"
                                     % (event.name[:-3], e))

    def valid_extension(self, name):
        """True when *name* ends in a known flight or science extension."""
        extension = name[name.rfind('.')+1:]
        for pair in FLIGHT_SCIENCE_PAIRS:
            if extension == pair[0] or extension == pair[1]:
                return True

        logger.error("Unrecognized file extension for event: %s" % extension)
        return False

    def process_IN_CLOSE(self, event):
        """pyinotify hook: a watched file was closed after writing."""
        if self.valid_extension(event.name):
            self.check_for_pair(event)

    def process_IN_MOVED_TO(self, event):
        """pyinotify hook: a file was moved into a watched directory."""
        if self.valid_extension(event.name):
            self.check_for_pair(event)
| {
"repo_name": "USF-COT/GSPS",
"path": "glider_singleton_publishing_service/processor.py",
"copies": "1",
"size": "4655",
"license": "mit",
"hash": -5789365784431935000,
"line_mean": 30.6666666667,
"line_max": 79,
"alpha_frac": 0.5435016112,
"autogenerated": false,
"ratio": 3.7845528455284554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9818664864264328,
"avg_score": 0.0018779184928253256,
"num_lines": 147
} |
"""An NTM's memory implementation."""
import torch
import torch.nn.functional as F
from torch import nn
import numpy as np
def _convolve(w, s):
"""Circular convolution implementation."""
assert s.size(0) == 3
t = torch.cat([w[-1:], w, w[:1]])
c = F.conv1d(t.view(1, 1, -1), s.view(1, 1, -1)).view(-1)
return c
class NTMMemory(nn.Module):
    """Memory bank for NTM."""

    def __init__(self, N, M):
        """Initialize the NTM Memory matrix.

        The memory's dimensions are (batch_size x N x M).
        Each batch has its own memory matrix.

        :param N: Number of rows in the memory.
        :param M: Number of columns/features in the memory.
        """
        super(NTMMemory, self).__init__()

        self.N = N
        self.M = M

        # The memory bias allows the heads to learn how to initially address
        # memory locations by content.
        self.register_buffer('mem_bias', torch.Tensor(N, M))

        # Initialize memory bias.
        stdev = 1 / (np.sqrt(N + M))
        nn.init.uniform_(self.mem_bias, -stdev, stdev)

    def reset(self, batch_size):
        """Initialize memory from bias, for start-of-sequence."""
        self.batch_size = batch_size
        self.memory = self.mem_bias.clone().repeat(batch_size, 1, 1)

    def size(self):
        """Return the (N, M) dimensions of a single memory matrix."""
        return self.N, self.M

    def read(self, w):
        """Read from memory (according to section 3.1).

        :param w: (batch x N) addressing weights; returns a (batch x M) vector.
        """
        return torch.matmul(w.unsqueeze(1), self.memory).squeeze(1)

    def write(self, w, e, a):
        """Write to memory (according to section 3.2).

        :param w: (batch x N) addressing weights.
        :param e: (batch x M) erase vector.
        :param a: (batch x M) add vector.
        """
        self.prev_mem = self.memory
        # Fix: removed the uninitialized `torch.Tensor(batch, N, M)` that was
        # allocated here and immediately overwritten below (dead allocation).
        erase = torch.matmul(w.unsqueeze(-1), e.unsqueeze(1))
        add = torch.matmul(w.unsqueeze(-1), a.unsqueeze(1))
        self.memory = self.prev_mem * (1 - erase) + add

    def address(self, k, β, g, s, γ, w_prev):
        """NTM Addressing (according to section 3.3).

        Returns a softmax weighting over the rows of the memory matrix.

        :param k: The key vector.
        :param β: The key strength (focus).
        :param g: Scalar interpolation gate (with previous weighting).
        :param s: Shift weighting.
        :param γ: Sharpen weighting scalar.
        :param w_prev: The weighting produced in the previous time step.
        """
        # Content focus
        wc = self._similarity(k, β)

        # Location focus
        wg = self._interpolate(w_prev, wc, g)
        ŵ = self._shift(wg, s)
        w = self._sharpen(ŵ, γ)

        return w

    def _similarity(self, k, β):
        # Cosine similarity against every row, scaled by β, softmaxed over rows.
        # The 1e-16 terms guard against zero vectors.
        k = k.view(self.batch_size, 1, -1)
        w = F.softmax(β * F.cosine_similarity(self.memory + 1e-16, k + 1e-16, dim=-1), dim=1)
        return w

    def _interpolate(self, w_prev, wc, g):
        # Gate between the content weighting and the previous weighting.
        return g * wc + (1 - g) * w_prev

    def _shift(self, wg, s):
        # Fix: zeros_like keeps device/dtype consistent with wg (the original
        # torch.zeros(wg.size()) always allocated on the CPU default dtype).
        result = torch.zeros_like(wg)
        for b in range(self.batch_size):
            result[b] = _convolve(wg[b], s[b])
        return result

    def _sharpen(self, ŵ, γ):
        # Raise to γ and renormalize; 1e-16 avoids division by zero.
        w = ŵ ** γ
        w = torch.div(w, torch.sum(w, dim=1).view(-1, 1) + 1e-16)
        return w
| {
"repo_name": "loudinthecloud/pytorch-ntm",
"path": "ntm/memory.py",
"copies": "1",
"size": "3159",
"license": "bsd-3-clause",
"hash": 8435238546008898000,
"line_mean": 30.7676767677,
"line_max": 93,
"alpha_frac": 0.5720190779,
"autogenerated": false,
"ratio": 3.2931937172774868,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43652127951774866,
"avg_score": null,
"num_lines": null
} |
#annual summaries
import pandas as pd
import numpy as np
import matplotlib.pyplot as pylab
import os
import hashlib
import re
from datetime import datetime as dt
from StringIO import StringIO
import subprocess
def build_wallet(dataframe):
    '''
    Creates/updates a 'wallet' column in the dataframe based upon the files
    in ./wallets.

    Each ./wallets/*.csv file holds one regex fragment per line; the file
    name (minus extension) becomes the wallet label for any transaction
    whose description matches one of those fragments. Rows matching no
    wallet are labelled 'unallocated'.
    '''
    wallets = [a for a in os.listdir('./wallets') if a[-3:] == 'csv']
    bools = {}
    for file in wallets:
        key = file[:-4]
        # Fix: the original leaked an open file handle; 'with' closes it.
        # Lines are OR-ed into a single alternation regex.
        with open('./wallets/' + file, 'r') as fh:
            reg = fh.read().strip().replace('\n', '|')
        bools[key] = dataframe.desc.str.contains(reg)
    dataframe['wallet'] = 'unallocated'
    for key in bools.keys():
        # Fix: .loc avoids pandas chained-assignment (SettingWithCopy),
        # which may silently fail to write through on newer pandas.
        dataframe.loc[bools[key], 'wallet'] = key
    return dataframe
# --- Data import: load every ./csv/*.csv bank export into one DataFrame ---
# NOTE: this is a Python 2 script (print statements, StringIO import above).
#define some paths
cwd = os.getcwd()
print cwd
#scan import folder for csvs
filelist = [cwd+'/csv/'+a for a in os.listdir(cwd+'/csv/') if a.lower()[-4:] == '.csv']
#import current database
data = []
#loop over all csv files found
for index, file in enumerate(filelist):
    #strip account name from filename (text between the last '\w+-' match)
    account = re.findall(r'\w+-', file)[-1][:-1]
    print account
    #check for a viable filename
    if account not in ['veridian', 'mastercard', 'visa', 'savings']:
        print 'unable to parse filename - skipping'
        continue
    #read in csvs: first three columns are date, amount, description
    holder = pd.read_csv(file, names=['date', 'amount', 'desc'], delimiter=',', usecols=[0,1,2])
    holder['account'] = account
    data.append(holder)

data = pd.concat(data) #build master frame
data.date = pd.to_datetime(data.date, dayfirst=True) #reformat date column
data = data.set_index('date', drop=False)
data = build_wallet(data) #add wallet definitions

#build calander: 18 month-start checkpoints from July 2012
checkpoints = pd.date_range('2012-07-01', periods=18, freq='MS')
ncheckpoints = len(checkpoints) - 1

#view of incomes (positive amounts, excluding inter-account transfers)
income = data[(data.amount > 0) & (data.wallet != 'transfers')]
#view of costs (negative amounts, flipped to positive below)
costs = data[(data.amount < 0) & (data.wallet != 'transfers')]
# NOTE(review): this writes through a boolean-indexed view — pandas
# chained-assignment; confirm it actually mutates `costs` on this version.
costs['amount'] *= -1

# NOTE(review): `sorted` shadows the builtin; DataFrame.sort is the
# long-deprecated pre-0.17 pandas API.
sorted = data.sort(columns=['amount'], ascending=False)
print sorted[sorted['wallet'] == 'unallocated'].desc
#summary of all costs since start, on a per monthly bases
##########################################
# Build one resampled-by-month column per wallet, then a monthly income total.
cost_summary = []
income_summary = []
columns = []
for key, grp in costs.groupby(['wallet']):
    columns.append(key)
    cost_summary.append(grp.resample('M', how=sum))
summary = pd.concat(cost_summary, axis=1, keys=columns)
income_summary = income.resample("M", how='sum')

#stacked barchart: per-wallet monthly costs with total income overlaid
fig = pylab.figure(figsize=(8.27,11.69))
ax1 = fig.add_subplot(111)
pylab.plot(np.arange(len(income_summary.amount.values))+0.5, income_summary.amount.values, label='total income')
summary.plot(ax=ax1, x = summary.index.month, kind='bar', stacked=True)
handles, labels = pylab.gca().get_legend_handles_labels()
# reverse legend order so it matches the bar stacking order
pylab.legend(handles[::-1], labels[::-1], loc=9, labelspacing=0.2, prop={'size':10})
pylab.title("Summary of all expenses.")
pylab.xlabel('Month (2012-2013)')
pylab.ylabel("Amount ($)")
pylab.tight_layout()
pylab.savefig('all_expenses_summary.pdf')
############################################

#total income vs cost since start, on a per month bases
############################################
fig = pylab.figure(figsize=(8.27,11.69))
axes = [fig.add_subplot(311),fig.add_subplot(312),fig.add_subplot(313)]
#~ fig, axes = pylab.subplots(nrows=3, ncols=1)
data.resample('M', how=sum).plot(ax=axes[0], kind='bar')
axes[0].axes.get_xaxis().set_visible(False)
axes[0].set_title('Total Monthly Income')
axes[0].set_ylim(-6000,6000)
df = pd.concat([income.resample('M', how=sum), costs.resample('M', how=sum)], axis=1)
df.columns = ['income', 'costs']
df.plot(ax=axes[1], kind='bar', color=['k', 'r'])
axes[1].axes.get_xaxis().set_visible(False)
axes[1].set_title('Monthly Income Vs Cost')
axes[1].set_ylim(0,12000)
data.resample('M', how=sum).cumsum().plot(ax=axes[2])
axes[2].set_title('Cumulative Monthly Income')
axes[2].set_ylim(-10000,10000)
fig.tight_layout()
pylab.savefig('all_profit_loss_summary.pdf')
##########################################

#side by side bar chart, with each bar labelled by its wallet name
fig = pylab.figure(figsize=(8.27,11.69))
ax1 = fig.add_subplot(111)
summary.plot(ax=ax1, x = summary.index.month, kind='bar', legend=False)
handles, labels = pylab.gca().get_legend_handles_labels()
#~ pylab.legend(handles[::-1], labels[::-1], loc=9, labelspacing=0.2, prop={'size':10})
for index, rects in enumerate(ax1.containers):
    for rect in rects:
        height = rect.get_height()
        if not np.isnan(height):
            # label text: first comma-separated token of the legend label,
            # minus its leading character
            txt = labels[index].split(',')[0][1:]
            ax1.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%s'%(txt),
                ha='center', va='bottom', rotation='vertical', size=8)
pylab.title("Summary of all expenses.")
pylab.xlabel('Month (2012-2013)')
pylab.ylabel("Amount ($)")
pylab.tight_layout()
pylab.show()
#annual wallet pie chart: total cost per wallet, major (>$1000) wallets only
plot_amounts = []
plot_labels = []
for name, group in costs.groupby('wallet'):
    plot_labels.append(name)
    plot_amounts.append(group.amount.sum())
plot_amounts = np.array(plot_amounts)
plot_labels =np.array(plot_labels)
# filter labels before amounts — the mask must use the unfiltered amounts
plot_labels = plot_labels[plot_amounts > 1000]
plot_amounts = plot_amounts[plot_amounts > 1000]
pylab.figure()
pylab.pie(plot_amounts, labels=plot_labels, autopct='%1.1f%%', shadow=True, startangle=90, colors=('b', 'g', 'r', 'c', 'm', 'y'),)
pylab.title("Breakdown of all major costs since 1st july 2012")
pylab.savefig('total_cost_pie.pdf')

# Same breakdown for income sources over $1000.
plot_amounts = []
plot_labels = []
for name, group in income.groupby('wallet'):
    plot_labels.append(name)
    plot_amounts.append(group.amount.sum())
plot_amounts = np.array(plot_amounts)
plot_labels =np.array(plot_labels)
plot_labels = plot_labels[plot_amounts > 1000]
plot_amounts = plot_amounts[plot_amounts > 1000]
pylab.figure()
pylab.pie(plot_amounts, labels=plot_labels, autopct='%1.1f%%', shadow=True, startangle=90, colors=('b', 'g', 'r', 'c', 'm', 'y'),)
pylab.title("Breakdown of all major income sources since 1st july 2012")
pylab.savefig('total_income_pie.pdf')
#individual wallet summaries
#~ for name, group in costs.groupby('wallet'):
    #~ plot_amounts = []
    #~ plot_labels = []
    #~ for item, batch in group.groupby('desc'):
        #~ plot_amounts.append(batch.amount.sum())
        #~ plot_labels.append(item)
    #~ ind = np.argsort(plot_amounts)
    #~ plot_amounts = np.take(np.array(plot_amounts), ind)[::-1][:10]
    #~ plot_labels = np.take(np.array(plot_labels), ind)[::-1][:10]
    #~ pylab.figure()
    #~ pylab.pie(plot_amounts, labels=plot_labels, autopct='%1.1f%%', shadow=True, startangle=90, colors=('b', 'g', 'r', 'c', 'm', 'y'),)
    #~ pylab.title(name)

# Group costs by (year, month) of the date index.
monthly_costs = costs.groupby([ lambda x: x.year, lambda x: x.month])
# NOTE(review): month 9 is September, but the name and chart title below say
# October 2013 — one of the two looks wrong; confirm which was intended.
october2013 = monthly_costs.get_group((2013, 9))

# monthly wallet pie chart
plot_amounts = []
plot_labels = []
for name, group in october2013.groupby('wallet'):
    plot_labels.append(name)
    plot_amounts.append(group.amount.sum())
plot_amounts = np.array(plot_amounts)
plot_labels =np.array(plot_labels)
pylab.figure()
pylab.pie(plot_amounts, labels=plot_labels, autopct='%1.1f%%', shadow=True, startangle=90, colors=('b', 'g', 'r', 'c', 'm', 'y'),)
pylab.title("Breakdown of all major costs for October 2013")
pylab.savefig('oct2013_cost_pie.pdf')

#~ plot_amounts = []
#~ plot_labels = []
#~ for name, group in income.groupby('wallet'):
    #~ plot_labels.append(name)
    #~ plot_amounts.append(group.amount.sum())
#~ plot_amounts = np.array(plot_amounts)
#~ plot_labels =np.array(plot_labels)
#~ plot_labels = plot_labels[plot_amounts > 1000]
#~ plot_amounts = plot_amounts[plot_amounts > 1000]
#~ pylab.figure()
#~ pylab.pie(plot_amounts, labels=plot_labels, autopct='%1.1f%%', shadow=True, startangle=90, colors=('b', 'g', 'r', 'c', 'm', 'y'),)
#~ pylab.title("Breakdown of all major income sources for October 2013")
#~ pylab.savefig('oct2013_income_pie.pdf')

#individual wallet summaries: one pie per wallet, slices sorted descending
#~ total_costs = october2013.sum()['amount']
#~ for name, group in october2013.groupby('wallet'):
for name, group in costs.groupby('wallet'):
    plot_amounts = []
    plot_labels = []
    for item, batch in group.groupby('desc'):
        plot_amounts.append(batch.amount.sum())
        plot_labels.append(item)
    ind = np.argsort(plot_amounts)
    plot_amounts = np.take(np.array(plot_amounts), ind)[::-1]
    plot_labels = np.take(np.array(plot_labels), ind)[::-1]
    pylab.figure()
    pylab.pie(plot_amounts, labels=plot_labels, autopct='%1.1f%%', shadow=True, startangle=90, colors=('b', 'g', 'r', 'c', 'm', 'y'),)
    pylab.title(name)
    #~ fig = pylab.figure(figsize=(8.27,11.69))
    #~ ax1 = fig.add_subplot(111)
    #~ summary.plot(ax=ax1, x = summary.index.month, kind='bar', legend=False)
pylab.show()
#~ fig, axes = pylab.subplots(nrows=2, ncols=1)
#~ group.resample('M', how=sum).plot(kind='bar',ax=axes[0])
#~ axes[0].set_title(name)
#~ group.resample('M', how=sum).fillna(0).cumsum().plot(ax=axes[1])
#~ monthly_summary = {}
#~ profit_loss = []
#~ duration = []
#~ for i in range(ncheckpoints): #build monthly summaries and wallets
#~ start = checkpoints[i]
#~ end = checkpoints[i+1]
#~ duration.append((end - start).days)
#~ print start.month
#~ print duration
#~ monthly_data = data[(data.date > start) & (data.date < end)]
#~ print ''
#========================
# monthly reviews
#========================
# for each month calculate total income and expenditures
# then split the monthy dataset into wallets
# for each month plot
# total income vs total expenditure (bar graph)
# breakdown of total income vs total expenditure (pie charts)
#~ monthly_slice = data[(data.date > start) & (data.date < end)]
#~ groups = monthly_slice.groupby('wallet')
#~ for key in groups.groups.keys():
#~ print key
#~ monthly_summary[start] =
#~ pylab.show()
#~ monthly_costs = monthly_slice[monthly_slice.amount < 0]
#~ monthly_incomes = monthly_slice[monthly_slice.amount > 0]
#~ profit_loss.append(monthly_costs.amount.sum() + monthly_incomes.amount.sum())
#~ pylab.bar(checkpoints[:-1], profit_loss, width=duration, label='profit/loss')
#~ pylab.plot(checkpoints[:-1]+1, np.cumsum(profit_loss), color='k', lw=2, label='cumulative')
#~ pylab.grid()
#~ pylab.xlabel('month')
#~ pylab.ylabel('profit/loss ($)')
#~ pylab.title('Monthy Profit/Loss and Running Total')
#~ pylab.legend(loc='best')
#~ pylab.show()
#~ dataset[start] = {'costs': build_wallet(monthly_costs), 'incomes': build_wallet(monthly_incomes)}
#~ tmp = dataset.keys()[1]
#~ print dataset[tmp]['costs']['unallocated']
#~ start_date = "24-9-2013"
#~ recent = data[pd.to_datetime(data.date, dayfirst=True) > pd.to_datetime(start_date, dayfirst=True)]
#~ duration = int((pd.to_datetime(data.date, dayfirst=True).max() - pd.to_datetime(start_date, dayfirst=True)).days)
#~ print ''
#~ document = """\\documentclass{{report}}
#~ \\usepackage[pdftex]{{graphicx}}
#~ \\usepackage[margin=1in]{{geometry}}
#~ \\usepackage{{booktabs}}
#~ \\usepackage{{soul}}
#~ \\title{{ {title} }}
#~ \\author{{ {author} }}
#~ \\begin{{document}}
#~ \\maketitle
#~ \\tableofcontents
#~ \\newpage
#~ \\section{{Summary}}
#~ This document summarises expenditure since last pay.
#~ \\begin{{table}}[h]
#~ \\centering
#~ \\begin{{tabular}}{{cc}}
#~ start & {start} \\\\
#~ finish & {finish} \\\\
#~ & \\\\
#~ spent total & {s_total} \\\\
#~ spent / day & \hl{{ {s_day} }}\\\\
#~ & \\\\
#~ income total & {i_total} \\\\
#~ income / day & \\hl{{ {i_day} }} \\\\
#~ \\end{{tabular}}
#~ \\end{{table}}
#~ \\newpage
#~ """
#~ info = {
#~ 'title': 'Household Expenditure Review',
#~ 'author': 'S. Fletcher',
#~ 'start': start_date,
#~ 'finish': "14-10-2013",
#~ 's_total': '\$%.2f' %-recent.amount.sum(),
#~ 's_day': '\$%.2f' %(-recent.amount.sum()/duration),
#~ 'i_total' : '\$4067.53',
#~ 'i_day': '\$%.2f' %(4067.53/31),
#~ }
#~ document = document.format(**info)
#~ keylist = wallet.keys()
#~ keylist.remove('transfers')
#~ keylist.remove('exclude')
#~ plot_labels = []
#~ plot_amounts = []
#~ total_amounts = []
#~ for key in keylist:
#~ document += '\\section{%s}\n' %key
#~ document += recent[wallet[key]].to_latex().replace('#','')
#~ document += '\\newline\n'
#~ document += 'Total: \$%.2f \\newline\n' %(recent[wallet[key]].amount.sum())
#~ document += 'Per day: \$%.2f\n' %(recent[wallet[key]].amount.sum()/31.)
#~ document += '\\newpage\n\n'
#~ plot_labels.append(key)
#~ plot_amounts.append(recent[wallet[key]].amount.sum())
#~ total_amounts.append(data[wallet[key]].amount.sum())
#~ plot_amounts = np.array(plot_amounts)/-4067
#~ total_amounts = np.array(total_amounts)/np.sum(total_amounts)
#~ pylab.pie(plot_amounts, labels=plot_labels, autopct='%1.1f%%', shadow=True, startangle=90, colors=('b', 'g', 'r', 'c', 'm', 'y'),)
#~ pylab.savefig('pie.pdf')
#~ pylab.figure()
#~ recent = recent[~wallet['exclude'] & ~wallet['transfers']]
#~ recent.date =pd.to_datetime(recent['date'], dayfirst=True)
#~ recent = recent.sort('date')
#~ recent['cumsum'] = -recent.amount.cumsum()
#~ recent.plot(x='date', y='cumsum', kind='bar', label='actual')
#~ pylab.gcf().autofmt_xdate()
#~ ax = pylab.gca()
#~ ax.set_xticklabels([], minor=False)
#~ ax.plot([0,92], [131, 31*131], lw=2, color='k', label='required')
#~ pylab.legend(loc='best')
#~ pylab.title('Running sum of expenditure over time')
#~ pylab.xlabel('date')
#~ pylab.ylabel('total spent')
#~ pylab.ylim(0, 4000)
#~ pylab.xlim(0, 90)
#~ pylab.savefig('bar.pdf')
#~ document += '''
#~ \\begin{figure}[p]
#~ \\centering
#~ \\includegraphics[width=0.8\\textwidth]{pie.pdf}
#~ \\caption{Pie chart of expenditures since last pay}
#~ \\label{fig:pie}
#~ \\end{figure}
#~ \\begin{figure}[p]
#~ \\centering
#~ \\includegraphics[width=0.8\\textwidth]{bar.pdf}
#~ \\caption{Rate of Expenditure}
#~ \\label{fig:rate}
#~ \\end{figure}
#~ \\end{document}
#~ '''
#~ open('budget.tex', 'w').write(document)
#~ subprocess.call('pdflatex budget.tex', shell=True)
#~
#~
#~ pylab.show()
#~ print pd.to_datetime(['10/10/2013'], dayfirst=True)
#~ data[account] =
#~ if data[account]: print True
#append to appropriate tables
#~ ???
#~ tmp = data.get(account, [])
#~ tmp.append(store)
#~ data[account] = pd.concat(tmp)
#~
#update unique key
#~ keylist = data.date.astype(np.int64).astype(np.str) + \
#~ data.amount.astype(np.str) + \
#~ data.desc.astype(np.str) + \
#~ data.total.astype(np.str)
#~ data['id'] = keylist.map(lambda x: hashlib.md5(x).hexdigest())
#~ idlist = list(data.id.values)
#~ print len(idlist)
#~ print len(list(set(idlist)))
#~ store = pd.HDFStore('budget.h5')
#~ store.append('accounts', data, data_columns=['date', 'id', 'amount', 'desc'])
#~ print store['/accounts/veridian'].head()
#~ print store
#append to appropriate tables if unique | {
"repo_name": "stuliveshere/Python-Wallets",
"path": "wallet/build_report.py",
"copies": "1",
"size": "15113",
"license": "mit",
"hash": -5811005723400855000,
"line_mean": 27.871541502,
"line_max": 134,
"alpha_frac": 0.6250909813,
"autogenerated": false,
"ratio": 2.8275023386342375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3952593319934237,
"avg_score": null,
"num_lines": null
} |
""" An oauthlib.oauth2 request valditator for Vumi Go.
"""
from oauthlib.oauth2 import RequestValidator, WebApplicationServer
class StaticAuthValidator(RequestValidator):
    """ An oauthlib.oauth2 request validator.

    A validator built on top of a dictionary of access tokens.

    :param dict auth_store:
        The authentication store, a dictionary mapping access
        tokens to credentials.

    Example auth_store::

        {
            "ac3sst0k3n": {
                "owner_id": "0wn3r1d",
                "client_id": "cl13nt51d",
                "scopes": ["scope1", "scope2"],
            },
        }
    """

    def __init__(self, auth_store=None):
        self.auth_store = {} if auth_store is None else auth_store

    def validate_bearer_token(self, token, scopes, request):
        """Accept the request only when *token* is a known access token,
        copying the stored credentials onto the request object."""
        request.token = token
        credentials = None if token is None else self.auth_store.get(token)
        if credentials is None:
            return False
        request.owner_id = credentials["owner_id"]
        request.client_id = credentials["client_id"]
        request.scopes = credentials["scopes"]
        return True

    def save_bearer_token(self, token, request, *args, **kw):
        """Would persist the bearer token in the auth_store; unsupported."""
        raise NotImplementedError()

    def validate_client_id(self, client_id, request, *args, **kwargs):
        """Simple validity check: does the client exist and is it not banned?"""
        raise NotImplementedError()

    def validate_redirect_uri(self, client_id, redirect_uri, request,
                              *args, **kwargs):
        """Is the client allowed to use the supplied redirect_uri? i.e. has
        the client previously registered this EXACT redirect uri."""
        raise NotImplementedError()

    def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
        """The redirect used if none has been supplied. Prefer clients to
        pre-register a redirect uri rather than supplying one per request."""
        raise NotImplementedError()

    def validate_scopes(self, client_id, scopes, client, request,
                        *args, **kwargs):
        """Is the client allowed to access the requested scopes?"""
        raise NotImplementedError()

    def get_default_scopes(self, client_id, request, *args, **kwargs):
        """Scopes a client will authorize for if none are supplied in the
        authorization request."""
        raise NotImplementedError()

    def validate_response_type(self, client_id, response_type, client, request,
                               *args, **kwargs):
        """Clients may only use the one response type tied to their single
        allowed grant type; here it must be "code"."""
        raise NotImplementedError()
def static_web_authenticator(auth_store):
    """ Return a Vumi Go static web authenticator.

    Wraps *auth_store* in a StaticAuthValidator and hands it to oauthlib's
    WebApplicationServer.
    """
    return WebApplicationServer(StaticAuthValidator(auth_store))
| {
"repo_name": "praekelt/go-auth",
"path": "go_auth/validator.py",
"copies": "1",
"size": "3068",
"license": "bsd-3-clause",
"hash": 8799157789749055000,
"line_mean": 34.2643678161,
"line_max": 79,
"alpha_frac": 0.6261408083,
"autogenerated": false,
"ratio": 4.382857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5508997951157143,
"avg_score": null,
"num_lines": null
} |
#An object can be a class, id, element, etc, or the contents such as background-color, width, etc.
import Variable
import Error
# processObject parses .syn file contents into a list. The parameter string is one of the lines from the .syn file
# Module-level parser state shared by init(), process() and export().
# Fix: the original wrapped these in `global` statements, which are no-ops
# at module scope and have been dropped.
prevString = ""      # line handled by the previous call to process()
currentString = ""   # placeholder; not currently read by the parser
elements = []        # parsed parents: {"name": ..., "children": [...]}
prevName = []        # stack of ancestor names for the current nesting level
element = -1         # index into `elements` currently receiving children
# processObject parses .syn file contents into a list. The parameter string is one of the lines from the .syn file
def init():
    """Reset the module-level parser state before processing a new file.

    Fix: the original also assigned a local ``currentString = ""`` which was
    neither declared global nor used — dead code, removed.
    """
    global prevString
    global prevName
    global elements
    global element
    prevString = ""
    elements = []
    prevName = []
    element = -1
#Process the given line of the file.
def process(string):
    """Feed one line of the .syn file into the parser state machine.

    The indentation change relative to the previous line decides whether the
    previous line becomes a new parent element (deeper) or whether we pop
    back up one or more nesting levels (shallower). Every line is appended
    to the current element's children; lines later identified as parents are
    trimmed back out via listRemove().
    """
    global prevString
    global prevName
    global elements
    global element
    currentIndentation = indentation(string)
    prevIndentation = indentation(prevString)
    if (currentIndentation > prevIndentation): # Previous line must be a parent.
        if (element == None): #Ignore the first element, as it is invalid.
            element = -1
        else:
            prevName.append(prevString) # Register the previous string as a new parent element.
            if (len(elements) > 0):
                elements[element]["children"] = listRemove(1,elements[element]["children"]) # Delete this parent from the children of the previous parent.
        elements.append({"name": " ".join(prevName), "children":[]}) # Put the prev string (the parent) in the list of elements.
        element = lastIndex(elements)
    elif (currentIndentation < prevIndentation): # This means we are not a child of the previous parent(s), and are going down a single or multiple parent.
        # Remove the previous parent(s), and go down a certain amount of elements
        element -= prevIndentation-currentIndentation
        # NOTE(review): this iterates the 2-tuple (0, delta), i.e. always
        # exactly twice regardless of delta — probably meant range(...); confirm.
        for i in 0,prevIndentation-currentIndentation: # Delete however many elements we went back (counted via indentations)
            if (len(prevName) != 0):
                prevName.pop()
    if (len(elements) > 0): # With the way this parser works, we have to add every line to the children of the previous element.
        elements[element]["children"].append(string)
    prevString = string # At the end of each cycle, this sets the previous string
def export(cssPath):
    """Compress the parsed elements and write them to `cssPath` as CSS.

    Each element becomes a ``name{child;child;...}`` rule; every child is run
    through Variable.processVariable to produce a ``property:value`` pair.
    """
    global elements
    elements = compressList(elements)
    # Context manager guarantees the file is closed even if processing raises
    # (the original leaked the handle on any exception before close()).
    with open(cssPath, "w") as cssFile:
        for element in elements:  # Go through each element, which is the parents.
            cssFile.write(element["name"] + "{")  # Write the parent, followed by a { bracket
            for child in element["children"]:  # Go through each child in the elements children.
                var, tok = Variable.processVariable(child)
                tok = list(tok)
                child = var + ":" + tok[0]
                if tok[1] is not None:  # Many variables have no extension; append it only if present.
                    child += tok[1]
                cssFile.write(child + ";")  # Write the children (variables)
            cssFile.write("}")  # Close the parent.
#------ Tidbits ------#
def combineList(sourceList):
    """Merge elements sharing the same "name" into a single element.

    The original implementation compared every element against itself (same
    index, so names always matched) and popped entries from the list while
    iterating it, corrupting the result. This version folds each duplicate's
    children into the first occurrence, preserving order, and returns the
    merged list.
    """
    merged = []
    indexByName = {}
    for element in sourceList:
        name = element["name"]
        if name in indexByName:
            # Duplicate name: take its children into the first occurrence.
            merged[indexByName[name]]["children"] += element["children"]
        else:
            indexByName[name] = len(merged)
            merged.append(element)
    return merged
def compressList(sourceList):
    """Strip indentation and line breaks from every name and child, in place."""
    for parent in sourceList:
        parent["name"] = strip(parent["name"])
        children = parent["children"]
        for position, child in enumerate(children):
            children[position] = strip(child)
    return sourceList
def lastIndex(seq):
    """Return the index of the last item in `seq` (-1 for an empty sequence).

    The parameter was renamed from `list`, which shadowed the builtin; all
    call sites in this module pass it positionally.
    """
    return len(seq) - 1
def indentation(string):
    """Return the number of leading whitespace characters in `string`."""
    count = 0
    while count < len(string) and string[count].isspace():
        count += 1
    return count
def strip(string):
    """Strip all leading and trailing whitespace (indents, line breaks).

    The original chained .lstrip().strip("\n").strip(" ").strip("\t"), whose
    fixed order missed interleaved trailing whitespace such as "x\n " or any
    "\r"; str.strip() removes every whitespace ordering, which matches the
    documented intent.
    """
    return string.strip()
def listRemove(amount, seq):
    """Return a copy of `seq` with the last `amount` items removed.

    Fixes the amount == 0 case: seq[:-0] is seq[:0], so the original wrongly
    returned an empty list instead of the whole sequence. Also renames the
    `list` parameter, which shadowed the builtin.
    """
    if amount <= 0:
        return seq[:]
    return seq[:-amount]
| {
"repo_name": "Wezro/Syntax",
"path": "src/Objects.py",
"copies": "1",
"size": "4676",
"license": "mit",
"hash": -6505067442664966000,
"line_mean": 37.6446280992,
"line_max": 196,
"alpha_frac": 0.6755774166,
"autogenerated": false,
"ratio": 4.2202166064981945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5395794023098195,
"avg_score": null,
"num_lines": null
} |
"""An object-local variable management scheme."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import functools
import json
import weakref
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_io_ops as io_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.util import nest
from tensorflow.python.util import serialization
from tensorflow.python.util import tf_decorator
# Key where the object graph proto is saved in a TensorBundle
OBJECT_GRAPH_PROTO_KEY = "_CHECKPOINTABLE_OBJECT_GRAPH"
# A key indicating a variable's value in an object's checkpointed Tensors
# (Checkpointable._gather_saveables_for_checkpoint). If this is the only key and
# the object has no dependencies, then its value may be restored on object
# creation (avoiding double assignment when executing eagerly).
VARIABLE_VALUE_KEY = "VARIABLE_VALUE"
# NOTE(review): presumably the attribute key under which an object's JSON
# config is checkpointed (the `serialization` import suggests so) -- confirm.
OBJECT_CONFIG_JSON_KEY = "OBJECT_CONFIG_JSON"
# A named edge in the dependency graph: a Checkpointable (`ref`) reachable
# from its parent under the local name `name`.
CheckpointableReference = collections.namedtuple(
    "CheckpointableReference",
    [
        # The local name for this dependency.
        "name",
        # The Checkpointable object being referenced.
        "ref"
    ])
class CheckpointInitialValue(ops.Tensor):
  """Tensor wrapper for managing update UIDs in `Variables`.
  When supplied as an initial value, objects of this type let a `Variable`
  (`Variable`, `ResourceVariable`, etc.) know the UID of the restore the initial
  value came from. This allows deferred restorations to be sequenced in the
  order the user specified them, and lets us fall back on assignment if an
  initial value is not set (e.g. due to a custom getter interfering).
  See comments in _add_variable_with_custom_getter for more information about
  how `CheckpointInitialValue` is used.
  """
  def __init__(self, checkpoint_position, shape=None):
    # The value read from the checkpoint for this variable.
    self.wrapped_value = checkpoint_position.value_tensors()[
        VARIABLE_VALUE_KEY]
    if shape:
      # We need to set the static shape information on the initializer if
      # possible so we don't get a variable with an unknown shape.
      self.wrapped_value.set_shape(shape)
    self._checkpoint_position = checkpoint_position
  def __getattr__(self, attr):
    # Delegate unknown attributes to the wrapped Tensor so this object can be
    # used anywhere the restored value itself could be.
    try:
      return getattr(self.wrapped_value, attr)
    except AttributeError:
      return self.__getattribute__(attr)
  @property
  def checkpoint_position(self):
    """The `_CheckpointPosition` this initial value was restored from."""
    return self._checkpoint_position
class NoRestoreSaveable(saveable_object.SaveableObject):
  """Embeds a tensor in a checkpoint with no restore ops."""
  def __init__(self, tensor, name, dtype=None):
    """Save `tensor` under `name`; restoring it is a no-op."""
    save_spec = saveable_object.SaveSpec(tensor, "", name, dtype=dtype)
    super(NoRestoreSaveable, self).__init__(tensor, [save_spec], name)
  def restore(self, restored_tensors, restored_shapes):
    """Ignore restored values: this saveable is write-only."""
    del restored_tensors, restored_shapes  # Unused; nothing to restore.
    return control_flow_ops.no_op()
# six.add_metaclass registers the ABC metaclass in a way that works under both
# Python 2 and Python 3.
@six.add_metaclass(abc.ABCMeta)
class PythonStateSaveable(saveable_object.SaveableObject):
  """An interface for saving/restoring volatile Python state."""
  @abc.abstractmethod
  def feed_dict_additions(self):
    """When running a graph, indicates fresh state to feed.
    Returns:
      A dictionary mapping `Tensor`s to current Python state.
    """
    pass
  @abc.abstractmethod
  def freeze(self):
    """Create a new `SaveableObject` which freezes current state as a constant.
    Used when executing eagerly to embed the current state as a constant, or
    when creating a static tf.train.Saver with the frozen current Python state.
    Returns:
      A `SaveableObject` which is not a `PythonStateSaveable` instance (i.e. has
      no Python state associated with it).
    """
    pass
class PythonStringStateSaveable(PythonStateSaveable):
  """Saves Python state in a checkpoint."""
  def __init__(self, name, state_callback, restore_callback=None):
    """Configure saving.
    Args:
      name: The checkpoint key to write to.
      state_callback: A function taking no arguments which returns a
        string. This function is run every time a checkpoint is written.
      restore_callback: A function taking a Python string, used to restore
        state. Optional; defaults to doing nothing, in which case it is ignored
        by status assertions such as assert_consumed().
    """
    self._has_trivial_state_callback = (restore_callback is None)
    def _state_callback_wrapper():
      # Run the user's callback in the outermost context so any ops it creates
      # are not trapped inside an inner (e.g. function-building) graph.
      with ops.init_scope():
        return state_callback()
    self._state_callback = _state_callback_wrapper
    self._restore_callback = restore_callback
    with ops.device("/cpu:0"):
      # Placeholder-like empty constant; the real value is supplied through
      # feed_dict_additions() each time a checkpoint is written.
      self._save_string = constant_op.constant("", dtype=dtypes.string)
    spec = saveable_object.SaveSpec(
        self._save_string, "", name, dtype=dtypes.string)
    super(PythonStringStateSaveable, self).__init__(
        self._save_string, [spec], name)
  @property
  def optional_restore(self):
    """For values with no restore, relaxes assert_consumed()."""
    return self._has_trivial_state_callback
  def feed_dict_additions(self):
    """When running a graph, indicates fresh state to feed."""
    return {self._save_string: self._state_callback()}
  def freeze(self):
    """Create a frozen `SaveableObject` which saves the current state."""
    def _constant_state():
      return constant_op.constant(self._state_callback(), dtype=dtypes.string)
    # The callable (not a Tensor) is passed so the constant captures the state
    # current at save time rather than at freeze time.
    return NoRestoreSaveable(
        tensor=_constant_state,
        dtype=dtypes.string,
        name=self.name)
  def python_restore(self, restored_strings):
    """Called to restore Python state."""
    if self._restore_callback:
      restored, = restored_strings
      self._restore_callback(restored)
  def restore(self, restored_tensors, restored_shapes):
    """Called to restore TensorFlow state (nothing to do)."""
    return control_flow_ops.no_op()
class _CheckpointPosition(object):
  """Indicates a position within a `_Checkpoint`."""
  def __init__(self, checkpoint, proto_id):
    """Specify an object within a checkpoint.
    Args:
      checkpoint: A _Checkpoint object.
      proto_id: The index of this object in CheckpointableObjectGraph.nodes.
    """
    self._checkpoint = checkpoint
    self._proto_id = proto_id
  def restore(self, checkpointable):
    """Restore this value into `checkpointable`."""
    # init_scope lifts op creation out of any function-building graph so the
    # restore runs in the outermost (graph or eager) context.
    with ops.init_scope():
      if self.bind_object(checkpointable):
        # This object's correspondence with a checkpointed object is new, so
        # process deferred restorations for it and its dependencies.
        restore_ops = checkpointable._restore_from_checkpoint_position(self)  # pylint: disable=protected-access
        if restore_ops:
          self._checkpoint.new_restore_ops(restore_ops)
  def bind_object(self, checkpointable):
    """Set a checkpoint<->object correspondence and process slot variables.
    Args:
      checkpointable: The object to record a correspondence for.
    Returns:
      True if this is a new assignment, False if this object has already been
      mapped to a checkpointed `Object` proto.
    Raises:
      AssertionError: If another object is already bound to the `Object` proto.
    """
    checkpoint = self.checkpoint
    checkpoint.all_python_objects.add(checkpointable)
    current_assignment = checkpoint.object_by_proto_id.get(self._proto_id, None)
    if current_assignment is None:
      checkpoint.object_by_proto_id[self._proto_id] = checkpointable
      # Slot restorations that were waiting for this object (an optimizer) to
      # be tracked can now be processed.
      for deferred_slot_restoration in (
          checkpoint.deferred_slot_restorations.pop(self._proto_id, ())):
        checkpointable._create_or_restore_slot_variable(  # pylint: disable=protected-access
            slot_variable_position=_CheckpointPosition(
                checkpoint=checkpoint,
                proto_id=deferred_slot_restoration.slot_variable_id),
            variable=deferred_slot_restoration.original_variable,
            slot_name=deferred_slot_restoration.slot_name)
      for slot_restoration in checkpoint.slot_restorations.pop(
          self._proto_id, ()):
        optimizer_object = checkpoint.object_by_proto_id.get(
            slot_restoration.optimizer_id, None)
        if optimizer_object is None:
          # The optimizer has not yet been created or tracked. Record in the
          # checkpoint that the slot variables need to be restored when it is.
          checkpoint.deferred_slot_restorations.setdefault(
              slot_restoration.optimizer_id, []).append(
                  _DeferredSlotVariableRestoration(
                      original_variable=checkpointable,
                      slot_variable_id=slot_restoration.slot_variable_id,
                      slot_name=slot_restoration.slot_name))
        else:
          optimizer_object._create_or_restore_slot_variable(  # pylint: disable=protected-access
              slot_variable_position=_CheckpointPosition(
                  checkpoint=checkpoint,
                  proto_id=slot_restoration.slot_variable_id),
              variable=checkpointable,
              slot_name=slot_restoration.slot_name)
      return True  # New assignment
    else:
      # The object was already mapped for this checkpoint load, which means
      # we don't need to do anything besides check that the mapping is
      # consistent (if the dependency DAG is not a tree then there are
      # multiple paths to the same object).
      if current_assignment is not checkpointable:
        logging.warning(
            ("Inconsistent references when loading the checkpoint into this "
             "object graph. Either the Checkpointable object references in the "
             "Python program have changed in an incompatible way, or the "
             "checkpoint was generated in an incompatible program.\n\nTwo "
             "checkpoint references resolved to different objects (%s and %s).")
            % (current_assignment, checkpointable))
      return False  # Not a new assignment
  def is_simple_variable(self):
    """Determine whether this value is restorable with a Tensor initializer."""
    attributes = self.object_proto.attributes
    return (len(attributes) == 1
            and attributes[0].name == VARIABLE_VALUE_KEY
            and not self.object_proto.children)
  def value_tensors(self):
    """Create value `Tensor`s for this object's attributes.
    Does not require that the Python object has been created. Used for
    restore-on-create when executing eagerly.
    Returns:
      A dictionary mapping from object attribute names to `Tensor`s.
    """
    value_tensors = {}
    for serialized_tensor in self.object_proto.attributes:
      checkpoint_key = serialized_tensor.checkpoint_key
      dtype = self._checkpoint.dtype_map[checkpoint_key]
      base_type = dtype.base_dtype
      with ops.init_scope():
        with ops.device("/cpu:0"):
          # Run the restore itself on the CPU.
          value, = io_ops.restore_v2(
              prefix=self._checkpoint.save_path_tensor,
              tensor_names=[checkpoint_key],
              shape_and_slices=[""],
              dtypes=[base_type],
              name="%s_checkpoint_read" % (serialized_tensor.name,))
        # Copy the value to the current device if necessary.
        value_tensors[serialized_tensor.name] = array_ops.identity(value)
    return value_tensors
  def _gather_ops_or_named_saveables(self):
    """Looks up or creates SaveableObjects which don't have cached ops."""
    saveables = self.checkpointable._gather_saveables_for_checkpoint()  # pylint: disable=protected-access
    # Name saveables based on the name this object had when it was checkpointed.
    named_saveables = {}
    python_saveables = []
    existing_restore_ops = []
    for serialized_tensor in self.object_proto.attributes:
      if context.executing_eagerly():
        existing_op = None
      else:
        existing_op = self._checkpoint.restore_ops_by_name.get(
            serialized_tensor.checkpoint_key, None)
      if existing_op is not None:
        existing_restore_ops.append(existing_op)
        continue
      # Only if we don't have cached ops for this SaveableObject, we'll see if
      # the SaveableObject itself has been cached. If not, we'll make it, and
      # either way we'll extract new ops from it (or if it has Python state to
      # restore, we'll run that).
      if self._checkpoint.saveable_object_cache is None:
        # No SaveableObject caching when executing eagerly.
        saveable = None
      else:
        # If we've already created and cached a SaveableObject for this
        # attribute, we can re-use it to avoid re-creating some ops when graph
        # building.
        saveable_list = self._checkpoint.saveable_object_cache.get(
            self.checkpointable, {}).get(serialized_tensor.name, (None,))
        if len(saveable_list) == 1:
          # Almost every attribute will have exactly one SaveableObject.
          saveable, = saveable_list
        else:
          # Don't use cached SaveableObjects for partitioned variables, which is
          # the only case where we'd have a list of SaveableObjects. Op caching
          # will catch them.
          saveable = None
      if saveable is not None:
        # The name of this attribute has changed, so we need to re-generate
        # the SaveableObject.
        if serialized_tensor.checkpoint_key not in saveable.name:
          saveable = None
          # The whole cache entry for this object is stale, so drop it.
          # NOTE(review): the `break` skips this object's remaining
          # attributes on this pass -- confirm callers re-gather afterwards.
          del self._checkpoint.saveable_object_cache[self.checkpointable]
          break
      if saveable is None:
        # If there was no cached SaveableObject, we should check if the Python
        # object has the attribute.
        saveable_factory = saveables.get(serialized_tensor.name, None)
        if saveable_factory is None:
          # Purposefully does not throw an exception if attributes have been
          # added or deleted. Stores unused attributes so an exception can be
          # raised if the user decides to check that everything in the
          # checkpoint was loaded.
          if not serialized_tensor.optional_restore:
            self._checkpoint.unused_attributes.setdefault(
                self.checkpointable, []).append(serialized_tensor.name)
          continue
        if callable(saveable_factory):
          saveable = saveable_factory(name=serialized_tensor.checkpoint_key)
        else:
          saveable = saveable_factory
        if self._checkpoint.saveable_object_cache is not None:
          self._checkpoint.saveable_object_cache.setdefault(
              self.checkpointable, {})[serialized_tensor.name] = [saveable]
      if isinstance(saveable, PythonStateSaveable):
        python_saveables.append(saveable)
      else:
        named_saveables[serialized_tensor.checkpoint_key] = saveable
    return existing_restore_ops, named_saveables, python_saveables
  def restore_ops(self):
    """Create or fetch restore ops for this object's attributes.
    Requires that the `Checkpointable` Python object has been bound to an object
    ID in the checkpoint.
    Returns:
      A list of operations when graph building, or an empty list when executing
      eagerly.
    """
    (restore_ops,
     tensor_saveables,
     python_saveables) = self._gather_ops_or_named_saveables()
    restore_ops.extend(self._checkpoint.restore_saveables(
        tensor_saveables, python_saveables))
    return restore_ops
  @property
  def checkpoint(self):
    """The `_Checkpoint` this position belongs to."""
    return self._checkpoint
  @property
  def checkpointable(self):
    """The Python object bound to this proto id (set by bind_object)."""
    return self._checkpoint.object_by_proto_id[self._proto_id]
  @property
  def object_proto(self):
    """The `Object` proto node for this position in the object graph."""
    return self._checkpoint.object_graph_proto.nodes[self._proto_id]
  @property
  def restore_uid(self):
    """The UID of the restore this position is part of."""
    return self._checkpoint.restore_uid
  def __repr__(self):
    return repr(self.object_proto)
# A slot-variable restoration waiting for its optimizer object to be created
# and tracked (see _CheckpointPosition.bind_object).
_DeferredSlotVariableRestoration = collections.namedtuple(
    "_DeferredSlotVariableRestoration",
    [
        # The variable the slot belongs to.
        "original_variable",
        # The checkpoint proto id of the slot variable.
        "slot_variable_id",
        # The optimizer-local name of the slot (e.g. "momentum").
        "slot_name",
    ]
)
_SlotVariableRestoration = collections.namedtuple(
    "_SlotVariableRestoration",
    [
        # The checkpoint proto id of the optimizer object.
        "optimizer_id",
        # The checkpoint proto id of the slot variable.
        "slot_variable_id",
        # The optimizer-local name of the slot.
        "slot_name",
    ])
def no_automatic_dependency_tracking(method):
  """Disables automatic dependency tracking on attribute assignment.
  Use to decorate any method of a Checkpointable object. Attribute assignment in
  that method will not add dependencies (also respected in Model). Harmless if
  used in a class which does not do automatic dependency tracking (which means
  it's safe to use in base classes which may have subclasses which also inherit
  from Checkpointable).
  Args:
    method: The method to decorate.
  Returns:
    A decorated method which sets and un-sets automatic dependency tracking for
    the object the method is called on (not thread safe).
  """
  def _method_wrapper(self, *args, **kwargs):
    # Default to True so the decorator is harmless on objects that never
    # initialized the flag.
    previous_value = getattr(self, "_setattr_tracking", True)
    self._setattr_tracking = False  # pylint: disable=protected-access
    # NOTE(review): the wrapped method's return value is discarded; decorated
    # methods are used for their side effects only.
    try:
      method(self, *args, **kwargs)
    finally:
      # Always restore the previous flag, even if `method` raises.
      self._setattr_tracking = previous_value  # pylint: disable=protected-access
  # make_decorator preserves the wrapped method's name/doc/signature metadata.
  return tf_decorator.make_decorator(
      target=method, decorator_func=_method_wrapper)
class Checkpointable(object):
"""Base class for `Checkpointable` objects without automatic dependencies.
This class has no __setattr__ override for performance reasons. Dependencies
must be added explicitly. Unless attribute assignment is performance-critical,
use `AutoCheckpointable` instead. Use `Checkpointable` for `isinstance`
checks.
"""
# Checkpointable does not do automatic dependency tracking, but uses the
# no_automatic_dependency_tracking decorator so it can avoid adding
# dependencies if a subclass is Checkpointable / inherits from Model (both of
# which have __setattr__ overrides).
  @no_automatic_dependency_tracking
  def _maybe_initialize_checkpointable(self):
    """Initialize dependency management.
    Not __init__, since most objects will forget to call it.
    """
    if hasattr(self, "_unconditional_checkpoint_dependencies"):
      # __init__ already called. This check means that we don't need
      # Checkpointable.__init__() in the constructor of every TensorFlow object.
      return
    # A list of CheckpointableReference objects. Some classes implementing
    # `Checkpointable`, notably `Optimizer`s, may override the
    # _checkpoint_dependencies property with conditional dependencies
    # (e.g. based on the current graph when saving).
    self._unconditional_checkpoint_dependencies = []
    # Maps names -> Checkpointable objects
    self._unconditional_dependency_names = {}
    # Restorations for other Checkpointable objects on which this object may
    # eventually depend. Maps local name -> _CheckpointPosition list. Optimizers
    # tack on conditional dependencies, and so need separate management of
    # deferred dependencies too.
    self._unconditional_deferred_dependencies = {}
    # The UID of the highest assignment to this object. Used to ensure that the
    # last requested assignment determines the final value of an object.
    if hasattr(self, "_update_uid"):
      raise AssertionError(
          "Internal error: the object had an update UID set before its "
          "initialization code was run.")
    # -1 sorts below every real restore UID, so any restore is applied.
    self._update_uid = -1
    # When executing eagerly, holds a collection of _NameBasedRestoreCoordinator
    # instances, which should be checked when creating variables or other
    # saveables. These are passed on recursively to all dependencies, since
    # unlike object-based checkpoint restores we don't know which subgraph is
    # being restored in advance. This mechanism is only necessary for
    # restore-on-create when executing eagerly, and so is unused when graph
    # building.
    self._name_based_restores = set()
  def _no_dependency(self, value):
    """If automatic dependency tracking is enabled, ignores `value`."""
    # The base class does no automatic tracking, so this is the identity;
    # presumably auto-tracking subclasses override it to wrap `value` --
    # confirm against the auto-tracking subclass.
    return value
  def _name_based_attribute_restore(self, checkpoint):
    """Restore the object's attributes from a name-based checkpoint."""
    self._name_based_restores.add(checkpoint)
    # Only restore when this checkpoint is newer than the last restore applied
    # to this object; the highest restore UID wins.
    if self._update_uid < checkpoint.restore_uid:
      checkpoint.eager_restore(self)
      self._update_uid = checkpoint.restore_uid
  @property
  def _checkpoint_dependencies(self):
    """All dependencies of this object.
    May be overridden to include conditional dependencies.
    Returns:
      A list of `CheckpointableReference` objects indicating named
      `Checkpointable` dependencies which should be saved along with this
      object.
    """
    # The base implementation only has unconditional dependencies.
    return self._unconditional_checkpoint_dependencies
  @property
  def _deferred_dependencies(self):
    """A dictionary with deferred dependencies.
    Stores restorations for other Checkpointable objects on which this object
    may eventually depend. May be overridden by sub-classes (e.g. Optimizers use
    conditional dependencies based the current graph, and so need separate
    management of deferred dependencies too).
    Returns:
      A dictionary mapping from local name to a list of _CheckpointPosition
      objects.
    """
    # The base implementation only has the unconditional mapping.
    return self._unconditional_deferred_dependencies
def _lookup_dependency(self, name):
"""Look up a dependency by name.
May be overridden to include conditional dependencies.
Args:
name: The local name of the dependency.
Returns:
A `Checkpointable` object, or `None` if no dependency by this name was
found.
"""
return self._unconditional_dependency_names.get(name, None)
  def _add_variable_with_custom_getter(
      self, name, shape=None, dtype=dtypes.float32,
      initializer=None, getter=None, overwrite=False,
      **kwargs_for_getter):
    """Restore-on-create for a variable to be saved with this `Checkpointable`.
    If the user has requested that this object or another `Checkpointable` which
    depends on this object be restored from a checkpoint (deferred loading
    before variable object creation), `initializer` may be ignored and the value
    from the checkpoint used instead.
    Args:
      name: A name for the variable. Must be unique within this object.
      shape: The shape of the variable.
      dtype: The data type of the variable.
      initializer: The initializer to use. Ignored if there is a deferred
        restoration left over from a call to
        `_restore_from_checkpoint_position`.
      getter: The getter to wrap which actually fetches the variable.
      overwrite: If True, disables unique name and type checks.
      **kwargs_for_getter: Passed to the getter.
    Returns:
      The new variable object.
    Raises:
      ValueError: If the variable name is not unique.
    """
    self._maybe_initialize_checkpointable()
    with ops.init_scope():
      if context.executing_eagerly():
        # If this is a variable with a single Tensor stored in the checkpoint,
        # we can set that value as an initializer rather than initializing and
        # then assigning (when executing eagerly). This call returns None if
        # there is nothing to restore.
        checkpoint_initializer = self._preload_simple_restoration(
            name=name, shape=shape)
      else:
        checkpoint_initializer = None
      if (checkpoint_initializer is not None
          and not (
              isinstance(initializer, CheckpointInitialValue)
              and (initializer.restore_uid
                   > checkpoint_initializer.restore_uid))):
        # If multiple Checkpointable objects are "creating" the same variable
        # via the magic of custom getters, the one with the highest restore UID
        # (the one called last) has to make the final initializer. If another
        # custom getter interrupts this process by overwriting the initializer,
        # then we'll catch that when we call _track_checkpointable. So this is
        # "best effort" to set the initializer with the highest restore UID.
        initializer = checkpoint_initializer
        # The restored value already has a static shape; do not pass one.
        shape = None
    new_variable = getter(
        name=name, shape=shape, dtype=dtype, initializer=initializer,
        **kwargs_for_getter)
    # If we set an initializer and the variable processed it, tracking will not
    # assign again. It will add this variable to our dependencies, and if there
    # is a non-trivial restoration queued, it will handle that. This also
    # handles slot variables.
    if not overwrite or isinstance(new_variable, Checkpointable):
      return self._track_checkpointable(new_variable, name=name,
                                        overwrite=overwrite)
    else:
      # TODO(allenl): Some variable types are not yet supported. Remove this
      # fallback once all get_variable() return types are Checkpointable.
      return new_variable
  def _preload_simple_restoration(self, name, shape):
    """Return a dependency's value for restore-on-create.
    Note the restoration is not deleted; if for some reason preload is called
    and then not assigned to the variable (for example because a custom getter
    overrides the initializer), the assignment will still happen once the
    variable is tracked (determined based on checkpoint.restore_uid).
    Args:
      name: The object-local name of the dependency holding the variable's
        value.
      shape: The shape of the variable being loaded into.
    Returns:
      A callable for use as a variable's initializer/initial_value, or None if
      one should not be set (either because there was no variable with this name
      in the checkpoint or because it needs more complex deserialization). Any
      non-trivial deserialization will happen when the variable object is
      tracked.
    """
    deferred_dependencies_list = self._deferred_dependencies.get(name, ())
    if not deferred_dependencies_list:
      # Nothing to do; we don't have a restore for this dependency queued up.
      return
    for checkpoint_position in deferred_dependencies_list:
      if not checkpoint_position.is_simple_variable():
        # If _any_ pending restoration is too complicated to fit in an
        # initializer (because it has dependencies, or because there are
        # multiple Tensors to restore), bail and let the general tracking code
        # handle it.
        return None
    # Several checkpoints may be queued; use the newest one (highest restore
    # UID), matching the "last requested assignment wins" policy.
    checkpoint_position = max(
        deferred_dependencies_list,
        key=lambda restore: restore.checkpoint.restore_uid)
    return CheckpointInitialValue(
        checkpoint_position=checkpoint_position, shape=shape)
  def _track_checkpointable(self, checkpointable, name, overwrite=False):
    """Declare a dependency on another `Checkpointable` object.
    Indicates that checkpoints for this object should include variables from
    `checkpointable`.
    Variables in a checkpoint are mapped to `Checkpointable`s based on the names
    provided when the checkpoint was written. To avoid breaking existing
    checkpoints when modifying a class, neither variable names nor dependency
    names (the names passed to `_track_checkpointable`) may change.
    Args:
      checkpointable: A `Checkpointable` which this object depends on.
      name: A local name for `checkpointable`, used for loading checkpoints into
        the correct objects.
      overwrite: Boolean, whether silently replacing dependencies is OK. Used
        for __setattr__, where throwing an error on attribute reassignment would
        be inappropriate.
    Returns:
      `checkpointable`, for convenience when declaring a dependency and
      assigning to a member variable in one statement.
    Raises:
      TypeError: If `checkpointable` does not inherit from `Checkpointable`.
      ValueError: If another object is already tracked by this name.
    """
    self._maybe_initialize_checkpointable()
    if not isinstance(checkpointable, Checkpointable):
      raise TypeError(
          ("Checkpointable._track_checkpointable() passed type %s, not a "
           "Checkpointable.") % (type(checkpointable),))
    new_reference = CheckpointableReference(name=name, ref=checkpointable)
    current_object = self._lookup_dependency(name)
    if (current_object is not None
        and current_object is not checkpointable):
      if not overwrite:
        raise ValueError(
            ("Called Checkpointable._track_checkpointable() with name='%s', "
             "but a Checkpointable with this name is already declared as a "
             "dependency. Names must be unique (or overwrite=True).") % (name,))
      # This is a weird thing to do, but we're not going to stop people from
      # using __setattr__.
      for index, (old_name, _) in enumerate(
          self._unconditional_checkpoint_dependencies):
        if name == old_name:
          # Replace the old reference in place, keeping its position in the
          # dependency list stable.
          self._unconditional_checkpoint_dependencies[index] = new_reference
    elif current_object is None:
      self._unconditional_checkpoint_dependencies.append(new_reference)
      self._handle_deferred_dependencies(
          name=name, checkpointable=checkpointable)
    self._unconditional_dependency_names[name] = checkpointable
    return checkpointable
  def _handle_deferred_dependencies(self, name, checkpointable):
    """Pop and load any deferred checkpoint restores into `checkpointable`.
    This method does not add a new dependency on `checkpointable`, but it does
    check if any outstanding/deferred dependencies have been queued waiting for
    this dependency to be added (matched based on `name`). If so,
    `checkpointable` and its dependencies are restored. The restorations are
    considered fulfilled and so are deleted.
    `_track_checkpointable` is more appropriate for adding a
    normal/unconditional dependency, and includes handling for deferred
    restorations. This method allows objects such as `Optimizer` to use the same
    restoration logic while managing conditional dependencies themselves, by
    overriding `_checkpoint_dependencies` and `_lookup_dependency` to change the
    object's dependencies based on the context it is saved/restored in (a single
    optimizer instance can have state associated with multiple graphs).
    Args:
      name: The name of the dependency within this object (`self`), used to
        match `checkpointable` with values saved in a checkpoint.
      checkpointable: The Checkpointable object to restore (inheriting from
        `Checkpointable`).
    """
    self._maybe_initialize_checkpointable()
    checkpointable._maybe_initialize_checkpointable()  # pylint: disable=protected-access
    deferred_dependencies_list = self._deferred_dependencies.pop(name, ())
    # Apply newest restores (highest restore UID) first; see
    # _single_restoration_from_checkpoint_position for the UID comparison.
    for checkpoint_position in sorted(
        deferred_dependencies_list,
        key=lambda restore: restore.checkpoint.restore_uid,
        reverse=True):
      checkpoint_position.restore(checkpointable)
    # Pass on any name-based restores queued in this object.
    for name_based_restore in sorted(
        self._name_based_restores,
        key=lambda checkpoint: checkpoint.restore_uid,
        reverse=True):
      checkpointable._name_based_attribute_restore(name_based_restore)  # pylint: disable=protected-access
  def _restore_from_checkpoint_position(self, checkpoint_position):
    """Restore this object and its dependencies (may be deferred)."""
    # Attempt a breadth-first traversal, since presumably the user has more
    # control over shorter paths. If we don't have all of the dependencies at
    # this point, the end result is not breadth-first (since other deferred
    # traversals will happen later).
    visit_queue = collections.deque([checkpoint_position])
    restore_ops = []
    while visit_queue:
      current_position = visit_queue.popleft()
      # Each visited position may append its children to visit_queue.
      restore_ops.extend(nest.flatten(
          current_position.checkpointable  # pylint: disable=protected-access
          ._single_restoration_from_checkpoint_position(
              checkpoint_position=current_position,
              visit_queue=visit_queue)))
    return restore_ops
def _single_restoration_from_checkpoint_position(
        self, checkpoint_position, visit_queue):
    """Restore this object, and either queue its dependencies or defer them.

    Args:
        checkpoint_position: The _CheckpointPosition pointing at this object's
            node in the checkpoint being restored.
        visit_queue: A deque owned by the caller; newly bound child positions
            are appended to it for later (breadth-first) processing.

    Returns:
        Restore ops for this object alone (empty tuple if no restore needed).
    """
    self._maybe_initialize_checkpointable()
    checkpoint = checkpoint_position.checkpoint
    # If the UID of this restore is lower than our current update UID, we don't
    # need to actually restore the object. However, we should pass the
    # restoration on to our dependencies.
    if checkpoint.restore_uid > self._update_uid:
        restore_ops = checkpoint_position.restore_ops()
        self._update_uid = checkpoint.restore_uid
    else:
        restore_ops = ()
    # Walk this node's children in the checkpoint proto: bind each to an
    # already-registered local dependency, or defer it by name.
    for child in checkpoint_position.object_proto.children:
        child_position = _CheckpointPosition(
            checkpoint=checkpoint,
            proto_id=child.node_id)
        local_object = self._lookup_dependency(child.local_name)
        if local_object is None:
            # We don't yet have a dependency registered with this name. Save it
            # in case we do.
            self._deferred_dependencies.setdefault(child.local_name, []).append(
                child_position)
        else:
            if child_position.bind_object(checkpointable=local_object):
                # This object's correspondence is new, so dependencies need to
                # be visited. Delay doing it so that we get a breadth-first
                # dependency resolution order (shallowest paths first). The
                # caller is responsible for emptying visit_queue.
                visit_queue.append(child_position)
    return restore_ops
def _gather_saveables_for_checkpoint(self):
    """Returns a dictionary of values to checkpoint with this object.

    Keys in the returned dictionary are local to this object and in a separate
    namespace from dependencies. Values may either be `SaveableObject` factories
    or variables easily converted to `SaveableObject`s (as in `tf.train.Saver`'s
    `var_list` constructor argument).

    `SaveableObjects` have a name set, which Checkpointable needs to generate
    itself. So rather than returning `SaveableObjects` directly, this method
    should return a dictionary of callables which take `name` arguments and
    return `SaveableObjects` with that name.

    If this object may also be passed to the global-name-based `tf.train.Saver`,
    the returned callables should have a default value for their name argument
    (i.e. be callable with no arguments).

    Returned values must be saved only by this object; if any value may be
    shared, it should instead be a dependency. For example, variable objects
    save their own values with the key `VARIABLE_VALUE_KEY`, but objects which
    reference variables simply add a dependency.

    Returns:
        The dictionary mapping attribute names to `SaveableObject` factories
        described above. For example:
        {VARIABLE_VALUE_KEY:
         lambda name="global_name_for_this_object":
         SaveableObject(name=name, ...)}
    """
    # Only objects exposing a working get_config() have JSON-serializable
    # Python state worth saving here.
    if not hasattr(self, "get_config"):
        return {}
    try:
        self.get_config()
    except NotImplementedError:
        return {}
    # Hold self weakly so the saved callable does not keep this object alive.
    weak_self = weakref.ref(self)
    def _state_callback():
        dereferenced_self = weak_self()
        if dereferenced_self:
            # NOTE(review): serializes the object itself, relying on
            # serialization.get_json_type to encode it -- confirm this is
            # intended rather than dereferenced_self.get_config().
            return json.dumps(dereferenced_self,
                              default=serialization.get_json_type,
                              sort_keys=True).encode("utf8")
        else:
            # Referent was collected; save an empty state string.
            return ""
    return {OBJECT_CONFIG_JSON_KEY: functools.partial(
        PythonStringStateSaveable,
        state_callback=_state_callback)}
| {
"repo_name": "apark263/tensorflow",
"path": "tensorflow/python/training/checkpointable/base.py",
"copies": "2",
"size": "36593",
"license": "apache-2.0",
"hash": -7203584268363516000,
"line_mean": 41.6491841492,
"line_max": 112,
"alpha_frac": 0.6970458831,
"autogenerated": false,
"ratio": 4.462560975609756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0068369686017380965,
"num_lines": 858
} |
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# netrc.py copied from python 2.7.3 source code, modified to call
# str instead of repr for serializing the netrc file
# and pep8 modification
import os
import shlex
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
    """Exception raised on syntax errors in the .netrc file."""

    def __init__(self, msg, filename=None, lineno=None):
        # Keep base-class state (args) identical to Exception(msg), and
        # record where the error occurred so __str__ can report it.
        Exception.__init__(self, msg)
        self.msg = msg
        self.filename = filename
        self.lineno = lineno

    def __str__(self):
        location = "(%s, line %s)" % (self.filename, self.lineno)
        return "%s %s" % (self.msg, location)
class netrc:
    """Object-oriented access to the entries of a .netrc file.

    Attributes:
        hosts: dict mapping machine name (or 'default') to a
            (login, account, password) tuple.
        macros: dict mapping macdef name to the list of raw macro lines.

    This variant serializes with str() rather than repr() in __repr__ so
    the output round-trips as a literal .netrc file.
    """

    def __init__(self, file=None):
        """Parse *file* (default: $HOME/.netrc).

        Raises IOError when no file is given and $HOME is unset, and
        NetrcParseError on syntax errors.
        """
        if file is None:
            try:
                file = os.path.join(os.environ['HOME'], ".netrc")
            except KeyError:
                raise IOError("Could not find .netrc: $HOME is not set")
        self.hosts = {}
        self.macros = {}
        with open(file) as fp:
            self._parse(file, fp)

    def _parse(self, file, fp):
        """Tokenize *fp* with shlex and populate self.hosts / self.macros."""
        lexer = shlex.shlex(fp)
        # Allow punctuation characters inside unquoted tokens (passwords etc.).
        lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
        while 1:
            # Look for a machine, default, or macdef top-level keyword
            toplevel = tt = lexer.get_token()
            if not tt:
                break
            elif tt == 'machine':
                entryname = lexer.get_token()
            elif tt == 'default':
                entryname = 'default'
            elif tt == 'macdef':  # Just skip to end of macdefs
                entryname = lexer.get_token()
                self.macros[entryname] = []
                # Newlines are significant inside a macro body.
                lexer.whitespace = ' \t'
                while 1:
                    line = lexer.instream.readline()
                    if not line or line == '\012':
                        # Macro ends at EOF or the first empty line.
                        lexer.whitespace = ' \t\r\n'
                        break
                    self.macros[entryname].append(line)
                continue
            else:
                # Bug fix: file and lexer.lineno were previously passed as
                # extra %-format arguments (raising TypeError: "not all
                # arguments converted") instead of as the exception's
                # filename/lineno constructor parameters.
                raise NetrcParseError(
                    "bad toplevel token %r" % tt,
                    file,
                    lexer.lineno
                )
            # We're looking at start of an entry for a named machine or
            # default.
            login = ''
            account = password = None
            self.hosts[entryname] = {}
            while 1:
                tt = lexer.get_token()
                if (
                    tt == '' or
                    tt == 'machine' or
                    tt == 'default' or
                    tt == 'macdef'
                ):
                    # Entry terminator: record the entry if it is complete.
                    if password:
                        self.hosts[entryname] = (login, account, password)
                        lexer.push_token(tt)
                        break
                    else:
                        raise NetrcParseError(
                            "malformed %s entry %s terminated by %s"
                            % (toplevel, entryname, repr(tt)),
                            file, lexer.lineno)
                elif tt == 'login' or tt == 'user':
                    login = lexer.get_token()
                elif tt == 'account':
                    account = lexer.get_token()
                elif tt == 'password':
                    password = lexer.get_token()
                else:
                    raise NetrcParseError("bad follower token %r" % tt,
                                          file, lexer.lineno)

    def authenticators(self, host):
        """Return a (user, account, password) tuple for given host."""
        # Fall back to the 'default' entry when the host has no entry of
        # its own.
        if host in self.hosts:
            return self.hosts[host]
        elif 'default' in self.hosts:
            return self.hosts['default']
        else:
            return None

    def __repr__(self):
        """Dump the class data in the format of a .netrc file."""
        rep = ""
        for host in self.hosts.keys():
            attrs = self.hosts[host]
            rep = rep + "machine " + host
            rep += "\n\tlogin " + str(attrs[0]) + "\n"
            if attrs[1]:
                # NOTE(review): no newline/tab around the account field --
                # output runs into the password line; preserved as-is to keep
                # the existing serialized format.
                rep = rep + "account " + str(attrs[1])
            rep = rep + "\tpassword " + str(attrs[2]) + "\n"
        for macro in self.macros.keys():
            rep = rep + "macdef " + macro + "\n"
            for line in self.macros[macro]:
                rep = rep + line
            rep = rep + "\n"
        return rep
# Self-test: parse the default ~/.netrc and dump it back to stdout.
if __name__ == '__main__':
    print(netrc())
| {
"repo_name": "wercker/wercker-cli",
"path": "werckercli/netrc.py",
"copies": "1",
"size": "4558",
"license": "mit",
"hash": -8761095514677393000,
"line_mean": 34.3333333333,
"line_max": 77,
"alpha_frac": 0.4460289601,
"autogenerated": false,
"ratio": 4.473012757605495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5419041717705495,
"avg_score": null,
"num_lines": null
} |
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
from __future__ import with_statement
import os, shlex
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
    """Exception raised on syntax errors in the .netrc file."""

    def __init__(self, msg, filename=None, lineno=None):
        # Keep base-class state (args) identical to Exception(msg), and
        # record where the error occurred so __str__ can report it.
        Exception.__init__(self, msg)
        self.msg = msg
        self.filename = filename
        self.lineno = lineno

    def __str__(self):
        location = "(%s, line %s)" % (self.filename, self.lineno)
        return "%s %s" % (self.msg, location)
class netrc:
    """Parse a .netrc file and expose its machine entries and macros.

    hosts maps machine name (or 'default') to (login, account, password);
    macros maps macdef name to the list of raw macro lines.
    """

    def __init__(self, file=None):
        # Default location is $HOME/.netrc; $HOME must be set to find it.
        if file is None:
            try:
                file = os.path.join(os.environ['HOME'], ".netrc")
            except KeyError:
                raise IOError("Could not find .netrc: $HOME is not set")
        self.hosts = {}
        self.macros = {}
        with open(file) as fp:
            self._parse(file, fp)

    def _parse(self, file, fp):
        """Tokenize fp with shlex and populate self.hosts / self.macros."""
        lexer = shlex.shlex(fp)
        # Allow punctuation characters inside unquoted tokens (passwords).
        lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
        # '#' comments are handled manually below rather than by shlex.
        lexer.commenters = lexer.commenters.replace('#', '')
        while 1:
            # Look for a machine, default, or macdef top-level keyword
            toplevel = tt = lexer.get_token()
            if not tt:
                break
            elif tt[0] == '#':
                # Skip the remainder of a comment line.
                # NOTE(review): when the token is e.g. "#foo", the whole
                # comment word was already consumed and this readline()
                # discards the FOLLOWING line instead -- compare the
                # saved_lineno approach used by newer copies of this module.
                fp.readline();
                continue;
            elif tt == 'machine':
                entryname = lexer.get_token()
            elif tt == 'default':
                entryname = 'default'
            elif tt == 'macdef':                # Just skip to end of macdefs
                entryname = lexer.get_token()
                self.macros[entryname] = []
                # Newlines are significant inside a macro body.
                lexer.whitespace = ' \t'
                while 1:
                    line = lexer.instream.readline()
                    if not line or line == '\012':
                        # Macro ends at EOF or the first empty line.
                        lexer.whitespace = ' \t\r\n'
                        break
                    self.macros[entryname].append(line)
                continue
            else:
                raise NetrcParseError(
                    "bad toplevel token %r" % tt, file, lexer.lineno)
            # We're looking at start of an entry for a named machine or default.
            login = ''
            account = password = None
            self.hosts[entryname] = {}
            while 1:
                tt = lexer.get_token()
                if (tt=='' or tt == 'machine' or
                        tt == 'default' or tt =='macdef'):
                    # Entry terminator: record the entry if it is complete.
                    if password:
                        self.hosts[entryname] = (login, account, password)
                        lexer.push_token(tt)
                        break
                    else:
                        raise NetrcParseError(
                            "malformed %s entry %s terminated by %s"
                            % (toplevel, entryname, repr(tt)),
                            file, lexer.lineno)
                elif tt == 'login' or tt == 'user':
                    login = lexer.get_token()
                elif tt == 'account':
                    account = lexer.get_token()
                elif tt == 'password':
                    password = lexer.get_token()
                else:
                    raise NetrcParseError("bad follower token %r" % tt,
                                          file, lexer.lineno)

    def authenticators(self, host):
        """Return a (user, account, password) tuple for given host."""
        # Fall back to the 'default' entry when the host has no own entry.
        if host in self.hosts:
            return self.hosts[host]
        elif 'default' in self.hosts:
            return self.hosts['default']
        else:
            return None

    def __repr__(self):
        """Dump the class data in the format of a .netrc file."""
        rep = ""
        for host in self.hosts.keys():
            attrs = self.hosts[host]
            rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
            if attrs[1]:
                # NOTE(review): no newline/tab around the account field --
                # output runs into the password line (long-standing quirk).
                rep = rep + "account " + repr(attrs[1])
            rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
        for macro in self.macros.keys():
            rep = rep + "macdef " + macro + "\n"
            for line in self.macros[macro]:
                rep = rep + line
            rep = rep + "\n"
        return rep
# Self-test: parse the default ~/.netrc and dump it back to stdout
# (Python 2 print statement).
if __name__ == '__main__':
    print netrc()
| {
"repo_name": "liangazhou/django-rdp",
"path": "packages/eclipse/plugins/org.python.pydev.jython_4.4.0.201510052309/Lib/netrc.py",
"copies": "11",
"size": "4372",
"license": "apache-2.0",
"hash": -3521046479452475000,
"line_mean": 35.7394957983,
"line_max": 80,
"alpha_frac": 0.4590576395,
"autogenerated": false,
"ratio": 4.363273453093813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0020856945215777935,
"num_lines": 119
} |
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
import io, os, shlex
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
    """Exception raised on syntax errors in the .netrc file."""

    def __init__(self, msg, filename=None, lineno=None):
        # Keep base-class state (args) identical to Exception(msg), and
        # record where the error occurred so __str__ can report it.
        Exception.__init__(self, msg)
        self.msg = msg
        self.filename = filename
        self.lineno = lineno

    def __str__(self):
        location = "(%s, line %s)" % (self.filename, self.lineno)
        return "%s %s" % (self.msg, location)
class netrc:
    """Parse a .netrc file and expose its machine entries and macros.

    hosts maps machine name (or 'default') to (login, account, password);
    macros maps macdef name to the list of raw macro lines.
    """

    def __init__(self, file=None):
        # Default location is $HOME/.netrc; $HOME must be set to find it.
        if file is None:
            try:
                file = os.path.join(os.environ['HOME'], ".netrc")
            except KeyError:
                raise IOError("Could not find .netrc: $HOME is not set")
        self.hosts = {}
        self.macros = {}
        with open(file) as fp:
            self._parse(file, fp)

    def _parse(self, file, fp):
        """Tokenize fp with shlex and populate self.hosts / self.macros."""
        lexer = shlex.shlex(fp)
        # Allow punctuation characters inside unquoted tokens (passwords).
        lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
        # '#' comments are handled manually below rather than by shlex.
        lexer.commenters = lexer.commenters.replace('#', '')
        while 1:
            # Look for a machine, default, or macdef top-level keyword
            saved_lineno = lexer.lineno
            toplevel = tt = lexer.get_token()
            if not tt:
                break
            elif tt[0] == '#':
                # A lone '#' still on its original line: discard the rest of
                # the comment line. A longer '#word' token already consumed
                # the comment text itself.
                if lexer.lineno == saved_lineno and len(tt) == 1:
                    lexer.instream.readline()
                continue
            elif tt == 'machine':
                entryname = lexer.get_token()
            elif tt == 'default':
                entryname = 'default'
            elif tt == 'macdef':                # Just skip to end of macdefs
                entryname = lexer.get_token()
                self.macros[entryname] = []
                # Newlines are significant inside a macro body.
                lexer.whitespace = ' \t'
                while 1:
                    line = lexer.instream.readline()
                    if not line or line == '\012':
                        # Macro ends at EOF or the first empty line.
                        lexer.whitespace = ' \t\r\n'
                        break
                    self.macros[entryname].append(line)
                continue
            else:
                raise NetrcParseError(
                    "bad toplevel token %r" % tt, file, lexer.lineno)
            # We're looking at start of an entry for a named machine or default.
            login = ''
            account = password = None
            self.hosts[entryname] = {}
            while 1:
                tt = lexer.get_token()
                if (tt.startswith('#') or
                        tt in {'', 'machine', 'default', 'macdef'}):
                    # Entry terminator: record the entry if it is complete.
                    if password:
                        self.hosts[entryname] = (login, account, password)
                        lexer.push_token(tt)
                        break
                    else:
                        raise NetrcParseError(
                            "malformed %s entry %s terminated by %s"
                            % (toplevel, entryname, repr(tt)),
                            file, lexer.lineno)
                elif tt == 'login' or tt == 'user':
                    login = lexer.get_token()
                elif tt == 'account':
                    account = lexer.get_token()
                elif tt == 'password':
                    password = lexer.get_token()
                else:
                    raise NetrcParseError("bad follower token %r" % tt,
                                          file, lexer.lineno)

    def authenticators(self, host):
        """Return a (user, account, password) tuple for given host."""
        # Fall back to the 'default' entry when the host has no own entry.
        if host in self.hosts:
            return self.hosts[host]
        elif 'default' in self.hosts:
            return self.hosts['default']
        else:
            return None

    def __repr__(self):
        """Dump the class data in the format of a .netrc file."""
        rep = ""
        for host in self.hosts.keys():
            attrs = self.hosts[host]
            rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
            if attrs[1]:
                # NOTE(review): no newline/tab around the account field --
                # output runs into the password line (long-standing quirk).
                rep = rep + "account " + repr(attrs[1])
            rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
        for macro in self.macros.keys():
            rep = rep + "macdef " + macro + "\n"
            for line in self.macros[macro]:
                rep = rep + line
            rep = rep + "\n"
        return rep
# Self-test: parse the default ~/.netrc and dump it back to stdout.
if __name__ == '__main__':
    print(netrc())
| {
"repo_name": "rockyzhang/zhangyanhit-python-for-android-mips",
"path": "python3-alpha/python3-src/Lib/netrc.py",
"copies": "54",
"size": "4462",
"license": "apache-2.0",
"hash": 2974576033136968700,
"line_mean": 36.1833333333,
"line_max": 80,
"alpha_frac": 0.4594352308,
"autogenerated": false,
"ratio": 4.370225269343781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
import os, shlex
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
    """Exception raised on syntax errors in the .netrc file."""

    def __init__(self, msg, filename=None, lineno=None):
        # Keep base-class state (args) identical to Exception(msg), and
        # record where the error occurred so __str__ can report it.
        Exception.__init__(self, msg)
        self.msg = msg
        self.filename = filename
        self.lineno = lineno

    def __str__(self):
        location = "(%s, line %s)" % (self.filename, self.lineno)
        return "%s %s" % (self.msg, location)
class netrc:
    """Object-oriented access to the entries of a .netrc file.

    Attributes:
        hosts: dict mapping machine name (or 'default') to a
            (login, account, password) tuple.
        macros: dict mapping macdef name to the list of raw macro lines.
    """

    def __init__(self, file=None):
        """Parse *file* (default: $HOME/.netrc).

        Raises IOError when no file is given and $HOME is unset, and
        NetrcParseError on syntax errors.
        """
        if file is None:
            try:
                file = os.path.join(os.environ['HOME'], ".netrc")
            except KeyError:
                raise IOError("Could not find .netrc: $HOME is not set")
        self.hosts = {}
        self.macros = {}
        with open(file) as fp:
            self._parse(file, fp)

    def _parse(self, file, fp):
        """Tokenize *fp* with shlex and populate self.hosts / self.macros."""
        lexer = shlex.shlex(fp)
        # Allow punctuation characters inside unquoted tokens (passwords).
        lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
        # Handle '#' comments ourselves; shlex's built-in comment handling
        # would also eat '#' characters inside passwords.
        lexer.commenters = lexer.commenters.replace('#', '')
        while 1:
            # Look for a machine, default, or macdef top-level keyword
            saved_lineno = lexer.lineno
            toplevel = tt = lexer.get_token()
            if not tt:
                break
            elif tt[0] == '#':
                # Bug fix: the previous fp.readline() here discarded the line
                # FOLLOWING a comment token such as "#foo" (which already
                # consumed the whole comment word), silently dropping real
                # entries. Only a lone "#" still on its original line has
                # comment text left on the line to discard.
                if lexer.lineno == saved_lineno and len(tt) == 1:
                    lexer.instream.readline()
                continue
            elif tt == 'machine':
                entryname = lexer.get_token()
            elif tt == 'default':
                entryname = 'default'
            elif tt == 'macdef':  # Just skip to end of macdefs
                entryname = lexer.get_token()
                self.macros[entryname] = []
                # Newlines are significant inside a macro body.
                lexer.whitespace = ' \t'
                while 1:
                    line = lexer.instream.readline()
                    if not line or line == '\012':
                        # Macro ends at EOF or the first empty line.
                        lexer.whitespace = ' \t\r\n'
                        break
                    self.macros[entryname].append(line)
                continue
            else:
                raise NetrcParseError(
                    "bad toplevel token %r" % tt, file, lexer.lineno)
            # We're looking at start of an entry for a named machine or default.
            login = ''
            account = password = None
            self.hosts[entryname] = {}
            while 1:
                tt = lexer.get_token()
                # A comment token also terminates an entry (part of the same
                # upstream comment-handling fix).
                if (tt.startswith('#') or
                        tt in {'', 'machine', 'default', 'macdef'}):
                    if password:
                        self.hosts[entryname] = (login, account, password)
                        lexer.push_token(tt)
                        break
                    else:
                        raise NetrcParseError(
                            "malformed %s entry %s terminated by %s"
                            % (toplevel, entryname, repr(tt)),
                            file, lexer.lineno)
                elif tt == 'login' or tt == 'user':
                    login = lexer.get_token()
                elif tt == 'account':
                    account = lexer.get_token()
                elif tt == 'password':
                    password = lexer.get_token()
                else:
                    raise NetrcParseError("bad follower token %r" % tt,
                                          file, lexer.lineno)

    def authenticators(self, host):
        """Return a (user, account, password) tuple for given host."""
        # Fall back to the 'default' entry when the host has no own entry.
        if host in self.hosts:
            return self.hosts[host]
        elif 'default' in self.hosts:
            return self.hosts['default']
        else:
            return None

    def __repr__(self):
        """Dump the class data in the format of a .netrc file."""
        rep = ""
        for host in self.hosts.keys():
            attrs = self.hosts[host]
            rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
            if attrs[1]:
                # NOTE(review): no newline/tab around the account field --
                # output runs into the password line; preserved as-is.
                rep = rep + "account " + repr(attrs[1])
            rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
        for macro in self.macros.keys():
            rep = rep + "macdef " + macro + "\n"
            for line in self.macros[macro]:
                rep = rep + line
            rep = rep + "\n"
        return rep
# Self-test: parse the default ~/.netrc and dump it back to stdout.
if __name__ == '__main__':
    print(netrc())
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-3.2/Lib/netrc.py",
"copies": "1",
"size": "4335",
"license": "mit",
"hash": 6075586930331784000,
"line_mean": 35.7372881356,
"line_max": 80,
"alpha_frac": 0.4562860438,
"autogenerated": false,
"ratio": 4.365558912386707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5321844956186706,
"avg_score": null,
"num_lines": null
} |
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
import os, shlex, stat
if os.name == 'posix':
import pwd
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
    """Exception raised on syntax errors in the .netrc file."""

    def __init__(self, msg, filename=None, lineno=None):
        # Keep base-class state (args) identical to Exception(msg), and
        # record where the error occurred so __str__ can report it.
        Exception.__init__(self, msg)
        self.msg = msg
        self.filename = filename
        self.lineno = lineno

    def __str__(self):
        location = "(%s, line %s)" % (self.filename, self.lineno)
        return "%s %s" % (self.msg, location)
class netrc:
    """Parse a .netrc file and expose its machine entries and macros.

    hosts maps machine name (or 'default') to (login, account, password);
    macros maps macdef name to the list of raw macro lines. When reading the
    user's default ~/.netrc on POSIX, ownership and permission checks are
    enforced before a password is accepted.
    """

    def __init__(self, file=None):
        # Remember whether we are reading the user's default ~/.netrc;
        # the security checks in _parse apply only in that case.
        default_netrc = file is None
        if file is None:
            try:
                file = os.path.join(os.environ['HOME'], ".netrc")
            except KeyError:
                raise IOError("Could not find .netrc: $HOME is not set")
        self.hosts = {}
        self.macros = {}
        with open(file) as fp:
            self._parse(file, fp, default_netrc)

    def _parse(self, file, fp, default_netrc):
        """Tokenize fp with shlex and populate self.hosts / self.macros."""
        lexer = shlex.shlex(fp)
        # Allow punctuation characters inside unquoted tokens (passwords).
        lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
        # '#' comments are handled manually below rather than by shlex.
        lexer.commenters = lexer.commenters.replace('#', '')
        while 1:
            # Look for a machine, default, or macdef top-level keyword
            saved_lineno = lexer.lineno
            toplevel = tt = lexer.get_token()
            if not tt:
                break
            elif tt[0] == '#':
                # A lone '#' still on its original line: discard the rest of
                # the comment line. A longer '#word' token already consumed
                # the comment text itself.
                if lexer.lineno == saved_lineno and len(tt) == 1:
                    lexer.instream.readline()
                continue
            elif tt == 'machine':
                entryname = lexer.get_token()
            elif tt == 'default':
                entryname = 'default'
            elif tt == 'macdef':                # Just skip to end of macdefs
                entryname = lexer.get_token()
                self.macros[entryname] = []
                # Newlines are significant inside a macro body.
                lexer.whitespace = ' \t'
                while 1:
                    line = lexer.instream.readline()
                    if not line or line == '\012':
                        # Macro ends at EOF or the first empty line.
                        lexer.whitespace = ' \t\r\n'
                        break
                    self.macros[entryname].append(line)
                continue
            else:
                raise NetrcParseError(
                    "bad toplevel token %r" % tt, file, lexer.lineno)
            # We're looking at start of an entry for a named machine or default.
            login = ''
            account = password = None
            self.hosts[entryname] = {}
            while 1:
                tt = lexer.get_token()
                if (tt.startswith('#') or
                        tt in {'', 'machine', 'default', 'macdef'}):
                    # Entry terminator: record the entry if it is complete.
                    if password:
                        self.hosts[entryname] = (login, account, password)
                        lexer.push_token(tt)
                        break
                    else:
                        raise NetrcParseError(
                            "malformed %s entry %s terminated by %s"
                            % (toplevel, entryname, repr(tt)),
                            file, lexer.lineno)
                elif tt == 'login' or tt == 'user':
                    login = lexer.get_token()
                elif tt == 'account':
                    account = lexer.get_token()
                elif tt == 'password':
                    # For the default ~/.netrc on POSIX, refuse to read a
                    # password from a file not owned by the current user or
                    # readable by group/others.
                    if os.name == 'posix' and default_netrc:
                        prop = os.fstat(fp.fileno())
                        if prop.st_uid != os.getuid():
                            try:
                                fowner = pwd.getpwuid(prop.st_uid)[0]
                            except KeyError:
                                fowner = 'uid %s' % prop.st_uid
                            try:
                                user = pwd.getpwuid(os.getuid())[0]
                            except KeyError:
                                user = 'uid %s' % os.getuid()
                            raise NetrcParseError(
                                ("~/.netrc file owner (%s) does not match"
                                 " current user (%s)") % (fowner, user),
                                file, lexer.lineno)
                        if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
                            raise NetrcParseError(
                                "~/.netrc access too permissive: access"
                                " permissions must restrict access to only"
                                " the owner", file, lexer.lineno)
                    password = lexer.get_token()
                else:
                    raise NetrcParseError("bad follower token %r" % tt,
                                          file, lexer.lineno)

    def authenticators(self, host):
        """Return a (user, account, password) tuple for given host."""
        # Fall back to the 'default' entry when the host has no own entry.
        if host in self.hosts:
            return self.hosts[host]
        elif 'default' in self.hosts:
            return self.hosts['default']
        else:
            return None

    def __repr__(self):
        """Dump the class data in the format of a .netrc file."""
        rep = ""
        for host in self.hosts.keys():
            attrs = self.hosts[host]
            rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
            if attrs[1]:
                # NOTE(review): no newline/tab around the account field --
                # output runs into the password line (long-standing quirk).
                rep = rep + "account " + repr(attrs[1])
            rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
        for macro in self.macros.keys():
            rep = rep + "macdef " + macro + "\n"
            for line in self.macros[macro]:
                rep = rep + line
            rep = rep + "\n"
        return rep
# Self-test: parse the default ~/.netrc and dump it back to stdout.
if __name__ == '__main__':
    print(netrc())
| {
"repo_name": "timm/timmnix",
"path": "pypy3-v5.5.0-linux64/lib-python/3/netrc.py",
"copies": "2",
"size": "5747",
"license": "mit",
"hash": 372927955630068740,
"line_mean": 39.1888111888,
"line_max": 80,
"alpha_frac": 0.4402296851,
"autogenerated": false,
"ratio": 4.532334384858044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008443747713535896,
"num_lines": 143
} |
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
import os, shlex, stat
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
    """Exception raised on syntax errors in the .netrc file."""

    def __init__(self, msg, filename=None, lineno=None):
        # Keep base-class state (args) identical to Exception(msg), and
        # record where the error occurred so __str__ can report it.
        Exception.__init__(self, msg)
        self.msg = msg
        self.filename = filename
        self.lineno = lineno

    def __str__(self):
        location = "(%s, line %s)" % (self.filename, self.lineno)
        return "%s %s" % (self.msg, location)
class netrc:
    """Parse a .netrc file and expose its machine entries and macros.

    hosts maps machine name (or 'default') to (login, account, password);
    macros maps macdef name to the list of raw macro lines. When reading the
    user's default ~/.netrc on POSIX, ownership and permission checks are
    enforced before a password is accepted.
    """

    def __init__(self, file=None):
        # Remember whether we are reading the user's default ~/.netrc;
        # the security checks in _parse apply only in that case.
        default_netrc = file is None
        if file is None:
            # expanduser works even without $HOME set (unlike older versions).
            file = os.path.join(os.path.expanduser("~"), ".netrc")
        self.hosts = {}
        self.macros = {}
        with open(file) as fp:
            self._parse(file, fp, default_netrc)

    def _parse(self, file, fp, default_netrc):
        """Tokenize fp with shlex and populate self.hosts / self.macros."""
        lexer = shlex.shlex(fp)
        # Allow punctuation characters inside unquoted tokens (passwords).
        lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
        # '#' comments are handled manually below rather than by shlex.
        lexer.commenters = lexer.commenters.replace('#', '')
        while 1:
            # Look for a machine, default, or macdef top-level keyword
            saved_lineno = lexer.lineno
            toplevel = tt = lexer.get_token()
            if not tt:
                break
            elif tt[0] == '#':
                # A lone '#' still on its original line: discard the rest of
                # the comment line. A longer '#word' token already consumed
                # the comment text itself.
                if lexer.lineno == saved_lineno and len(tt) == 1:
                    lexer.instream.readline()
                continue
            elif tt == 'machine':
                entryname = lexer.get_token()
            elif tt == 'default':
                entryname = 'default'
            elif tt == 'macdef':                # Just skip to end of macdefs
                entryname = lexer.get_token()
                self.macros[entryname] = []
                # Newlines are significant inside a macro body.
                lexer.whitespace = ' \t'
                while 1:
                    line = lexer.instream.readline()
                    if not line or line == '\012':
                        # Macro ends at EOF or the first empty line.
                        lexer.whitespace = ' \t\r\n'
                        break
                    self.macros[entryname].append(line)
                continue
            else:
                raise NetrcParseError(
                    "bad toplevel token %r" % tt, file, lexer.lineno)
            # We're looking at start of an entry for a named machine or default.
            login = ''
            account = password = None
            self.hosts[entryname] = {}
            while 1:
                tt = lexer.get_token()
                if (tt.startswith('#') or
                        tt in {'', 'machine', 'default', 'macdef'}):
                    # Entry terminator: record the entry if it is complete.
                    if password:
                        self.hosts[entryname] = (login, account, password)
                        lexer.push_token(tt)
                        break
                    else:
                        raise NetrcParseError(
                            "malformed %s entry %s terminated by %s"
                            % (toplevel, entryname, repr(tt)),
                            file, lexer.lineno)
                elif tt == 'login' or tt == 'user':
                    login = lexer.get_token()
                elif tt == 'account':
                    account = lexer.get_token()
                elif tt == 'password':
                    # For the default ~/.netrc on POSIX, refuse to read a
                    # password from a file not owned by the current user or
                    # readable by group/others.
                    if os.name == 'posix' and default_netrc:
                        prop = os.fstat(fp.fileno())
                        if prop.st_uid != os.getuid():
                            # pwd is only needed (and only available) on POSIX.
                            import pwd
                            try:
                                fowner = pwd.getpwuid(prop.st_uid)[0]
                            except KeyError:
                                fowner = 'uid %s' % prop.st_uid
                            try:
                                user = pwd.getpwuid(os.getuid())[0]
                            except KeyError:
                                user = 'uid %s' % os.getuid()
                            raise NetrcParseError(
                                ("~/.netrc file owner (%s) does not match"
                                 " current user (%s)") % (fowner, user),
                                file, lexer.lineno)
                        if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
                            raise NetrcParseError(
                                "~/.netrc access too permissive: access"
                                " permissions must restrict access to only"
                                " the owner", file, lexer.lineno)
                    password = lexer.get_token()
                else:
                    raise NetrcParseError("bad follower token %r" % tt,
                                          file, lexer.lineno)

    def authenticators(self, host):
        """Return a (user, account, password) tuple for given host."""
        # Fall back to the 'default' entry when the host has no own entry.
        if host in self.hosts:
            return self.hosts[host]
        elif 'default' in self.hosts:
            return self.hosts['default']
        else:
            return None

    def __repr__(self):
        """Dump the class data in the format of a .netrc file."""
        rep = ""
        for host in self.hosts.keys():
            attrs = self.hosts[host]
            rep += f"machine {host}\n\tlogin {attrs[0]}\n"
            if attrs[1]:
                rep += f"\taccount {attrs[1]}\n"
            rep += f"\tpassword {attrs[2]}\n"
        for macro in self.macros.keys():
            rep += f"macdef {macro}\n"
            for line in self.macros[macro]:
                rep += line
            rep += "\n"
        return rep
# Self-test: parse the default ~/.netrc and dump it back to stdout.
if __name__ == '__main__':
    print(netrc())
| {
"repo_name": "batermj/algorithm-challenger",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/netrc.py",
"copies": "34",
"size": "5566",
"license": "apache-2.0",
"hash": -6009561102566695000,
"line_mean": 39.0431654676,
"line_max": 80,
"alpha_frac": 0.4380165289,
"autogenerated": false,
"ratio": 4.566037735849057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
import os, stat, shlex
if os.name == 'posix':
import pwd
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
    """Exception raised on syntax errors in the .netrc file."""

    def __init__(self, msg, filename=None, lineno=None):
        # Keep base-class state (args) identical to Exception(msg), and
        # record where the error occurred so __str__ can report it.
        Exception.__init__(self, msg)
        self.msg = msg
        self.filename = filename
        self.lineno = lineno

    def __str__(self):
        location = "(%s, line %s)" % (self.filename, self.lineno)
        return "%s %s" % (self.msg, location)
class netrc:
    """Parse a .netrc file and expose its machine entries and macros.

    hosts maps machine name (or 'default') to (login, account, password);
    macros maps macdef name to the list of raw macro lines. When reading the
    user's default ~/.netrc on POSIX, ownership and permission checks are
    enforced before a password is accepted.
    """

    def __init__(self, file=None):
        # Remember whether we are reading the user's default ~/.netrc;
        # the security checks in _parse apply only in that case.
        default_netrc = file is None
        if file is None:
            try:
                file = os.path.join(os.environ['HOME'], ".netrc")
            except KeyError:
                raise IOError("Could not find .netrc: $HOME is not set")
        self.hosts = {}
        self.macros = {}
        with open(file) as fp:
            self._parse(file, fp, default_netrc)

    def _parse(self, file, fp, default_netrc):
        """Tokenize fp with shlex and populate self.hosts / self.macros."""
        lexer = shlex.shlex(fp)
        # Allow punctuation characters inside unquoted tokens (passwords).
        lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
        # '#' comments are handled manually below rather than by shlex.
        lexer.commenters = lexer.commenters.replace('#', '')
        while 1:
            # Look for a machine, default, or macdef top-level keyword
            toplevel = tt = lexer.get_token()
            if not tt:
                break
            elif tt[0] == '#':
                # seek to beginning of comment, in case reading the token put
                # us on a new line, and then skip the rest of the line.
                pos = len(tt) + 1
                lexer.instream.seek(-pos, 1)
                lexer.instream.readline()
                continue
            elif tt == 'machine':
                entryname = lexer.get_token()
            elif tt == 'default':
                entryname = 'default'
            elif tt == 'macdef':                # Just skip to end of macdefs
                entryname = lexer.get_token()
                self.macros[entryname] = []
                # Newlines are significant inside a macro body.
                lexer.whitespace = ' \t'
                while 1:
                    line = lexer.instream.readline()
                    if not line or line == '\012':
                        # Macro ends at EOF or the first empty line.
                        lexer.whitespace = ' \t\r\n'
                        break
                    self.macros[entryname].append(line)
                continue
            else:
                raise NetrcParseError(
                    "bad toplevel token %r" % tt, file, lexer.lineno)
            # We're looking at start of an entry for a named machine or default.
            login = ''
            account = password = None
            self.hosts[entryname] = {}
            while 1:
                tt = lexer.get_token()
                if (tt.startswith('#') or
                        tt in {'', 'machine', 'default', 'macdef'}):
                    # Entry terminator: record the entry if it is complete.
                    if password:
                        self.hosts[entryname] = (login, account, password)
                        lexer.push_token(tt)
                        break
                    else:
                        raise NetrcParseError(
                            "malformed %s entry %s terminated by %s"
                            % (toplevel, entryname, repr(tt)),
                            file, lexer.lineno)
                elif tt == 'login' or tt == 'user':
                    login = lexer.get_token()
                elif tt == 'account':
                    account = lexer.get_token()
                elif tt == 'password':
                    # For the default ~/.netrc on POSIX, refuse to read a
                    # password from a file not owned by the current user or
                    # readable by group/others.
                    if os.name == 'posix' and default_netrc:
                        prop = os.fstat(fp.fileno())
                        if prop.st_uid != os.getuid():
                            try:
                                fowner = pwd.getpwuid(prop.st_uid)[0]
                            except KeyError:
                                fowner = 'uid %s' % prop.st_uid
                            try:
                                user = pwd.getpwuid(os.getuid())[0]
                            except KeyError:
                                user = 'uid %s' % os.getuid()
                            raise NetrcParseError(
                                ("~/.netrc file owner (%s) does not match"
                                 " current user (%s)") % (fowner, user),
                                file, lexer.lineno)
                        if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
                            raise NetrcParseError(
                                "~/.netrc access too permissive: access"
                                " permissions must restrict access to only"
                                " the owner", file, lexer.lineno)
                    password = lexer.get_token()
                else:
                    raise NetrcParseError("bad follower token %r" % tt,
                                          file, lexer.lineno)

    def authenticators(self, host):
        """Return a (user, account, password) tuple for given host."""
        # Fall back to the 'default' entry when the host has no own entry.
        if host in self.hosts:
            return self.hosts[host]
        elif 'default' in self.hosts:
            return self.hosts['default']
        else:
            return None

    def __repr__(self):
        """Dump the class data in the format of a .netrc file."""
        rep = ""
        for host in self.hosts.keys():
            attrs = self.hosts[host]
            rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
            if attrs[1]:
                # NOTE(review): no newline/tab around the account field --
                # output runs into the password line (long-standing quirk).
                rep = rep + "account " + repr(attrs[1])
            rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
        for macro in self.macros.keys():
            rep = rep + "macdef " + macro + "\n"
            for line in self.macros[macro]:
                rep = rep + line
            rep = rep + "\n"
        return rep
# Self-test: parse the default ~/.netrc and dump it back to stdout
# (Python 2 print statement).
if __name__ == '__main__':
    print netrc()
| {
"repo_name": "cloudfoundry/php-buildpack-legacy",
"path": "builds/runtimes/python-2.7.6/lib/python2.7/netrc.py",
"copies": "71",
"size": "5865",
"license": "mit",
"hash": -7070861124604151000,
"line_mean": 39.4482758621,
"line_max": 80,
"alpha_frac": 0.441943734,
"autogenerated": false,
"ratio": 4.528957528957529,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
import os, shlex
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
    """Exception raised on syntax errors in the .netrc file."""

    def __init__(self, msg, filename=None, lineno=None):
        # Keep base-class state (args) identical to Exception(msg), and
        # record where the error occurred so __str__ can report it.
        Exception.__init__(self, msg)
        self.msg = msg
        self.filename = filename
        self.lineno = lineno

    def __str__(self):
        location = "(%s, line %s)" % (self.filename, self.lineno)
        return "%s %s" % (self.msg, location)
class netrc:
    """Object-oriented access to the entries of a .netrc file.

    Attributes:
        hosts: dict mapping machine name (or 'default') to a
            (login, account, password) tuple.
        macros: dict mapping macdef name to the list of raw macro lines.
    """

    def __init__(self, file=None):
        """Parse *file* (default: $HOME/.netrc).

        Raises IOError when no file is given and $HOME is unset, and
        NetrcParseError on syntax errors.
        """
        if file is None:
            try:
                file = os.path.join(os.environ['HOME'], ".netrc")
            except KeyError:
                raise IOError("Could not find .netrc: $HOME is not set")
        self.hosts = {}
        self.macros = {}
        fp = open(file)
        try:
            self._parse(file, fp)
        finally:
            # Bug fix: the file object used to be left open for the garbage
            # collector to reclaim; close it deterministically.
            fp.close()

    def _parse(self, file, fp):
        """Tokenize *fp* with shlex and populate self.hosts / self.macros.

        (Extracted from __init__ for symmetry with later versions of this
        module; the parsing logic itself is unchanged.)
        """
        lexer = shlex.shlex(fp)
        # Allow punctuation characters inside unquoted tokens (passwords).
        # shlex's default commenters ('#') still skip comment lines.
        lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
        while 1:
            # Look for a machine, default, or macdef top-level keyword
            toplevel = tt = lexer.get_token()
            if not tt:
                break
            elif tt == 'machine':
                entryname = lexer.get_token()
            elif tt == 'default':
                entryname = 'default'
            elif tt == 'macdef':  # Just skip to end of macdefs
                entryname = lexer.get_token()
                self.macros[entryname] = []
                # Newlines are significant inside a macro body.
                lexer.whitespace = ' \t'
                while 1:
                    line = lexer.instream.readline()
                    if not line or line == '\012':
                        # Macro ends at EOF or the first empty line.
                        lexer.whitespace = ' \t\r\n'
                        break
                    self.macros[entryname].append(line)
                continue
            else:
                raise NetrcParseError(
                    "bad toplevel token %r" % tt, file, lexer.lineno)
            # We're looking at start of an entry for a named machine or default.
            login = ''
            account = password = None
            self.hosts[entryname] = {}
            while 1:
                tt = lexer.get_token()
                if (tt=='' or tt == 'machine' or
                        tt == 'default' or tt =='macdef'):
                    # Entry terminator: record the entry if it is complete.
                    if password:
                        self.hosts[entryname] = (login, account, password)
                        lexer.push_token(tt)
                        break
                    else:
                        raise NetrcParseError(
                            "malformed %s entry %s terminated by %s"
                            % (toplevel, entryname, repr(tt)),
                            file, lexer.lineno)
                elif tt == 'login' or tt == 'user':
                    login = lexer.get_token()
                elif tt == 'account':
                    account = lexer.get_token()
                elif tt == 'password':
                    password = lexer.get_token()
                else:
                    raise NetrcParseError("bad follower token %r" % tt,
                                          file, lexer.lineno)

    def authenticators(self, host):
        """Return a (user, account, password) tuple for given host."""
        # Fall back to the 'default' entry when the host has no own entry.
        if host in self.hosts:
            return self.hosts[host]
        elif 'default' in self.hosts:
            return self.hosts['default']
        else:
            return None

    def __repr__(self):
        """Dump the class data in the format of a .netrc file."""
        rep = ""
        for host in self.hosts.keys():
            attrs = self.hosts[host]
            rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
            if attrs[1]:
                # NOTE(review): no newline/tab around the account field --
                # output runs into the password line; preserved as-is.
                rep = rep + "account " + repr(attrs[1])
            rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
        for macro in self.macros.keys():
            rep = rep + "macdef " + macro + "\n"
            for line in self.macros[macro]:
                rep = rep + line
            rep = rep + "\n"
        return rep
if __name__ == '__main__':
    # Parenthesized form prints identically on Python 2 and Python 3.
    print(netrc())
| {
"repo_name": "babyliynfg/cross",
"path": "tools/project-creator/Python2.6.6/Lib/netrc.py",
"copies": "10",
"size": "4222",
"license": "mit",
"hash": 2222925342219004400,
"line_mean": 36.036036036,
"line_max": 80,
"alpha_frac": 0.4445760303,
"autogenerated": false,
"ratio": 4.4867162592986185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9931292289598619,
"avg_score": null,
"num_lines": null
} |
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
import os, stat, shlex
if os.name == 'posix':
import pwd
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
    """Raised when the .netrc file contains a syntax error."""

    def __init__(self, msg, filename=None, lineno=None):
        # Keep the raw message plus its location for error reporting.
        self.msg = msg
        self.filename = filename
        self.lineno = lineno
        Exception.__init__(self, msg)

    def __str__(self):
        location = "(%s, line %s)" % (self.filename, self.lineno)
        return "%s %s" % (self.msg, location)
class netrc:
    """Parsed contents of a .netrc file.

    `hosts` maps machine names (or 'default') to (login, account, password)
    tuples; `macros` maps macro names to lists of raw body lines.
    """

    def __init__(self, file=None):
        # Remember whether we are reading the user's own ~/.netrc: the
        # ownership/permission checks in _parse only apply in that case.
        default_netrc = file is None
        if file is None:
            try:
                file = os.path.join(os.environ['HOME'], ".netrc")
            except KeyError:
                raise IOError("Could not find .netrc: $HOME is not set")
        self.hosts = {}
        self.macros = {}
        with open(file) as fp:
            self._parse(file, fp, default_netrc)

    def _parse(self, file, fp, default_netrc):
        """Tokenize *fp* with shlex and populate self.hosts / self.macros."""
        lexer = shlex.shlex(fp)
        # Allow punctuation inside tokens (hostnames, passwords, ...).
        lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
        # '#' is removed from shlex's comment characters; comment lines are
        # handled manually in the loop below instead.
        lexer.commenters = lexer.commenters.replace('#', '')
        while 1:
            # Look for a machine, default, or macdef top-level keyword
            toplevel = tt = lexer.get_token()
            if not tt:
                break
            elif tt[0] == '#':
                # seek to beginning of comment, in case reading the token put
                # us on a new line, and then skip the rest of the line.
                pos = len(tt) + 1
                lexer.instream.seek(-pos, 1)
                lexer.instream.readline()
                continue
            elif tt == 'machine':
                entryname = lexer.get_token()
            elif tt == 'default':
                entryname = 'default'
            elif tt == 'macdef':                # Just skip to end of macdefs
                entryname = lexer.get_token()
                self.macros[entryname] = []
                # Read the macro body line by line, up to a blank line.
                lexer.whitespace = ' \t'
                while 1:
                    line = lexer.instream.readline()
                    if not line or line == '\012':
                        lexer.whitespace = ' \t\r\n'
                        break
                    self.macros[entryname].append(line)
                continue
            else:
                raise NetrcParseError(
                    "bad toplevel token %r" % tt, file, lexer.lineno)

            # We're looking at start of an entry for a named machine or default.
            login = ''
            account = password = None
            # NOTE(review): placeholder dict; it is only replaced by the
            # (login, account, password) tuple once a password is seen, so an
            # entry without a password leaves an empty dict behind.
            self.hosts[entryname] = {}
            while 1:
                tt = lexer.get_token()
                if (tt.startswith('#') or
                    tt in {'', 'machine', 'default', 'macdef'}):
                    if password:
                        self.hosts[entryname] = (login, account, password)
                        lexer.push_token(tt)
                        break
                    else:
                        raise NetrcParseError(
                            "malformed %s entry %s terminated by %s"
                            % (toplevel, entryname, repr(tt)),
                            file, lexer.lineno)
                elif tt == 'login' or tt == 'user':
                    login = lexer.get_token()
                elif tt == 'account':
                    account = lexer.get_token()
                elif tt == 'password':
                    # For the user's own ~/.netrc on POSIX, refuse to read a
                    # password unless the file is owned by and private to
                    # the current user.
                    if os.name == 'posix' and default_netrc:
                        prop = os.fstat(fp.fileno())
                        if prop.st_uid != os.getuid():
                            try:
                                fowner = pwd.getpwuid(prop.st_uid)[0]
                            except KeyError:
                                fowner = 'uid %s' % prop.st_uid
                            try:
                                user = pwd.getpwuid(os.getuid())[0]
                            except KeyError:
                                user = 'uid %s' % os.getuid()
                            raise NetrcParseError(
                                ("~/.netrc file owner (%s) does not match"
                                 " current user (%s)") % (fowner, user),
                                file, lexer.lineno)
                        if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
                            raise NetrcParseError(
                                "~/.netrc access too permissive: access"
                                " permissions must restrict access to only"
                                " the owner", file, lexer.lineno)
                    password = lexer.get_token()
                else:
                    raise NetrcParseError("bad follower token %r" % tt,
                                          file, lexer.lineno)

    def authenticators(self, host):
        """Return a (user, account, password) tuple for given host."""
        if host in self.hosts:
            return self.hosts[host]
        elif 'default' in self.hosts:
            return self.hosts['default']
        else:
            return None

    def __repr__(self):
        """Dump the class data in the format of a .netrc file."""
        rep = ""
        for host in self.hosts.keys():
            attrs = self.hosts[host]
            rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
            if attrs[1]:
                # NOTE(review): this line lacks a leading "\t" and trailing
                # "\n", so the dumped account runs into the password line --
                # looks like an upstream formatting bug; confirm.
                rep = rep + "account " + repr(attrs[1])
            rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
        for macro in self.macros.keys():
            rep = rep + "macdef " + macro + "\n"
            for line in self.macros[macro]:
                rep = rep + line
            rep = rep + "\n"
        return rep
if __name__ == '__main__':
    # Parenthesized form prints identically on Python 2 and Python 3.
    print(netrc())
| {
"repo_name": "Jeff-Tian/mybnb",
"path": "Python27/Lib/netrc.py",
"copies": "3",
"size": "6010",
"license": "apache-2.0",
"hash": -1610442442750848000,
"line_mean": 39.4482758621,
"line_max": 80,
"alpha_frac": 0.431281198,
"autogenerated": false,
"ratio": 4.619523443504996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008074018516967774,
"num_lines": 145
} |
# An object oriented model of a circuit.
from cvxpy import *
import abc
class Node(object):
    """A node connecting devices."""

    def __init__(self):
        self.voltage = Variable()
        self.current_flows = []

    def constraints(self):
        # Kirchhoff's current law: the net current into the node is zero.
        net_current = sum(f for f in self.current_flows)
        return [net_current == 0]
class Ground(Node):
    """A node held at 0 volts."""

    def constraints(self):
        base_constraints = super(Ground, self).constraints()
        return [self.voltage == 0] + base_constraints
class Device(object):
    """A device on a circuit."""

    # BUG FIX: the docstring used to sit *after* this assignment, making it a
    # dead string literal instead of the class docstring.
    # NOTE: __metaclass__ only has an effect on Python 2; on Python 3 the
    # class is not actually abstract (metaclass=ABCMeta would be required).
    __metaclass__ = abc.ABCMeta

    def __init__(self, pos_node, neg_node):
        # Current is counted as leaving the positive node and entering the
        # negative node.
        self.pos_node = pos_node
        self.pos_node.current_flows.append(-self.current())
        self.neg_node = neg_node
        self.neg_node.current_flows.append(self.current())

    @abc.abstractmethod
    def voltage(self):
        """The voltage drop on the device."""
        return NotImplemented

    @abc.abstractmethod
    def current(self):
        """The current through the device."""
        return NotImplemented

    def constraints(self):
        # Every path between two nodes has the same voltage drop.
        return [self.pos_node.voltage - self.voltage() == self.neg_node.voltage]
class Resistor(Device):
    """A resistor satisfying Ohm's law, V = R*I."""

    def __init__(self, pos_node, neg_node, resistance):
        self.resistance = resistance
        self._current = Variable()
        super(Resistor, self).__init__(pos_node, neg_node)

    def voltage(self):
        # Drop is proportional to (and opposing) the current flow.
        return -self.resistance*self.current()

    def current(self):
        return self._current
class VoltageSource(Device):
    """An ideal source holding a constant voltage."""

    def __init__(self, pos_node, neg_node, voltage):
        self._voltage = voltage
        self._current = Variable()
        super(VoltageSource, self).__init__(pos_node, neg_node)

    def voltage(self):
        return self._voltage

    def current(self):
        return self._current
class CurrentSource(Device):
    """An ideal source driving a constant current."""

    def __init__(self, pos_node, neg_node, current):
        self._voltage = Variable()
        self._current = current
        super(CurrentSource, self).__init__(pos_node, neg_node)

    def voltage(self):
        return self._voltage

    def current(self):
        return self._current
# Create a simple circuit and find the current and voltage.
nodes = [Ground(),Node(),Node()]
# A 10 V battery between the ground node and node 2.
# (The original comment said "5 V", but the source value below is 10.)
devices = [VoltageSource(nodes[0], nodes[2], 10)]
# A series of pairs of parallel resistors.
# 1/4 Ohm resistor and a 1 Ohm resistor in parallel.
devices.append( Resistor(nodes[0], nodes[1], 0.25) )
devices.append( Resistor(nodes[0], nodes[1], 1) )
# 4 Ohm resistor and a 1 Ohm resistor in parallel.
devices.append( Resistor(nodes[1], nodes[2], 4) )
devices.append( Resistor(nodes[1], nodes[2], 1) )
# Collect every node and device constraint, then solve a pure feasibility
# problem (constant objective) to find the operating point.
constraints = []
for obj in nodes + devices:
    constraints += obj.constraints()
Problem(Minimize(0), constraints).solve()
for node in nodes:
print node.voltage.value | {
"repo_name": "riadnassiffe/Simulator",
"path": "src/tools/ecos/cvxpy/examples/advanced/circuits.py",
"copies": "12",
"size": "3057",
"license": "mit",
"hash": 8882112684060921000,
"line_mean": 29.58,
"line_max": 80,
"alpha_frac": 0.6391887471,
"autogenerated": false,
"ratio": 3.6742788461538463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007527916340549175,
"num_lines": 100
} |
"""An object oriented processor to ingest csv, json and excel files."""
# Design considerations
# ---------------------
#
# This processor is a replacement for the `stream_from_path` processor,
# whose functional approach became somewhat unreadable and brittle to changes.
# This new object oriented approach should improve readability and facilitate
# the handling of special cases through subclassing.
import json
import cchardet
from logging import warning, info, error
import logging
from datapackage_pipelines.wrapper import ingest
from datapackage_pipelines.wrapper import spew
from os.path import splitext
from petl import fromdicts, look
from pip.utils import cached_property
from tabulator import Stream
from common.config import LOG_SAMPLE_SIZE
from common.utilities import format_to_json
class BaseIngestor(object):
    """A thin wrapper around the `tabulator.Stream` class."""

    # File extensions this family of ingestors knows how to handle.
    VALID_EXTENSIONS = (
        '.csv',
        '.json',
        '.xls',
        '.xlsx'
    )

    @classmethod
    def load(cls, resource):
        """Return an instance of the relevant ingestor class."""
        filename = resource['path']
        _, extension = splitext(filename)
        assert extension in cls.VALID_EXTENSIONS
        # Dispatch by naming convention, e.g. '.csv' -> CSVIngestor.
        ingestor_name = extension.lstrip('.').upper() + 'Ingestor'
        ingestor_class = globals()[ingestor_name]
        return ingestor_class(resource, extension)

    def __init__(self, resource, extension):
        self.resource = resource
        self.extension = extension

    @property
    def rows(self):
        """Return a generator of rows."""
        self._log_parameters()
        self._check_headers()
        info('Running preprocessors: %r', self._pre_processors)
        for pre_processor in self._pre_processors:
            pre_processor()
        info('Opening resource: %s', self.resource['path'])
        with Stream(self.resource['path'], **self._body_options) as stream:
            info('First %s rows =\n%s', LOG_SAMPLE_SIZE, self._show(stream))
            for row in stream.iter(keyed=True):
                yield row

    @property
    def _body_options(self):
        """Options passed to `tabulator.Stream` for reading the body."""
        return {
            'headers': self._headers,
            'sample_size': LOG_SAMPLE_SIZE,
            'post_parse': self._post_processors,
        }

    @property
    def _header_options(self):
        """Options passed to `tabulator.Stream` for reading the header row."""
        return {'headers': int(self._parser_options.get('headerRow', 1))}

    @property
    def _parser_options(self):
        """Free-form parser options declared on the resource, if any."""
        return self.resource.get('parser_options', {})

    def _check_headers(self):
        """Assert that source headers match the description fields exactly."""
        # BUG FIX: the assertion message previously read 'do no match'.
        message = 'Fields and headers do not match'
        extra_fields = set(self._fields) - set(self._headers)
        extra_headers = set(self._headers) - set(self._fields)
        if len(extra_headers) > 0:
            logging.error('Headers in the source and not in the description file:')
            for header in extra_headers:
                logging.error('\t%s', header)
        if len(extra_fields) > 0:
            logging.error('Fields in the description file and not in the source:')
            for field in extra_fields:
                logging.error('\t%s', field)
        assert set(self._headers) == set(self._fields), message

    @property
    def _pre_processors(self):
        """A list of processors invoked before streaming."""
        return []

    @property
    def _post_processors(self):
        """A list of processors invoked after streaming."""
        return []

    @staticmethod
    def _lowercase_empty_values(rows):
        # This is a workaround waiting on the following to be fixed:
        # https://github.com/frictionlessdata/jsontableschema-py/issues/139
        for index, headers, row in rows:
            row = [field.lower() if field in ['None', 'Null', 'Nil', 'NaN']
                   else field for field in row]
            yield index, headers, row

    @staticmethod
    def force_strings(rows):
        """A post-parser processor to force all fields to strings."""
        # Rewritten as a comprehension; None becomes '' and newlines inside
        # values are flattened to spaces.
        for index, headers, values in rows:
            values_as_strings = [
                str(value).strip().replace('\n', ' ') if value is not None
                else ''
                for value in values
            ]
            yield index, headers, values_as_strings

    @property
    def _raw_headers(self):
        """Headers as found in the data file."""
        with Stream(self.resource['path'], **self._header_options) as stream:
            return stream.headers

    @property
    def _headers(self):
        """Headers without redundant blanks and/or line breaks."""
        clean_headers = []
        for raw_header in self._raw_headers:
            if raw_header:
                # Collapse internal whitespace runs and trim the edges.
                clean_headers.append(' '.join(raw_header.split()))
        return clean_headers

    @property
    def _fields(self):
        """Fields expected in the data from the source file."""
        return [field['name'] for field in self.resource['schema']['fields']]

    def _log_parameters(self):
        """Record ingestion parameters to the log stream."""
        fields_as_json = format_to_json(sorted(self._fields))
        headers_as_json = format_to_json(sorted(self._headers))
        options_as_json = format_to_json(self._body_options)
        nb_empty_headers = len(self._fields) - len(self._headers)
        info('Ignoring %s empty header fields', nb_empty_headers)
        info('%s sourced fields = %s', len(self._fields), fields_as_json)
        info('%s data fields = %s', len(self._headers), headers_as_json)
        info('Ingestor options = %s', options_as_json)

    @staticmethod
    def _show(stream):
        """Return a table of sample data."""
        keyed_rows = [dict(zip(stream.headers, row)) for row in stream.sample]
        petl_table = fromdicts(keyed_rows)
        return repr(look(petl_table, limit=None))

    def __repr__(self):
        args = self.__class__.__name__, self.resource['name']
        return '<{}: {}>'.format(*args)
class CSVIngestor(BaseIngestor):
    """An ingestor for csv files."""

    @property
    def _body_options(self):
        options = super(CSVIngestor, self)._body_options
        options.update(encoding=self._encoding)
        # Forward the optional csv dialect settings when they are given.
        for key in ('delimiter', 'quotechar', 'quoting'):
            if self._parser_options.get(key):
                options[key] = self._parser_options[key]
        return options

    @property
    def _post_processors(self):
        return [
            self._lowercase_empty_values,
            self._skip_header,
            self._drop_bad_rows,
            self.force_strings,
        ]

    @cached_property
    def _encoding(self):
        """Select or detect the file encoding and set the resource utf-8."""
        explicit = self.resource.get('encoding')
        return explicit if explicit else self._detect_encoding()

    def _detect_encoding(self):
        """Sniff the encoding using the entire file."""
        with open(self.resource['path'], 'rb') as stream:
            raw = stream.read()
        guessed = cchardet.detect(raw)['encoding']
        info('Detected %s encoding with cchardet', guessed)
        return guessed

    @property
    def _header_options(self):
        options = dict(headers=1, encoding=self._encoding)
        delimiter = self._parser_options.get('delimiter')
        if delimiter:
            options['delimiter'] = delimiter
        return options

    @staticmethod
    def _drop_bad_rows(rows):
        """Trim trailing blanks; assert when rows and headers still differ."""
        for index, headers, row in rows:
            while len(row) > len(headers) and len(row[-1].strip()) == 0:
                row = row[:-1]
            if len(row) != len(headers):
                message = 'Bad row {}:\nheaders={}\nrow={}'\
                    .format(index, format_to_json(headers), format_to_json(row))
                assert False, message
            yield index, headers, row

    @staticmethod
    def _skip_header(rows):
        """Skip the header (post-processor)."""
        # Headers are passed as an option and need to be explicitly ignored.
        for index, headers, values in rows:
            if index == 1:
                continue
            yield index, headers, values
class JSONIngestor(BaseIngestor):
    """An ingestor for json files."""

    @property
    def _body_options(self):
        options = super(JSONIngestor, self)._body_options
        options['encoding'] = self._encoding
        return options

    @cached_property
    def _encoding(self):
        """Explicit resource encoding, falling back to utf-8."""
        return self.resource.get('encoding') or 'utf-8'

    @property
    def _pre_processors(self):
        return [self._fill_missing_fields]

    @property
    def _post_processors(self):
        return [self._lowercase_empty_values, self.force_strings]

    @property
    def _fill_missing_fields(self):
        """Pre-fill incomplete JSON rows (to avoid fields mixing up)."""
        def fill_missing_fields():
            with open(self.resource['path']) as stream:
                records = json.loads(stream.read())
            for record in records:
                for header in self._raw_headers:
                    record.setdefault(header, None)
            with open(self.resource['path'], 'w+') as stream:
                stream.write(format_to_json(records))
        return fill_missing_fields

    @cached_property
    def _raw_headers(self):
        """Return all field names encountered in the file."""
        with open(self.resource['path']) as stream:
            records = json.loads(stream.read())
        keys = set()
        for record in records:
            keys.update(record)
        return sorted(keys)
class XLSIngestor(BaseIngestor):
    """An ingestor for xls files."""

    @property
    def _post_processors(self):
        return [
            self._lowercase_empty_values,
            self._skip_header,
            self._fixed_points,
            self.force_strings,
        ]

    @staticmethod
    def _skip_header(rows):
        """Skip the header (post-processor)."""
        # Headers are passed as an option and need to be explicitly ignored.
        for index, headers, values in rows:
            if index == 1:
                continue
            yield index, headers, values

    @staticmethod
    def _fixed_points(rows):
        """Convert floats to 2-digit fixed precision strings"""
        for index, headers, values in rows:
            converted = []
            for value in values:
                if type(value) is float:
                    converted.append('%.2f' % value)
                else:
                    converted.append(value)
            yield index, headers, converted
# .xlsx workbooks are ingested with exactly the same logic as .xls ones.
XLSXIngestor = XLSIngestor
def ingest_resources(datapackage):
    """Ingest each resource one by one into the pipeline."""
    for resource in datapackage['resources']:
        yield BaseIngestor.load(resource).rows
if __name__ == '__main__':
    # Pull the datapackage descriptor from the pipeline and push back the
    # fully-ingested resources.
    _, datapackage_, _ = ingest()
    spew(datapackage_, list(ingest_resources(datapackage_)))
| {
"repo_name": "Victordeleon/os-data-importers",
"path": "eu-structural-funds/common/processors/ingest_local_file.py",
"copies": "1",
"size": "11570",
"license": "mit",
"hash": 8253615151956443000,
"line_mean": 30.4402173913,
"line_max": 83,
"alpha_frac": 0.5866032844,
"autogenerated": false,
"ratio": 4.233443102817417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001992817139272526,
"num_lines": 368
} |
"""An object-oriented wrapper around RPi.GPIO"""
import RPi.GPIO as GPIO
import gpiocrust.edges as edges
from gpiocrust.pin_mode import PinMode
# Map the library-neutral edge constants onto their RPi.GPIO equivalents.
_edge_to_rpi_edge = {
    edges.RISING: GPIO.RISING,
    edges.FALLING: GPIO.FALLING,
    edges.BOTH: GPIO.BOTH,
}

# Map the library-neutral pin numbering modes onto their RPi.GPIO equivalents.
_pinmode_to_rpi_mode = {
    PinMode.BCM: GPIO.BCM,
    PinMode.BOARD: GPIO.BOARD
}
class Header(object):
    """Controls initializing and cleaning up the GPIO header."""

    def __init__(self, mode=PinMode.BOARD):
        # Pins registered here are cleaned up when the context exits.
        self._pinsForCleanup = []
        GPIO.setmode(_pinmode_to_rpi_mode[mode])

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Release every registered pin first, then the header as a whole.
        for registered_pin in self._pinsForCleanup:
            registered_pin.cleanup()
        GPIO.cleanup()

    def registerPinForCleanup(self, pin):
        self._pinsForCleanup.append(pin)
class OutputPin(object):
    """A single GPIO pin set for output."""

    def __init__(self, pin, value=0):
        # Normalize the pin identifier to an int once, up front.
        self._pin = int(pin)
        GPIO.setup(self._pin, GPIO.OUT, initial=value)

    @property
    def value(self):
        """Pin level, read back via GPIO.input."""
        return GPIO.input(self._pin)

    @value.setter
    def value(self, val):
        GPIO.output(self._pin, int(val))
class PWMOutputPin(OutputPin):
    """
    A single pulse width modulated output pin.

    Note: duty cycle values are controlled via the `value` property and
    range in value from 0.0 to 1.0, NOT 0 to 100.
    """

    def __init__(self, pin, frequency=50.0, value=0.0):
        super(PWMOutputPin, self).__init__(pin)
        self._frequency = frequency
        self._value = value
        self._pulse = GPIO.PWM(self._pin, frequency)
        self._pulse.start(self._value)

    def __del__(self):
        self._pulse.stop()

    @property
    def frequency(self):
        return self._frequency

    @frequency.setter
    def frequency(self, value):
        # RPi.GPIO offers no GetFrequency API, so remember the value locally.
        self._frequency = float(value)
        self._pulse.ChangeFrequency(value)

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        # RPi.GPIO offers no GetDutyCycle API either, so store this one too.
        self._value = float(value)
        # Scale the 0.0-1.0 duty cycle to the 0-100 range the driver expects.
        self._pulse.ChangeDutyCycle(self._value * 100.0)
class InputPin(object):
    """A single GPIO pin set for input."""

    def __init__(self, pin, value=0, callback=None, edge=edges.BOTH, bouncetime=None, header=None):
        """
        :param pin: GPIO pin to set up as an input.
        :param value: Pull-up/down resistor value (0 pulls down, otherwise up).
        :type callback: callable
        :param callback: Callback function invoked on the edge event.
        :param edge: Edge event to detect.
        :param bouncetime: Time in ms to ignore further edge events.
        :type header: Header
        :param header: Header with which to register input pin assignments,
            for eventual clean-up.
        """
        self._pin = int(pin)
        self._edge = _edge_to_rpi_edge[edge]
        self._header = header
        pull = GPIO.PUD_DOWN if value == 0 else GPIO.PUD_UP
        GPIO.setup(self._pin, GPIO.IN, pull_up_down=pull)
        event_kwargs = {}
        if bouncetime:
            event_kwargs['bouncetime'] = bouncetime
        GPIO.add_event_detect(gpio=self._pin, edge=self._edge, **event_kwargs)
        if callable(callback):
            GPIO.add_event_callback(self._pin, callback)
        if header is not None:
            header.registerPinForCleanup(self)

    def __del__(self):
        # Pins managed by a Header are cleaned up by the Header itself.
        if self._header is None:
            self.cleanup()

    def cleanup(self):
        GPIO.remove_event_detect(self._pin)

    @property
    def value(self):
        return GPIO.input(self._pin)

    def change(self, fn, edge=edges.BOTH):
        """Allow for use as a `@change` decorator."""
        def wrapped(pin):
            fn(self.value)
        GPIO.add_event_callback(self._pin, wrapped)

    def wait_for_edge(self):
        """
        Block until the configured edge occurs.

        This will remove any callbacks you might have specified.
        """
        GPIO.remove_event_detect(self._pin)
        GPIO.wait_for_edge(self._pin, self._edge)
| {
"repo_name": "zourtney/gpiocrust",
"path": "gpiocrust/raspberry_pi.py",
"copies": "1",
"size": "4103",
"license": "mit",
"hash": 6270946800015393000,
"line_mean": 25.8169934641,
"line_max": 99,
"alpha_frac": 0.60272971,
"autogenerated": false,
"ratio": 3.709764918625678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9783805045796662,
"avg_score": 0.005737916565803277,
"num_lines": 153
} |
# An object that wraps the PShape.
class Wiggler(object):
    """Wraps a PShape whose vertices wiggle with Perlin noise."""

    def __init__(self):
        # For 2D Perlin noise.
        self.yoff = 0
        # Its location.
        self.x = width / 2
        self.y = height / 2
        # The "original" locations of the vertices make up a circle.
        # We are using a list to keep a duplicate copy
        # of vertices original locations.
        self.original = []
        # BUG FIX: this used to read `int(range(TWO_PI * 10))`, which raises
        # a TypeError (int() of a range object). The intent is to step the
        # angle 0, 2, 4, ... up to TWO_PI*10 and scale it back by 0.1 below.
        for a in range(0, int(TWO_PI * 10), 2):
            ascaled = a * .1
            v = PVector.fromAngle(ascaled)
            v.mult(100)
            self.original.append(v)
        # The PShape to be "wiggled".
        # Make the PShape with those vertices.
        self.s = createShape()
        self.s.beginShape()
        self.s.fill(127)
        self.s.stroke(0)
        self.s.strokeWeight(2)
        for v in self.original:
            self.s.vertex(v.x, v.y)
        self.s.endShape(CLOSE)

    def wiggle(self):
        """Perturb every vertex with noise around its original location."""
        xoff = 0
        # Apply an offset to each vertex.
        for i in range(self.s.getVertexCount()):
            # Calculate a vertex location based on noise around "original"
            # location.
            pos = self.original[i]
            a = TWO_PI * noise(xoff, self.yoff)
            r = PVector.fromAngle(a)
            r.mult(4)
            r.add(pos)
            # Set the location of each vertex to the one.
            self.s.setVertex(i, r.x, r.y)
            # Increment perlin noise x value.
            xoff += 0.5
        # Increment perlin noise y value.
        self.yoff += 0.02

    def display(self):
        """Draw the shape translated to its location."""
        with pushMatrix():
            translate(self.x, self.y)
            shape(self.s)
| {
"repo_name": "mashrin/processing.py",
"path": "mode/examples/Topics/Create Shapes/WigglePShape/wiggler.py",
"copies": "2",
"size": "1665",
"license": "apache-2.0",
"hash": 5550498217108037000,
"line_mean": 31.6470588235,
"line_max": 74,
"alpha_frac": 0.5261261261,
"autogenerated": false,
"ratio": 3.627450980392157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 51
} |
"""An object to represent arc records
http://archive.org/web/researcher/ArcFileFormat.php
"""
import re
from hanzo.warctools.record import ArchiveRecord, ArchiveParser
from hanzo.warctools.archive_detect import register_record_type
# URL<sp>IP-address<sp>Archive-date<sp>Content-type<sp>
#Result-code<sp>Checksum<sp>Location<sp> Offset<sp>Filename<sp>
#Archive-length<nl>
#
@ArchiveRecord.HEADERS(
    URL = b'URL',
    IP = b'IP-address',
    DATE = b'Archive-date',
    CONTENT_TYPE = b'Content-type',
    CONTENT_LENGTH = b'Archive-length',
    RESULT_CODE = b'Result-code',
    CHECKSUM = b'Checksum',
    LOCATION = b'Location',
    OFFSET = b'Offset',
    FILENAME = b'Filename',
    )
class ArcRecord(ArchiveRecord):
    """Represents a record in an arc file."""
    # BUG FIX: the docstring above used to sit after TRAILER, making it a
    # dead string literal instead of the class docstring.

    TRAILER = b'\n'  # an ARC record is trailed by single unix newline

    def __init__(self, headers=None, content=None, errors=None):
        ArchiveRecord.__init__(self, headers, content, errors)

    @property
    def type(self):
        # Every ARC data record is reported with this fixed type label.
        return b"response"

    def _write_to(self, out, nl):
        # TODO: intentionally a no-op? Writing ARC records is not implemented.
        pass

    @classmethod
    def make_parser(cls):
        """Constructs a parser for arc records."""
        return ArcParser()
class ArcRecordHeader(ArcRecord):
    """Represents the headers in an arc record."""

    def __init__(self, headers=None, content=None, errors=None, version=None,
                 raw_headers=None):
        ArcRecord.__init__(self, headers, content, errors)
        self.raw_headers = raw_headers
        self.version = version

    @property
    def type(self):
        return b"filedesc"

    def raw(self):
        """Return the raw representation of this record."""
        joined_headers = b"".join(self.raw_headers)
        return joined_headers + self.content[1]
def rx(pat):
    """Compile *pat* into a case-insensitive regular expression."""
    compiled = re.compile(pat, flags=re.IGNORECASE)
    return compiled
# Trailing-newline matcher.
# NOTE(review): the alternation binds as (^\r\n)|(\r)|(\n$) -- presumably
# '(\r\n|\r|\n)$' was intended; confirm before relying on it.
nl_rx = rx('^\r\n|\r|\n$')
# Case-insensitive matchers for the length and type header names.
length_rx = rx(b'^' + ArcRecord.CONTENT_LENGTH + b'$') #pylint: disable-msg=E1101
type_rx = rx(b'^' + ArcRecord.CONTENT_TYPE + b'$') #pylint: disable-msg=E1101
# Split a header line on a space adjacent to a word boundary; bound once for
# repeated use in the parser below.
SPLIT = re.compile(br'\b\s|\s\b').split
class ArcParser(ArchiveParser):
    """A parser for arc archives."""

    def __init__(self):
        # ARC dialect version; learned from the leading filedesc record.
        self.version = 0
        # we don't know which version to parse initially - a v1 or v2 file so
        # we read the filedesc because the order and number of the headers
        # change between versions.
        # question? will we get arc fragments?
        # should we store both headers & detect records by header length?
        # if we don't know
        self.headers = []

    def parse(self, stream, offset, line=None):
        """Parses a stream as an arc archive and returns an Arc record along
        with the offset in the stream of the end of the record."""
        record = None
        content_type = None
        content_length = None
        if line is None:
            line = stream.readline()
        # Skip blank separator lines between records; EOF yields no record.
        while not line.rstrip():
            if not line:
                return (None, (), offset)
            line = stream.readline()
        if line.startswith(b'filedesc:'):
            # Leading filedesc record: it names the header fields used by
            # every subsequent record in the file.
            raw_headers = []
            raw_headers.append(line)
            # read headers named in body of record
            # to assign names to header, to read body of record
            arc_version_line = stream.readline()
            raw_headers.append(arc_version_line)
            arc_names_line = stream.readline()
            raw_headers.append(arc_names_line)
            arc_version = arc_version_line.strip()
            # configure parser instance
            self.version = arc_version.split()[0]
            self.headers = arc_names_line.strip().split()
            # now we have read header field in record body
            # we can extract the headers from the current record,
            # and read the length field
            # which is in a different place with v1 and v2
            # read headers
            arc_headers = self.parse_header_list(line)
            # extract content, ignoring header lines parsed already
            content_type, content_length, errors = \
                self.get_content_headers(arc_headers)
            content_length = content_length \
                - len(arc_version_line) \
                - len(arc_names_line)
            record = ArcRecordHeader(headers=arc_headers,
                                     version=arc_version,
                                     errors=errors,
                                     raw_headers=raw_headers)
        else:
            if not self.headers:
                raise Exception('missing filedesc')
            headers = self.parse_header_list(line)
            content_type, content_length, errors = \
                self.get_content_headers(headers)
            record = ArcRecord(headers = headers, errors=errors)
        line = None
        # The caller streams the record body itself: expose the stream and
        # the number of body bytes remaining before end-of-content.
        record.content_file = stream
        record.content_file.bytes_to_eoc = content_length
        return (record, (), offset)

    def trim(self, stream):
        # Nothing to discard between arc records.
        return ()

    def parse_header_list(self, line):
        """Zip the known header names with the values found on *line*."""
        # some people use ' ' as the empty value. lovely.
        line = line.rstrip(b'\r\n')
        values = SPLIT(line)
        if len(self.headers) != len(values):
            if self.headers[0] in (ArcRecord.URL, ArcRecord.CONTENT_TYPE):
                # fencepost
                values = [s[::-1] for s in reversed(SPLIT(line[::-1], len(self.headers)-1))]
            else:
                values = SPLIT(line, len(self.headers)-1)
            if len(self.headers) != len(values):
                # NOTE(review): values are bytes here, so ",".join(values)
                # would itself raise a TypeError on Python 3 -- confirm this
                # error path was ever exercised.
                raise Exception('missing headers %s %s'%(",".join(values), ",".join(self.headers)))
        return list(zip(self.headers, values))

    @staticmethod
    def get_content_headers(headers):
        """Pull (content_type, content_length, errors) from a header list.

        Malformed values are collected into *errors* rather than raised.
        """
        content_type = None
        content_length = None
        errors = []
        for name, value in headers:
            if type_rx.match(name):
                if value:
                    content_type = value
                else:
                    errors.append(('invalid header', name, value))
            elif length_rx.match(name):
                try:
                    content_length = int(value)
                except ValueError:
                    errors.append(('invalid header', name, value))
        return content_type, content_length, errors
# ARC files are recognized by their leading "filedesc://" header line.
register_record_type(re.compile(br'^filedesc://'), ArcRecord)
| {
"repo_name": "internetarchive/warctools",
"path": "hanzo/warctools/arc.py",
"copies": "1",
"size": "6499",
"license": "mit",
"hash": 4692719745122951000,
"line_mean": 31.9898477157,
"line_max": 95,
"alpha_frac": 0.5753192799,
"autogenerated": false,
"ratio": 4.064415259537211,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5139734539437211,
"avg_score": null,
"num_lines": null
} |
"""An object to represent arc records"""
import re
from hanzo.warctools.record import ArchiveRecord, ArchiveParser
from hanzo.warctools.archive_detect import register_record_type
# URL<sp>IP-address<sp>Archive-date<sp>Content-type<sp>
#Result-code<sp>Checksum<sp>Location<sp> Offset<sp>Filename<sp>
#Archive-length<nl>
#
@ArchiveRecord.HEADERS(
    URL = 'URL',
    IP = 'IP-address',
    DATE = 'Archive-date',
    CONTENT_TYPE = 'Content-type',
    CONTENT_LENGTH = 'Archive-length',
    RESULT_CODE = 'Result-code',
    CHECKSUM = 'Checksum',
    LOCATION = 'Location',
    OFFSET = 'Offset',
    FILENAME = 'Filename',
    )
class ArcRecord(ArchiveRecord):
    """A single record within an arc file."""

    def __init__(self, headers=None, content=None, errors=None):
        ArchiveRecord.__init__(self, headers, content, errors)

    @property
    def type(self):
        # Every ARC data record is reported with this fixed type label.
        return "response"

    def _write_to(self, out, nl):
        # TODO: intentionally a no-op? Writing ARC records is not implemented.
        pass

    @classmethod
    def make_parser(cls):
        """Return a fresh parser capable of reading arc records."""
        return ArcParser()
class ArcRecordHeader(ArcRecord):
    """The header (filedesc) record of an arc file."""

    def __init__(self, headers=None, content=None, errors=None, version=None,
                 raw_headers=None):
        ArcRecord.__init__(self, headers, content, errors)
        self.raw_headers = raw_headers
        self.version = version

    @property
    def type(self):
        return "filedesc"

    def raw(self):
        """Return the raw representation of this record."""
        joined_headers = "".join(self.raw_headers)
        return joined_headers + self.content[1]
def rx(pat):
    """Helper: compile *pat* with case-insensitive matching."""
    return re.compile(pat, re.IGNORECASE)
# Trailing-newline matcher.
# NOTE(review): the alternation binds as (^\r\n)|(\r)|(\n$) -- presumably
# '(\r\n|\r|\n)$' was intended; confirm before relying on it.
nl_rx = rx('^\r\n|\r|\n$')
# Case-insensitive matchers for the length and type header names.
length_rx = rx('^%s$' % ArcRecord.CONTENT_LENGTH) #pylint: disable-msg=E1101
type_rx = rx('^%s$' % ArcRecord.CONTENT_TYPE) #pylint: disable-msg=E1101
class ArcParser(ArchiveParser):
    """A parser for arc archives.

    Note: this module is Python 2 code (it raises StandardError and relies
    on zip() returning a list).
    """
    def __init__(self):
        self.version = 0
        # we don't know which version to parse initially - a v1 or v2 file so
        # we read the filedesc because the order and number of the headers
        # change between versions.
        # question? will we get arc fragments?
        # should we store both headers & detect records by header length?
        # if we don't know
        self.headers = []
        # Bookkeeping for blank separator lines between records; consumed
        # (decremented) at the top of parse() and set when a record ends
        # exactly at EOF.
        self.trailing_newlines = 0
    def parse(self, stream, offset):
        """Parses a stream as an arc archive and returns an Arc record along
        with the offset in the stream of the end of the record."""
        record = None
        content_type = None
        content_length = None
        line = stream.readline()
        # Skip blank separator lines; a falsy line means EOF, no record.
        while not line.rstrip():
            if not line:
                return (None, (), offset)
            self.trailing_newlines -= 1
            line = stream.readline()
        if line.startswith('filedesc:'):
            # The leading "filedesc" record names, in its body, the header
            # fields used by every subsequent record in this file.
            raw_headers = []
            raw_headers.append(line)
            # read headers named in body of record
            # to assign names to header, to read body of record
            arc_version_line = stream.readline()
            raw_headers.append(arc_version_line)
            arc_names_line = stream.readline()
            raw_headers.append(arc_names_line)
            arc_version = arc_version_line.strip()
            # configure parser instance
            self.version = arc_version.split()[0]
            self.headers = arc_names_line.strip().split()
            # now we have read header field in record body
            # we can extract the headers from the current record,
            # and read the length field
            # which is in a different place with v1 and v2
            # read headers
            arc_headers = self.get_header_list(line.strip().split())
            # extract content, ignoring header lines parsed already
            content_type, content_length, errors = \
                self.get_content_headers(arc_headers)
            # The declared length covers the two body lines already read.
            content_length = content_length \
                - len(arc_version_line) \
                - len(arc_names_line)
            record = ArcRecordHeader(headers=arc_headers,
                                     version=arc_version,
                                     errors=errors,
                                     raw_headers=raw_headers)
        else:
            if not self.headers:
                raise StandardError('missing filedesc')
            headers = self.get_header_list(line.strip().split())
            content_type, content_length, errors = \
                self.get_content_headers(headers)
            record = ArcRecord(headers = headers, errors=errors)
        line = None
        if content_length:
            # Accumulate whole lines until at least content_length bytes
            # have been read (the last line may overshoot).
            content = []
            length = 0
            while length < content_length:
                line = stream.readline()
                if not line:
                    # print 'no more data'
                    break
                content.append(line)
                length += len(line)
            content = "".join(content)
            # NOTE(review): the byte at index content_length is dropped by
            # the second slice -- presumably the newline terminating the
            # body; confirm against the arc format before changing.
            content, line = \
                content[0:content_length], content[content_length+1:]
            record.content = (content_type, content)
        if line:
            record.error('trailing data at end of record', line)
        if line == '':
            self.trailing_newlines = 1
        return (record, (), offset)
    def trim(self, stream):
        """Arc records carry no trailing garbage to trim."""
        return ()
    def get_header_list(self, values):
        """Pair each value with the header names read from the filedesc
        record; the pairing is order-sensitive."""
        return zip(self.headers, values)
    @staticmethod
    def get_content_headers(headers):
        """Extract (content_type, content_length, errors) from parsed
        header pairs, collecting ('invalid header', name, value) tuples
        for malformed entries."""
        content_type = None
        content_length = None
        errors = []
        for name, value in headers:
            if type_rx.match(name):
                if value:
                    content_type = value
                else:
                    errors.append(('invalid header', name, value))
            elif length_rx.match(name):
                try:
                    content_length = int(value)
                except ValueError:
                    errors.append(('invalid header', name, value))
        return content_type, content_length, errors
register_record_type(re.compile('^filedesc://'), ArcRecord)
| {
"repo_name": "alard/warctozip",
"path": "hanzo/warctools/arc.py",
"copies": "1",
"size": "6341",
"license": "mit",
"hash": -3359917911969275400,
"line_mean": 31.6855670103,
"line_max": 77,
"alpha_frac": 0.563948904,
"autogenerated": false,
"ratio": 4.298983050847458,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009923943544457045,
"num_lines": 194
} |
"""An object to represent warc records, using the abstract record in
record.py"""
import re
import hashlib
from hanzo.warctools.record import ArchiveRecord, ArchiveParser
from hanzo.warctools.archive_detect import register_record_type
import uuid
bad_lines = 5 # when to give up looking for the version stamp
@ArchiveRecord.HEADERS(
    DATE=b'WARC-Date',
    TYPE=b'WARC-Type',
    ID=b'WARC-Record-ID',
    CONCURRENT_TO=b'WARC-Concurrent-To',
    REFERS_TO=b'WARC-Refers-To',
    REFERS_TO_TARGET_URI=b'WARC-Refers-To-Target-URI',
    REFERS_TO_DATE=b'WARC-Refers-To-Date',
    CONTENT_LENGTH=b'Content-Length',
    CONTENT_TYPE=b'Content-Type',
    URL=b'WARC-Target-URI',
    BLOCK_DIGEST=b'WARC-Block-Digest',
    PAYLOAD_DIGEST=b'WARC-Payload-Digest',
    IP_ADDRESS=b'WARC-IP-Address',
    FILENAME=b'WARC-Filename',
    WARCINFO_ID=b'WARC-Warcinfo-ID',
    PROFILE=b'WARC-Profile'
)
class WarcRecord(ArchiveRecord):
    """A single WARC record: version line, named headers and a content
    block held either in memory (tuple) or streamed from a file."""
    # Pylint is very bad at decorators, E1101 is the message that says
    # a member variable does not exist
    # pylint: disable-msg=E1101

    # Known version stamps.
    VERSION = b"WARC/1.0"
    VERSION18 = b"WARC/0.18"
    VERSION17 = b"WARC/0.17"

    # Values of the WARC-Type header.
    RESPONSE = b"response"
    RESOURCE = b"resource"
    REQUEST = b"request"
    REVISIT = b"revisit"
    METADATA = b"metadata"
    CONVERSION = b"conversion"
    WARCINFO = b"warcinfo"

    PROFILE_IDENTICAL_PAYLOAD_DIGEST = b"http://netpreserve.org/warc/1.0/revisit/identical-payload-digest"

    # Every record is terminated by two CRLFs.
    TRAILER = b'\r\n\r\n'

    def __init__(self, version=VERSION, headers=None, content=None,
                 errors=None, content_file=None):
        """
        WarcRecord constructor.

        Either content or content_file must be provided, but not both. If
        content, which is a tuple (content_type, content_buffer), is provided,
        when writing the warc record, any Content-Type and Content-Length that
        appear in the supplied headers are ignored, and the values content[0]
        and len(content[1]), respectively, are used.

        When reading, the caller can stream content_file or use content, which is
        lazily filled using content_file, and after which content_file is
        unavailable.
        """
        ArchiveRecord.__init__(self, headers, content, errors)
        self.version = version
        self.content_file = content_file

    @property
    def id(self):
        """The value of the WARC-Record-ID header."""
        return self.get_header(self.ID)

    def _write_to(self, out, nl):
        """WARC Format:
        VERSION NL
        (Key: Value NL)*
        NL
        CONTENT NL
        NL
        don't write multi line headers
        """
        out.write(self.version)
        out.write(nl)
        for k, v in self.headers:
            # When content is an in-memory tuple, Content-Type and
            # Content-Length are regenerated below rather than copied.
            if self.content_file is not None or k not in (self.CONTENT_TYPE, self.CONTENT_LENGTH):
                out.write(k)
                out.write(b": ")
                out.write(v)
                out.write(nl)
        if self.content_file is not None:
            out.write(nl) # end of header blank nl
            # Stream the body through in fixed-size chunks.
            while True:
                buf = self.content_file.read(8192)
                if buf == b'': break
                out.write(buf)
        else:
            # if content tuple is provided, set Content-Type and
            # Content-Length based on the values in the tuple
            content_type, content_buffer = self.content
            if content_type:
                out.write(self.CONTENT_TYPE)
                out.write(b": ")
                out.write(content_type)
                out.write(nl)
            if content_buffer is None:
                content_buffer = b""
            content_length = len(content_buffer)
            out.write(self.CONTENT_LENGTH)
            out.write(b": ")
            out.write(str(content_length).encode('ascii'))
            out.write(nl)
            out.write(nl) # end of header blank nl
            if content_buffer:
                out.write(content_buffer)
        # end of record nl nl
        out.write(nl)
        out.write(nl)
        out.flush()

    def repair(self):
        """Repairing damaged records is not implemented."""
        pass

    def validate(self):
        """Return the list of errors accumulated while parsing."""
        return self.errors

    @classmethod
    def make_parser(cls):
        # First parameter renamed from the misleading ``self`` to ``cls``
        # (this is a classmethod); callers are unaffected.
        """Return a parser for reading WARC records."""
        return WarcParser()

    def block_digest(self, content_buffer):
        """Return a 'sha256:<hexdigest>' digest string for *content_buffer*.

        NOTE(review): this returns a native str while the header machinery
        above uses bytes values -- confirm callers encode the result.
        """
        block_hash = hashlib.sha256()
        block_hash.update(content_buffer)
        digest = "sha256:%s" % block_hash.hexdigest()
        return digest

    @staticmethod
    def warc_uuid(text):
        """Derive a deterministic '<urn:uuid:...>' record id (as bytes)
        from the SHA-1 of *text*."""
        return "<urn:uuid:{}>".format(uuid.UUID(hashlib.sha1(text).hexdigest()[0:32])).encode('ascii')

    @staticmethod
    def random_warc_uuid():
        """Return a fresh random '<urn:uuid:...>' record id (as bytes)."""
        return "<urn:uuid:{}>".format(uuid.uuid4()).encode('ascii')
def rx(pat):
    """Compile *pat* into a regular expression that ignores case."""
    return re.compile(pat, re.IGNORECASE)
# Matches the "WARC/x.y" version stamp; any junk before it is captured in
# 'prefix' so the parser can report and skip it.
version_rx = rx(br'^(?P<prefix>.*?)(?P<version>\s*WARC/(?P<number>.*?))'
                b'(?P<nl>\r\n|\r|\n)\\Z')
# a header is key: <ws> value plus any following lines with leading whitespace
header_rx = rx(br'^(?P<name>.*?):\s?(?P<value>.*?)' b'(?P<nl>\r\n|\r|\n)\\Z')
# Continuation line of a folded header (leading whitespace).
value_rx = rx(br'^\s+(?P<value>.+?)' b'(?P<nl>\r\n|\r|\n)\\Z')
# A bare newline terminates the header block.
nl_rx = rx(b'^(?P<nl>\r\n|\r|\n\\Z)')
length_rx = rx(b'^' + WarcRecord.CONTENT_LENGTH + b'$' ) # pylint: disable-msg=E1101
type_rx = rx(b'^' + WarcRecord.CONTENT_TYPE + b'$') # pylint: disable-msg=E1101
# Headers every valid WARC record must carry (compared lowercased).
required_headers = set((
    WarcRecord.TYPE.lower(), # pylint: disable-msg=E1101
    WarcRecord.ID.lower(), # pylint: disable-msg=E1101
    WarcRecord.CONTENT_LENGTH.lower(), # pylint: disable-msg=E1101
    WarcRecord.DATE.lower(), # pylint: disable-msg=E1101
))
class WarcParser(ArchiveParser):
    """Stream parser producing WarcRecord objects."""
    # Version numbers this parser accepts without flagging an error.
    KNOWN_VERSIONS = set((b'1.0', b'0.17', b'0.18'))
    def parse(self, stream, offset, line=None):
        """Reads a warc record from the stream, returns a tuple
        (record, errors). Either records is null or errors is
        null. Any record-specific errors are contained in the record -
        errors is only used when *nothing* could be parsed"""
        # pylint: disable-msg=E1101
        errors = []
        version = None
        # find WARC/.*
        if line is None:
            line = stream.readline()
        # Scan forward for the version stamp, tolerating up to bad_lines
        # junk lines; blank lines are skipped silently.
        while line:
            match = version_rx.match(line)
            if match:
                version = match.group('version')
                if offset is not None:
                    offset += len(match.group('prefix'))
                break
            else:
                if offset is not None:
                    offset += len(line)
                if not nl_rx.match(line):
                    errors.append(('ignored line', line))
                    if len(errors) > bad_lines:
                        errors.append(('too many errors, giving up hope',))
                        return (None, errors, offset)
                line = stream.readline()
        if not line:
            # EOF before any headers could be read.
            if version:
                errors.append(('warc version but no headers', version))
            return (None, errors, offset)
        if line:
            content_length = 0
            content_type = None
            record = WarcRecord(errors=errors, version=version)
            # The WARC spec mandates CRLF line endings; anything else is
            # recorded as a recoverable error.
            if match.group('nl') != b'\x0d\x0a':
                record.error('incorrect newline in version', match.group('nl'))
            if match.group('number') not in self.KNOWN_VERSIONS:
                # NOTE(review): joining the bytes members of KNOWN_VERSIONS
                # with a str comma raises TypeError on Python 3 -- confirm
                # this error path under py3.
                record.error('version field is not known (%s)'
                             % (",".join(self.KNOWN_VERSIONS)),
                             match.group('number'))
            prefix = match.group('prefix')
            if prefix:
                record.error('bad prefix on WARC version header', prefix)
            #Read headers
            line = stream.readline()
            while line and not nl_rx.match(line):
                #print 'header', repr(line)
                match = header_rx.match(line)
                if match:
                    if match.group('nl') != b'\x0d\x0a':
                        record.error('incorrect newline in header',
                                     match.group('nl'))
                    name = match.group('name').strip()
                    value = [match.group('value').strip()]
                    #print 'match',name, value
                    # Collect folded continuation lines for this header.
                    line = stream.readline()
                    match = value_rx.match(line)
                    while match:
                        #print 'follow', repr(line)
                        if match.group('nl') != b'\x0d\x0a':
                            record.error('incorrect newline in follow header',
                                         line, match.group('nl'))
                        value.append(match.group('value').strip())
                        line = stream.readline()
                        match = value_rx.match(line)
                    value = b" ".join(value)
                    record.headers.append((name, value))
                    if type_rx.match(name):
                        if value:
                            content_type = value
                        else:
                            record.error('invalid header', name, value)
                    elif length_rx.match(name):
                        try:
                            #print name, value
                            content_length = int(value)
                            #print content_length
                        except ValueError:
                            record.error('invalid header', name, value)
            # have read blank line following headers
            # The body is left unread on the stream for lazy consumption;
            # bytes_to_eoc tells downstream code how much belongs to this
            # record.
            record.content_file = stream
            record.content_file.bytes_to_eoc = content_length
            # check mandatory headers
            # WARC-Type WARC-Date WARC-Record-ID Content-Length
            return (record, (), offset)
# An empty line is also accepted as the start of a (blank-padded) record.
blank_rx = rx(br'^$')
register_record_type(version_rx, WarcRecord)
register_record_type(blank_rx, WarcRecord)
def make_response(id, date, url, content, request_id):
    """Build a WARC 'response' record; links it to the originating
    request via WARC-Concurrent-To when request_id is given."""
    # pylint: disable-msg=E1101
    header_fields = [
        (WarcRecord.TYPE, WarcRecord.RESPONSE),
        (WarcRecord.ID, id),
        (WarcRecord.DATE, date),
        (WarcRecord.URL, url),
    ]
    if request_id:
        header_fields.append((WarcRecord.CONCURRENT_TO, request_id))
    return WarcRecord(headers=header_fields, content=content)
def make_request(request_id, date, url, content, response_id):
    """Build a WARC 'request' record; links it to the matching response
    via WARC-Concurrent-To when response_id is given."""
    # pylint: disable-msg=E1101
    header_fields = [
        (WarcRecord.TYPE, WarcRecord.REQUEST),
        (WarcRecord.ID, request_id),
        (WarcRecord.DATE, date),
        (WarcRecord.URL, url),
    ]
    if response_id:
        header_fields.append((WarcRecord.CONCURRENT_TO, response_id))
    return WarcRecord(headers=header_fields, content=content)
def make_metadata(meta_id, date, content, concurrent_to=None, url=None):
    """Build a WARC 'metadata' record; the WARC-Concurrent-To and
    WARC-Target-URI headers are added only when supplied."""
    # pylint: disable-msg=E1101
    header_fields = [
        (WarcRecord.TYPE, WarcRecord.METADATA),
        (WarcRecord.ID, meta_id),
        (WarcRecord.DATE, date),
    ]
    if concurrent_to:
        header_fields.append((WarcRecord.CONCURRENT_TO, concurrent_to))
    if url:
        header_fields.append((WarcRecord.URL, url))
    return WarcRecord(headers=header_fields, content=content)
def make_conversion(conv_id, date, content, refers_to=None, url=None):
    """Build a WARC 'conversion' record; the WARC-Refers-To and
    WARC-Target-URI headers are added only when supplied."""
    # pylint: disable-msg=E1101
    header_fields = [
        (WarcRecord.TYPE, WarcRecord.CONVERSION),
        (WarcRecord.ID, conv_id),
        (WarcRecord.DATE, date),
    ]
    if refers_to:
        header_fields.append((WarcRecord.REFERS_TO, refers_to))
    if url:
        header_fields.append((WarcRecord.URL, url))
    return WarcRecord(headers=header_fields, content=content)
def warc_datetime_str(d):
    """Format a datetime as a WARC-Date value: ISO-8601, UTC, 'Z' suffix.

    Fractional seconds are dropped (WARC-Date uses whole seconds).
    Timezone-aware datetimes are first normalised to UTC; previously the
    UTC offset was truncated along with the microseconds and 'Z' appended
    anyway, mislabelling non-UTC times as UTC.

    :param d: a datetime; naive values are assumed to already be UTC.
    :returns: the formatted timestamp as bytes, e.g. b'2012-01-01T00:00:00Z'.
    """
    offset = d.utcoffset() if hasattr(d, 'utcoffset') else None
    if offset is not None:
        # Shift to UTC and drop tzinfo so isoformat() emits no offset.
        d = (d - offset).replace(tzinfo=None)
    s = d.isoformat()
    if '.' in s:
        # Truncate the fractional-seconds part.
        s = s[:s.find('.')]
    return (s + 'Z').encode('utf-8')
| {
"repo_name": "internetarchive/warctools",
"path": "hanzo/warctools/warc.py",
"copies": "1",
"size": "11905",
"license": "mit",
"hash": 4394548616748940300,
"line_mean": 31.6164383562,
"line_max": 106,
"alpha_frac": 0.5495170097,
"autogenerated": false,
"ratio": 3.8829093281148075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9924857797960909,
"avg_score": 0.0015137079707798506,
"num_lines": 365
} |
"""An observer keeping track of running average positions of atoms.
"""
from asap3.Internal.Subject import Subject
class RunningAverage:
    """Calculate running average positions of atoms.

    Attach this observer to the atoms to make average positions available
    e.g. for CNA analysis. The filter is called often (maybe every 5 time
    steps). After it has been called interval times (or a multiple hereof)
    the average positions are calculated, and the attached analysis
    functions are called. numavg specifies over how many calls are used
    to calculate the average positions. If numavg==interval, all calls
    are used to collect data, and the average is over all times since the
    last analysis. If numavg<interval then only the last numavg calls
    before the analysis is used to collect data. Currently, numavg>interval
    is not supported.

    During analysis, positions of the atoms are set to the average positions.
    After analysis, observers can be called e.g. to store the results.
    Observers may be called before or after the positions are restored to
    their instantaneous values.

    Warning: As the atoms are changed before and after analysis, migration
    may be triggered in parallel simulations.

    Parameters:

    atoms:
        The atoms being observed. Temporary data is stored in the atoms.

    interval:
        How often analysis is done.

    numavg:
        How many frames should be used for the averaging.

    Important methods:

    update():
        Update the running average, and do analysis if needed. This
        is the function that should be attached to the dynamics.

    __call__():
        Same as update()

    attach_analysis(callable, *args, **kwargs):
        Attach an analysis function. Arguments are passed on.

    attach_observer(callable, interval=1, *args, **kwargs):
        Call an external observer for example to store the result
        of the analysis. The observer is called every interval'th
        time the analysis is performed, it is rare to set interval
        different from 1. The observer is called after the
        positions have been restored to their instantaneous
        values.

    attach_observer_avgpos(callable, interval=1, *args, **kwargs):
        As attach_observer(), except that the observer is called while
        the positions still have their average value.
    """
    def __init__(self, atoms, interval, numavg):
        self.atoms = atoms
        self.interval = interval
        self.numavg = numavg
        if numavg > interval:
            raise NotImplementedError("The case numavg>interval has not yet been implemented.")
        # Number of leading calls in each cycle that are ignored before
        # data collection begins.
        self.firstdata = self.interval - self.numavg
        self.analysers = Subject()
        self.observers = Subject()
        self.avgobservers = Subject()
        # Buffered (name, source, once) tuples, replayed onto observers
        # attached after set_extra_data() has been called.
        self.extra_data_buffer = []
        # n: calls since last analysis; m: frames accumulated in the sum.
        self.n = 0
        self.m = 0
    def attach_analysis(self, func, *args, **kwargs):
        "Attach an analysis method"
        self.analysers.attach(func, 1, *args, **kwargs)
    def attach_observer(self, func, interval=1, *args, **kwargs):
        "Attach an observer called after analysis"
        # Replay buffered extra-data registrations on the new observer.
        if self.extra_data_buffer and hasattr(func, 'set_extra_data'):
            for name, source, once in self.extra_data_buffer:
                func.set_extra_data(name, source, once)
        self.observers.attach(func, interval, *args, **kwargs)
    def attach_observer_avgpos(self, func, interval=1, *args, **kwargs):
        "Attach an observer called after analysis but while positions remain averaged."
        self.avgobservers.attach(func, interval, *args, **kwargs)
    def reset(self):
        "Reset internal variables after an analysis"
        self.n = 0
        self.m = 0
        # Setting an array to None removes it from the atoms (ASE/Asap
        # convention).
        if self.atoms.has('avgpositions'):
            self.atoms.set_array('avgpositions', None)
        if self.atoms.has('sumpositions'):
            # This should not normally happen.
            self.atoms.set_array('sumpositions', None)
    def update(self):
        """Accumulate positions; every self.interval calls run the analysis."""
        self.n += 1
        if self.n <= self.firstdata:
            return # Do nothing
        # Take data, and include in average
        if self.atoms.has('sumpositions'):
            sp = self.atoms.get_array('sumpositions') + self.atoms.get_positions()
        else:
            sp = self.atoms.get_positions()
        self.atoms.set_array('sumpositions', sp)
        self.m += 1
        if self.n == self.interval:
            assert self.m == self.numavg
            self.atoms.set_array('avgpositions', sp * (1.0/self.m))
            self.atoms.set_array('sumpositions', None)
            # Now save the instantanous positions and set the positions to
            # their average
            self.atoms.set_array('instant_positions', self.atoms.get_positions())
            self.atoms.set_positions(self.atoms.get_array('avgpositions'))
            # Call analysis and then observers (avg pos)
            self.analysers.call_observers()
            self.avgobservers.call_observers()
            # Reset positions to instantaneous value
            self.atoms.set_positions(self.atoms.get_array('instant_positions'))
            self.atoms.set_array('instant_positions', None)
            self.observers.call_observers()
            self.reset()
    __call__ = update
    def set_extra_data(self, name, source=None, once=False):
        """Forward to observers that extra data should be stored.

        This function is intended to make sure that if a BundleTrajectory
        is attached to an NPT dynamics through a RunningAverage object,
        then information about the dynamics is still saved in the
        BundleTrajectory.

        It works by forwarding the call to set_extra_data to any observers
        having a set_extra_data method - whether they are attached before
        or after calling this function.
        """
        self.extra_data_buffer.append((name, source, once))
        for obs in self.observers.observers:
            f = obs[0]
            if hasattr(f, "set_extra_data"):
                f.set_extra_data(name, source, once)
| {
"repo_name": "auag92/n2dm",
"path": "Asap-3.8.4/Python/asap3/analysis/averagepositions.py",
"copies": "1",
"size": "6218",
"license": "mit",
"hash": -7141423477147109000,
"line_mean": 40.4533333333,
"line_max": 95,
"alpha_frac": 0.6383081377,
"autogenerated": false,
"ratio": 4.273539518900344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01964021347475762,
"num_lines": 150
} |
"""A node, and related classes, for use in expression trees."""
import itertools
from collections import deque
from tt.definitions import (
MAX_OPERATOR_STR_LEN,
OPERATOR_MAPPING,
SYMBOLIC_OPERATOR_MAPPING,
TT_AND_OP,
TT_IMPL_OP,
TT_NAND_OP,
TT_NOR_OP,
TT_NOT_OP,
TT_OR_OP,
TT_XOR_OP,
TT_XNOR_OP)
from tt.errors import (
InvalidArgumentTypeError,
InvalidArgumentValueError,
RequiresNormalFormError)
_DEFAULT_INDENT_SIZE = MAX_OPERATOR_STR_LEN + 1
class ExpressionTreeNode(object):
"""A base class for expression tree nodes.
This class is extended within tt and is not meant to be used
directly.
If you plan to extend it, note that descendants of this class
must compute the ``_is_cnf``, ``_is_dnf``, and ``_is_really_unary`` boolean
attributes and the ``_non_negated_symbol_set`` and ``_negated_symbol_set``
set attributes within their initialization. Additionally, descendants of
this class must implemented the ``__eq__`` magic method (but not
``__ne__``) as well as the private ``_copy`` transformation.
"""
def __init__(self, symbol_name, l_child=None, r_child=None):
self._symbol_name = symbol_name
self._l_child = l_child
self._r_child = r_child
@property
def symbol_name(self):
"""The string operator/operand name wrapped in this node.
:type: :class:`str <python:str>`
"""
return self._symbol_name
@property
def l_child(self):
"""This node's left child; ``None`` indicates the absence of a child.
:type: :class:`ExpressionTreeNode` or ``None``
"""
return self._l_child
@property
def r_child(self):
"""This node's left child; ``None`` indicates the absence of a child.
:type: :class:`ExpressionTreeNode` or ``None``
"""
return self._r_child
@property
def is_cnf(self):
"""Whether the tree rooted at this node is in conjunctive normal form.
:type: :class:`bool <python:bool>`
"""
return self._is_cnf
@property
def is_dnf(self):
"""Whether the tree rooted at this node is in disjunctive normal form.
:type: :class:`bool <python:bool>`
"""
return self._is_dnf
@property
def non_negated_symbol_set(self):
"""A set of the non-negated symbols present in the tree rooted here.
:type: Set[:class:`str <python:str>`]
"""
return self._non_negated_symbol_set
@property
def negated_symbol_set(self):
"""A set of the negated symbols present in the tree rooted here.
:type: Set[:class:`str <python:str>`]
"""
return self._negated_symbol_set
@property
def is_really_unary(self):
"""Whether the tree rooted at this node contains no binary operators.
:type: :class:`bool <python:bool>`
"""
return self._is_really_unary
@staticmethod
def build_tree(postfix_tokens):
"""Build a tree from a list of expression tokens in postfix order.
This method does not check that the tokens are indeed in postfix order;
undefined behavior will ensue if you pass tokens in an order other than
postfix.
:param postfix_tokens: A list of string tokens from which to construct
the tree of expression nodes.
:type postfix_tokens: List[:class:`str <python:str>`]
:returns: The root node of the constructed tree.
:rtype: :class:`ExpressionTreeNode`
:raises InvalidArgumentTypeError: If ``postfix_tokens`` is not a list
of strings.
:raises InvalidArgumentValueError: If ``postfix_tokens`` is empty.
"""
if (not isinstance(postfix_tokens, list) or
not all(isinstance(elt, str) for elt in postfix_tokens)):
raise InvalidArgumentTypeError(
'postfix_tokens must be a list of strings')
elif not postfix_tokens:
raise InvalidArgumentValueError('postfix_tokens cannot be empty')
stack = []
operators = OPERATOR_MAPPING.keys()
for token in postfix_tokens:
if token in operators:
if OPERATOR_MAPPING[token] == TT_NOT_OP:
node = UnaryOperatorExpressionTreeNode(
token, stack.pop())
else:
right, left = stack.pop(), stack.pop()
node = BinaryOperatorExpressionTreeNode(
token, left, right)
else:
node = OperandExpressionTreeNode(token)
stack.append(node)
return stack.pop()
def iter_clauses(self):
"""Iterate the clauses in the expression tree rooted at this node.
If the normal form of the expression is ambiguous, then precedence will
be given to conjunctive normal form.
:returns: Iterator of each CNF or DNF clause, rooted by a tree node,
contained within the expression tree rooted at this node.
:rtype: Iterator[:class:`ExpressionTreeNode`]
:raises RequiresNormalFormError: If this expression is not in
conjunctive or disjunctive normal form.
"""
if self._is_cnf:
for node in self.iter_cnf_clauses():
yield node
elif self._is_dnf:
for node in self.iter_dnf_clauses():
yield node
else:
raise RequiresNormalFormError(
'Must be in conjunctive or disjunctive normal form to '
'iterate clauses')
def iter_cnf_clauses(self):
"""Iterate the clauses in conjunctive normal form order.
:returns: Iterator of each CNF clause, rooted by a tree node, contained
within the expression tree rooted at this node.
:rtype: Iterator[:class:`ExpressionTreeNode`]
:raises RequiresNormalFormError: If the expression tree rooted at this
node is not in conjunctive normal form.
"""
if not self._is_cnf:
raise RequiresNormalFormError(
'Must be in conjunctive normal form to iterate CNF clauses')
elif (isinstance(self, BinaryOperatorExpressionTreeNode) and
self._operator == TT_AND_OP):
child_iter = itertools.chain(
self._l_child.iter_cnf_clauses(),
self._r_child.iter_cnf_clauses())
for node in child_iter:
yield node
else:
yield self
def iter_dnf_clauses(self):
"""Iterate the clauses in disjunctive normal form order.
:returns: Iterator of each DNF clause, rooted by a tree node, contained
within the expression tree rooted at this node.
:rtype: Iterator[:class:`ExpressionTreeNode`]
:raises RequiresNormalFormError: If the expression tree rooted at this
node is not in disjunctive normal form.
"""
if not self._is_dnf:
raise RequiresNormalFormError(
'Must be in conjunctive normal form to iterate DNF clauses')
elif (isinstance(self, BinaryOperatorExpressionTreeNode) and
self._operator == TT_OR_OP):
child_iter = itertools.chain(
self._l_child.iter_dnf_clauses(),
self._r_child.iter_dnf_clauses())
for node in child_iter:
yield node
else:
yield self
def evaluate(self, input_dict):
"""Recursively evaluate this node.
This is an interface that should be defined in sub-classes. Node
evaluation does no checking of the validity of inputs; they should be
check before being passed here.
:param input_dict: A dictionary mapping expression symbols to the value
for which they should be subsituted in expression evaluation.
:type input_dict: Dict{:class:`str <python:str>`: truthy
:returns: The evaluation of the tree rooted at this node.
:rtype: :class:`bool <python:bool>`
"""
raise NotImplementedError(
'Expression tree nodes must implement evaluate().')
def _copy(self):
"""Recursively return a copy of the tree rooted at this node."""
raise NotImplementedError(
'Expression tree nodes must implement _copy()')
def to_cnf(self):
"""Return a transformed node, in conjunctive normal form.
Since nodes are immutable, the returned node, and all descendants, are
new objects.
:returns: An expression tree node with all operators transformed to
consist only of NOTs, ANDs, and ORs.
:rtype: :class:`ExpressionTreeNode`
"""
prev_node = self.to_primitives()
if prev_node.is_cnf:
return prev_node
next_node = prev_node
while True:
prev_node = next_node
next_node = next_node.apply_de_morgans()
if next_node == prev_node:
break
prev_node = next_node.coalesce_negations()
while True:
prev_node = next_node
next_node = next_node.distribute_ors()
if next_node == prev_node:
break
while True:
prev_node = next_node
next_node = prev_node.apply_inverse_law()
if next_node == prev_node:
break
while True:
prev_node = next_node
next_node = prev_node.apply_idempotent_law()
if next_node == prev_node:
break
while True:
prev_node = next_node
next_node = prev_node.apply_identity_law()
if next_node == prev_node:
break
while True:
prev_node = next_node
next_node = prev_node.apply_idempotent_law()
if next_node == prev_node:
break
while True:
prev_node = next_node
next_node = prev_node.coalesce_negations()
if next_node == prev_node:
break
return next_node
def to_primitives(self):
"""Return a transformed node, containing only NOTs, ANDs, and ORs.
Since nodes are immutable, the returned node, and all descendants, are
new objects.
:returns: An expression tree node with all operators transformed to
consist only of NOTs, ANDs, and ORs.
:rtype: :class:`ExpressionTreeNode`
"""
raise NotImplementedError(
'Expression tree nodes must implement to_primitives()')
def coalesce_negations(self):
"""Return a transformed node, with consecutive negations coalesced.
Since nodes are immutable, the returned node, and all descendants, are
new objects.
:returns: An expression tree node with all consecutive negations
compressed into the minimal number of equivalent negations (either
one or none).
:rtype: :class:`ExpressionTreeNode`
"""
raise NotImplementedError(
'Expression tree nodes must implement coalesce_negations()')
def apply_de_morgans(self):
"""Return a transformed node, with De Morgan's Law applied.
Since nodes are immutable, the returned node, and all descendants, are
new objects.
:returns: An expression tree node with all negated AND and OR operators
transformed, following De Morgan's Law.
:rtype: :class:`ExpressionTreeNode`
"""
raise NotImplementedError(
'Expression tree nodes must implement apply_de_morgans()')
def apply_identity_law(self):
"""Return a transformed node, with the Identity Law applied.
Since nodes are immutable, the returned node, and all descendants, are
new objects.
This transformation will achieve the following effects by applying the
Inverse Law to the *AND* and *OR* operators::
>>> from tt import BooleanExpression
>>> tree = BooleanExpression('A and 1').tree
>>> print(tree.apply_identity_law())
A
>>> tree = BooleanExpression('0 or B').tree
>>> print(tree.apply_identity_law())
B
It should also be noted that this transformation will also apply
the annihilator properties of the logical *AND* and *OR* operators. For
example::
>>> from tt import BooleanExpression
>>> tree = BooleanExpression('A and 0').tree
>>> print(tree.apply_identity_law())
0
>>> tree = BooleanExpression('1 or B').tree
>>> print(tree.apply_identity_law())
1
:returns: An expression tree node with AND and OR identities
simplified.
:rtype: :class:`ExpressionTreeNode`
"""
raise NotImplementedError(
'Expression tree nodes must implement apply_identity_law()')
def apply_idempotent_law(self):
"""Returns a transformed node, with the Idempotent Law applied.
Since nodes are immutable, the returned node, and all descendants, are
new objects
:returns: An expression tree node with the Idempotent Law applied to
*AND* and *OR* operators.
:rtype: :class:`ExpressionTreeNode`
This transformation will apply the Idempotent Law to *AND* and *OR*
expressions involving repeated operands. Here are a few examples::
>>> from tt import BooleanExpression
>>> tree = BooleanExpression('A and A').tree
>>> print(tree.apply_idempotent_law())
A
>>> tree = BooleanExpression('~B or ~~~B').tree
>>> print(tree.apply_idempotent_law())
~
`----B
In the latter of the two above examples, we see that this
transformation will compare operands with negations condensed. This
transformation will also prune redundant operands from CNF and DNF
clauses. Let's take a look::
>>> from tt import BooleanExpression
>>> tree = BooleanExpression('A and B and B and C and ~C and ~~C \
and D').tree
>>> print(tree.apply_idempotent_law())
and
`----and
| `----and
| | `----and
| | | `----A
| | | `----B
| | `----C
| `----~
| `----C
`----D
"""
raise NotImplementedError(
'Expression tree nodes must implement apply_idempotent_law()')
def apply_inverse_law(self):
"""Return a transformed node, with the Inverse Law applied.
Since nodes are immutable, the returned node, and all descendants, are
new objects.
:returns: An expression tree node with the Inverse Law applied to
applicable clauses.
:rtype: :class:`ExpressionTreeNode`
This transformation will apply the Inverse Law to *AND* and *OR*
expressions involving the negated and non-negated forms of a variable.
Here are a few examples::
>>> from tt import BooleanExpression
>>> tree = BooleanExpression('~A and A').tree
>>> print(tree.apply_inverse_law())
0
>>> tree = BooleanExpression('B or !B').tree
>>> print(tree.apply_inverse_law())
1
Note that this transformation will **not** reduce expressions of
constants; the transformation :func:`apply_identity_law \
<tt.trees.tree_node.ExpressionTreeNode.apply_identity_law>` will
probably do what you want in this case, though.
This transformation will also reduce expressions in CNF or DNF that
contain negated and non-negated forms of the same symbol. Let's take a
look::
>>> from tt import BooleanExpression
>>> tree = BooleanExpression('A or B or C or ~B').tree
>>> print(tree.apply_inverse_law())
1
>>> tree = BooleanExpression('A and B and C and !B').tree
>>> print(tree.apply_inverse_law())
0
"""
raise NotImplementedError(
'Expression tree nodes must implement apply_inverse_law()')
def distribute_ands(self):
"""Return a transformed nodes, with ANDs recursively distributed across
ORed sub-expressions.
Since nodes are immutable, the returned node, and all descendants, are
new objects.
:returns: An expression tree node with all applicable AND operators
distributed across ORed sub-expressions.
:rtype: :class:`ExpressionTreeNode`
"""
raise NotImplementedError(
'Expression tree nodes must implement distribute_ands()')
def distribute_ors(self):
"""Return a transformed nodes, with ORs recursively distributed across
ANDed sub-expressions.
Since nodes are immutable, the returned node, and all descendants, are
new objects.
:returns: An expression tree node with all applicable OR operators
distributed across ANDed sub-expressions.
:rtype: :class:`ExpressionTreeNode`
"""
raise NotImplementedError(
'Expression tree nodes must implement distribute_ors()')
def __eq__(self, other):
raise NotImplementedError(
'Expression tree nodes must implement __eq__')
def __ne__(self, other):
return not (self == other)
    def __str__(self):
        """Render the tree rooted at this node as ASCII art, dropping the
        trailing newline produced by ``_str_helper``."""
        return self._str_helper()[:-1]
def _str_helper(self, depth=0, indent_size=_DEFAULT_INDENT_SIZE,
stem_list=[]):
"""Helper method for __str__."""
ret = ''
if depth > 0:
trunk = ('{}' + (indent_size - 1) * ' ') * (depth - 1)
trunk = trunk.format(*stem_list)
stem = '`' + (indent_size - 1) * '-'
ret += trunk + stem + self._symbol_name
else:
ret += self._symbol_name
ret += '\n'
l_child_stem = '|' if self._r_child is not None else ' '
if self._l_child is not None:
ret += self._l_child._str_helper(
depth=depth+1,
indent_size=indent_size,
stem_list=stem_list + [l_child_stem])
if self.r_child is not None:
ret += self.r_child._str_helper(
depth=depth+1,
indent_size=indent_size,
stem_list=stem_list + [' '])
return ret
def _get_op_strs(self, *ops):
"""Get the appropriate operator strings for the passed operators."""
if self.symbol_name in SYMBOLIC_OPERATOR_MAPPING:
return tuple(op.default_symbol_str for op in ops)
else:
return tuple(op.default_plain_english_str for op in ops)
class BinaryOperatorExpressionTreeNode(ExpressionTreeNode):
    """An expression tree node for binary operators."""

    def __init__(self, operator_str, l_child, r_child):
        super(BinaryOperatorExpressionTreeNode, self).__init__(
            operator_str, l_child, r_child)
        # Resolve the operator object once from its string form.
        self._operator = OPERATOR_MAPPING[operator_str]
        # Normal-form status is computed eagerly on construction.
        self._is_cnf = self._cnf_status()
        self._is_dnf = self._dnf_status()
        self._is_really_unary = False
        # Symbol bookkeeping is the union of the children's sets.
        self._non_negated_symbol_set = \
            l_child._non_negated_symbol_set | r_child._non_negated_symbol_set
        self._negated_symbol_set = \
            l_child._negated_symbol_set | r_child._negated_symbol_set

    @property
    def operator(self):
        """The actual operator object wrapped in this node.

        :type: :class:`BooleanOperator\
        <tt.definitions.operators.BooleanOperator>`
        """
        return self._operator

    def evaluate(self, input_dict):
        """Evaluate both children and combine the results with this node's
        operator."""
        return self.operator.eval_func(
            self.l_child.evaluate(input_dict),
            self.r_child.evaluate(input_dict))

    def _copy(self):
        # Deep copy: nodes are immutable, so children are copied too.
        return BinaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child._copy(),
            self._r_child._copy())

    def to_primitives(self):
        """Rewrite this operator in terms of the primitives NOT, AND, OR,
        recursing into both children."""
        not_str, and_str, or_str = self._get_op_strs(
            TT_NOT_OP, TT_AND_OP, TT_OR_OP)
        if self._operator == TT_IMPL_OP:
            # A -> B  becomes  ~A or B
            return BinaryOperatorExpressionTreeNode(
                or_str,
                UnaryOperatorExpressionTreeNode(
                    not_str, self._l_child.to_primitives()),
                self._r_child.to_primitives())
        elif self._operator == TT_XOR_OP:
            # A xor B  becomes  (A and ~B) or (~A and B)
            new_l_child = self._l_child.to_primitives()
            new_r_child = self._r_child.to_primitives()
            return BinaryOperatorExpressionTreeNode(
                or_str,
                BinaryOperatorExpressionTreeNode(
                    and_str,
                    new_l_child,
                    UnaryOperatorExpressionTreeNode(not_str, new_r_child)),
                BinaryOperatorExpressionTreeNode(
                    and_str,
                    UnaryOperatorExpressionTreeNode(not_str, new_l_child),
                    new_r_child))
        elif self._operator == TT_XNOR_OP:
            # A xnor B  becomes  (A and B) or (~A and ~B)
            new_l_prim = self._l_child.to_primitives()
            new_r_prim = self._r_child.to_primitives()
            return BinaryOperatorExpressionTreeNode(
                or_str,
                BinaryOperatorExpressionTreeNode(
                    and_str,
                    new_l_prim,
                    new_r_prim),
                BinaryOperatorExpressionTreeNode(
                    and_str,
                    UnaryOperatorExpressionTreeNode(not_str, new_l_prim),
                    UnaryOperatorExpressionTreeNode(not_str, new_r_prim)))
        elif self._operator == TT_AND_OP:
            # Already primitive; just recurse into the children.
            return BinaryOperatorExpressionTreeNode(
                and_str,
                self._l_child.to_primitives(), self._r_child.to_primitives())
        elif self._operator == TT_NAND_OP:
            # A nand B  becomes  ~A or ~B
            new_l_child = self._l_child.to_primitives()
            new_r_child = self._r_child.to_primitives()
            return BinaryOperatorExpressionTreeNode(
                or_str,
                UnaryOperatorExpressionTreeNode(not_str, new_l_child),
                UnaryOperatorExpressionTreeNode(not_str, new_r_child))
        elif self._operator == TT_OR_OP:
            # Already primitive; just recurse into the children.
            return BinaryOperatorExpressionTreeNode(
                or_str,
                self._l_child.to_primitives(), self._r_child.to_primitives())
        elif self._operator == TT_NOR_OP:
            # A nor B  becomes  ~A and ~B
            new_l_child = self._l_child.to_primitives()
            new_r_child = self._r_child.to_primitives()
            return BinaryOperatorExpressionTreeNode(
                and_str,
                UnaryOperatorExpressionTreeNode(not_str, new_l_child),
                UnaryOperatorExpressionTreeNode(not_str, new_r_child))

    def coalesce_negations(self):
        # Negation coalescing only rewrites unary nodes; recurse.
        return BinaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child.coalesce_negations(),
            self._r_child.coalesce_negations())

    def apply_de_morgans(self):
        # De Morgan rewrites happen at unary (NOT) nodes; recurse.
        return BinaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child.apply_de_morgans(),
            self._r_child.apply_de_morgans())

    def apply_identity_law(self):
        """Reduce AND/OR expressions involving the constants '0' and '1':
        ``1 and x -> x``, ``1 or x -> 1``, ``0 and x -> 0``, ``0 or x -> x``
        (and symmetrically for the right child)."""
        op_is_and = self._operator == TT_AND_OP
        op_is_or = self._operator == TT_OR_OP
        new_l_child = self._l_child.apply_identity_law()
        if new_l_child.symbol_name == '1':
            if op_is_and:
                return self._r_child.apply_identity_law()
            elif op_is_or:
                return OperandExpressionTreeNode('1')
        elif new_l_child.symbol_name == '0':
            if op_is_and:
                return OperandExpressionTreeNode('0')
            elif op_is_or:
                return self._r_child.apply_identity_law()
        new_r_child = self._r_child.apply_identity_law()
        if new_r_child.symbol_name == '1':
            if op_is_and:
                return new_l_child
            elif op_is_or:
                return OperandExpressionTreeNode('1')
        elif new_r_child.symbol_name == '0':
            if op_is_and:
                return OperandExpressionTreeNode('0')
            elif op_is_or:
                return new_l_child
        # No constants involved; rebuild with the transformed children.
        return BinaryOperatorExpressionTreeNode(
            self.symbol_name,
            new_l_child,
            new_r_child)

    def apply_idempotent_law(self):
        """Drop repeated literals (``A and A -> A``) from subtrees that are
        flat clauses of literals (both normal-form flags set); otherwise
        recurse into both children."""
        negations_applied = self.coalesce_negations()
        if negations_applied._is_cnf and negations_applied._is_dnf:
            # Track which literals have already been kept.
            negated_symbols_added = set()
            non_negated_symbols_added = set()
            filtered_clauses = deque()
            total_clause_count = 0
            clause_iter = (negations_applied.iter_cnf_clauses() if
                           self._operator == TT_AND_OP else
                           negations_applied.iter_dnf_clauses())
            for clause in clause_iter:
                total_clause_count += 1
                if isinstance(clause, OperandExpressionTreeNode):
                    # Plain (non-negated) literal.
                    if clause.symbol_name in non_negated_symbols_added:
                        continue
                    non_negated_symbols_added |= clause.non_negated_symbol_set
                    filtered_clauses.append(clause._copy())
                elif clause._l_child.symbol_name not in negated_symbols_added:
                    # Negated literal; its symbol is the unary node's child.
                    negated_symbols_added |= clause.negated_symbol_set
                    filtered_clauses.append(clause._copy())
            if len(filtered_clauses) == total_clause_count:
                # no redundant operands were pruned
                return self._copy()
            # Re-assemble the surviving literals into a binary tree.
            while len(filtered_clauses) > 1:
                filtered_clauses.appendleft(
                    BinaryOperatorExpressionTreeNode(
                        self.symbol_name,
                        filtered_clauses.popleft(),
                        filtered_clauses.popleft()))
            return filtered_clauses.pop()
        return BinaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child.apply_idempotent_law(),
            self._r_child.apply_idempotent_law())

    def apply_inverse_law(self):
        """Reduce sub-expressions containing both a symbol and its negation
        to the appropriate constant."""
        negations_applied = self.coalesce_negations()
        if negations_applied._is_cnf and negations_applied._is_dnf:
            # A flat clause holding A and ~A collapses to a constant:
            # '1' for OR, '0' otherwise.
            if self._negated_symbol_set & self._non_negated_symbol_set:
                return OperandExpressionTreeNode(
                    '1' if self._operator == TT_OR_OP else '0')
        elif self._is_cnf:
            # CNF: any clause containing A and ~A is always true ('1').
            and_str = self.symbol_name
            inverted_clause_count = 0
            transformed_clauses = deque()
            for clause in self.iter_cnf_clauses():
                if clause.negated_symbol_set & clause.non_negated_symbol_set:
                    inverted_clause_count += 1
                    transformed_clauses.append(OperandExpressionTreeNode('1'))
                else:
                    transformed_clauses.append(clause._copy())
            if not inverted_clause_count:
                # we didn't change anything, so just return ourselves
                return self._copy()
            # Re-assemble the transformed clauses into a binary tree.
            while len(transformed_clauses) > 1:
                transformed_clauses.append(
                    BinaryOperatorExpressionTreeNode(
                        and_str,
                        transformed_clauses.popleft(),
                        transformed_clauses.popleft()))
            return transformed_clauses.pop()
        elif self._is_dnf:
            # DNF: any clause containing A and ~A is always false ('0').
            or_str = self.symbol_name
            inverted_clause_count = 0
            transformed_clauses = deque()
            for clause in self.iter_dnf_clauses():
                if clause.negated_symbol_set & clause.non_negated_symbol_set:
                    inverted_clause_count += 1
                    transformed_clauses.append(OperandExpressionTreeNode('0'))
                else:
                    transformed_clauses.append(clause._copy())
            if not inverted_clause_count:
                # we didn't change anything, so just return ourselves
                return self._copy()
            # Re-assemble the transformed clauses into a binary tree.
            while len(transformed_clauses) > 1:
                transformed_clauses.append(
                    BinaryOperatorExpressionTreeNode(
                        or_str,
                        transformed_clauses.popleft(),
                        transformed_clauses.popleft()))
            return transformed_clauses.pop()
        return BinaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child.apply_inverse_law(),
            self._r_child.apply_inverse_law())

    def distribute_ands(self):
        """Distribute an AND over an ORed child:
        ``A and (B or C) -> (A and B) or (A and C)`` (and the mirrored form
        for an ORed left child); otherwise recurse."""
        if self._operator == TT_AND_OP:
            (or_str,) = self._get_op_strs(TT_OR_OP)
            and_str = self.symbol_name
            if (isinstance(self._r_child, BinaryOperatorExpressionTreeNode) and
                    self._r_child.operator == TT_OR_OP):
                child_to_distribute = self._l_child.distribute_ands()
                child_distributed_upon = \
                    self._r_child._l_child.distribute_ands()
                child_to_be_distributed_upon = \
                    self._r_child._r_child.distribute_ands()
                return BinaryOperatorExpressionTreeNode(
                    or_str,
                    BinaryOperatorExpressionTreeNode(
                        and_str,
                        child_to_distribute,
                        child_distributed_upon).distribute_ands(),
                    BinaryOperatorExpressionTreeNode(
                        and_str,
                        child_to_distribute,
                        child_to_be_distributed_upon).distribute_ands())
            elif (isinstance(self._l_child, BinaryOperatorExpressionTreeNode)
                    and self._l_child.operator == TT_OR_OP):
                child_to_distribute = self._r_child.distribute_ands()
                child_distributed_upon = \
                    self._l_child._l_child.distribute_ands()
                child_to_be_distributed_upon = \
                    self._l_child._r_child.distribute_ands()
                return BinaryOperatorExpressionTreeNode(
                    or_str,
                    BinaryOperatorExpressionTreeNode(
                        and_str,
                        child_distributed_upon,
                        child_to_distribute).distribute_ands(),
                    BinaryOperatorExpressionTreeNode(
                        and_str,
                        child_to_be_distributed_upon,
                        child_to_distribute).distribute_ands())
        # Nothing to distribute at this node; recurse into the children.
        return BinaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child.distribute_ands(),
            self._r_child.distribute_ands())

    def distribute_ors(self):
        """Distribute an OR over an ANDed child:
        ``A or (B and C) -> (A or B) and (A or C)`` (and the mirrored form
        for an ANDed left child); otherwise recurse."""
        if self._operator == TT_OR_OP:
            (and_str,) = self._get_op_strs(TT_AND_OP)
            or_str = self.symbol_name
            if (isinstance(self._r_child, BinaryOperatorExpressionTreeNode) and
                    self._r_child.operator == TT_AND_OP):
                child_to_distribute = self._l_child.distribute_ors()
                child_distributed_upon = \
                    self._r_child._l_child.distribute_ors()
                child_to_be_distributed_upon = \
                    self._r_child._r_child.distribute_ors()
                return BinaryOperatorExpressionTreeNode(
                    and_str,
                    BinaryOperatorExpressionTreeNode(
                        or_str,
                        child_to_distribute,
                        child_distributed_upon).distribute_ors(),
                    BinaryOperatorExpressionTreeNode(
                        or_str,
                        child_to_distribute,
                        child_to_be_distributed_upon).distribute_ors())
            elif (isinstance(self._l_child, BinaryOperatorExpressionTreeNode)
                    and self._l_child.operator == TT_AND_OP):
                child_to_distribute = self._r_child.distribute_ors()
                child_distributed_upon = \
                    self._l_child._l_child.distribute_ors()
                child_to_be_distributed_upon = \
                    self._l_child._r_child.distribute_ors()
                return BinaryOperatorExpressionTreeNode(
                    and_str,
                    BinaryOperatorExpressionTreeNode(
                        or_str,
                        child_distributed_upon,
                        child_to_distribute).distribute_ors(),
                    BinaryOperatorExpressionTreeNode(
                        or_str,
                        child_to_be_distributed_upon,
                        child_to_distribute).distribute_ors())
        # Nothing to distribute at this node; recurse into the children.
        return BinaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child.distribute_ors(),
            self._r_child.distribute_ors())

    def __eq__(self, other):
        """Structural equality: same operator and recursively equal children."""
        if isinstance(other, BinaryOperatorExpressionTreeNode):
            return (self._operator == other._operator and
                    self._l_child == other._l_child and
                    self._r_child == other._r_child)
        elif isinstance(other, ExpressionTreeNode):
            return False
        else:
            return NotImplemented

    def _cnf_status(self):
        """Helper to determine CNF status of the tree rooted at this node.

        :returns: True if the tree rooted at this node is in conjunctive
            normal form, otherwise False.
        :rtype: :class:`bool <python:bool>`
        """
        if not self._l_child.is_cnf or not self._r_child.is_cnf:
            return False
        if self._operator != TT_AND_OP and self._operator != TT_OR_OP:
            return False
        # An OR node may not have an AND anywhere beneath it in CNF.
        if self._operator == TT_OR_OP:
            if isinstance(self._l_child, BinaryOperatorExpressionTreeNode):
                if self._l_child._operator != TT_OR_OP:
                    return False
            if isinstance(self._r_child, BinaryOperatorExpressionTreeNode):
                if self._r_child.operator != TT_OR_OP:
                    return False
        return True

    def _dnf_status(self):
        """Helper to determine DNF status of the tree rooted at this node.

        :returns: True if the tree rooted at this node is in disjunctive
            normal form, otherwise False.
        :rtype: :class:`bool <python:bool>`
        """
        if not self._l_child.is_dnf or not self._r_child.is_dnf:
            return False
        if self._operator != TT_AND_OP and self._operator != TT_OR_OP:
            return False
        # An AND node may not have an OR anywhere beneath it in DNF.
        if self._operator == TT_AND_OP:
            if isinstance(self._l_child, BinaryOperatorExpressionTreeNode):
                if self._l_child._operator != TT_AND_OP:
                    return False
            if isinstance(self._r_child, BinaryOperatorExpressionTreeNode):
                if self._r_child.operator != TT_AND_OP:
                    return False
        return True
class UnaryOperatorExpressionTreeNode(ExpressionTreeNode):
    """An expression tree node for unary operators."""

    def __init__(self, operator_str, l_child):
        super(UnaryOperatorExpressionTreeNode, self).__init__(
            operator_str, l_child)
        self._operator = OPERATOR_MAPPING[operator_str]
        # A negation counts as normal form only when applied to a leaf.
        self._is_cnf = isinstance(self.l_child, OperandExpressionTreeNode)
        self._is_dnf = self._is_cnf
        self._is_really_unary = l_child._is_really_unary
        if self._is_really_unary:
            # this node has the opposite of its children
            self._non_negated_symbol_set, self._negated_symbol_set = (
                set(l_child._negated_symbol_set),
                set(l_child._non_negated_symbol_set))
        else:
            self._non_negated_symbol_set, self._negated_symbol_set = (
                set(l_child._non_negated_symbol_set),
                set(l_child._negated_symbol_set))

    @property
    def operator(self):
        """The actual operator object wrapped in this node.

        :type: :class:`BooleanOperator\
        <tt.definitions.operators.BooleanOperator>`
        """
        return self._operator

    def evaluate(self, input_dict):
        """Evaluate the child and apply this node's operator to the result."""
        return self.operator.eval_func(
            self.l_child.evaluate(input_dict))

    def _copy(self):
        # Deep copy: nodes are immutable, so the child is copied too.
        return UnaryOperatorExpressionTreeNode(
            self.symbol_name, self._l_child._copy())

    def to_primitives(self):
        # Unary operators are already primitive; just recurse.
        return UnaryOperatorExpressionTreeNode(
            self.symbol_name, self._l_child.to_primitives())

    def coalesce_negations(self):
        """Collapse double negations and fold negated constants."""
        if isinstance(self._l_child, UnaryOperatorExpressionTreeNode):
            # ~~A  becomes  A (recursively coalesced).
            return self._l_child._l_child.coalesce_negations()
        elif self._l_child.symbol_name == '0':
            return OperandExpressionTreeNode('1')
        elif self._l_child.symbol_name == '1':
            return OperandExpressionTreeNode('0')
        else:
            return UnaryOperatorExpressionTreeNode(
                self.symbol_name,
                self._l_child.coalesce_negations())

    def apply_de_morgans(self):
        """Push this negation through an AND/OR child per De Morgan's laws;
        otherwise just recurse into the child."""
        if isinstance(self._l_child, BinaryOperatorExpressionTreeNode):
            binary_node = self._l_child
            op = binary_node._operator
            not_str, and_str, or_str = self._get_op_strs(
                TT_NOT_OP, TT_AND_OP, TT_OR_OP)
            notted_l_child = UnaryOperatorExpressionTreeNode(
                not_str, binary_node._l_child).apply_de_morgans()
            notted_r_child = UnaryOperatorExpressionTreeNode(
                not_str, binary_node._r_child).apply_de_morgans()
            if op == TT_AND_OP:
                # ~(A and B)  becomes  ~A or ~B
                return BinaryOperatorExpressionTreeNode(
                    or_str, notted_l_child, notted_r_child)
            elif op == TT_OR_OP:
                # ~(A or B)  becomes  ~A and ~B
                return BinaryOperatorExpressionTreeNode(
                    and_str, notted_l_child, notted_r_child)
        return UnaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child.apply_de_morgans())

    def apply_identity_law(self):
        # Identity reductions happen at binary nodes; just recurse.
        return UnaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child.apply_identity_law())

    def apply_idempotent_law(self):
        # Idempotence reductions happen at binary nodes; just recurse.
        return UnaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child.apply_idempotent_law())

    def apply_inverse_law(self):
        # Inverse-law reductions happen at binary nodes; just recurse.
        return UnaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child.apply_inverse_law())

    def distribute_ands(self):
        return UnaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child.distribute_ands())

    def distribute_ors(self):
        return UnaryOperatorExpressionTreeNode(
            self.symbol_name,
            self._l_child.distribute_ors())

    def __eq__(self, other):
        # NOTE(review): only the children are compared, not the wrapped
        # operators — presumably NOT is the only unary operator; confirm.
        if isinstance(other, UnaryOperatorExpressionTreeNode):
            return self._l_child == other._l_child
        elif isinstance(other, ExpressionTreeNode):
            return False
        else:
            return NotImplemented
class OperandExpressionTreeNode(ExpressionTreeNode):
    """An expression tree node for operands.

    Nodes of this type will always be leaves in an expression tree.
    """

    def __init__(self, operand_str):
        super(OperandExpressionTreeNode, self).__init__(operand_str)
        # A lone operand is trivially in both normal forms.
        self._is_cnf = True
        self._is_dnf = True
        self._is_really_unary = True
        self._non_negated_symbol_set = {self.symbol_name}
        self._negated_symbol_set = set()

    def evaluate(self, input_dict):
        """Evaluate this leaf: constants map directly to booleans, any other
        symbol is looked up in ``input_dict``."""
        name = self.symbol_name
        if name == '0':
            return False
        if name == '1':
            return True
        return input_dict[name]

    def _copy(self):
        return OperandExpressionTreeNode(self.symbol_name)

    # Every transformation is an identity on a leaf; each returns a fresh
    # node because trees are immutable.
    def to_primitives(self):
        return self._copy()

    def coalesce_negations(self):
        return self._copy()

    def apply_de_morgans(self):
        return self._copy()

    def apply_identity_law(self):
        return self._copy()

    def apply_idempotent_law(self):
        return self._copy()

    def apply_inverse_law(self):
        return self._copy()

    def distribute_ands(self):
        return self._copy()

    def distribute_ors(self):
        return self._copy()

    def __eq__(self, other):
        """Operands compare equal iff their symbol names match."""
        if isinstance(other, OperandExpressionTreeNode):
            return self.symbol_name == other.symbol_name
        if isinstance(other, ExpressionTreeNode):
            return False
        return NotImplemented
| {
"repo_name": "welchbj/tt",
"path": "tt/trees/tree_node.py",
"copies": "1",
"size": "41273",
"license": "mit",
"hash": 1425965594857478700,
"line_mean": 35.2361720808,
"line_max": 79,
"alpha_frac": 0.5726019432,
"autogenerated": false,
"ratio": 4.4475215517241375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5520123494924137,
"avg_score": null,
"num_lines": null
} |
# >> a = Node.append(None, 2)
# >> a = Node.prepend(a, 1)
# >> print a
# [ 1 2 ]
# >> b = Node.generateSequence(5)
# >> print b
# [ 1 2 3 4 5 ]
class Node:
    """A singly linked list node with static list-building helpers.

    The helpers follow the usage shown in the comments above the class:

        a = Node.append(None, 2)
        a = Node.prepend(a, 1)
        print(a)                   # [ 1 2 ]
        b = Node.generateSequence(5)
        print(b)                   # [ 1 2 3 4 5 ]
    """

    def __init__(self, value):
        # Original code only assigned when value <= 0 (clamping to 0) and
        # left positive values unset; store the clamped value always.
        self.value = 0 if value <= 0 else value
        self.next = None

    @staticmethod
    def generateSequence(value):
        """Return the head of a list 1..value (None when value < 1)."""
        head = None
        # Prepend in reverse so the resulting list is ascending.
        for i in range(value, 0, -1):
            head = Node.prepend(head, i)
        return head

    @staticmethod
    def append(head, value):
        """Append ``value`` at the tail of the list rooted at ``head``.

        Returns the (possibly new) head of the list.
        """
        node = Node(value)
        if head is None:
            return node
        tail = head
        while tail.next:
            tail = tail.next
        tail.next = node
        return head

    @staticmethod
    def prepend(head, value):
        """Insert ``value`` before ``head``; return the new head."""
        node = Node(value)
        node.next = head
        return node

    def __str__(self):
        """Render the list as '[ v1 v2 ... ]' per the documented examples."""
        values = []
        node = self
        while node:
            values.append(str(node.value))
            node = node.next
        return '[ ' + ' '.join(values) + ' ]'
# a = Node.prepend(None,2)
| {
"repo_name": "bourneagain/pythonBytes",
"path": "whatsapp_interview1.py",
"copies": "1",
"size": "1231",
"license": "mit",
"hash": 2247321914042706200,
"line_mean": 18.234375,
"line_max": 42,
"alpha_frac": 0.4281072299,
"autogenerated": false,
"ratio": 4.117056856187291,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.504516408608729,
"avg_score": null,
"num_lines": null
} |
# anodi
from backports import inspect
empty = inspect.Signature.empty
def returns(annotation):
    """
    Decorator to add ``annotation`` to ``func``'s ``return``
    annotation, as though it were a Python 3 ``-> ...`` annotation.

    >>> from anodi import returns
    >>> @returns(int)
    ... def example ():
    ...     pass
    ...
    >>> example.__annotations__
    {'return': <type 'int'>}
    """
    def annotate(func):
        # Create __annotations__ if the function doesn't have one yet.
        func.__annotations__ = getattr(func, '__annotations__', {})
        # ``empty`` is a sentinel object, so identity is the right test
        # (was the non-idiomatic ``not annotation is empty``).
        if annotation is not empty:
            func.__annotations__['return'] = annotation
        return func
    return annotate
def annotated(func=None, returns=empty):
    """
    Decorator to treat ``func``'s default args as a combination of
    annotations and default values, migrating the annotations to
    ``func.__annotations__``, leaving only the defaults in
    ``__defaults__``.

    The optional ``returns`` keyword parameter is placed in the
    resulting ``__annotations__`` dict.

    Each default value must be a tuple, ``(annotation, default)``. To
    supply an unannotated parameter with a default value, use the
    ``empty`` marker object. To supply an annotation without a
    default value, use a 1-tuple: ``(annotation,)``.

    Note that the Python 2.x rules prohibiting non-default parameters
    from coming after defaults still apply, but we don't enforce those
    rules. The effect of using the ``(annotation,)`` form *after*
    using the ``(annotation, default)`` form is likely to be
    surprising, at best.

    You may specify an unannotated parameter by using an empty tuple
    as its default value. This is to allow placing unannotated
    parameters after annotated parameters. Ordinarily, this would not
    be allowed, since the annotated parameter would mark the start of
    default values, requiring defaults on all subsequent parameters.

    We do *not* support nested tuple parameters.

    We also don't yet have a way to add annotations to the ``*args``
    or ``**kwargs`` catch-all parameters, since they don't take
    defaults.

    Example:

    >>> from anodi import annotated, empty
    >>> @annotated
    ... def example (a, b, c=(int,), d=(), e=(empty, "hi")):
    ...     pass
    ...
    >>> example.__annotations__
    {'c': <type 'int'>}
    >>> example.__defaults__
    ('hi',)

    >>> @annotated(returns=int)
    ... def example (a, b, c=(int,), d=(), e=(empty, "hi")):
    ...     pass
    ...
    >>> example.__annotations__
    {'c': <type 'int'>, 'return': <type 'int'>}
    >>> example.__defaults__
    ('hi',)
    """
    def annotate(func):
        func.__annotations__ = getattr(func, '__annotations__', {})
        # ``empty`` is a sentinel object; test identity, not equality
        # (consistent with the ``returns`` decorator above).
        if returns is not empty:
            func.__annotations__['return'] = returns
        defaults = func.__defaults__
        if defaults:
            spec = inspect.getfullargspec(func)
            # ___TODO:___ support *args, **kwargs annotation?
            # extract annotations: the last len(defaults) positional args
            # are the ones carrying (annotation, default) tuples
            nanno = len(defaults)
            for (i, name) in enumerate(spec.args[-nanno:]):
                if len(defaults[i]) < 1 or defaults[i][0] is empty:
                    continue
                func.__annotations__[name] = defaults[i][0]
            # prune annotations, leaving only defaults
            defaults = tuple((d[1]
                              for d in func.__defaults__
                              if len(d) > 1))
            # use ``None`` if there are no defaults left, since that's
            # how a function without any defaults would come out.
            func.__defaults__ = defaults or None
        return func
    # if we were called without a ``returns`` argument, then we're
    # directly decorating ``func``:
    if returns is empty:
        return annotate(func)
    # otherwise, we're indirectly decorating, via ``annotate``:
    return annotate
| {
"repo_name": "agoraplex/anodi",
"path": "anodi/__init__.py",
"copies": "1",
"size": "3973",
"license": "bsd-3-clause",
"hash": 3316148378849363000,
"line_mean": 33.850877193,
"line_max": 70,
"alpha_frac": 0.5766423358,
"autogenerated": false,
"ratio": 4.545766590389016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5622408926189018,
"avg_score": null,
"num_lines": null
} |
"""An offshore wind farm model
@moduleauthor:: Juan P. Murcia <jumu@dtu.dk>
"""
import numpy as np
MATPLOTLIB = True
try:
import matplotlib.pyplot as plt
except Exception as e:
MATPLOTLIB = False
print("WARNING: Matplotlib isn't installed correctly:", e)
from .WindTurbine import WindTurbineDICT
from windIO.Plant import WTLayout
import warnings
class WindTurbineList(list):
    """A list of turbines that can also stand in for a single turbine.

    Attribute access on the list is forwarded to its first element, so a
    farm of identical turbines can be treated as one turbine.

    Note
    ----
    We assume here that if a model calls this instance as if it's one wind
    turbine, the user has been clever enough to pass as input a wind farm
    with identical turbines.
    """

    def __getattr__(self, key):
        # TODO: add checks to catch possible bugs when turbines differ.
        first_turbine = self[0]
        return getattr(first_turbine, key)

    def names(self):
        """Return the ``name`` attribute of every turbine in the list."""
        return [wt.name for wt in self]
class WindFarm(object):
    """A wind farm: turbine positions plus per-turbine data.

    Unknown attribute lookups are forwarded to the turbine list (see
    ``__getattr__``), so e.g. ``wf.rotor_diameter`` returns one value per
    turbine.
    """

    def __init__(self, name=None, yml=None, coordFile=None, WT=None):
        """Initializes a WindFarm object.

        The initialization can be done using a `windIO` yml file or using a
        coordFile + WindTurbine instance.

        Parameters
        ----------
        name: str, optional
            WindFarm name
        yml: str, optional
            A WindIO `yml` file containing the description of the farm
        coordFile: str, optional
            Wind Farm layout coordinates text file.
        WT: WindTurbine, optional
            WindTurbine object (only one type per WindFarm)
        """
        if (coordFile):
            coordArray = np.loadtxt(coordFile)
            self.pos = self.sanitize_position(coordArray)  # np.array(nWT, 2)
            self.nWT = self.pos.shape[0]
            # One shared turbine object per position (single turbine type).
            self.WT = WindTurbineList([WT for i in range(self.nWT)])
            self.name = name if name else 'Unknown wind farm'
        elif (yml):
            self.wf = WTLayout(yml)
            self.pos = self.sanitize_position(self.wf.positions)
            self.nWT = self.pos.shape[0]
            self.WT = WindTurbineList(
                [WindTurbineDICT(wt, self.wf[wt['turbine_type']])
                 for wt in self.wf.wt_list])
            self.name = self.wf.name
        # We generate a wind turbine list
        self.update_position(self.pos)

    def sanitize_position(self, pos):
        """Position array should be ndarray([n_wt, 2]) or ndarray([n_wt, 3]).

        Transposes the input when it looks like ndarray([2 or 3, n_wt]);
        warns when the orientation is ambiguous (2 or 3 turbines).
        """
        if pos.shape[1] == 2 or pos.shape[1] == 3:
            if pos.shape[0] > 1 and pos.shape[0] < 4:
                # With 2 or 3 rows AND 2 or 3 columns the orientation is
                # ambiguous; trust the caller but warn.
                warnings.warn('warning, make sure that this position array is oriented in ndarray([n_wt, 2]) or ndarray([n_wt, 3])\n%s' % pos)
            return pos
        elif pos.shape[0] == 2 or pos.shape[0] == 3:
            return pos.T
        else:
            raise Exception('Strange position array', pos.shape)

    def update_position(self, pos):
        """Set turbine positions and recompute the WT-to-WT vectors.

        Parameters
        ----------
        pos: ndarray
            ndarray([nWT, 2]) (x, y) or ndarray([nWT, 3]) (x, y, z)
        """
        pos = self.sanitize_position(pos)
        # XYZ position of the rotors; hub heights provide z when only
        # (x, y) coordinates were given.
        if pos.shape[1] == 2:
            self.xyz = np.vstack([pos.T, self.H]).T  # xyz is ndarray([nWT, 3])
        else:
            # Positions already carry a z component (fixes the case where
            # self.xyz was left unset for 3-column input).
            self.xyz = pos
        # Vector from iWT to jWT: self.vectWTtoWT[:,i,j] [3, nWT, nWT]
        self.vectWTtoWT = np.swapaxes([self.xyz.T -
            np.repeat(np.atleast_2d(self.xyz[i, :]).T, self.nWT, axis=1)
            for i in range(self.nWT)], 0, 1)

    def rep_str(self):
        """One-line summary of the farm (name, count, type, capacity)."""
        return "%s has %s %s wind turbines, with a total capacity of %4.1f MW" % (
            self.name, self.nWT, self.WT.turbine_type, sum(self.rated_power)/1E3)

    def __repr__(self):
        sep = "-------------------------------------------------------------"
        return '\n'.join([sep, self.rep_str(), sep])

    def _repr_html_(self):
        sep = "<br>"
        return '\n'.join([sep, self.rep_str(), sep])

    @staticmethod
    def _flow_rotation(wd):
        """2x2 rotation matrix from global to flow coordinates for wind
        direction ``wd`` [deg] (identity at wd=270)."""
        angle = np.radians(270. - wd)
        return np.array([[np.cos(angle), np.sin(angle)],
                         [-np.sin(angle), np.cos(angle)]])

    def turbineDistance(self, wd):
        """Computes the WT to WT distance in flow coordinates
        ranks the most of most upstream turbines

        Parameters
        ----------
        wd: float
            Wind direction in degrees

        Returns
        -------
        distFlowCoord: Vector from iWT to jWT: self.vectWTtoWT[:,i,j]
        nDownstream: list of int
            Number of turbines upstream of each turbine
        idWT: ndarray(int)
            Turbine indices sorted from most upstream to most downstream
        """
        ROT = self._flow_rotation(wd)
        distFlowCoord = np.einsum('ij,jkl->ikl', ROT, self.vectWTtoWT[:2, :, :])
        nDownstream = [(distFlowCoord[0, i, :] < 0).sum() for i in range(self.nWT)]
        ID0 = np.argsort(nDownstream)
        return distFlowCoord, nDownstream, ID0

    def toFlowCoord(self, wd, vect):
        """Rotates a 2xN np.array to flow coordinates

        Parameters
        ----------
        wd: float
            Wind direction in degrees
        vect: ndarray
            Vector or Matrix 2xN

        Returns
        -------
        vect: ndarray
            Vector or Matrix 2xN
        """
        return np.dot(self._flow_rotation(wd), vect)

    def get_T2T_gl_coord(self):
        """
        Function to calculate the turbine to turbine distances in the global
        coordinate system. (slower than version 2; kept as the loop-based
        reference implementation).

        Returns
        -------
        x_g  x component of distance between Tj and Ti := x_g[i,j]
        y_g  y component of distance between Tj and Ti := y_g[i,j]
        z_g  z component of distance between Tj and Ti := z_g[i,j]
        """
        x_g = np.zeros([self.nWT, self.nWT])
        y_g = np.zeros([self.nWT, self.nWT])
        z_g = np.zeros([self.nWT, self.nWT])
        for i in range(self.nWT):
            for j in range(self.nWT):
                x_g[i, j] = self.xyz[j, 0] - self.xyz[i, 0]
                y_g[i, j] = self.xyz[j, 1] - self.xyz[i, 1]
                z_g[i, j] = self.xyz[j, 2] - self.xyz[i, 2]
        return x_g, y_g, z_g

    def get_T2T_gl_coord2(self):
        """
        Function to calculate the turbine to turbine distances in the global
        coordinate system. (faster: reuses the precomputed vectWTtoWT).

        Returns
        -------
        x_g  x component of distance between Tj and Ti := x_g[i,j]
        y_g  y component of distance between Tj and Ti := y_g[i,j]
        z_g  z component of distance between Tj and Ti := z_g[i,j]
        """
        x_g, y_g, z_g = self.vectWTtoWT
        return x_g, y_g, z_g

    def plot(self, WT_num=False):
        """Scatter-plot the layout in rotor diameters; optionally annotate
        each turbine with its index. Returns (fig, ax) when matplotlib is
        available."""
        if MATPLOTLIB:
            x = (self.pos[:, 0] - min(self.pos[:, 0])) / (2. * self.WT.R)
            y = (self.pos[:, 1] - min(self.pos[:, 1])) / (2. * self.WT.R)
            fig, ax = plt.subplots()
            ax.scatter(x, y, c='black')
            if WT_num:
                for i in range(0, self.nWT):
                    ax.annotate(i, (x[i], y[i]))
            elif not WT_num:
                print('No annotation of turbines')
            ax.set_xlabel('x/D [-]')
            ax.set_ylabel('y/D [-]')
            ax.axis('equal')
            ax.set_title(self.name)
            return fig, ax

    def plot_order(self, wd):
        """Scatter-plot the layout in km, annotating turbines with their
        upstream ordering for wind direction ``wd``. Returns (fig, ax)."""
        if MATPLOTLIB:
            # BUGFIX: positions are stored as ndarray([nWT, 2]); the original
            # indexed rows (pos[0, :]) instead of columns as plot() does.
            x = (self.pos[:, 0] - min(self.pos[:, 0])) / 1000
            y = (self.pos[:, 1] - min(self.pos[:, 1])) / 1000
            dist, nDownstream, idWT = self.turbineDistance(wd)
            fig, ax = plt.subplots()
            ax.scatter(x, y, c='black')
            for i in range(0, self.nWT):
                ax.annotate(int(idWT[i]), (x[i], y[i]))
            ax.set_xlabel('x [km]')
            ax.set_ylabel('y [km]')
            ax.set_title(self.name + ' Wind direction ' + str(wd))
            return fig, ax

    def __getattr__(self, key):
        """Give access to a list of the properties of the turbines

        Parameters
        ----------
        key: str
            The parameter to return

        Returns
        -------
        parameters: list
            The parameter list of the turbines

        Example
        -------
        > wf = WindFarm(name='farm_name', yml=filename)
        > wf.rotor_diameter
        [80.0, 80.0, 80.0, 80.0, 80.0, ..., 80.0]
        """
        try:
            # Return added (original fell through to None on success; in
            # practice this path always raises, since __getattr__ only runs
            # after normal lookup has already failed).
            return super(WindFarm, self).__getattribute__(key)
        except Exception as e1:
            try:
                return [getattr(wt, key) for wt in self.WT]
            except Exception:
                # Re-raise the original AttributeError, not the fallback's.
                raise e1
| {
"repo_name": "DTUWindEnergy/FUSED-Wake",
"path": "fusedwake/WindFarm.py",
"copies": "1",
"size": "8900",
"license": "mit",
"hash": 2069431523932385500,
"line_mean": 32.8403041825,
"line_max": 142,
"alpha_frac": 0.5308988764,
"autogenerated": false,
"ratio": 3.4711388455538223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4502037721953822,
"avg_score": null,
"num_lines": null
} |
"""An offshore wind farm model
@moduleauthor:: Juan P. Murcia <jumu@dtu.dk>
"""
import numpy as np
try:
import scipy as sp
from scipy.interpolate import interp1d as interpolator
except Exception as e:
print(e.message)
# from scipy.interpolate import pchipInterpolator as interpolator
class WindTurbine(object):
"""Wind Turbine instance
Defines WT parameters and operational curves
"""
def __init__(self, name, refCurvesFile, H, R, CT_idle=0.053):
"""Initializes a WindTurbine object
Parameters
----------
name: str
Wind turbine name
refCurvesFile: str
Power and thrust coefficient curves text file.
H: float
Wind turbine hub height [m]
R: float
Radius [m]
Returns
-------
WindTurbine (WindTurbine)
"""
self.name = name
self.H = H
self.R = R
self.rotor_diameter = 2*R
refCurvesArray=np.loadtxt(refCurvesFile,delimiter=', ',skiprows=5)
self.refCurvesArray = refCurvesArray
self.CT_idle = CT_idle
self.c_t_idle = self.CT_idle
self.wt_init()
def wt_init(self):
self.ref_u = self.refCurvesArray[:,0]
self.ref_P = self.refCurvesArray[:,1]
self.power_curve = self.ref_P
self.ref_CT = self.refCurvesArray[:,2]
self.c_t_curve = self.ref_CT
self.u_cutin = self.ref_u[0]
self.cut_in_wind_speed = self.u_cutin
self.u_cutout = self.ref_u[-1]
self.cut_out_wind_speed = self.u_cutout
self.P_rated = np.max(self.ref_P)
self.PCI = interpolator(self.ref_u, self.ref_P)
self.CTCI = interpolator(self.ref_u, self.ref_CT)
index = np.nonzero(self.ref_P==self.P_rated)[0][0]
self.PCI_u = interpolator(
self.ref_P[:index+1],self.ref_u[:index+1])
self.u_rated = np.float(self.PCI_u(self.P_rated))
def __repr__(self):
print("-------------------------------------------------------------")
print("\t %s" % (self.name))
print("-------------------------------------------------------------")
print("\nHeight \t %s [m]\nRadius \t %s [m] \n" %(self.H, self.R))
print("-------------------------------------------------------------")
print("\t u [m/s] \t P [kW] \t CT [-]")
for row in self.refCurvesArray:
print('\t %0.0f \t\t %0.0f \t\t %0.3f'%(row[0],row[1]/1000.0,row[2]))
print("-------------------------------------------------------------")
return ''
def get_P(self,u):
"""Computes the Power of the WindTurbine
at the undisturbed wind speed
Parameters
----------
u: float
Undisturbed wind speed
Returns
-------
Power: float
WindTurbine object's power
"""
#return np.interp(u, self.ref_u, self.ref_P, left=0)
return ((u>=self.u_cutin)&(u<=self.u_cutout))*self.PCI(u)
def get_u(self, P):
"""Computes the undisturbed wind speed of the WindTurbine
given a power under rated power
Parameters
----------
P: float
Power
Returns
-------
u: float
Undisturbed wind speed
"""
return ((P >= 0.0) & (P <= self.P_rated)) * self.PCI_u(P)
def get_CT(self,u):
"""Computes the thrust coefficient of the WindTurbine
at the undisturbed wind speed
Parameters
----------
u: float
Undisturbed wind speed
Returns
-------
CT: float
Thrust coefficient
"""
#return np.interp(u, self.ref_u, self.ref_CT)
return ((u>=self.u_cutin)&(u<=self.u_cutout))*self.CTCI(u) + \
((u<self.u_cutin)|(u>self.u_cutout))*self.CT_idle
def get_a(self,CT):
"""Computes the axial velocity deficit coefficient at the rotor disc
bosed on the WindTurbine's thrust coefficient
.. math::
a = \\frac{1}{2} \\left( 1 - \\sqrt{1-C_T} \\right)
Parameters
----------
CT: float
Thrust coefficient
Returns
-------
a: float
Axial velocity deficit coefficient at the rotor disc
"""
return 0.5 * ( 1. - np.sqrt(1.-CT))
class WindTurbineDICT(WindTurbine):
    """Wind Turbine instance

    Defines WT parameters and operational curves from a windIO dictionary.

    Note: WindTurbine.__init__ is intentionally not called; wt_init below
    populates the equivalent attributes from the dictionaries instead.
    """
    def __init__(self, wt=None, wt_type=None):
        """Initializes a WindTurbineDICT object

        Parameters
        ----------
        wt: dict
            a WindIO dictionary containing the description of the turbine
        wt_type: dict
            a WindIO dictionary describing the turbine type (geometry, curves)

        Returns
        -------
        WindTurbine (WindTurbine)
        """
        # Copy wt_type so the caller's dict is not polluted with the
        # per-instance keys of wt (the original updated wt_type in place).
        self.data = dict(wt_type)
        self.data.update(wt)
        self.wt_init(wt, wt_type)

    def wt_init(self, wt, wt_type):
        """Populate turbine attributes and interpolators from the dicts."""
        self.name = wt['name']
        self.turbine_type = wt['turbine_type']
        self.position = wt['position']
        self.type = wt_type['name']
        self.H = wt_type['hub_height']
        self.R = wt_type['rotor_diameter'] / 2.0
        # Fixed: the original tested `'c_t_idle' in wt` but then read the
        # value from wt_type, raising KeyError when only wt carried the key.
        if 'c_t_idle' in wt_type:
            self.CT_idle = wt_type['c_t_idle']
        else:
            self.CT_idle = 0.056
        self.power_factor = 1000.0  # windIO rated power is in kW; convert to W
        if 'power_curve' in wt_type:
            self.pc = np.array(wt_type['power_curve'])
            self.ctc = np.array(wt_type['c_t_curve'])
        elif 'power_curves' in wt_type:  # TODO fix this??
            # Normalize the plural key spelling; the in-place wt_type update
            # is kept for backward compatibility with callers relying on it.
            wt_type['power_curve'] = wt_type['power_curves']
            wt_type['c_t_curve'] = wt_type['c_t_curves']
            self.pc = np.array(wt_type['power_curves'])
            self.ctc = np.array(wt_type['c_t_curves'])
        else:
            raise Exception('No power curve found')
        self.u_cutin = wt_type['cut_in_wind_speed']
        self.u_cutout = wt_type['cut_out_wind_speed']
        self.P_rated = wt_type['rated_power'] * self.power_factor
        # Forward interpolators: wind speed -> power / thrust coefficient
        self.PCI = interpolator(self.pc[:, 0], self.pc[:, 1] * self.power_factor)
        self.CTCI = interpolator(self.ctc[:, 0], self.ctc[:, 1])
        # Inverse power curve, only valid up to the first rated-power point
        index = np.nonzero(self.pc[:, 1] * self.power_factor == self.P_rated)[0][0]
        self.PCI_u = interpolator(self.pc[:index + 1, 1] * self.power_factor, self.pc[:index + 1, 0])
        self.u_rated = wt_type['rated_wind_speed']
        self.refCurvesArray = np.vstack([self.pc[:, 0].T,
                                         self.pc[:, 1].T * self.power_factor,
                                         self.CTCI(self.pc[:, 0].T)]).T

    def __getattr__(self, key):
        """Give access to a list of the properties of the turbine

        Parameters
        ----------
        key: str
            The parameter to return

        Returns
        -------
        The value stored under *key* in the merged windIO dictionary.
        """
        try:
            # __dict__ access avoids infinite recursion when 'data' itself
            # has not been set yet (e.g. during unpickling).
            return self.__dict__['data'][key]
        except KeyError:
            # Raise AttributeError (not KeyError) so hasattr(), copy and
            # pickle behave correctly for missing attributes.
            raise AttributeError(key)
'''
v80 = WindTurbine('Vestas v80 2MW offshore','V80_2MW_offshore.dat',70,40)
v80.display_windTurbine()
print(v80.get_P(u=np.array([10.5])))
print(v80.get_CT(u=np.array([10.5])))
'''
| {
"repo_name": "DTUWindEnergy/FUSED-Wake",
"path": "fusedwake/WindTurbine.py",
"copies": "1",
"size": "7224",
"license": "mit",
"hash": -5740627513500712000,
"line_mean": 29.1,
"line_max": 95,
"alpha_frac": 0.5150885936,
"autogenerated": false,
"ratio": 3.4630872483221475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44781758419221473,
"avg_score": null,
"num_lines": null
} |
# An old-skool scrolling game for the BBC microbit.
# You are in a spaceship decending to the surface of an unknown a planet. Use the buttons to manoeuvre left and
# right to avoid the accumulated space junk that is suspended in orbit around he planet.
# Your spaceship is the bright dot on the top line of the display. The the less bright dots are the space junk.
from microbit import *
import random
# Some constants
SCREEN_WIDTH = 5  # the micro:bit LED matrix is 5x5 pixels
SCREEN_HEIGHT = 5
SECONDS_PER_STEP = 1  # time between scenery scroll steps
POLLS_PER_SECOND = 10  # button-poll frequency within a step
SCENERY_LENGTH_IN_SCREENS = 5 # Tune this down to fit in RAM
NUM_OBSTACLES = 100  # requested count; fewer may fit after spacing rules
def make_obstacles(width, height, num):
    """Pick up to ``num`` obstacle cells on a width x height grid.

    The top three rows stay clear so the ship has room to start, and no
    two obstacles touch (even diagonally), so a path always exists.
    Returns a set of (x, y) tuples.
    """
    free = set()
    for x in range(width):
        for y in range(3, height):
            free.add((x, y))
    obstacles = set()
    for _ in range(num):
        if not free:
            break
        cell = random.choice(list(free))
        obstacles.add(cell)
        cx, cy = cell
        # Drop the chosen cell and its whole 8-neighbourhood from the
        # candidate pool so obstacles never end up adjacent.
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                free.discard((cx + dx, cy + dy))
    return obstacles
def make_scenery(width, height, num_obstacles):
    """Render a fresh obstacle field into a new Image of the given size."""
    scenery = Image(width, height)
    for x, y in make_obstacles(width, height, num_obstacles):
        scenery.set_pixel(x, y, 4)  # dim pixel = space junk
    return scenery
def move_ship(ship_x, ship_y):
    """Return the ship's new (x, y) based on the buttons.

    Button A moves left, button B moves right; both (or neither) pressed
    leaves the ship in place.  Movement is clamped to the screen edges.
    """
    a_pressed = button_a.is_pressed()
    b_pressed = button_b.is_pressed()
    if a_pressed and not b_pressed:
        return max(0, ship_x - 1), ship_y
    if b_pressed and not a_pressed:
        # SCREEN_WIDTH - 1 instead of the hard-coded 4, so the clamp stays
        # correct if the screen-size constant ever changes.
        return min(SCREEN_WIDTH - 1, ship_x + 1), ship_y
    return ship_x, ship_y
def detect_collision(scenery, ship_x, ship_y):
    """The ship collides when its cell already holds a non-zero pixel."""
    pixel = scenery.get_pixel(ship_x, ship_y)
    return pixel > 0
def draw_screen(scenery, ship_x, ship_y):
    """Show the visible window of the scenery with the ship overlaid."""
    frame = scenery.crop(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT).copy()
    frame.set_pixel(ship_x, ship_y, 9)  # brightest pixel marks the ship
    display.show(frame)
def scroll_up(scenery):
    """Scroll the scenery one row up, wrapping the top row to the bottom."""
    top_row = [scenery.get_pixel(x, 0) for x in range(scenery.width())]
    shifted = scenery.shift_up(1)
    bottom = shifted.height() - 1
    for x, value in enumerate(top_row):
        shifted.set_pixel(x, bottom, value)
    return shifted
# Main game loop: build a fresh obstacle field for each life, scroll it
# past the ship, and poll the buttons several times per scroll step.
while True:
    scenery = make_scenery(SCREEN_WIDTH,
                           SCREEN_HEIGHT * SCENERY_LENGTH_IN_SCREENS,
                           NUM_OBSTACLES)
    # Ship starts centred on the top row.
    ship_x = SCREEN_WIDTH // 2
    ship_y = 0
    collision = False
    while not collision:
        # Advance the world one row and check the ship's new cell.
        scenery= scroll_up(scenery)
        collision = detect_collision(scenery, ship_x, ship_y)
        if collision:
            break
        # Poll buttons and redraw several times before the next scroll.
        for _ in range(SECONDS_PER_STEP * POLLS_PER_SECOND):
            ship_x, ship_y = move_ship(ship_x, ship_y)
            collision = detect_collision(scenery, ship_x, ship_y)
            if collision:
                break
            draw_screen(scenery, ship_x, ship_y)
            sleep(1000 / POLLS_PER_SECOND)
    # Game over: show the skull, then restart with a new scenery.
    display.show(Image.SKULL)
    sleep(2000)
| {
"repo_name": "burrowsa/microbit-projects",
"path": "descender.py",
"copies": "1",
"size": "3293",
"license": "bsd-2-clause",
"hash": 6301568811550781000,
"line_mean": 28.1415929204,
"line_max": 111,
"alpha_frac": 0.6061342241,
"autogenerated": false,
"ratio": 3.2189638318670575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43250980559670577,
"avg_score": null,
"num_lines": null
} |
# An old version of OpenAI Gym's multi_discrete.py. (Was getting affected by Gym updates)
# (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py)
import numpy as np
import gym
from gym.spaces import prng
class MultiDiscrete(gym.Space):
    """A product of several discrete action spaces.

    Each sub-space accepts any integer in its own inclusive [min, max]
    range; the whole space is parametrized by a list of such pairs, e.g.
    ``MultiDiscrete([[0, 4], [0, 1], [0, 1]])`` for a NES-style controller
    (arrow keys: NOOP/UP/RIGHT/DOWN/LEFT, button A, button B).  It can
    stand in for either a Discrete or a continuous (Box) action space;
    by convention the value 0 always represents the NOOP action.
    """
    def __init__(self, array_of_param_array):
        pairs = array_of_param_array
        self.low = np.array([pair[0] for pair in pairs])
        self.high = np.array([pair[1] for pair in pairs])
        self.num_discrete_space = self.low.shape[0]

    def sample(self):
        """Returns a array with one sample from each discrete action space"""
        # Per sub-space: floor(random * (max - min + 1) + min)
        rand = prng.np_random.rand(self.num_discrete_space)
        scaled = np.multiply((self.high - self.low + 1.), rand) + self.low
        return [int(value) for value in np.floor(scaled)]

    def contains(self, x):
        """True when x has the right length and every entry is in range."""
        values = np.array(x)
        return len(x) == self.num_discrete_space and (values >= self.low).all() and (values <= self.high).all()

    @property
    def shape(self):
        return self.num_discrete_space

    def __repr__(self):
        return "MultiDiscrete" + str(self.num_discrete_space)
def __eq__(self, other):
return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high) | {
"repo_name": "openai/multiagent-particle-envs",
"path": "multiagent/multi_discrete.py",
"copies": "1",
"size": "2344",
"license": "mit",
"hash": -6337018165133105000,
"line_mean": 52.2954545455,
"line_max": 122,
"alpha_frac": 0.6616894198,
"autogenerated": false,
"ratio": 3.3014084507042254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4463097870504225,
"avg_score": null,
"num_lines": null
} |
# An OLE file format parser
import os
import struct
import logging
import datetime
# Property identifiers of the SummaryInformation property set
# (title, author, timestamps, counts, ...).
PIDSI = {'PIDSI_CODEPAGE':0x01, 'PIDSI_TITLE':0x02, 'PIDSI_SUBJECT':0x03, 'PIDSI_AUTHOR':0x04, 'PIDSI_KEYWORDS':0x05,
         'PIDSI_COMMENTS':0x06, 'PIDSI_TEMPLATE':0x07, 'PIDSI_LASTAUTHOR':0x08, 'PIDSI_REVNUMBER':0x09, 'PIDSI_EDITTIME':0x0A,
         'PIDSI_LASTPRINTED':0x0B, 'PIDSI_CREATE_DTM':0x0C, 'PIDSI_LASTSAVE_DTM':0x0D, 'PIDSI_PAGECOUNT':0x0E, 'PIDSI_WORDCOUNT':0x0F,
         'PIDSI_CHARCOUNT':0x10, 'PIDSI_APPNAME':0x12, 'PIDSI_DOC_SECURITY':0x13}
# Property identifiers of the DocumentSummaryInformation property set
# (company, counts, heading pairs, version, ...).
PIDDSI = {'GKPIDDSI_CODEPAGE':0x01, 'GKPIDDSI_CATEGORY':0x02, 'GKPIDDSI_PRESFORMAT':0x03, 'GKPIDDSI_BYTECOUNT':0x04, 'GKPIDDSI_LINECOUNT':0x05,
          'GKPIDDSI_PARACOUNT':0x06, 'GKPIDDSI_SLIDECOUNT':0x07, 'GKPIDDSI_NOTECOUNT':0x08, 'GKPIDDSI_HIDDENCOUNT':0x09, 'GKPIDDSI_MMCLIPCOUNT':0x0A,
          'GKPIDDSI_SCALE':0x0B, 'GKPIDDSI_HEADINGPAIR':0x0C, 'GKPIDDSI_DOCPARTS':0x0D, 'GKPIDDSI_MANAGER':0x0E, 'GKPIDDSI_COMPANY':0x0F,
          'GKPIDDSI_LINKSDIRTY':0x10, 'GKPIDDSI_CCHWITHSPACES':0x11, 'GKPIDDSI_SHAREDDOC':0x13, 'GKPIDDSI_LINKBASE':0x14, 'GKPIDDSI_HLINKS':0x15,
          'GKPIDDSI_HYPERLINKSCHANGED':0x16, 'GKPIDDSI_VERSION':0x17, 'GKPIDDSI_DIGSIG':0x18, 'GKPIDDSI_CONTENTTYPE':0x1A, 'GKPIDDSI_CONTENTSTATUS':0x1B,
          'GKPIDDSI_LANGUAGE':0x1C, 'GKPIDDSI_DOCVERSION':0x1D}
# Variant property type tags used inside property sets; VT_VECTOR and
# VT_ARRAY are flag bits combined with an element type.
PropertyType= {'VT_EMPTY':0x00, 'VT_NULL':0x01, 'VT_I2':0x02, 'VT_I4':0x03, 'VT_R4':0x04, 'VT_R8':0x05, 'VT_CY':0x06, 'VT_DATE': 0x07, 'VT_BSTR':0x08,
               'VT_ERROR':0x0A, 'VT_BOOL':0x0B, 'VT_VARIANT':0x0C, 'VT_DECIMAL':0x0E, 'VT_I1':0x10, 'VT_UI1':0x11, 'VT_UI2':0x12, 'VT_UI4':0x13, 'VT_I8':0x14, 'VT_UI8':0x15,
               'VT_INT':0x16, 'VT_UINT':0x17, 'VT_LPSTR':0x1E, 'VT_LPWSTR':0x1F, 'VT_FILETIME':0x40, 'VT_BLOB':0x41, 'VT_STREAM':0x42, 'VT_STORAGE':0x43,
               'VT_STREAMED_Object':0x44, 'VT_STORED_Object':0x45, 'VT_BLOB_Object':0x46, 'VT_CF':0x47, 'VT_CLSID':0x48, 'VT_VERSIONED_STREAM':0x49,
               'VT_VECTOR':0x1000, 'VT_ARRAY':0x2000}
def init_logging(debug):
    """Configure the shared 'ole.logger' used by all parser classes.

    In debug mode, messages go to both the console and a ``debug.log``
    file at DEBUG level; otherwise only ERRORs reach the console.
    """
    logger = logging.getLogger('ole.logger')
    console = logging.StreamHandler()
    fmt = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
    level = logging.DEBUG if debug else logging.ERROR
    logger.setLevel(level)
    console.setLevel(level)
    if debug:
        # Debug runs also get a persistent log file.
        file_handler = logging.FileHandler('debug.log')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
    console.setFormatter(fmt)
    logger.addHandler(console)
    if debug:
        logger.debug('In debug mode.')
class OLEBase:
    """Common plumbing shared by all OLE structure parsers: the shared
    logger, error raising, and FILETIME conversion."""

    ole_logger = logging.getLogger('ole.logger')

    def __init__(self):
        pass

    def _raise_exception(self, error):
        """Log *error* as a warning, then raise it as a generic Exception."""
        self.ole_logger.warning(error)
        raise Exception(error)

    def _filetime_to_datetime(self, microseconds):
        """Convert a Windows FILETIME value (100-ns intervals since
        1601-01-01) to a 'YYYY-MM-DD HH:MM:SS' string."""
        # '//' keeps integer semantics on both Python 2 and 3; the original
        # used '/', which yields a float on Python 3 and skews the result.
        seconds, microseconds = divmod(microseconds // 10, 1000000)
        days, seconds = divmod(seconds, 86400)
        date_time = datetime.datetime(1601, 1, 1, 0, 0, 0) + datetime.timedelta(days, seconds, microseconds)
        return str(date_time)
class OLEHeader(OLEBase):
    """Parser for the 512-byte header of an OLE compound file (MS-CFB).

    Validates each header field against the format rules, logging every
    parsed value at DEBUG level and raising (via _raise_exception) on any
    violation.  Written for Python 2: *data* is a byte string (str) and
    values are compared against '\\x..' string literals.
    """
    # Class-level defaults; every field is re-initialized per instance
    # in __init__ (the class attributes only document the schema).
    Signature = ''
    CLSID = ''
    MinorVersion = 0
    MajorVersion = 0
    ByteOrder = 0
    SectorShift = 0
    MiniSectorShift = 0
    Reserved = ''
    NumberOfDirectorySectors = 0
    NumberOfFATSectors = 0
    FirstDirecotrySector = 0
    TransactionSignatureNumber = 0
    MiniStreamCutoffSize = 0
    FirstMiniFATSector = 0
    NumberOfMiniFATSectors = 0
    FirstDIFATSector = 0
    NumberOfDIFATSectors = 0
    DIFAT = list()
    def __init__(self, data):
        """Parse and validate the OLE header from *data* (first sector)."""
        # Reset all fields so instances never share the class-level defaults
        # (DIFAT in particular is a mutable class attribute).
        self.Signature = ''
        self.CLSID = ''
        self.MinorVersion = 0
        self.MajorVersion = 0
        self.ByteOrder = 0
        self.SectorShift = 0
        self.MiniSectorShift = 0
        self.Reserved = ''
        self.NumberOfDirectorySectors = 0
        self.NumberOfFATSectors = 0
        self.FirstDirecotrySector = 0
        self.TransactionSignatureNumber = 0
        self.MiniStreamCutoffSize = 0
        self.FirstMiniFATSector = 0
        self.NumberOfMiniFATSectors = 0
        self.FirstDIFATSector = 0
        self.NumberOfDIFATSectors = 0
        self.DIFAT = list()
        # Magic signature D0 CF 11 E0 A1 B1 1A E1 is mandatory.
        self.Signature = data[0x00:0x08]
        self.ole_logger.debug('OLEHeader.Signature: ' + self.Signature.encode('hex').upper())
        if self.Signature != '\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1':
            self._raise_exception('OLEHeader.Signature verify failed.')
        self.CLSID = data[0x08:0x18]
        self.ole_logger.debug('OLEHeader.CLSID: ' + self.CLSID.encode('hex').upper())
        if self.CLSID != '\x00' * 16:
            self.ole_logger.warning('OLEHeader.CLSID is not null.')
        self.MinorVersion = struct.unpack('<H', data[0x18:0x1A])[0]
        self.ole_logger.debug('OLEHeader.MinorVersion: ' + str(hex(self.MinorVersion)))
        # Major version 3 => 512-byte sectors, 4 => 4096-byte sectors.
        self.MajorVersion = struct.unpack('<H', data[0x1A:0x1C])[0]
        self.ole_logger.debug('OLEHeader.MajorVersion: ' + str(hex(self.MajorVersion)))
        if self.MajorVersion != 0x03 and self.MajorVersion != 0x04:
            self._raise_exception('OLEHeader.MajorVersion has an abnormal value.')
        # Only little-endian (0xFFFE) files are valid.
        self.ByteOrder = struct.unpack('<H', data[0x1C:0x1E])[0]
        if self.ByteOrder == 0xFFFE:
            self.ole_logger.debug('OLEHeader.ByteOrder: ' + str(hex(self.ByteOrder)) + ' (little-endian)')
        else:
            self.ole_logger.debug('OLEHeader.ByteOrder: ' + str(hex(self.ByteOrder)))
            self._raise_exception('OLEHeader.ByteOrder has an abnormal value.')
        # Sector size is 2**SectorShift: 0x09 -> 512 bytes, 0x0C -> 4096 bytes.
        self.SectorShift = struct.unpack('<H', data[0x1E:0x20])[0]
        if self.SectorShift == 0x09:
            self.ole_logger.debug('OLEHeader.SectorShift: ' + str(hex(self.SectorShift)) + ' (512 bytes)')
        elif self.SectorShift == 0x0C:
            self.ole_logger.debug('OLEHeader.SectorShift: ' + str(hex(self.SectorShift)) + ' (4096 bytes)')
        else:
            self.ole_logger.debug('OLEHeader.SectorShift: ' + str(hex(self.SectorShift)))
            self._raise_exception('OLEHeader.SectorShift has an abnormal value.')
        # Mini-sector size must be 2**6 = 64 bytes.
        self.MiniSectorShift = struct.unpack('<H', data[0x20:0x22])[0]
        if self.MiniSectorShift == 0x06:
            self.ole_logger.debug('OLEHeader.MiniSectorShift: ' + str(hex(self.MiniSectorShift)) + ' (64 bytes)')
        else:
            self.ole_logger.debug('OLEHeader.MiniSectorShift: ' + str(hex(self.MiniSectorShift)))
            self._raise_exception('OLEHeader.MiniSectorShift has an abnormal value.')
        self.Reserved = data[0x22:0x28]
        self.ole_logger.debug('OLEHeader.Reserved: ' + self.Reserved.encode('hex').upper())
        if self.Reserved != '\x00' * 6:
            self.ole_logger.warning('OLEHeader.Reserved is not all zeros.')
        # Must be zero for version-3 files; only version 4 uses it.
        self.NumberOfDirectorySectors = struct.unpack('<I', data[0x28:0x2C])[0]
        self.ole_logger.debug('OLEHeader.NumberOfDirectorySectors: ' + str(hex(self.NumberOfDirectorySectors)))
        if self.NumberOfDirectorySectors != 0x0 and self.MajorVersion != 0x04:
            self._raise_exception('OLEHeader.NumberOfDirectorySectors has an abnormal value.')
        self.NumberOfFATSectors = struct.unpack('<I', data[0x2C:0x30])[0]
        self.ole_logger.debug('OLEHeader.NumberOfFATSectors: ' + str(hex(self.NumberOfFATSectors)))
        self.FirstDirecotrySector = struct.unpack('<I', data[0x30:0x34])[0]
        self.ole_logger.debug('OLEHeader.FirstDirecotrySector: ' + str(hex(self.FirstDirecotrySector)))
        if self.FirstDirecotrySector == 0:
            self._raise_exception('OLEHeader.FirstDirecotrySector is zero.')
        self.TransactionSignatureNumber = struct.unpack('<I', data[0x34:0x38])[0]
        self.ole_logger.debug('OLEHeader.TransactionSignatureNumber: ' + str(hex(self.TransactionSignatureNumber)))
        # Streams below this size live in the mini stream; must be 4096.
        self.MiniStreamCutoffSize = struct.unpack('<I', data[0x38:0x3C])[0]
        self.ole_logger.debug('OLEHeader.MiniStreamCutoffSize: ' + str(hex(self.MiniStreamCutoffSize)))
        if self.MiniStreamCutoffSize != 0x1000:
            self._raise_exception('OLEHeader.MiniStreamCutoffSize has an abnormal value.')
        self.FirstMiniFATSector = struct.unpack('<I', data[0x3C:0x40])[0]
        self.ole_logger.debug('OLEHeader.FirstMiniFATSector: ' + str(hex(self.FirstMiniFATSector)))
        self.NumberOfMiniFATSectors = struct.unpack('<I', data[0x40:0x44])[0]
        self.ole_logger.debug('OLEHeader.NumberOfMiniFATSectors: ' + str(hex(self.NumberOfMiniFATSectors)))
        # 0xFFFFFFFE = ENDOFCHAIN; inconsistent with a non-zero sector count.
        if self.NumberOfMiniFATSectors > 0 and self.FirstMiniFATSector == 0xFFFFFFFE:
            self._raise_exception('OLEHeader.NumberOfMiniFATSectors or OLEHeader.FirstMiniFATSector has an abnormal value.')
        self.FirstDIFATSector = struct.unpack('<I', data[0x44:0x48])[0]
        self.ole_logger.debug('OLEHeader.FirstDIFATSector: ' + str(hex(self.FirstDIFATSector)))
        self.NumberOfDIFATSectors = struct.unpack('<I', data[0x48:0x4C])[0]
        self.ole_logger.debug('OLEHeader.NumberOfDIFATSectors: ' + str(hex(self.NumberOfDIFATSectors)))
        if self.NumberOfDIFATSectors > 0 and self.FirstDIFATSector == 0xFFFFFFFE:
            self._raise_exception('OLEHeader.NumberOfDIFATSectors or OLEHeader.FirstDIFATSector has an abnormal value.')
        # Read the 109 in-header DIFAT entries; 0xFFFFFFFF marks the first
        # unused slot and ends the used region.
        for i in range(0, 109):
            difat = struct.unpack('<I', data[0x4C+i*4:0x4C+i*4+4])[0]
            if difat == 0xFFFFFFFF:
                break
            self.ole_logger.debug('OLEHeader.DIFAT[' + str(i) + '] :' + str(hex(difat)))
            self.DIFAT.append(difat)
            # This in-loop increment is overwritten on every iteration; it
            # only matters when the loop completes all 109 entries without
            # breaking, leaving i == 109 so the validation loop below is
            # skipped instead of re-checking the (valid) last entry.
            i += 1
        # All slots after the first free one must also be 0xFFFFFFFF.
        for j in range(i, 109):
            difat = struct.unpack('<I', data[0x4C+j*4:0x4C+j*4+4])[0]
            if difat != 0xFFFFFFFF:
                self._raise_exception('OLEHeader.DIFAT[' + str(j) + '] has an abnormal value.')
class Directory(OLEBase):
    """Parser for one 128-byte OLE directory entry (MS-CFB).

    Written for Python 2: *data* is a byte string (str), hence
    ord(data[...]) and str.decode('utf-16').  Every field is logged at
    DEBUG level; layout violations raise via _raise_exception.
    """
    # Class-level defaults documenting the schema; all fields are
    # re-initialized per instance in __init__.
    Name = ''
    NameLength = 0
    ObjectType = 0
    ColorFlag = 0
    LeftSiblingID = 0
    RightSiblingID = 0
    ChildID = 0
    CLSID = ''
    StateBits = 0
    CreationTime = ''
    ModifiedTime = ''
    StartingSector = 0
    StreamSize = 0
    def __init__(self, data):
        """Parse and validate a directory entry from the 128 bytes *data*."""
        self.Name = ''
        self.NameLength = 0
        self.ObjectType = 0
        self.ColorFlag = 0
        self.LeftSiblingID = 0
        self.RightSiblingID = 0
        self.ChildID = 0
        self.CLSID = ''
        self.StateBits = 0
        self.CreationTime = ''
        self.ModifiedTime = ''
        self.StartingSector = 0
        self.StreamSize = 0
        # Name is a null-terminated UTF-16 string of at most 64 bytes.
        self.Name = data[0:0x40].decode('utf-16').strip('\x00')
        self.ole_logger.debug('Dir.Name: ' + self.Name)
        # NameLength counts bytes including the 2-byte terminator.
        self.NameLength = struct.unpack('<H', data[0x40:0x42])[0]
        self.ole_logger.debug('Dir.NameLength: ' + str(self.NameLength))
        if self.NameLength != len(self.Name)*2+2:
            self._raise_exception('DirectoryEntry.NameLength has a wrong value.')
        self.ObjectType = ord(data[0x42])
        if self.ObjectType == 0x00:
            self.ole_logger.debug('Dir.ObjectType: ' + str(self.ObjectType) + ' (unallocated)')
        elif self.ObjectType == 0x01:
            self.ole_logger.debug('Dir.ObjectType: ' + str(self.ObjectType) + ' (storage object)')
        elif self.ObjectType == 0x02:
            self.ole_logger.debug('Dir.ObjectType: ' + str(self.ObjectType) + ' (stream object)')
        elif self.ObjectType == 0x05:
            self.ole_logger.debug('Dir.ObjectType: ' + str(self.ObjectType) + ' (root storage object)')
        else:
            self._raise_exception('DirectoryEntry.ObjectType has an abnormal value.')
        # Red/black color of this node in the directory tree.
        self.ColorFlag = ord(data[0x43])
        if self.ColorFlag == 0x00:
            self.ole_logger.debug('Dir.ColorFlag: ' + str(self.ColorFlag) + ' (red)')
        elif self.ColorFlag == 0x01:
            self.ole_logger.debug('Dir.ColorFlag: ' + str(self.ColorFlag) + ' (black)')
        else:
            self._raise_exception('DirectoryEntry.ColorFlag has an abnormal value.')
        # Sibling/child IDs: <= 0xFFFFFFF9 is a regular stream ID (REGSID),
        # 0xFFFFFFFF means "no stream" (NOSTREAM).
        self.LeftSiblingID = struct.unpack('<I', data[0x44:0x48])[0]
        if self.LeftSiblingID >= 0 and self.LeftSiblingID <= 0xFFFFFFF9:
            self.ole_logger.debug('Dir.LeftSiblingID: ' + str(hex(self.LeftSiblingID)) + ' (REGSID)')
        elif self.LeftSiblingID == 0xFFFFFFFF:
            self.ole_logger.debug('Dir.LeftSiblingID: ' + str(hex(self.LeftSiblingID)) + ' (NOSTREAM)')
        else:
            self._raise_exception('DirectoryEntry.LeftSiblingID has an abnormal value.')
        self.RightSiblingID = struct.unpack('<I', data[0x48:0x4C])[0]
        if self.RightSiblingID >= 0 and self.RightSiblingID <= 0xFFFFFFF9:
            self.ole_logger.debug('Dir.RightSiblingID: ' + str(hex(self.RightSiblingID)) + ' (REGSID)')
        elif self.RightSiblingID == 0xFFFFFFFF:
            # Fixed copy/paste bug: this log line previously said
            # 'Dir.LeftSiblingID' while reporting the right sibling.
            self.ole_logger.debug('Dir.RightSiblingID: ' + str(hex(self.RightSiblingID)) + ' (NOSTREAM)')
        else:
            self._raise_exception('DirectoryEntry.RightSiblingID has an abnormal value.')
        self.ChildID = struct.unpack('<I', data[0x4C:0x50])[0]
        if self.ChildID >= 0 and self.ChildID <= 0xFFFFFFF9:
            self.ole_logger.debug('Dir.ChildID: ' + str(hex(self.ChildID)) + ' (REGSID)')
        elif self.ChildID == 0xFFFFFFFF:
            self.ole_logger.debug('Dir.ChildID: ' + str(hex(self.ChildID)) + ' (NOSTREAM)')
        else:
            self._raise_exception('DirectoryEntry.ChildID has an abnormal value.')
        self.CLSID = data[0x50:0x60]
        self.ole_logger.debug('Dir.CLSID: ' + self.CLSID.encode('hex'))
        self.StateBits = struct.unpack('<I', data[0x60:0x64])[0]
        self.ole_logger.debug('Dir.StateBits: ' + str(hex(self.StateBits)))
        # Timestamps are Windows FILETIME values.
        self.CreationTime = struct.unpack('<Q', data[0x64:0x6C])[0]
        self.ole_logger.debug('Dir.CreationTime: ' + self._filetime_to_datetime(self.CreationTime))
        self.ModifiedTime = struct.unpack('<Q', data[0x6C:0x74])[0]
        self.ole_logger.debug('Dir.ModifiedTime: ' + self._filetime_to_datetime(self.ModifiedTime))
        self.StartingSector = struct.unpack('<I', data[0x74:0x78])[0]
        self.ole_logger.debug('Dir.StartingSector: ' + str(hex(self.StartingSector)))
        self.StreamSize = struct.unpack('<Q', data[0x78:0x80])[0]
        self.ole_logger.debug('Dir.StreamSize: ' + str(hex(self.StreamSize)))
class PropertyIdentifierAndOffset(OLEBase):
    """One (identifier, offset) pair from a property-set header."""

    PropertyIdentifier = 0
    Offset = 0

    def __init__(self, data):
        """Parse the 8-byte pair: a uint32 property ID followed by a
        uint32 offset (relative to the start of the property set)."""
        self.PropertyIdentifier = struct.unpack('<I', data[0:4])[0]
        self.ole_logger.debug('PropertyIdentifierAndOffset.PropertyIdentifier: ' + str(hex(self.PropertyIdentifier)))
        self.Offset = struct.unpack('<I', data[4:8])[0]
        self.ole_logger.debug('PropertyIdentifierAndOffset.Offset: ' + str(hex(self.Offset)))
class DocSummaryInfoPropertySet(OLEBase):
    """Parser for the DocumentSummaryInformation property set.

    Reads the (identifier, offset) table, slices each property's raw
    bytes out of *data*, then decodes the properties this tool cares
    about (codepage, company, counts, version, doc parts, heading
    pairs).  Written for Python 2: *data* is a byte string and '/' is
    integer division.
    """
    # Class-level defaults documenting the schema; re-initialized per
    # instance in __init__.
    Size = 0
    NumProperties = 0
    PropertyIdentifierAndOffset = list()
    Property = list()
    def __init__(self, data):
        """Parse the property set from *data* (the section's bytes)."""
        self.Size = 0
        self.NumProperties = 0
        self.PropertyIdentifierAndOffset = list()
        self.Property = list()
        self.Size = struct.unpack('<I', data[0x00:0x04])[0]
        self.ole_logger.debug('DocSummaryInfoPropertySet.Size: ' + str(hex(self.Size)))
        self.NumProperties = struct.unpack('<I', data[0x04:0x08])[0]
        self.ole_logger.debug('DocSummaryInfoPropertySet.NumProperties: ' + str(hex(self.NumProperties)))
        # The header is followed by NumProperties 8-byte (id, offset) pairs.
        for i in range(0, self.NumProperties):
            piao = PropertyIdentifierAndOffset(data[0x08+i*8:0x08+i*8+8])
            self.PropertyIdentifierAndOffset.append(piao)
        # Slice each property's raw bytes; each one normally runs from its
        # offset to the next entry's offset, the last one to self.Size.
        for i in range(0, self.NumProperties):
            if (i+1) < self.NumProperties:
                if self.PropertyIdentifierAndOffset[i].Offset < self.PropertyIdentifierAndOffset[i+1].Offset:
                    property = data[self.PropertyIdentifierAndOffset[i].Offset:self.PropertyIdentifierAndOffset[i+1].Offset]
                else:
                    # Out-of-order offsets: fall back to slicing up to the
                    # end of the property set.
                    self.ole_logger.warning('DocSummaryInfoPropertySet.PropertyIdentifierAndOffset.Offset is not in increasing order.')
                    property = data[self.PropertyIdentifierAndOffset[i].Offset:self.Size]
            else:
                property = data[self.PropertyIdentifierAndOffset[i].Offset:self.Size]
            self.Property.append(property)
        # Decode the known property IDs.  Each property starts with a
        # 2-byte variant type tag followed by its value.
        for i in range(0, self.NumProperties):
            # Codepage: must be VT_I2.
            if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_CODEPAGE']:
                type = struct.unpack('<H', self.Property[i][0x00:0x02])[0]
                self.ole_logger.debug('Property.GKPIDDSI_CODEPAGE.type: ' + str(hex(type)))
                if type != PropertyType['VT_I2']:
                    self._raise_exception('Property.GKPIDDSI_CODEPAGE has an abnormal value.')
                codepage = struct.unpack('<H', self.Property[i][0x04:0x06])[0]
                self.ole_logger.debug('Property.GKPIDDSI_CODEPAGE: ' + str(hex(codepage)))
                continue
            # Company: a length-prefixed ANSI (VT_LPSTR) or UTF-16
            # (VT_LPWSTR) string.
            if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_COMPANY']:
                type = struct.unpack('<H', self.Property[i][0x00:0x02])[0]
                self.ole_logger.debug('Property.GKPIDDSI_COMPANY.type: ' + str(hex(type)))
                cch = struct.unpack('<I', self.Property[i][0x04:0x08])[0]
                self.ole_logger.debug('Property.GKPIDDSI_COMPANY.cch: ' + str(hex(cch)))
                if cch > 0x0000FFFF:
                    self._raise_exception('Property.GKPIDDSI_COMPANY.cch has an abnormal value.')
                if type == PropertyType['VT_LPSTR']:
                    company = self.Property[i][0x08:0x08+cch]
                elif type == PropertyType['VT_LPWSTR']:
                    company = self.Property[i][0x08:0x08+cch*2].decode('utf-16')
                else:
                    self._raise_exception('Property.GKPIDDSI_COMPANY has an abnormal value.')
                self.ole_logger.debug('Property.GKPIDDSI_COMPANY: ' + company)
                continue
            # Line count: VT_I4.
            if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_LINECOUNT']:
                type = struct.unpack('<H', self.Property[i][0x00:0x02])[0]
                self.ole_logger.debug('Property.GKPIDDSI_LINECOUNT.type: ' + str(hex(type)))
                if type != PropertyType['VT_I4']:
                    self._raise_exception('Property.GKPIDDSI_LINECOUNT has an abnormal value.')
                linecount = struct.unpack('<I', self.Property[i][0x04:0x08])[0]
                self.ole_logger.debug('Property.GKPIDDSI_LINECOUNT: ' + str(hex(linecount)))
                continue
            # Paragraph count: VT_I4.
            if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_PARACOUNT']:
                type = struct.unpack('<H', self.Property[i][0x00:0x02])[0]
                self.ole_logger.debug('Property.GKPIDDSI_PARACOUNT.type: ' + str(hex(type)))
                if type != PropertyType['VT_I4']:
                    self._raise_exception('Property.GKPIDDSI_PARACOUNT has an abnormal value.')
                pagecount = struct.unpack('<I', self.Property[i][0x04:0x08])[0]
                self.ole_logger.debug('Property.GKPIDDSI_PARACOUNT: ' + str(hex(pagecount)))
                continue
            # Character count including spaces: VT_I4.
            if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_CCHWITHSPACES']:
                type = struct.unpack('<H', self.Property[i][0x00:0x02])[0]
                self.ole_logger.debug('Property.GKPIDDSI_CCHWITHSPACES.type: ' + str(hex(type)))
                if type != PropertyType['VT_I4']:
                    self._raise_exception('Property.GKPIDDSI_CCHWITHSPACES has an abnormal value.')
                pagecount = struct.unpack('<I', self.Property[i][0x04:0x08])[0]
                self.ole_logger.debug('Property.GKPIDDSI_CCHWITHSPACES: ' + str(hex(pagecount)))
                continue
            # Application version: VT_I4 holding minor (low word) and
            # major (high word) version numbers.
            if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_VERSION']:
                type = struct.unpack('<H', self.Property[i][0x00:0x02])[0]
                self.ole_logger.debug('Property.GKPIDDSI_VERSION.type: ' + str(hex(type)))
                if type != PropertyType['VT_I4']:
                    self._raise_exception('Property.GKPIDDSI_VERSION has an abnormal value.')
                minorversion = struct.unpack('<H', self.Property[i][0x04:0x06])[0]
                majorverson= struct.unpack('<H', self.Property[i][0x06:0x08])[0]
                if majorverson == 0:
                    self._raise_exception('Property.GKPIDDSI_VERSION.MajorVersion has an abnormal value.')
                self.ole_logger.debug('Property.GKPIDDSI_VERSION.MajorVersion: ' + str(hex(majorverson)))
                self.ole_logger.debug('Property.GKPIDDSI_VERSION.MinorVersion: ' + str(hex(minorversion)))
                continue
            # Document parts: a vector of length-prefixed strings (ANSI or
            # UTF-16); offsets advance by 4 + byte length per element.
            if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_DOCPARTS']:
                type = struct.unpack('<H', self.Property[i][0x00:0x02])[0]
                self.ole_logger.debug('Property.GKPIDDSI_DOCPARTS.type: ' + str(hex(type)))
                celements = struct.unpack('<I', self.Property[i][0x04:0x08])[0]
                self.ole_logger.debug('Property.GKPIDDSI_DOCPARTS.vtValue.cElements: ' + str(hex(celements)))
                if type == (PropertyType['VT_VECTOR'] | PropertyType['VT_LPSTR']):
                    offset = 0
                    for j in range(0, celements):
                        cch = struct.unpack('<I', self.Property[i][0x08+offset:0x0C+offset])[0]
                        self.ole_logger.debug('Property.GKPIDDSI_DOCPARTS.vtValue.rgString[' + str(j) + '].cch: ' + str(hex(cch)))
                        if cch > 0x0000FFFF:
                            self._raise_exception('Property.GKPIDDSI_DOCPARTS.vtValue.rgString[' + str(j) + '].cch has an abnormal value.')
                        value = self.Property[i][0x0C+offset:0x0C+offset+cch]
                        self.ole_logger.debug('Property.GKPIDDSI_DOCPARTS.vtValue.rgString[' + str(j) + ']: ' + value.encode('hex'))
                        offset = offset + 4 + cch
                elif type == (PropertyType['VT_VECTOR'] | PropertyType['VT_LPWSTR']):
                    offset = 0
                    for j in range(0, celements):
                        cch = struct.unpack('<I', self.Property[i][0x08+offset:0x0C+offset])[0]
                        self.ole_logger.debug('Property.GKPIDDSI_DOCPARTS.vtValue.rgString[' + str(j) + '].cch: ' + str(hex(cch)))
                        if cch > 0x0000FFFF:
                            self._raise_exception('Property.GKPIDDSI_DOCPARTS.vtValue.rgString[' + str(j) + '].cch has an abnormal value.')
                        value = self.Property[i][0x0C+offset:0x0C+offset+cch*2].decode('utf-16')
                        self.ole_logger.debug('Property.GKPIDDSI_DOCPARTS.vtValue.rgString[' + str(j) + ']: ' + value.encode('hex'))
                        offset = offset + 4 + cch*2
                else:
                    self._raise_exception('Property.GKPIDDSI_DOCPARTS.type has an abnormal value.')
                continue
            # Heading pairs: a VT_VECTOR|VT_VARIANT list alternating a
            # heading string and a VT_I4 part count; cElements counts both,
            # hence the celements/2 iterations (Python 2 integer division).
            if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_HEADINGPAIR']:
                type = struct.unpack('<H', self.Property[i][0x00:0x02])[0]
                self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.type: ' + str(hex(type)))
                if type != (PropertyType['VT_VECTOR'] | PropertyType['VT_VARIANT']):
                    self._raise_exception('Property.GKPIDDSI_HEADINGPAIR.type has an abnormal value.')
                celements = struct.unpack('<I', self.Property[i][0x04:0x08])[0]
                self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.cElements: ' + str(hex(celements)))
                offset = 0
                for j in range(0, celements/2):
                    strtype = struct.unpack('<H', self.Property[i][0x08+offset:0x0A+offset])[0]
                    self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headingString.type: ' + str(hex(strtype)))
                    cch = struct.unpack('<I', self.Property[i][0x0C+offset:0x10+offset])[0]
                    self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headingString.cch: ' + str(hex(cch)))
                    if cch > 0x0000FFFF:
                        self._raise_exception('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headingString.cch has an abnormal value.')
                    if strtype == PropertyType['VT_LPSTR']:
                        value = self.Property[i][0x10+offset:0x10+offset+cch]
                        self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headingString: ' + value)
                        partstype = struct.unpack('<H', self.Property[i][0x10+offset+cch:0x10+offset+cch+0x02])[0]
                        self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headerParts.type: ' + str(hex(partstype)))
                        if partstype != PropertyType['VT_I4']:
                            self._raise_exception('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headerParts.type has an abnormal value.')
                        parts = struct.unpack('<I', self.Property[i][0x10+offset+cch+0x04:0x10+offset+cch+0x08])[0]
                        self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headerParts: ' + str(hex(parts)))
                        offset = offset + 0x10 + cch
                    elif strtype == PropertyType['VT_LPWSTR']:
                        value = self.Property[i][0x10+offset:0x10+offset+cch*2].decode('utf-16')
                        self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headingString: ' + value)
                        partstype = struct.unpack('<H', self.Property[i][0x10+offset+cch*2:0x10+offset+cch*2+0x02])[0]
                        self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headerParts.type: ' + str(hex(partstype)))
                        if partstype != PropertyType['VT_I4']:
                            self._raise_exception('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headerParts.type has an abnormal value.')
                        parts = struct.unpack('<I', self.Property[i][0x10+offset+cch*2+0x04:0x10+offset+cch*2+0x08])[0]
                        self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headerParts: ' + str(hex(parts)))
                        offset = offset + 0x10 + cch*2
                    else:
                        self._raise_exception('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headingString.type has an abnormal value.')
                continue
class DocSummaryInfo(OLEBase):
    """Parser for the \\x05DocumentSummaryInformation property-set stream.

    Validates the stream header (byte order, version, clsid, section count
    and format ids) and then parses the first section with
    DocSummaryInfoPropertySet. Abnormal fields abort via _raise_exception.
    Python 2 only: relies on str-bytes slicing, ord() on one-char strings
    and str.encode('hex').
    """

    # Stream-header fields; class-level defaults are reset per instance.
    byteOrder = 0
    version = 0
    sysId = 0
    OSMajorVersion = 0
    OSMinorVersion = 0
    OSType = 0
    applicationClsid = ''
    cSections = 0
    formatId1 = ''
    sectionOffset1 = 0
    formatId2 = ''
    sectionOffset2 = 0
    DocumentSummaryInfoPropertySet = None

    def __init__(self, data):
        """Parse the DocumentSummaryInformation stream held in *data* (bytes)."""
        self.byteOrder = 0
        self.version = 0
        self.sysId = 0
        self.OSMajorVersion = 0
        self.OSMinorVersion = 0
        self.OSType = 0
        self.applicationClsid = ''
        self.cSections = 0
        self.formatId1 = ''
        self.sectionOffset1 = 0
        self.formatId2 = ''
        self.sectionOffset2 = 0
        self.DocumentSummaryInfoPropertySet = None

        self.ole_logger.debug('######## DocumentSummaryInfo ########')

        # Byte-order marker must be the little-endian signature 0xFFFE.
        self.byteOrder = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('DocumentSummaryInfo.byteOrder: ' + str(hex(self.byteOrder)))
        if self.byteOrder != 0xFFFE:
            self._raise_exception('DocumentSummaryInfo.byteOrder has an abnormal value.')

        # Version of the property-set format; only 0 and 1 are defined.
        self.version = struct.unpack('<H', data[0x02:0x04])[0]
        self.ole_logger.debug('DocumentSummaryInfo.version: ' + str(hex(self.version)))
        if self.version != 0 and self.version != 1:
            self._raise_exception('DocumentSummaryInfo.version has an abnormal value.')

        # sysId packs OS major/minor version and OS type into 4 bytes.
        self.sysId = struct.unpack('<I', data[0x04:0x08])[0]
        self.OSMajorVersion = ord(data[0x04])
        self.ole_logger.debug('DocumentSummaryInfo.sysId.OSMajorVersion: ' + str(hex(self.OSMajorVersion)))
        self.OSMinorVersion = ord(data[0x05])
        self.ole_logger.debug('DocumentSummaryInfo.sysId.OSMinorVersion: ' + str(hex(self.OSMinorVersion)))
        self.OSType = struct.unpack('<H', data[0x06:0x08])[0]
        self.ole_logger.debug('DocumentSummaryInfo.sysId.OSType: ' + str(hex(self.OSType)))

        # The application clsid is reserved and must be all zero bytes.
        self.applicationClsid = data[0x08:0x18]
        self.ole_logger.debug('DocumentSummaryInfo.applicationClsid: ' + self.applicationClsid.encode('hex'))
        if self.applicationClsid != '\x00' * 0x10:
            self._raise_exception('DocumentSummaryInfo.applicationClsid has an abnormal value.')

        # One mandatory section plus an optional user-defined section.
        self.cSections = struct.unpack('<I', data[0x18:0x1C])[0]
        self.ole_logger.debug('DocumentSummaryInfo.cSections: ' + str(hex(self.cSections)))
        if self.cSections != 1 and self.cSections != 2:
            self._raise_exception('DocumentSummaryInfo.cSections has an abnormal value.')

        # First section must carry the well-known DocumentSummaryInformation FMTID.
        self.formatId1 = data[0x1C:0x2C]
        self.ole_logger.debug('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-1.formatId: ' + self.formatId1.encode('hex'))
        if self.formatId1 != '\x02\xD5\xCD\xD5\x9C\x2E\x1B\x10\x93\x97\x08\x00\x2B\x2C\xF9\xAE':
            self._raise_exception('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-1.formatId has an abnormal value.')
        self.sectionOffset1 = struct.unpack('<I', data[0x2C:0x30])[0]
        self.ole_logger.debug('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-1.sectionOffset: ' + str(hex(self.sectionOffset1)))

        # Optional second (user-defined properties) section.
        if self.cSections == 2:
            self.formatId2 = data[0x30:0x40]
            self.ole_logger.debug('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-2.formatId: ' + self.formatId2.encode('hex'))
            if self.formatId2 != '\x05\xD5\xCD\xD5\x9C\x2E\x1B\x10\x93\x97\x08\x00\x2B\x2C\xF9\xAE':
                self._raise_exception('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-2.formatId has an abnormal value.')
            self.sectionOffset2 = struct.unpack('<I', data[0x40:0x44])[0]
            self.ole_logger.debug('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-2.sectionOffset: ' + str(hex(self.sectionOffset2)))

        # Only the first (well-known) section is parsed.
        self.DocumentSummaryInfoPropertySet = DocSummaryInfoPropertySet(data[self.sectionOffset1:])
class SummaryInfoPropertySet(OLEBase):
    """Parser for the \\x05SummaryInformation property-set section.

    Reads the section header (Size, NumProperties) and the array of
    PropertyIdentifierAndOffset records, slices out each property's raw
    bytes, then logs the value of every PIDSI property it recognizes.
    Abnormal type fields or oversized strings abort via _raise_exception.

    The original code repeated the same parse-and-log sequence for every
    property id; the per-encoding logic now lives in small helpers.
    This also fixes a copy-paste bug where the PIDSI_EDITTIME branch
    logged its type under the label 'Property.PIDSI_APPNAME.type'.
    """

    Size = 0
    NumProperties = 0
    PropertyIdentifierAndOffset = list()
    Property = list()

    # Property identifiers grouped by how their values are encoded.
    _STRING_PIDS = (
        'PIDSI_TITLE', 'PIDSI_SUBJECT', 'PIDSI_AUTHOR', 'PIDSI_KEYWORDS',
        'PIDSI_COMMENTS', 'PIDSI_TEMPLATE', 'PIDSI_LASTAUTHOR',
        'PIDSI_REVNUMBER', 'PIDSI_APPNAME')
    _DATETIME_PIDS = ('PIDSI_LASTPRINTED', 'PIDSI_CREATE_DTM', 'PIDSI_LASTSAVE_DTM')
    _INT_PIDS = ('PIDSI_PAGECOUNT', 'PIDSI_WORDCOUNT', 'PIDSI_CHARCOUNT', 'PIDSI_DOC_SECURITY')

    def __init__(self, data):
        """Parse the SummaryInformation section held in *data* (bytes)."""
        self.Size = 0
        self.NumProperties = 0
        self.PropertyIdentifierAndOffset = list()
        self.Property = list()

        self.Size = struct.unpack('<I', data[0x00:0x04])[0]
        self.ole_logger.debug('SummaryInfoPropertySet.Size: ' + str(hex(self.Size)))
        self.NumProperties = struct.unpack('<I', data[0x04:0x08])[0]
        self.ole_logger.debug('SummaryInfoPropertySet.NumProperties: ' + str(hex(self.NumProperties)))

        # Each PropertyIdentifierAndOffset record is 8 bytes, starting at 0x08.
        for i in range(0, self.NumProperties):
            piao = PropertyIdentifierAndOffset(data[0x08+i*8:0x08+i*8+8])
            self.PropertyIdentifierAndOffset.append(piao)

        # Slice out each property's raw bytes: a property normally runs to
        # the next property's offset; the last one runs to the section end.
        for i in range(0, self.NumProperties):
            start = self.PropertyIdentifierAndOffset[i].Offset
            if (i+1) < self.NumProperties:
                end = self.PropertyIdentifierAndOffset[i+1].Offset
                if start >= end:
                    self.ole_logger.warning('SummaryInfoPropertySet.PropertyIdentifierAndOffset.Offset is not in increasing order.')
                    end = self.Size
            else:
                end = self.Size
            self.Property.append(data[start:end])

        # Map numeric property ids back to their PIDSI names for dispatch.
        known = self._STRING_PIDS + self._DATETIME_PIDS + self._INT_PIDS
        pid_names = dict((PIDSI[name], name) for name in known)

        for i in range(0, self.NumProperties):
            pid = self.PropertyIdentifierAndOffset[i].PropertyIdentifier
            prop = self.Property[i]
            if pid == PIDSI['PIDSI_CODEPAGE']:
                self._parse_codepage(prop)
            elif pid == PIDSI['PIDSI_EDITTIME']:
                # Edit time is a duration, not an absolute date: log it raw.
                self._parse_filetime(prop, 'PIDSI_EDITTIME', as_datetime=False)
            else:
                name = pid_names.get(pid)
                if name in self._STRING_PIDS:
                    self._parse_string(prop, name)
                elif name in self._DATETIME_PIDS:
                    self._parse_filetime(prop, name)
                elif name in self._INT_PIDS:
                    self._parse_int32(prop, name)
                # Unknown property ids are silently ignored, as before.

    def _parse_codepage(self, prop):
        """Parse and log the VT_I2 codepage property."""
        vtype = struct.unpack('<H', prop[0x00:0x02])[0]
        self.ole_logger.debug('Property.PIDSI_CODEPAGE.type: ' + str(hex(vtype)))
        if vtype != PropertyType['VT_I2']:
            self._raise_exception('Property.PIDSI_CODEPAGE has an abnormal value.')
        codepage = struct.unpack('<H', prop[0x04:0x06])[0]
        self.ole_logger.debug('Property.PIDSI_CODEPAGE: ' + str(hex(codepage)))

    def _parse_string(self, prop, name):
        """Parse and log a VT_LPSTR/VT_LPWSTR string property called *name*."""
        vtype = struct.unpack('<H', prop[0x00:0x02])[0]
        self.ole_logger.debug('Property.' + name + '.type: ' + str(hex(vtype)))
        cch = struct.unpack('<I', prop[0x04:0x08])[0]
        self.ole_logger.debug('Property.' + name + '.cch: ' + str(hex(cch)))
        if cch > 0x0000FFFF:
            self._raise_exception('Property.' + name + '.cch has an abnormal value.')
        if vtype == PropertyType['VT_LPSTR']:
            value = prop[0x08:0x08+cch]
        elif vtype == PropertyType['VT_LPWSTR']:
            # Wide strings store cch characters of UTF-16 (2 bytes each).
            value = prop[0x08:0x08+cch*2].decode('utf-16')
        else:
            self._raise_exception('Property.' + name + ' has an abnormal value.')
        self.ole_logger.debug('Property.' + name + ': ' + value)

    def _parse_filetime(self, prop, name, as_datetime=True):
        """Parse a VT_FILETIME property; log as a datetime or raw 64-bit hex."""
        vtype = struct.unpack('<H', prop[0x00:0x02])[0]
        self.ole_logger.debug('Property.' + name + '.type: ' + str(hex(vtype)))
        if vtype != PropertyType['VT_FILETIME']:
            self._raise_exception('Property.' + name + ' has an abnormal value.')
        ftime = struct.unpack('<Q', prop[0x04:0x0C])[0]
        if as_datetime:
            self.ole_logger.debug('Property.' + name + ': ' + self._filetime_to_datetime(ftime))
        else:
            self.ole_logger.debug('Property.' + name + ': ' + str(hex(ftime)))

    def _parse_int32(self, prop, name):
        """Parse and log a VT_I4 (32-bit integer) property called *name*."""
        vtype = struct.unpack('<H', prop[0x00:0x02])[0]
        self.ole_logger.debug('Property.' + name + '.type: ' + str(hex(vtype)))
        if vtype != PropertyType['VT_I4']:
            self._raise_exception('Property.' + name + ' has an abnormal value.')
        value = struct.unpack('<I', prop[0x04:0x08])[0]
        self.ole_logger.debug('Property.' + name + ': ' + str(hex(value)))
class SummaryInfo(OLEBase):
    """Parser for the \\x05SummaryInformation property-set stream.

    Validates the stream header and then parses the first section with
    SummaryInfoPropertySet. Fixes two copy-paste mistakes where error/log
    messages said 'DocumentSummaryInfo' inside this class.
    """

    # Stream-header fields; class-level defaults are reset per instance.
    byteOrder = 0
    version = 0
    sysId = 0
    OSMajorVersion = 0
    OSMinorVersion = 0
    OSType = 0
    applicationClsid = ''
    cSections = 0
    formatId1 = ''
    sectionOffset1 = 0
    formatId2 = ''
    sectionOffset2 = 0
    SummaryInfoPropertySet = None

    def __init__(self, data):
        """Parse the SummaryInformation stream held in *data* (bytes)."""
        self.byteOrder = 0
        self.version = 0
        self.sysId = 0
        self.OSMajorVersion = 0
        self.OSMinorVersion = 0
        self.OSType = 0
        self.applicationClsid = ''
        self.cSections = 0
        self.formatId1 = ''
        self.sectionOffset1 = 0
        self.formatId2 = ''
        self.sectionOffset2 = 0
        self.SummaryInfoPropertySet = None

        self.ole_logger.debug('######## SummaryInfo ########')

        # Byte-order marker must be the little-endian signature 0xFFFE.
        self.byteOrder = struct.unpack('<H', data[0x00:0x02])[0]
        self.ole_logger.debug('SummaryInfo.byteOrder: ' + str(hex(self.byteOrder)))
        if self.byteOrder != 0xFFFE:
            # BUGFIX: message previously said 'DocumentSummaryInfo.byteOrder'.
            self._raise_exception('SummaryInfo.byteOrder has an abnormal value.')

        self.version = struct.unpack('<H', data[0x02:0x04])[0]
        self.ole_logger.debug('SummaryInfo.version: ' + str(hex(self.version)))
        if self.version != 0 and self.version != 1:
            self._raise_exception('SummaryInfo.version has an abnormal value.')

        self.sysId = struct.unpack('<I', data[0x04:0x08])[0]
        self.ole_logger.debug('SummaryInfo.sysId: ' + str(hex(self.sysId)))

        # NOTE(review): stored on self.clsid although the class declares
        # applicationClsid -- kept as-is so existing callers keep working.
        self.clsid = data[0x08:0x18]
        self.ole_logger.debug('SummaryInfo.clsid: ' + self.clsid.encode('hex'))
        if self.clsid != '\x00' * 0x10:
            self._raise_exception('SummaryInfo.clsid has an abnormal value.')

        # One mandatory section plus an optional second section.
        self.cSections = struct.unpack('<I', data[0x18:0x1C])[0]
        self.ole_logger.debug('SummaryInfo.cSections: ' + str(hex(self.cSections)))
        if self.cSections != 1 and self.cSections != 2:
            self._raise_exception('SummaryInfo.cSections has an abnormal value.')

        # First section must carry the well-known SummaryInformation FMTID.
        self.formatId1 = data[0x1C:0x2C]
        self.ole_logger.debug('SummaryInfo.rgIdOffset.IdOffsetElement-1.formatId: ' + self.formatId1.encode('hex'))
        if self.formatId1 != '\xE0\x85\x9F\xF2\xF9\x4F\x68\x10\xAB\x91\x08\x00\x2B\x27\xB3\xD9':
            self._raise_exception('SummaryInfo.rgIdOffset.IdOffsetElement-1.formatId has an abnormal value.')
        self.sectionOffset1 = struct.unpack('<I', data[0x2C:0x30])[0]
        # BUGFIX: debug label previously said 'DocumentSummaryInfo'.
        self.ole_logger.debug('SummaryInfo.rgIdOffset.IdOffsetElement-1.sectionOffset: ' + str(hex(self.sectionOffset1)))

        # Optional second section.
        if self.cSections == 2:
            self.formatId2 = data[0x30:0x40]
            self.ole_logger.debug('SummaryInfo.rgIdOffset.IdOffsetElement-2.formatId: ' + self.formatId2.encode('hex'))
            if self.formatId2 != '\x05\xD5\xCD\xD5\x9C\x2E\x1B\x10\x93\x97\x08\x00\x2B\x2C\xF9\xAE':
                self._raise_exception('SummaryInfo.rgIdOffset.IdOffsetElement-2.formatId has an abnormal value.')
            self.sectionOffset2 = struct.unpack('<I', data[0x40:0x44])[0]
            self.ole_logger.debug('SummaryInfo.rgIdOffset.IdOffsetElement-2.sectionOffset: ' + str(hex(self.sectionOffset2)))

        # Only the first (well-known) section is parsed.
        self.SummaryInfoPropertySet = SummaryInfoPropertySet(data[self.sectionOffset1:])
class OLEFile(OLEBase):
    """Top-level parser for an OLE compound (structured storage) file.

    Loads the whole file into memory, parses the header, the DIFAT/FAT and
    MiniFAT chains and the directory, and exposes helpers to extract stream
    data by directory name or index.

    Fixes over the original: a bare `_raise_exception` call (NameError) in
    _init_fat_chain, a leaked file handle in __init__, a NameError in
    find_object_by_name when the directory is empty, integer division made
    explicit with `//`, and typos in warning messages.
    """

    file_data = None
    sector_size = 0
    mini_sector_size = 0
    OLEHeader = None
    DIFAT = list()
    FAT = list()
    MiniFAT = list()
    Directory = list()
    SummaryInfo = None
    DocumentSummaryInfo = None

    def __init__(self, filename):
        """Load and parse *filename*; aborts via _raise_exception on bad input."""
        self.file_data = None
        self.sector_size = 0
        self.mini_sector_size = 0
        self.OLEHeader = None
        self.DIFAT = list()
        self.FAT = list()
        self.MiniFAT = list()
        self.Directory = list()
        self.SummaryInfo = None
        self.DocumentSummaryInfo = None

        if os.path.isfile(filename):
            # Context manager closes the handle promptly (original leaked it).
            with open(filename, 'rb') as f:
                self.file_data = f.read()
            self.ole_logger.debug('Load file: ' + filename)

            self.OLEHeader = OLEHeader(self.file_data)

            # SectorShift is log2 of the sector size; only 512/4096 are valid.
            if self.OLEHeader.SectorShift == 0x09:
                self.sector_size = 512
            elif self.OLEHeader.SectorShift == 0x0C:
                self.sector_size = 4096
            else:
                self._raise_exception('Invalid Sector Size.')

            if self.OLEHeader.MiniSectorShift == 0x06:
                self.mini_sector_size = 64
            else:
                self._raise_exception('Invalid MiniSector Size.')

            self._init_fat_chain()
            if self.OLEHeader.NumberOfMiniFATSectors > 0:
                self._init_minifat_chain()
            self._init_dir_entry()

            # Parse the well-known property-set streams if present.
            for i in range(0, len(self.Directory)):
                if self.Directory[i].Name == '\x05SummaryInformation':
                    self.SummaryInfo = SummaryInfo(self.find_object_by_index(i))
                if self.Directory[i].Name == '\x05DocumentSummaryInformation':
                    self.DocumentSummaryInfo = DocSummaryInfo(self.find_object_by_index(i))
        else:
            self._raise_exception('Invalid file: ' + filename)

    def _init_fat_chain(self):
        """Build the DIFAT and FAT arrays from the header and DIFAT sectors."""
        self.DIFAT = list(self.OLEHeader.DIFAT)

        if self.OLEHeader.NumberOfDIFATSectors > 0:
            difat_sector_index = self.OLEHeader.FirstDIFATSector
            for i in range(0, self.OLEHeader.NumberOfDIFATSectors):
                # Sector 0 starts right after the 1-sector header.
                difat_sector_offset = (difat_sector_index+1) * self.sector_size
                self.ole_logger.debug('DIFAT sector #' + str(i) + ' at offset: ' + str(hex(difat_sector_offset)))
                # All entries but the last of a DIFAT sector hold FAT sector
                # numbers; the last entry chains to the next DIFAT sector.
                for j in range(0, self.sector_size // 4 - 1):
                    difat = struct.unpack('<I', self.file_data[difat_sector_offset+j*4:difat_sector_offset+j*4+4])[0]
                    if difat == 0xFFFFFFFF:
                        if i+1 == self.OLEHeader.NumberOfDIFATSectors:
                            break
                        else:
                            # BUGFIX: was a bare _raise_exception (NameError).
                            self._raise_exception('Encounter an invalid DIFAT value when parsing DIFAT chain.')
                    self.ole_logger.debug('DIFAT[' + str(len(self.DIFAT)) + ']: ' + str(hex(difat)))
                    self.DIFAT.append(difat)
                # NOTE(review): this reads the entry at the last loop index j,
                # not the sector's final entry at sector_size//4-1 -- looks
                # off by one; confirm against MS-CFB before changing.
                difat_sector_index = struct.unpack('<I', self.file_data[difat_sector_offset+j*4:difat_sector_offset+j*4+4])[0]

        if len(self.DIFAT) != self.OLEHeader.NumberOfFATSectors:
            self.ole_logger.warn('OLEHeader.NumberOfFATSectors does not match the number of the DIFAT entries.')

        # Read every FAT sector listed in the DIFAT into one flat FAT array.
        for i in range(0, self.OLEHeader.NumberOfFATSectors):
            fat_sector_index = self.DIFAT[i]
            fat_sector_offset = (fat_sector_index+1) * self.sector_size
            self.ole_logger.debug('FAT sector #' + str(i) + ' at offset: ' + str(hex(fat_sector_offset)))
            for j in range(0, self.sector_size // 4):
                fat = struct.unpack('<I', self.file_data[fat_sector_offset+j*4:fat_sector_offset+j*4+4])[0]
                self.FAT.append(fat)
                if fat == 0xFFFFFFFC:
                    self.ole_logger.debug('FAT[' + str(len(self.FAT)-1) + '] is a DIFAT sector')
                if fat == 0xFFFFFFFD:
                    self.ole_logger.debug('FAT[' + str(len(self.FAT)-1) + '] is a FAT sector')

    def _init_minifat_chain(self):
        """Walk the MiniFAT sector chain and build the flat MiniFAT array."""
        minifat_sector_index = self.OLEHeader.FirstMiniFATSector
        i = 0
        while i < self.OLEHeader.NumberOfMiniFATSectors:
            minifat_sector_offset = (minifat_sector_index+1) * self.sector_size
            self.ole_logger.debug('MiniFAT sector #' + str(i) + ' at offset: ' + str(hex(minifat_sector_offset)))
            for j in range(0, self.sector_size // 4):
                minifat = struct.unpack('<I', self.file_data[minifat_sector_offset+j*4:minifat_sector_offset+j*4+4])[0]
                self.MiniFAT.append(minifat)
            # The next MiniFAT sector is found through the regular FAT;
            # 0xFFFFFFFE is the end-of-chain marker.
            minifat_sector_index = self.FAT[minifat_sector_index]
            if minifat_sector_index == 0xFFFFFFFE:
                self.ole_logger.debug('MiniFAT sector chain ended.')
                break
            i += 1
        if (i+1) != self.OLEHeader.NumberOfMiniFATSectors:
            self.ole_logger.warn('self.OLEHeader.NumberOfMiniFATSectors does not match the length of the MiniFat sector chain.')

    def _init_dir_entry(self):
        """Walk the directory sector chain and parse each 128-byte entry."""
        dir_sector_index = self.OLEHeader.FirstDirecotrySector
        is_end = False
        while True:
            dir_sector_offset = (dir_sector_index+1) * self.sector_size
            for i in range(0, self.sector_size // 128):
                if (dir_sector_offset+i*128+128) > len(self.file_data):
                    self.ole_logger.warning('Directory sector offset larger than file size.')
                    is_end = True
                    break
                dir_data = self.file_data[dir_sector_offset+i*128:dir_sector_offset+i*128+128]
                # A zero name length (offset 0x40) marks an unused entry and
                # the end of the directory.
                if struct.unpack('<H', dir_data[0x40:0x42])[0] == 0:
                    is_end = True
                    break
                self.ole_logger.debug('[----- Directory #' + str(len(self.Directory)) + ' -----]')
                try:
                    directory = Directory(dir_data)
                    self.Directory.append(directory)
                except:
                    self.ole_logger.debug('Directory #' + str(len(self.Directory)) + ' contains abnormal structure.')
            dir_sector_index = self.FAT[dir_sector_index]
            if is_end or dir_sector_index == 0xFFFFFFFE:
                break

    def find_object_by_name(self, name):
        """Return the raw stream data of directory entry *name*.

        Returns the Directory object itself for non-stream entries, or None
        when the entry is missing, empty, or inconsistent.
        """
        data = ''
        dir_number = len(self.Directory)
        if dir_number == 0:
            # Guard: without it the size check below would hit an unbound
            # 'directory' variable (NameError) on an empty directory.
            self.ole_logger.debug('Could not find object: ' + name)
            return None
        for i in range(0, dir_number):
            directory = self.Directory[i]
            if name == directory.Name:
                # Only streams (0x02) and the root entry (0x05) carry data.
                if directory.ObjectType != 0x02 and directory.ObjectType != 0x05:
                    return directory
                sector_index = directory.StartingSector
                if sector_index == 0xFFFFFFFE:
                    self.ole_logger.debug('Object: ' + name + ' has no data.')
                    return None
                # Small streams live in the mini stream (rooted at Root Entry)
                # and are chained through the MiniFAT in 64-byte sectors.
                if directory.StreamSize < self.OLEHeader.MiniStreamCutoffSize and len(self.MiniFAT) > 0 and name != 'Root Entry':
                    ministream = self.find_object_by_name('Root Entry')
                    if len(ministream) > 0:
                        while sector_index != 0xFFFFFFFE:
                            sector_offset = sector_index * 0x40
                            data += ministream[sector_offset:sector_offset+0x40]
                            sector_index = self.MiniFAT[sector_index]
                    else:
                        self.ole_logger.debug('Mini Stream is null.')
                        return None
                else:
                    # Large streams are chained through the regular FAT.
                    while sector_index != 0xFFFFFFFE:
                        sector_offset = (sector_index+1) * self.sector_size
                        data += self.file_data[sector_offset:sector_offset+self.sector_size]
                        sector_index = self.FAT[sector_index]
                break
            if (i+1) == dir_number:
                self.ole_logger.debug('Could not find object: ' + name)
                return None
        if directory.StreamSize > len(data):
            self.ole_logger.warn('DirectoryEntry.StreamSize larger than real data size.')
            return None
        # Sector data is padded; trim to the declared stream size.
        return data[0: directory.StreamSize]

    def find_object_by_index(self, index):
        """Return the raw stream data of directory entry number *index*.

        Returns the Directory object itself for non-stream entries, or None
        when the index is out of range or the entry is empty/inconsistent.
        """
        data = ''
        if index < 0 or index >= len(self.Directory):
            self.ole_logger.warn('Index out of boundary.')
            return None
        directory = self.Directory[index]
        if directory.ObjectType != 0x02 and directory.ObjectType != 0x05:
            return directory
        sector_index = directory.StartingSector
        if sector_index == 0xFFFFFFFE:
            self.ole_logger.debug('Object #' + str(index) + ' has no data.')
            return None
        # Small streams live in the mini stream; large ones use the FAT.
        if directory.StreamSize < self.OLEHeader.MiniStreamCutoffSize and len(self.MiniFAT) > 0:
            ministream = self.find_object_by_name('Root Entry')
            if len(ministream) > 0:
                while sector_index != 0xFFFFFFFE:
                    sector_offset = sector_index * 0x40
                    data += ministream[sector_offset:sector_offset+0x40]
                    sector_index = self.MiniFAT[sector_index]
            else:
                self.ole_logger.debug('Mini Stream is null.')
                return None
        else:
            while sector_index != 0xFFFFFFFE:
                sector_offset = (sector_index+1) * self.sector_size
                data += self.file_data[sector_offset:sector_offset+self.sector_size]
                sector_index = self.FAT[sector_index]
        if directory.StreamSize > len(data):
            self.ole_logger.warn('DirectoryEntry.StreamSize larger than real data size.')
            return None
        # Sector data is padded; trim to the declared stream size.
        return data[0: directory.StreamSize]
if __name__ == '__main__':
    # Standalone run: only enables debug logging; parsing is driven
    # elsewhere (e.g. interactively or by importing this module).
    debug = True
    init_logging(debug)
| {
"repo_name": "z3r0zh0u/pyole",
"path": "pyole.py",
"copies": "1",
"size": "60571",
"license": "mit",
"hash": -3830707903068870700,
"line_mean": 51.1265060241,
"line_max": 166,
"alpha_frac": 0.592247115,
"autogenerated": false,
"ratio": 3.4854989066635977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4577746021663598,
"avg_score": null,
"num_lines": null
} |
# An O(log(n)) algorithm that finds an element in an increasingly
# sorted array of n integers that has been rotated an unknown number
# of times.
def _search_rotated(array, left, right, val):
if left > right:
return None
mid = left + ((right - left) // 2)
if array[mid] == val:
return mid
# If left half is in increasing order
elif array[left] < array[mid]:
if array[left] <= val and val < array[mid]:
right = mid - 1
else:
left = mid + 1
# Right half is in increasing order
elif array[left] > array[mid]:
if array[mid] < val and array[right] >= val:
left = mid + 1
else:
right = mid - 1
# Skip duplicate value
else:
left += 1
return _search_rotated(array, left, right, val)
def search_rotated(array, val):
    """Locate *val* in a rotated sorted array; return its index or None."""
    last = len(array) - 1
    return _search_rotated(array, 0, last, val)
if __name__ == "__main__":
    # Smoke tests: pivot in the middle, rotation by n-1, no rotation,
    # and the empty array.
    input_array = [15, 16, 19, 20, 25, 1, 3, 4, 5, 7, 10, 14]
    assert search_rotated(input_array, 5) == 8
    assert search_rotated(input_array, 2) is None
    input_array = [3, 4, 5, 7, 10, 14, 15, 16, 19, 20, 25, 1]
    assert search_rotated(input_array, 3) == 0
    assert search_rotated(input_array, 21) is None
    input_array = [1, 3, 4, 5, 7, 10, 14, 15, 16, 19, 20, 25]
    assert search_rotated(input_array, 5) == 3
    assert search_rotated(input_array, 22) is None
    assert search_rotated([], 5) is None
| {
"repo_name": "sookoor/PythonInterviewPrep",
"path": "search_rotated.py",
"copies": "1",
"size": "1479",
"license": "mit",
"hash": 424233974785261000,
"line_mean": 28,
"line_max": 68,
"alpha_frac": 0.5767410412,
"autogenerated": false,
"ratio": 3.279379157427938,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9302668226671065,
"avg_score": 0.010690394391374784,
"num_lines": 51
} |
"""a non-blocking, non-threaded non-multiprocessing circuits web server"""
import time #timestamping images
import datetime #showing human readable time on render page
import os
import rexviewer as r
import naali
try:
import circuits
except ImportError: #not running within the viewer, but testing outside it
import sys
sys.path.append('..')
from circuits.web import Controller, Server, Static
#for camera rotating
import PythonQt.QtGui
from PythonQt.QtGui import QQuaternion as Quat
from PythonQt.QtGui import QVector3D as Vec
import mathutils as mu
PORT = 28008  # TCP port the embedded circuits web server listens on

#__file__ doesn't work in embedded context, but os.getcwd() helps
# NOTE(review): assumes the viewer's working directory is the install
# root so pymodules/webserver/ resolves -- confirm.
OWNPATH = os.getcwd() + "/pymodules/webserver/"
SHOTPATH = OWNPATH + "screenshot/"  # screenshots are written and served from here
#print "WEB PATH for images:", PATH
class WebServer(Server):
    """this is the component that autoload registers to the viewer"""
    def __init__(self):
        # Listen on all interfaces at the configured port.
        Server.__init__(self, "0.0.0.0:%d" % PORT) #"localhost", PORT)
        # circuits composes components with '+': register the request
        # controller and a static handler rooted at the screenshot dir.
        self + WebController() + Static(docroot=SHOTPATH)
#the first version with relative controls to the cam
# %s is substituted with the screenshot image URL.
# BUGFIX: the 'move:' line previously had a broken tag '<br/' (missing '>').
relhtml = """\
<html>
<head>
<title>Naali web ui</title>
</head>
<body>
<h1>Naali</h1>
<form action="camcontrol" method="GET">
<p>rotate:<br/>
<input type="submit" name="rotate" value="10"/>
<input type="submit" name="rotate" value="-10"/>
</p>
<p>move:<br/>
<input type="submit" name="move" value="+1"/><br>
<input type="submit" name="move" value="-1"/>
</p>
</form>
<img src="%s"/>
</body>
</html>"""
#second version where webui gives absolute pos&ort for the cam, so each user has own on client side
# NOTE(review): read at import time; assumes webui.html exists in OWNPATH
# and leaves the file handle to the garbage collector -- confirm.
abshtml = open(OWNPATH + "webui.html").read()
def save_screenshot():
    """Render one frame without the in-world UI and save a timestamped PNG.

    Returns (baseurl, imgname) so callers can build the image URL.
    """
    rend = naali.renderer
    # Hide the UI so the shot shows only the scene, force a render,
    # then restore the normal view.
    rend.HideCurrentWorldView()
    rend.Render()
    imgname = "image-%s.png" % time.time()
    r.takeScreenshot(SHOTPATH, imgname)
    rend.ShowCurrentWorldView()
    baseurl = "/"
    #baseurl = "http://www.playsign.fi:28080/"
    return baseurl, imgname
class WebController(Controller):
    """HTTP request handlers for the in-viewer web UI.

    Method names map to URL paths (circuits Controller convention):
    /index, /hello, /camcontrol, /render, /renderimgurl, /renderimg, /render1.
    """

    def index(self):
        # Front page: static HTML shipped next to this module.
        return self.serve_file(OWNPATH + "naali.html")

    def hello(self):
        return "Hello World!"

    def camcontrol(self, rotate=None, move=None):
        """Rotate/move the camera relative to its current pose, then render."""
        cament = naali.getCamera()
        p = cament.placeable
        #print p.position, p.orientation
        if rotate is not None:
            # Rotate around the Y axis by the given number of degrees.
            ort = p.orientation
            rot = Quat.fromAxisAndAngle(Vec(0, 1, 0), float(rotate))
            ort *= rot
            p.orientation = ort
        if move is not None:
            # Translate along the X axis by the given amount.
            pos = p.position
            pos += Vec(float(move), 0, 0)
            p.position = pos
        baseurl, imgname = save_screenshot()
        imgurl = baseurl + imgname
        #return "%s, %s" % (p.position, p.orientation)
        return relhtml % imgurl

    def render(self, camposx=None, camposy=None, camposz=None, camang=None):
        """Set an absolute camera position/heading and return the abs-UI page."""
        #, camortx=None, camorty=None, camortz=None, camortw=None):
        cament = naali.getCamera()
        p = cament.placeable
        if camposx is not None:
            pos = Vec(*(float(v) for v in [camposx, camposy, camposz]))
            p.position = pos
        if camang is not None:
            ort = p.orientation
            # Base orientation then a Y-axis rotation by -camang degrees;
            # the constant quaternion presumably matches the client-side
            # UI's reference frame -- TODO confirm.
            start = Quat(0, 0, -0.707, -0.707)
            rot = Quat.fromAxisAndAngle(Vec(0, 1, 0), -float(camang))
            new = start * rot
            p.orientation = new
        #if camortx is not None:
        #    ort = Quat(*(float(v) for v in [camortw, camortx, camorty, camortz]))
        #    p.orientation = ort
        #return str(p.position), str(p.orientation) #self.render1()
        baseurl, imgname = save_screenshot()
        imgurl = baseurl + imgname
        pos = p.position
        ort = p.orientation
        #vec, ang = toAngleAxis(p.orientation)
        #print vec, ang
        # Convert the orientation to a 0..360 degree heading for the UI.
        euler = mu.quat_to_euler(ort)
        ang = euler[0]
        if ang < 0:
            ang = 360 + ang
        return abshtml % (imgurl,
                          ang,
                          pos.x(), pos.y(), pos.z()
                          #ort.scalar(), ort.x(), ort.y(), ort.z(),
                          )

    def _renderimgurl(self, camposx=None, camposy=None, camposz=None, camortx=None, camorty=None, camortz=None, camortw=None):
        """Shoot a screenshot from the given pose, then reset the camera.

        NOTE(review): resets the camera to origin/identity, not to the
        pose it had before the call -- confirm this is intended.
        """
        cament = naali.getCamera()
        p = cament.placeable
        orgpos = Vec(0, 0, 0)
        orgort = Quat(1, 0, 0, 0)
        if camposx is not None:
            pos = Vec(*(float(v) for v in [camposx, camposy, camposz]))
            p.position = pos
        if camortx is not None:
            ort = Quat(*(float(v) for v in [camortw, camortx, camorty, camortz]))
            p.orientation = ort
        baseurl, imgname = save_screenshot()
        p.position = orgpos
        p.orientation = orgort
        return baseurl, imgname

    def renderimgurl(self, camposx=None, camposy=None, camposz=None, camortx=None, camorty=None, camortz=None, camortw=None):
        # Returns only the image URL (for AJAX-style clients).
        baseurl, imgname = self._renderimgurl(camposx, camposy, camposz, camortx, camorty, camortz, camortw)
        return baseurl + imgname

    def renderimg(self, camposx=None, camposy=None, camposz=None, camortx=None, camorty=None, camortz=None, camortw=None):
        # Serves the screenshot bytes directly instead of a URL.
        _, imgname = self._renderimgurl(camposx, camposy, camposz, camortx, camorty, camortz, camortw)
        return self.serve_file(SHOTPATH + imgname)

    def render1(self, campos=None, camort=None):
        """Simple render page with a timestamp (parameters are unused)."""
        timestr = datetime.datetime.today().isoformat()
        baseurl, imgname = save_screenshot()
        imgurl = baseurl + imgname
        return """
        <h1>Realxtend Naali viewer</h1>
        <h2>at %s</h2>
        <img src="%s"/>
        """ % (timestr, imgurl)
#~ if __name__ == '__main__':
#~ while 1:
#~ print ".",
| {
"repo_name": "antont/tundra",
"path": "src/Application/PythonScriptModule/pymodules_old/webserver/webcontroller.py",
"copies": "1",
"size": "5782",
"license": "apache-2.0",
"hash": 2955411507951660000,
"line_mean": 29.9197860963,
"line_max": 126,
"alpha_frac": 0.5921826358,
"autogenerated": false,
"ratio": 3.2889647326507396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43811473684507396,
"avg_score": null,
"num_lines": null
} |
_A=None
from ._compat import filename_to_ui,get_text_stderr
from .utils import echo
def _join_param_hints(param_hint):
A=param_hint
if isinstance(A,(tuple,list)):return ' / '.join((repr(B)for B in A))
return A
class ClickException(Exception):
    """An exception that Click can handle and show to the user."""

    # Process exit code reported when this exception terminates the program.
    exit_code = 1

    def __init__(self, message):
        super().__init__(message)
        self.message = message

    def format_message(self):
        return self.message

    def __str__(self):
        return self.message

    def show(self, file=None):
        """Print ``Error: <message>`` to `file` (stderr by default)."""
        if file is None:
            file = get_text_stderr()
        echo(f"Error: {self.format_message()}", file=file)
class UsageError(ClickException):
    """An error signalling that the command line was used incorrectly."""

    exit_code = 2

    def __init__(self, message, ctx=None):
        ClickException.__init__(self, message)
        self.ctx = ctx
        self.cmd = self.ctx.command if self.ctx else None

    def show(self, file=None):
        """Print the usage line, a help hint, and the error message."""
        if file is None:
            file = get_text_stderr()
        color = None
        hint = ''
        if self.cmd is not None and self.cmd.get_help_option(self.ctx) is not None:
            hint = (f"Try '{self.ctx.command_path} "
                    f"{self.ctx.help_option_names[0]}' for help.\n")
        if self.ctx is not None:
            color = self.ctx.color
            echo(f"{self.ctx.get_usage()}\n{hint}", file=file, color=color)
        echo(f"Error: {self.format_message()}", file=file, color=color)
class BadParameter(UsageError):
    """A usage error for an invalid parameter value."""

    def __init__(self, message, ctx=None, param=None, param_hint=None):
        UsageError.__init__(self, message, ctx)
        self.param = param
        self.param_hint = param_hint

    def format_message(self):
        if self.param_hint is not None:
            hint = self.param_hint
        elif self.param is not None:
            hint = self.param.get_error_hint(self.ctx)
        else:
            # No way to name the offending parameter.
            return f"Invalid value: {self.message}"
        hint = _join_param_hints(hint)
        return f"Invalid value for {hint}: {self.message}"
class MissingParameter(BadParameter):
    """Raised when a required option or argument was not provided."""

    def __init__(self, message=None, ctx=None, param=None, param_hint=None,
                 param_type=None):
        BadParameter.__init__(self, message, ctx, param, param_hint)
        self.param_type = param_type

    def format_message(self):
        # Work out how to name the missing parameter.
        if self.param_hint is not None:
            hint = self.param_hint
        elif self.param is not None:
            hint = self.param.get_error_hint(self.ctx)
        else:
            hint = None
        hint = _join_param_hints(hint)

        param_type = self.param_type
        if param_type is None and self.param is not None:
            param_type = self.param.param_type_name

        message = self.message
        if self.param is not None:
            # The parameter type may supply extra explanation text.
            extra = self.param.type.get_missing_message(self.param)
            if extra:
                if message:
                    message += f". {extra}"
                else:
                    message = extra

        hint_str = f" {hint}" if hint else ''
        return (f"Missing {param_type}{hint_str}."
                f"{' ' if message else ''}{message or ''}")

    def __str__(self):
        if self.message is None:
            param_name = self.param.name if self.param else None
            return f"missing parameter: {param_name}"
        else:
            return self.message
class NoSuchOption(UsageError):
    """Raised when an unknown option is passed on the command line."""

    def __init__(self, option_name, message=None, possibilities=None, ctx=None):
        if message is None:
            message = f"no such option: {option_name}"
        UsageError.__init__(self, message, ctx)
        self.option_name = option_name
        self.possibilities = possibilities

    def format_message(self):
        parts = [self.message]
        if self.possibilities:
            if len(self.possibilities) == 1:
                parts.append(f"Did you mean {self.possibilities[0]}?")
            else:
                ordered = sorted(self.possibilities)
                parts.append(f"(Possible options: {', '.join(ordered)})")
        return ' '.join(parts)
class BadOptionUsage(UsageError):
    """Raised when an option is used incorrectly (e.g. a missing value)."""

    def __init__(self, option_name, message, ctx=None):
        UsageError.__init__(self, message, ctx)
        self.option_name = option_name
class BadArgumentUsage(UsageError):
    """Raised when an argument is used incorrectly."""

    def __init__(self, message, ctx=None):
        UsageError.__init__(self, message, ctx)
class FileError(ClickException):
    """Raised when a file cannot be opened."""

    def __init__(self, filename, hint=None):
        ui_filename = filename_to_ui(filename)
        if hint is None:
            hint = 'unknown error'
        ClickException.__init__(self, hint)
        self.ui_filename = ui_filename
        self.filename = filename

    def format_message(self):
        return f"Could not open file {self.ui_filename}: {self.message}"
class Abort(RuntimeError):
    """An internal signalling exception that aborts execution."""
    pass
class Exit(RuntimeError):
    """Signals that the application should exit with ``exit_code``."""

    __slots__ = ('exit_code',)

    def __init__(self, code=0):
        self.exit_code = code
"repo_name": "rochacbruno/dynaconf",
"path": "dynaconf/vendor/click/exceptions.py",
"copies": "1",
"size": "3297",
"license": "mit",
"hash": -259015208080167700,
"line_mean": 42.3947368421,
"line_max": 149,
"alpha_frac": 0.6803154383,
"autogenerated": false,
"ratio": 2.50531914893617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8178726434778969,
"avg_score": 0.10138163049144006,
"num_lines": 76
} |
anon = lambda a, c={'key':
555}, e=fff: None
anon : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
lambda : meta.lambda-function.python, source.python, storage.type.function.lambda.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
a : meta.function.lambda.parameters.python, meta.lambda-function.python, source.python, variable.parameter.function.language.python
, : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.separator.parameters.python, source.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
c : meta.function.lambda.parameters.python, meta.lambda-function.python, source.python, variable.parameter.function.language.python
= : keyword.operator.python, meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
{ : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.definition.dict.begin.python, source.python
' : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.definition.string.begin.python, source.python, string.quoted.single.python
key : meta.function.lambda.parameters.python, meta.lambda-function.python, source.python, string.quoted.single.python
' : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.definition.string.end.python, source.python, string.quoted.single.python
: : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.separator.dict.python, source.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
555 : constant.numeric.dec.python, meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
} : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.definition.dict.end.python, source.python
, : meta.function.lambda.parameters.python, meta.lambda-function.python, punctuation.separator.parameters.python, source.python
: meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
e : meta.function.lambda.parameters.python, meta.lambda-function.python, source.python, variable.parameter.function.language.python
= : keyword.operator.python, meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
fff : meta.function.lambda.parameters.python, meta.lambda-function.python, source.python
: : meta.lambda-function.python, punctuation.section.function.lambda.begin.python, source.python
: source.python
None : constant.language.python, source.python
| {
"repo_name": "MagicStack/MagicPython",
"path": "test/functions/lambda7.py",
"copies": "1",
"size": "3115",
"license": "mit",
"hash": 7420631232700205000,
"line_mean": 90.6176470588,
"line_max": 171,
"alpha_frac": 0.713964687,
"autogenerated": false,
"ratio": 4.14780292942743,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0240320938386873,
"num_lines": 34
} |
#An online JavaScript program with all function dependencies: http://jsfiddle.net/JPnAF/9/
#print([1,1,1,2,3][0:5])
#This thing is implemented!
def getArrayFromFunctionComment(theComment):
    """Strip the 'requires functions: ' prefix and return the list of
    comma-separated names that follows it."""
    prefix = "requires functions: "
    names_part = theComment[len(prefix):]
    return names_part.split(", ")
#print getArrayFromFunctionComment("requires functions: False")
#print getArrayFromFunctionComment("requires functions: foo, bar, baz, biff, buff")
"function name: getStringFromFileTwo"
"requires functions: getStringFromFile, gorp"
"is defined: False"
"description: Hi this is the description."
"function name: getStringFromFile"
"requires functions: gorp"
"is defined: False"
"description: Hi this izz the description."
"function name: gorp"
"requires functions: False"
"is defined: False"
"description: Hi this izz the description."
"an irrelevant comment"
"another irrelevant comment"
'''This is implemented!'''
def getStringFromFile(fileName):
    """Return the entire contents of fileName as a single string."""
    with open(fileName, "r") as handle:
        return handle.read()
'''This is implemented!'''
def getCommentArrayFromFile(fileName):
    """Return the double-quoted "comment" lines of fileName, with the
    surrounding quotes removed.

    A line counts as a comment when, after stripping whitespace, it both
    starts and ends with a double quote.

    Bug fixed: the original stripped each kept line but immediately
    overwrote the stripped value with the raw line, so trailing
    whitespace (e.g. '\r' on Windows files) survived into the result and
    indented comment lines were rejected.
    """
    theString = getStringFromFile(fileName)
    theArr = []
    for current in theString.split("\n"):
        stripped = current.strip()
        if len(stripped) > 1 and stripped[0] == '"' and stripped[-1] == '"':
            # Drop the enclosing quotes.
            theArr.append(stripped[1:-1])
    return theArr
def splitTheComment(theComment):
    """Split a 'key: value' comment on every ': ' separator."""
    separator = ": "
    return theComment.split(separator)
def getTwoDCommentArrayFromFile(theFile):
    """Parse the comment lines of theFile into a nested dict:
    {function_name: {"requires functions": [...],
                     "is defined": str, "description": str}}.
    """
    comments = getCommentArrayFromFile(theFile)
    table = {}
    currentFunctionName = ""
    for comment in comments:
        parts = splitTheComment(comment)
        key = parts[0]
        if key == "function name":
            # Start a new record; subsequent fields attach to it.
            currentFunctionName = parts[1]
            table[currentFunctionName] = {}
        elif key == "requires functions":
            table[currentFunctionName]["requires functions"] = \
                getArrayFromFunctionComment(comment)
        elif key in ("is defined", "description"):
            table[currentFunctionName][key] = parts[1]
    return table
#print(getCommentArrayFromFile("functionChecker.py"))
#print(getTwoDCommentArrayFromFile("functionChecker.py"))
# Module-level memo of functions already reported; shared across the
# recursive calls of printImplementableFunctions below.
alreadyCheckedFunctions = []
def printImplementableFunctions(theFile,functionToPrint):
    """Recursively print, depth-first, every undefined dependency of
    `functionToPrint` that can currently be implemented (all of its own
    requirements defined), and finally `functionToPrint` itself when it
    qualifies.  Metadata comes from the comment table of `theFile`.
    """
    global alreadyCheckedFunctions;
    theCommentArray = getTwoDCommentArrayFromFile(theFile)
    # Small accessors over the parsed comment table.  Note: "is defined"
    # values are the strings "True"/"False", not booleans.
    def requiresFunctions(theFunction):
        return theCommentArray[theFunction]["requires functions"]
    def isDefined(theFunction):
        return theCommentArray[theFunction]["is defined"]
    def theDescription(theFunction):
        return theCommentArray[theFunction]["description"]
    def canBeImplemented(theFunction):
        # Already defined -> nothing left to implement.
        if(isDefined(theFunction) == "True"):
            return False
        # The literal "False" in the requirements list means "no deps".
        for current in requiresFunctions(theFunction):
            if(current == "False"):
                return True
        # Otherwise implementable only when every dependency is defined.
        for current in requiresFunctions(theFunction):
            if(isDefined(current) == "False"):
                return False
        return True
    def printFunctionInfo(theFunction):
        print(theFunction)
        print(" requires functions: " + str(requiresFunctions(theFunction)))
        print(" is defined: " + isDefined(theFunction))
        print(" description: " + theDescription(theFunction))
        print(" can be implemented: " + str(canBeImplemented(theFunction)))
    # Depth-first: report undefined dependencies before this function.
    for current in requiresFunctions(functionToPrint):
        if current != "False":
            if(isDefined(current) == "False"):
                if(not current in alreadyCheckedFunctions):
                    printImplementableFunctions(theFile, current)
    if(canBeImplemented(functionToPrint) == True):
        printFunctionInfo(functionToPrint)
    if(not functionToPrint in alreadyCheckedFunctions):
        alreadyCheckedFunctions += [functionToPrint]
#printImplementableFunctions("functionChecker.py", "getStringFromFileTwo")
def functionChecker(thing1, thing2):
    """Convenience entry point: report the implementable functions for
    `thing2` using the comment metadata found in file `thing1`."""
    printImplementableFunctions(thing1, thing2)
| {
"repo_name": "jarble/EngScript",
"path": "libraries/functionChecker.py",
"copies": "2",
"size": "4234",
"license": "mit",
"hash": 8099253121670752000,
"line_mean": 30.362962963,
"line_max": 103,
"alpha_frac": 0.7522437411,
"autogenerated": false,
"ratio": 3.4962840627580514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9866616798987119,
"avg_score": 0.07638220097418663,
"num_lines": 135
} |
# An online retailer sells two products: widgets and gizmos. Each widget weighs 75
# grams. Each gizmo weighs 112 grams. Write a program that reads the number of
# widgets and the number of gizmos in an order from the user. Then your program
# should compute and display the total weight of the order.
import collections

# Per-item weights in grams.
GIZMO_WEIGHT = 112
WIDGET_WEIGHT = 75

# An order: how many widgets and gizmos the user put in the cart.
Cart = collections.namedtuple('Cart', ['widgets', 'gizmos'])


class WidgetsGizmos:
    """Reads an order (widgets + gizmos) and prints its total weight."""

    def main(self):
        """Prompt for an order and display its total weight in grams."""
        cart = self.user_input()
        weight = self.calculate_total_weight(cart)
        if weight is None:
            # Bad input (non-integer or negative counts): nothing to report.
            # Bug fixed: the original formatted None with '{:.3f}' here,
            # which raises TypeError.
            print('Unable to compute the total weight.')
        else:
            print('Total weight = {:.3f}'.format(weight))

    def user_input(self):
        """Return a Cart built from user input, or None on invalid input."""
        try:
            widgets = int(input('How many widgets have you added in the order? -> '))
            gizmos = int(input('How many gizmos have you added in the order? -> '))
            return Cart(widgets, gizmos)
        except ValueError:
            print('Invalid input.')
            return None

    def calculate_total_weight(self, cart):
        """Return the order weight in grams, or None for a missing cart or
        missing/negative counts.

        Raises:
            TypeError: when cart is neither None nor a Cart.
        """
        if cart is None:
            return None
        if type(cart) is not Cart:
            raise TypeError('input is not of the correct type')
        if cart.gizmos is not None and cart.widgets is not None:
            if (cart.gizmos >= 0) and (cart.widgets >= 0):
                return cart.gizmos * GIZMO_WEIGHT + cart.widgets * WIDGET_WEIGHT
        return None


if __name__ == '__main__':
    WidgetsGizmos().main()
| {
"repo_name": "devak23/python",
"path": "python_workbook/ch01/p08_widgets_and_gizmos.py",
"copies": "1",
"size": "1421",
"license": "mit",
"hash": -7046759879465662000,
"line_mean": 31.2954545455,
"line_max": 85,
"alpha_frac": 0.6256157635,
"autogenerated": false,
"ratio": 3.779255319148936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9899458690100791,
"avg_score": 0.001082478509628972,
"num_lines": 44
} |
# An O(n log n) comparison-based sorting algorithm.
# Most implementations produce a stable sort, which means that the
# implementation preserves the input order of equal elements
# in the sorted output.
# Mergesort is a divide and conquer algorithm
# Conceptually, a merge sort works as follows:
# 1) Divide the unsorted list into n sublists,
# each containing 1 element (a list of 1 element is considered sorted).
# 2) Repeatedly merge sublists to produce new sorted sublists until
# there is only 1 sublist remaining. This will be the sorted list.
# one of mergesort’s most attractive properties is
# that it guarantees to sort any array of N items in
# time proportional to N log N. Its prime
# disadvantage is that it uses extra space proportional to N.
def merge_sort(sequence):
    """Return a new list with the elements of `sequence` in ascending order.

    Classic top-down merge sort: O(n log n) comparisons, O(n) extra space,
    and stable (equal elements keep their input order).

    Bug fixed: a stray debug `print("hop")` ran on every recursive call,
    polluting stdout; commented-out debug prints removed as well.
    """
    if len(sequence) <= 1:
        # Zero or one element is already sorted.
        return sequence

    middle = len(sequence) // 2
    left_half = merge_sort(sequence[:middle])
    right_half = merge_sort(sequence[middle:])

    # Merge the two sorted halves.
    sorted_sequence = []
    left_index = 0
    right_index = 0
    while len(sorted_sequence) != len(sequence):
        if left_index < len(left_half) and right_index < len(right_half):
            # '<=' keeps equal elements in input order (stability).
            if left_half[left_index] <= right_half[right_index]:
                sorted_sequence.append(left_half[left_index])
                left_index += 1
            else:
                sorted_sequence.append(right_half[right_index])
                right_index += 1
        elif left_index == len(left_half):
            # Left half exhausted: drain the right half.
            sorted_sequence.append(right_half[right_index])
            right_index += 1
        else:
            # Right half exhausted: drain the left half.
            sorted_sequence.append(left_half[left_index])
            left_index += 1
    return sorted_sequence
def main():
    """Demo: sort a small sample list and print the result."""
    sample = [7, 2, 11, 15, 3, 0, 2]
    print(merge_sort(sample))


if __name__ == '__main__':
    main()
| {
"repo_name": "pepincho/Python101-and-Algo1-Courses",
"path": "Algo-1/week1/5-Sorting-Python/merge_sort.py",
"copies": "2",
"size": "2188",
"license": "mit",
"hash": 132190013410671970,
"line_mean": 30.2285714286,
"line_max": 106,
"alpha_frac": 0.6043000915,
"autogenerated": false,
"ratio": 3.743150684931507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5347450776431507,
"avg_score": null,
"num_lines": null
} |
# A non-reconfigurable simulation that always runs until overflow.
from binreconfiguration.simulator.nonreconfigurable import Overflow
from binreconfiguration.simulator.repeater import Repeater
# The strategy we use (FirstFit considers the bins always in the same
# order and selects the first one with enough room for a given item).
from binreconfiguration.strategy import FirstFit
# Other strategies include:
# from binreconfiguration.strategy import RandomFit
# from binreconfiguration.strategy import MinLoad
# from binreconfiguration.strategy import MaxLoad
# from binreconfiguration.strategy import MinCount
# from binreconfiguration.strategy import MaxCount
# from binreconfiguration.strategy import MinFreeSpace
# from binreconfiguration.strategy import MinFreeSpace
# from binreconfiguration.strategy import MinAverageItemSize
# from binreconfiguration.strategy import MaxAverageItemSize
# A SnapshotReporter is a convenient object for reporting the simulation.
from binreconfiguration.storageunit import SnapshotReporter
# Bin object
from binreconfiguration.item import Item
# Item generator
from binreconfiguration.itemgenerator import Uniform

# Vocabulary:
# - storage unit: a collection of bins of equal capacity
# - snapshot: a detailed picture of a storage unit; one is taken after each
#   added element since the storage unit is non-reconfigurable
# - metric 'size' = number of items, 'load' = size / capacity,
#   'count' = number of items in the bins

# Our item generator.
generator = Uniform()

# Overflow simulator: `number_of_bins` bins, each of capacity `capacity`.
number_of_bins = 2
capacity = 1.
simulator = Overflow(number_of_bins, capacity)

# Run the simulator until overflow using the FirstFit strategy.
# The simulator gracefully intercepts the overflow exception.
snapshots = simulator.run(FirstFit, generator)

# Raw data first: dump every snapshot (each one pictures all the bins).
for (index, snapshot) in enumerate(snapshots):
    print("{} {}\n\n".format(index + 1, str(snapshot)))

# A reporter gives easy access to the per-step statistics.
snapshot_reporter = SnapshotReporter(snapshots)

METRICS = ('size', 'load', 'count')


def _print_series_section(header, labels, series_for):
    """Print `header`, then each metric's per-step series.

    `labels` maps a metric name to the label line printed before its
    series; `series_for(metric)` returns one value per simulation step.
    Factored out of twelve near-identical copy-pasted loops.
    """
    print(header)
    for metric in METRICS:
        print(labels[metric])
        for step, value in enumerate(series_for(metric)):
            print("#{}: {}".format(step + 1, value))
    print("\n")


# Full report: size, load and count of every bin at each step.
_print_series_section("Full report",
                      {'size': "bin size:",
                       'load': "bin load:",
                       'count': "bin count"},
                      snapshot_reporter.values)

# Aggregated reports: average / min / max over the bins at each step.
for aggregate in ('average', 'min', 'max'):
    _print_series_section(aggregate.capitalize(),
                          {m: "{} {}".format(aggregate, m) for m in METRICS},
                          getattr(snapshot_reporter, aggregate))

# Report the last step of the simulation by indexing the full series.
print("Last configuration report")
for aggregate in ('average', 'min', 'max'):
    series_for = getattr(snapshot_reporter, aggregate)
    for metric in METRICS:
        print("{} {} = {}".format(aggregate, metric, series_for(metric)[-1]))
print("\n")

# The library also provides last_* shortcuts for the same values.
print("Last configuration report")
for aggregate in ('average', 'min', 'max'):
    last_value = getattr(snapshot_reporter, 'last_' + aggregate)
    for metric in METRICS:
        print("{} {} = {}".format(aggregate, metric, last_value(metric)))
print("\n")
| {
"repo_name": "vialette/binreconfiguration",
"path": "essai.py",
"copies": "2",
"size": "6079",
"license": "mit",
"hash": 2491312953769495600,
"line_mean": 39.7986577181,
"line_max": 94,
"alpha_frac": 0.7226517519,
"autogenerated": false,
"ratio": 3.657641395908544,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01059556112426355,
"num_lines": 149
} |
''' anonymise the data module.
functions:
master: selects the type of anonymisation to be performed on the data
'''
# import csv
import timeit
from . import k_anonymity
def master(start_dataframe, nums, kmin, save_to_file, anonym_file, logger):
    '''Run simple k-anonymity on a dataframe, adding randomly generated
    extra entries until every identifying combination occurs often enough.

    Arguments:
        start_dataframe: the dataframe to anonymise
        nums(list): column numbers considered an identifying combination
        kmin(str): the minimum number of entries desired
        save_to_file(bool): when True, write the result to anonym_file
        anonym_file(str): output file name (CSV, no index, no header)
        logger: custom logging object used for progress and timing

    Returns:
        the dataframe with added false entries
    '''
    started_at = timeit.default_timer()
    logger.info('running k-anonymity on columns : ' + str(nums)
                + ' with kmin : ' + str(kmin))
    logger.info('dataframe before anonymisation : '
                + str(start_dataframe.shape))
    anonymised = k_anonymity.simple_kanonymity(start_dataframe, nums, kmin,
                                               logger)
    logger.info('dataframe after anonymisation : ' + str(anonymised.shape))
    if save_to_file:
        anonymised.to_csv(anonym_file, index=False, header=False)
    finished_at = timeit.default_timer()
    logger.info(" Total anonymisation time is:"
                + str(finished_at - started_at))
    return anonymised
if __name__ == '__main__':
    # Bug fixed: the previous bare `master()` call always raised TypeError
    # because `master` takes six required arguments.  Fail with a clear
    # message instead.
    raise SystemExit('anonymise_the_data is a library module; '
                     'import it and call master(...) with arguments')
| {
"repo_name": "GeorgeManakanatas/PPDM",
"path": "data_anonym_methods/anonymise_the_data.py",
"copies": "1",
"size": "1793",
"license": "mit",
"hash": -7723579837792312000,
"line_mean": 35.5918367347,
"line_max": 82,
"alpha_frac": 0.648633575,
"autogenerated": false,
"ratio": 3.906318082788671,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5054951657788671,
"avg_score": null,
"num_lines": null
} |
"""Anonymize ip addresses
Revision ID: 414d69d7ee76
Revises: 58abc4899824
Create Date: 2020-01-27 20:54:54.304630
"""
# revision identifiers, used by Alembic.
revision = '414d69d7ee76'
down_revision = '58abc4899824'

import ipaddress

from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql

metadata = sa.MetaData()

# Minimal table definition: only the two UseLogs columns this migration
# reads and writes (the full model is not needed here).
use_logs = sa.Table('UseLogs', metadata,
sa.Column('id', sa.Integer()),
sa.Column('ip_address', sa.Unicode(100)),
)
def anonymize_ip_address(ip_address):
    """Anonymize a comma-separated list of IP addresses.

    For each parseable address the host-identifying low-order bytes are
    zeroed out: 1 byte (8 bits) for IPv4, 10 bytes (80 bits) for IPv6.
    Unparsable entries are silently dropped.  Falsy input ('' or None)
    is returned unchanged.

    Bugs fixed: the Python 2-only `unicode` builtin raised NameError on
    Python 3 (replaced by a `bytes` check that works on both); the bare
    `except:` is narrowed to ValueError, which is what ip_address raises
    for invalid text.
    """
    if not ip_address:
        return ip_address
    anonymized_addresses = []
    for candidate in ip_address.split(','):
        candidate = candidate.strip()
        # ipaddress.ip_address() requires text (unicode on py2, str on py3).
        if isinstance(candidate, bytes):
            candidate = candidate.decode()
        try:
            parsed = ipaddress.ip_address(candidate)
        except ValueError:
            # Not a valid address (e.g. a hostname): skip it.
            continue
        # Remove 80 bits or 8 bits, depending on the version.
        if parsed.version == 6:
            bytes_removed = 10
        elif parsed.version == 4:
            bytes_removed = 1
        else:
            raise Exception("IP version {} not supported: {}".format(parsed.version, candidate))
        masked_packed = parsed.packed[:-bytes_removed] + (b'\x00' * bytes_removed)
        anonymized_addresses.append(ipaddress.ip_address(masked_packed).compressed)
    return ', '.join(anonymized_addresses)
def upgrade():
    """Rewrite every UseLogs row with its anonymized ip_address."""
    connection = op.get_bind()
    rows = connection.execute(sql.select([use_logs.c.id, use_logs.c.ip_address]))
    for row in rows:
        row_id = row[use_logs.c.id]
        anonymized = anonymize_ip_address(row[use_logs.c.ip_address])
        op.execute(
            use_logs.update()
            .where(use_logs.c.id == row_id)
            .values(ip_address=anonymized))
def downgrade():
    # Irreversible migration: anonymization discards the original
    # addresses, so there is nothing to restore on downgrade.
    pass
| {
"repo_name": "go-lab/labmanager",
"path": "alembic/versions/414d69d7ee76_anonymize_ip_addresses.py",
"copies": "4",
"size": "2049",
"license": "bsd-2-clause",
"hash": -220701855457583200,
"line_mean": 30.0454545455,
"line_max": 120,
"alpha_frac": 0.6583699366,
"autogenerated": false,
"ratio": 3.4611486486486487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006976295030411231,
"num_lines": 66
} |
"""Anonymize MBDS messages in HL7 2.5 format
NB - MBDS (Minimum Biosurveillance Data Set) messages are mostly
scrubbed, this is almost certainly NOT adequate to protect a generic
HL7 2.5 message.
"""
import argparse
import hl7
import sys
from pheme.anonymize.alter import anon_term
from pheme.anonymize.field_map import anon_map
class MBDS_anon(object):
    """Anonymizer for one HL7 2.5 MBDS message.

    The message is parsed once in __init__ and anonymized in place by
    anonymize(), driven by the field map in `anon_map`.
    NOTE(review): uses the Python 2-only `unicode` builtin, so this class
    will not run unmodified on Python 3.
    """
    def __init__(self, msg):
        # Parse the raw message text into the hl7 library's structure.
        self.msg = hl7.parse(msg)

    def anonymize(self):
        """apply the anonymize map to the instance message

        returns an anonymized version of the message.
        """
        # preserve idempotence
        if hasattr(self, '_anonymized'):
            return str(unicode(self.msg))

        # apply all anon methods applicable to this message
        for hl7segment in self.msg:
            segment = str(hl7segment[0][0])  # MSH, PID, OBX, etc.
            if segment in anon_map:
                # anon_map[segment] maps element -> component -> anon method.
                for element in anon_map[segment].keys():  # i.e. EVN-2
                    for component in anon_map[segment][element].keys():
                        anon_method = anon_map[segment][element][component]
                        # adjust hl7 one versus zero index
                        try:
                            cur_val = hl7segment[element][component - 1]
                        except IndexError:
                            # said component not in the hl7segment
                            # safe to ignore and continue
                            continue
                        # Replace the component in place with its
                        # anonymized value.
                        hl7segment[element][component - 1] =\
                            anon_term(term=cur_val, func=anon_method)
        self._anonymized = True
        return str(unicode(self.msg))
def message_at_a_time(fileobj):
    """Generator yielding one complete HL/7 message at a time till exhausted.

    Messages are split just before each segment id + field separator
    ('MSH|^~\\&|', 'FHS|^~\\&|', ...).

    :param fileobj: open file-like object; its whole contents are read

    Bugs fixed: the trailing `raise StopIteration` executed on every
    exhaustion path and becomes a RuntimeError inside a generator under
    PEP 479 (Python 3.7+) -- a plain return ends a generator correctly.
    The separator literal is now a raw string ('\\&' is not a valid
    escape sequence).
    """
    field_sep = r'|^~\&|'
    segment_id_len = len('MSH')  # or 'FHS', 'BHS'...
    contents = fileobj.read()
    msg_start = 0
    # The field_sep is just beyond the message break: find it, then roll
    # back over the segment id to get the true message boundary.
    while msg_start < len(contents):
        start_search = msg_start + len(field_sep) + segment_id_len
        next_sep = contents.find(field_sep, start_search)
        if next_sep != -1:
            msg = contents[msg_start:next_sep - segment_id_len]
            msg_start = next_sep - segment_id_len
            yield msg
        else:
            # Fell off the end looking for the next sep: yield what's left.
            yield contents[msg_start:]
            break
def anonymize_file():
    """Entry point to convert an hl7 batch file to an anonymized version.

    Parameters are read from the command line; call with '-h' for options
    and documentation.

    Bugs fixed: `type=file` relies on the Python 2 `file` builtin
    (NameError on Python 3) -- argparse.FileType works on both; the
    argparse parser variable was shadowed by the MBDS_anon instance; the
    input/output files are now closed on all paths.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("file", type=argparse.FileType('r'),
                            help="the file to anonymize")
    arg_parser.add_argument("-o", "--output",
                            help="file for output, by default hits stdout")
    args = arg_parser.parse_args()
    if args.output:
        # NOTE(review): opened in binary mode but written str -- Python 2
        # behavior preserved; would need 'w' on Python 3.
        output = open(args.output, 'wb')
    else:
        output = sys.stdout
    try:
        for nextline in message_at_a_time(args.file):
            anonymizer = MBDS_anon(nextline.replace('\n', '\r'))
            output.write(anonymizer.anonymize())
            output.write('\r')
    finally:
        # Close what we opened (leave stdout alone).
        if args.output:
            output.close()
        args.file.close()
| {
"repo_name": "pbugni/pheme.anonymize",
"path": "pheme/anonymize/mbds_hl7.py",
"copies": "1",
"size": "3312",
"license": "bsd-3-clause",
"hash": -5713788442162762000,
"line_mean": 32.7959183673,
"line_max": 78,
"alpha_frac": 0.5818236715,
"autogenerated": false,
"ratio": 3.942857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5024680814357143,
"avg_score": null,
"num_lines": null
} |
# anonymize.py
"""Read a dicom file (or directory of files), partially "anonymize" it (them),
by replacing Person names, patient id, optionally remove curves
and private tags, and write result to a new file (directory)
This is an example only; use only as a starting point.
"""
# Carlo Mancini script made starting from:
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, relased under an MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
# Use at your own risk!!
# Many more items need to be addressed for proper de-identifying DICOM data.
# In particular, note that pixel data could have confidential data "burned in"
# Annex E of PS3.15-2011 DICOM standard document details what must be done to
# fully de-identify DICOM data
from __future__ import print_function
usage = """
Usage:
python anonymize.py dicomfile.dcm outputfile.dcm
OR
python anonymize.py originals_directory anonymized_directory
Note: Use at your own risk. Does not fully de-identify the DICOM data as per
the DICOM standard, e.g in Annex E of PS3.15-2011.
"""
import os
import os.path
try:
import dicom
from dicom.errors import InvalidDicomError
except ImportError:
import pydicom as dicom
from pydicom.errors import InvalidDicomError
def anonymizefile(filename, output_filename, new_person_name="AUTO",
                  new_patient_id="id", remove_curves=True, remove_private_tags=True):
    """Replace data element values to partly anonymize a DICOM file.

    Note: completely anonymizing a DICOM file is very complicated; there
    are many things this example code does not address. USE AT YOUR OWN RISK.

    Bug fixed: the "AUTO" pseudonym was built by indexing single characters
    (oldname[0][0] + oldname[0][1] + ...), which raised IndexError for
    patient names without a '^' separator or with components shorter than
    two characters; slicing produces the same result for normal names and
    degrades gracefully otherwise.
    """
    # Call-back functions for dataset.walk().
    def PN_callback(ds, data_element):
        """Blank every person-name element (VR 'PN')."""
        if data_element.VR == "PN":
            data_element.value = new_person_name

    def curves_callback(ds, data_element):
        """Delete curve data elements (group 0x50xx)."""
        if data_element.tag.group & 0xFF00 == 0x5000:
            del ds[data_element.tag]

    # Load the current dicom file to 'anonymize'.
    dataset = dicom.read_file(filename)

    # "AUTO" derives a pseudonym from the first two letters of the family
    # and given name components.
    oldname = dataset.PatientsName.split('^')
    if new_person_name == "AUTO":
        new_person_name = oldname[0][:2] + (oldname[1][:2] if len(oldname) > 1 else '')
        print("new person name:", new_person_name)

    # Remove patient name and any other person names.
    dataset.walk(PN_callback)
    # Overwrite the patient ID.
    dataset.PatientID = new_patient_id

    # Remove data elements (should only do so if DICOM type 3, optional).
    # General loop so more names are easy to add later.
    for name in ['OtherPatientIDs', 'OtherPatientIDsSequence']:
        if name in dataset:
            delattr(dataset, name)

    # Blank (not remove) elements that are DICOM type 2.
    for name in ['PatientBirthDate']:
        if name in dataset:
            dataset.data_element(name).value = ''

    # Remove private tags / curves if the function arguments say to do so.
    if remove_private_tags:
        dataset.remove_private_tags()
    if remove_curves:
        dataset.walk(curves_callback)

    # Write the 'anonymized' DICOM out under the new filename.
    dataset.save_as(output_filename)
# Can run as a script:
# if __name__ == "__main__":
# import sys
# if len(sys.argv) != 3:
# print(usage)
# sys.exit()
# arg1, arg2 = sys.argv[1:]
def anonymize(inp, out, new_person_name="AUTO", verbose=False):
    """Anonymize one DICOM file or every file inside a directory.

    When `inp` is a directory, `out` names a directory (created if missing)
    and every regular file except DICOMDIR is processed; otherwise the two
    arguments are input/output file paths.
    """
    if not os.path.isdir(inp):
        # Single-file mode: both arguments are plain file names.
        anonymizefile(inp, out, new_person_name)
        print()
        return

    # Directory mode: make sure the destination is a usable directory.
    if os.path.exists(out):
        if not os.path.isdir(out):
            raise IOError("Input is directory; output name exists but is not a directory")
    else:
        os.makedirs(out)

    for entry in os.listdir(inp):
        if entry == "DICOMDIR":
            continue
        src = os.path.join(inp, entry)
        if os.path.isdir(src):
            continue  # only process regular files at the top level
        dst = os.path.join(out, entry)
        print(entry + "...", end='')
        try:
            if verbose:
                print("anonymize", src, dst, new_person_name)
            anonymizefile(src, dst, new_person_name)
        except InvalidDicomError:
            print("Not a valid dicom file, may need force=True on read_file\r")
        else:
            print("done\r")
| {
"repo_name": "carlomt/dicom_tools",
"path": "dicom_tools/anonymize.py",
"copies": "1",
"size": "5068",
"license": "mit",
"hash": 3369397908102993400,
"line_mean": 37.1052631579,
"line_max": 94,
"alpha_frac": 0.6387134964,
"autogenerated": false,
"ratio": 3.7596439169139466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9867417173426496,
"avg_score": 0.006188047977490304,
"num_lines": 133
} |
"""Anonymize static database data from YAML formatted file
"""
import argparse
import sys
import yaml
from pheme.anonymize.alter import anon_term
from pheme.anonymize.field_map import anon_map
from pheme.longitudinal.static_data import SUPPORTED_DAOS
from pheme.longitudinal.static_data import obj_repr, obj_loader
import pheme.longitudinal.tables as tables
def facility_anon(self):
    """Replace identifying Facility fields with anonymous stand-ins, in place."""
    from .field_map import (short_string, ten_digits_starting_w_1,
                            five_digits, site_string)
    from .alter import fixed_length_string

    self.county = anon_term(term=self.county, func=short_string)
    anon_npi = anon_term(term=self.npi, func=ten_digits_starting_w_1)
    self.npi = int(anon_npi)
    self.zip = str(five_digits(self.zip))  # ignore cached!!
    self.organization_name = anon_term(term=self.organization_name,
                                       func=site_string)
    three_char_string = fixed_length_string(3)
    self.local_code = anon_term(term=self.local_code, func=three_char_string)
def region_anon(self):
    """Replace identifying ReportableRegion fields with anonymous stand-ins."""
    from .field_map import ten_digits_starting_w_1
    from .alter import fixed_length_string

    four_char_string = fixed_length_string(4)
    self.region_name = anon_term(term=self.region_name, func=four_char_string)
    anon_fk = anon_term(term=self.dim_facility_pk,
                        func=ten_digits_starting_w_1)
    self.dim_facility_pk = int(anon_fk)
# Register a YAML representer and an `anonymize` method on every supported
# DAO class.  Facility and ReportableRegion carry identifying data and get
# real scrubbers; all other DAO types get a no-op.
# NOTE: the loop variable `type` shadows the builtin; kept for backward
# compatibility with the rest of the module.
for type in SUPPORTED_DAOS:
    klass = getattr(tables, type)
    yaml.add_representer(klass, obj_repr)
    if type == 'Facility':
        klass.anonymize = facility_anon
    elif type == 'ReportableRegion':
        klass.anonymize = region_anon
    else:
        # BUGFIX: `lambda(self): None` is Python-2-only tuple-parameter
        # syntax (a SyntaxError on Python 3); the unparenthesized form
        # behaves identically on both interpreters.
        klass.anonymize = lambda self: None
def anonymize_file():
    """Entry point to convert yaml db file to anon version

    parameters are read from the command line. call with '-h' for
    options and documentation
    """
    parser = argparse.ArgumentParser()
    # BUGFIX: `type=file` relied on the Python-2-only `file` builtin;
    # argparse.FileType('r') likewise opens the named file for reading and
    # works on both Python 2 and 3.
    parser.add_argument("file", type=argparse.FileType('r'),
                        help="the file to anonymize")
    parser.add_argument("-o", "--output",
                        help="file for output, by default hits stdout")
    args = parser.parse_args()
    if args.output:
        # BUGFIX: yaml.dump returns text; writing it to a handle opened in
        # binary mode ('wb') raises TypeError on Python 3.
        output = open(args.output, 'w')
    else:
        output = sys.stdout
    yaml.add_constructor(u'!DAO', obj_loader)
    # SECURITY NOTE: yaml.load with the default Loader constructs arbitrary
    # Python objects (the !DAO constructor depends on this) -- only feed it
    # trusted, locally generated YAML.
    objects = yaml.load(args.file.read())
    for obj in objects:
        obj.anonymize()
    output.write(yaml.dump(objects, default_flow_style=False))
    if args.output:
        output.close()
| {
"repo_name": "pbugni/pheme.anonymize",
"path": "pheme/anonymize/db_static_data.py",
"copies": "1",
"size": "2468",
"license": "bsd-3-clause",
"hash": -4979112238857900000,
"line_mean": 32.3513513514,
"line_max": 74,
"alpha_frac": 0.6507293355,
"autogenerated": false,
"ratio": 3.5924308588064044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47431601943064045,
"avg_score": null,
"num_lines": null
} |
# An OOP approach to representing and manipulating matrices
class Matrix:
    """
    Matrix object generated from a 2D array where each element is an array representing
    a row.
    Rows can contain type int or float.
    Common operations and information available.
    >>> rows = [
    ...     [1, 2, 3],
    ...     [4, 5, 6],
    ...     [7, 8, 9]
    ... ]
    >>> matrix = Matrix(rows)
    >>> print(matrix)
    [[1. 2. 3.]
     [4. 5. 6.]
     [7. 8. 9.]]
    Matrix rows and columns are available as 2D arrays
    >>> print(matrix.rows)
    [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    >>> print(matrix.columns())
    [[1, 4, 7], [2, 5, 8], [3, 6, 9]]
    Order is returned as a tuple
    >>> matrix.order
    (3, 3)
    Squareness and invertability are represented as bool
    >>> matrix.is_square
    True
    >>> matrix.is_invertable()
    False
    Identity, Minors, Cofactors and Adjugate are returned as Matrices. Inverse can be
    a Matrix or Nonetype
    >>> print(matrix.identity())
    [[1. 0. 0.]
     [0. 1. 0.]
     [0. 0. 1.]]
    >>> print(matrix.minors())
    [[-3. -6. -3.]
     [-6. -12. -6.]
     [-3. -6. -3.]]
    >>> print(matrix.cofactors())
    [[-3. 6. -3.]
     [6. -12. 6.]
     [-3. 6. -3.]]
    >>> # won't be apparent due to the nature of the cofactor matrix
    >>> print(matrix.adjugate())
    [[-3. 6. -3.]
     [6. -12. 6.]
     [-3. 6. -3.]]
    >>> print(matrix.inverse())
    None
    Determinant is an int, float, or Nonetype
    >>> matrix.determinant()
    0
    Negation, scalar multiplication, addition, subtraction, multiplication and
    exponentiation are available and all return a Matrix
    >>> print(-matrix)
    [[-1. -2. -3.]
     [-4. -5. -6.]
     [-7. -8. -9.]]
    >>> matrix2 = matrix * 3
    >>> print(matrix2)
    [[3. 6. 9.]
     [12. 15. 18.]
     [21. 24. 27.]]
    >>> print(matrix + matrix2)
    [[4. 8. 12.]
     [16. 20. 24.]
     [28. 32. 36.]]
    >>> print(matrix - matrix2)
    [[-2. -4. -6.]
     [-8. -10. -12.]
     [-14. -16. -18.]]
    >>> print(matrix ** 3)
    [[468. 576. 684.]
     [1062. 1305. 1548.]
     [1656. 2034. 2412.]]
    Matrices can also be modified
    >>> matrix.add_row([10, 11, 12])
    >>> print(matrix)
    [[1. 2. 3.]
     [4. 5. 6.]
     [7. 8. 9.]
     [10. 11. 12.]]
    >>> matrix2.add_column([8, 16, 32])
    >>> print(matrix2)
    [[3. 6. 9. 8.]
     [12. 15. 18. 16.]
     [21. 24. 27. 32.]]
    >>> print(matrix * matrix2)
    [[90. 108. 126. 136.]
     [198. 243. 288. 304.]
     [306. 378. 450. 472.]
     [414. 513. 612. 640.]]
    """

    def __init__(self, rows):
        """Validate and store the 2D array of rows; [] builds an empty matrix."""
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self):
        """Return the matrix transposed, as a list of column lists."""
        # BUGFIX: guard the empty matrix (previously raised IndexError).
        if not self.rows:
            return []
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        # BUGFIX: guard the empty matrix so order/determinant work on Matrix([]).
        return len(self.rows[0]) if self.rows else 0

    @property
    def order(self):
        """(rows, columns) tuple."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Identity matrix of the same order (meaningful for square matrices)."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        """Determinant by Laplace expansion along the first row; None if not square."""
        if not self.is_square:
            return None
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return self.rows[0][0]
        if self.order == (2, 2):
            return (self.rows[0][0] * self.rows[1][1]) - (
                self.rows[0][1] * self.rows[1][0]
            )
        # PERF: use get_cofactor per entry instead of rebuilding the full
        # cofactor matrix for every summand (same values, far less work).
        return sum(
            self.rows[0][column] * self.get_cofactor(0, column)
            for column in range(self.num_columns)
        )

    def is_invertable(self):
        """True when the determinant exists and is non-zero."""
        return bool(self.determinant())

    def get_minor(self, row, column):
        """Determinant of the submatrix with `row` and `column` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        """Signed minor: (-1)**(row+column) * minor."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        """Matrix of minors."""
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        """Matrix of cofactors (minors with checkerboard signs)."""
        # PERF: compute the minors matrix once; it used to be recomputed
        # for every single element.
        minors = self.minors()
        return Matrix(
            [
                [
                    minors.rows[row][column]
                    if (row + column) % 2 == 0
                    else minors.rows[row][column] * -1
                    for column in range(minors.num_columns)
                ]
                for row in range(minors.num_rows)
            ]
        )

    def adjugate(self):
        """Transpose of the cofactor matrix."""
        # PERF: compute the cofactor matrix once, not once per element.
        cofactors = self.cofactors()
        values = [
            [cofactors.rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        """Inverse matrix, or None when the matrix is singular/non-square."""
        determinant = self.determinant()
        return None if not determinant else self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        # BUGFIX: the old single-row special case joined ints/floats directly
        # (TypeError) and omitted the trailing dots; the general format below
        # renders one row correctly and consistently.
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row, position=None):
        """Append `row`, or insert it at `position` when given."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        """Append `column`, or insert it at `position` when given."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other):
        if not isinstance(other, Matrix):
            raise TypeError("A Matrix can only be compared with another Matrix")
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        """Scalar multiplication (int/float) or matrix multiplication (Matrix)."""
        if isinstance(other, (int, float)):
            return Matrix([[element * other for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other):
        """Integer powers; 0 gives the identity, negatives use the inverse."""
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            # BUGFIX: the old code tested the bare method object
            # `self.is_invertable` (always truthy), so singular matrices fell
            # through to `None ** n` and crashed with TypeError instead of
            # raising the intended ValueError.
            inverse = self.inverse()
            if inverse is None:
                raise ValueError(
                    "Only invertable matrices can be raised to a negative power"
                )
            return inverse ** (-other)
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        """Sum of pairwise products of two equal-length sequences."""
        return sum(row[i] * column[i] for i in range(len(row)))
# Run the class docstring's doctest examples when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| {
"repo_name": "TheAlgorithms/Python",
"path": "matrix/matrix_class.py",
"copies": "1",
"size": "10980",
"license": "mit",
"hash": 2074273543741627000,
"line_mean": 28.6703910615,
"line_max": 87,
"alpha_frac": 0.4786885246,
"autogenerated": false,
"ratio": 3.946800862688713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4925489387288713,
"avg_score": null,
"num_lines": null
} |
##An OO python client for the frame_protocol_firmware.ino firmware.
##Supports five operations:
##   setColor-- Sets all LEDs to a single color.
##   setColorAt-- Sets the LED at a given index to a color.
##   setColors-- Pass an array of color values to set all LEDs individually.
##   setBrightness-- Set the strip's brightness. (Blinkytape supports 0 through 93)
##   reset-- Turn all the LEDs off and flush the tape-side serial buffer
##Requires the third-party module "pySerial." Project page: http://pyserial.sourceforge.net/
##   The easiest way to get it is from PyPI via pip: "pip install pyserial"
import serial
## Numeric identifiers for the commands our control protocol knows.
CODE_SET_COLORS = 0x01      # full frame: one RGB triplet per LED
CODE_SET_BRIGHTNESS = 0x02  # global brightness (tape accepts 0 through 93)
CODE_SET_COLOR = 0x03       # every LED set to a single color
CODE_SET_COLOR_AT = 0x04    # one LED, addressed by index
CODE_RESET = 0x00           # LEDs off; flush the tape-side serial buffer
BAUD_RATE = 115200          # speed used when opening the serial port
class BlinkyTape:
    """Client for a BlinkyTape running frame_protocol_firmware.ino.

    Commands are written over a pySerial connection as lists of byte
    values; pySerial converts sequences of ints to bytes internally.
    """

    LED_COUNT = 60  # number of LEDs on the strip

    def __init__(self, serialURL):
        """Open the tape at `serialURL` (e.g. "COM6" or "/dev/ttyUSB0")."""
        self.serial = serial.serial_for_url(serialURL, BAUD_RATE)

    def __del__(self):
        # BUGFIX: if __init__ raised before assigning self.serial, the old
        # `if self.serial` itself raised AttributeError during teardown.
        port = getattr(self, "serial", None)
        if port:
            port.close()

    def setColor(self, color):
        """Set all LEDs of the strip to a single color.

        `color` is an RGB or any value an RGB can be constructed from.
        """
        if type(color) is not RGB:
            color = RGB(color)
        self.serial.write([CODE_SET_COLOR] + color.getByteList())

    def setColorAt(self, index, color):
        """Set the LED at `index` to a given color (RGB or RGB-constructible).

        Raises IndexError (a subclass of Exception, so existing
        `except Exception` callers still work) for an out-of-range index.
        """
        if index < 0 or index >= self.LED_COUNT:
            raise IndexError("Index out of bounds.")
        if type(color) is not RGB:
            color = RGB(color)
        self.serial.write([CODE_SET_COLOR_AT, index] + color.getByteList())

    def setColors(self, leds):
        """Set every LED individually from a list of exactly LED_COUNT colors.

        Raises ValueError when the list has the wrong length.
        """
        if len(leds) != self.LED_COUNT:
            raise ValueError("Wrong number of values passed.")
        self.serial.write([CODE_SET_COLORS])
        for value in leds:
            if type(value) is not RGB:
                value = RGB(value)
            self.serial.write(value.getByteList())

    def setBrightness(self, brightness):
        """Set the brightness level of your BlinkyTape (0 through 93).

        Note: Blinkytape gets very bright! Something more like 10 is
        recommended under dim lighting.
        """
        if brightness < 0 or brightness > 93:
            raise ValueError("Brightness out of allowable range.")
        self.serial.write([CODE_SET_BRIGHTNESS, brightness])

    def reset(self):
        """Turn all LEDs off and flush the tape-side serial read buffer."""
        self.serial.write([CODE_RESET])

    def close(self):
        """Reset the tape, then close the serial port."""
        if self.serial:
            self.reset()
            self.serial.close()
class RGB:
    """A 24-bit RGB color value.

    Constructible from a 3-tuple, a 3-element list, a single 0xRRGGBB
    integer, or three separate R, G, B arguments.
    """

    def __init__(self, *args):
        if len(args) == 3:
            (self.R, self.G, self.B) = args
            return
        if len(args) == 1:
            value = args[0]
            # isinstance replaces the old `type(...) is` checks; bool is
            # explicitly excluded to preserve the original int-only behavior.
            if isinstance(value, (tuple, list)) and len(value) == 3:
                (self.R, self.G, self.B) = value
                return
            if isinstance(value, int) and not isinstance(value, bool):
                # Big-endian unpack of a 0xRRGGBB color code.
                (self.R, self.G, self.B) = value.to_bytes(3, "big")
                return
        # ValueError subclasses Exception, so callers catching the old
        # generic Exception are unaffected.
        raise ValueError("Couldn't decode the provided color value (if any).")

    def getColorCode(self):
        """Return a single integer color code, read as a hex triplet: 0xRRGGBB."""
        return self.R * 0x10000 + self.G * 0x100 + self.B

    def getByteTuple(self):
        """Return a tuple of independent color components, (R, G, B)."""
        return (self.R, self.G, self.B)

    def getByteList(self):
        """Return a list of independent color components, [R, G, B]."""
        return [self.R, self.G, self.B]

    def __str__(self):
        return hex(self.getColorCode())

    def __repr__(self):
        return self.__str__()
| {
"repo_name": "Nentuaby/blinkytape-advanced-protocol",
"path": "frame_protocol_client/BlinkyTape_3.py",
"copies": "1",
"size": "4123",
"license": "mit",
"hash": -1197683900212557800,
"line_mean": 37.5327102804,
"line_max": 110,
"alpha_frac": 0.6228474412,
"autogenerated": false,
"ratio": 3.5329905741216794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46558380153216794,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.