text stringlengths 0 1.05M | meta dict |
|---|---|
A simple doctest involves pasting the results of actually using a
function into a string at the beginning of the function. Doctest then
checks to make sure that the usage examples work, including
errors. Note -- doctest ignores traceback information, just include
the first line of the traceback and the actual error. In this case,
create a file called example1.py and put into it the following add
function. Simply running example1.py is all that is necessary to go
through the tests. If you run example1.py -v, you'll get verbose output.
def add(a,b):
    """
    >>> import example1
    >>> example1.add(1,2)
    3
    >>> example1.add([1],[2])
    [1, 2]
    >>> example1.add([1],2)
    Traceback (most recent call last):
    TypeError: can only concatenate list (not "int") to list
    """
    # Works for any pair of operands supporting '+': numbers add, lists
    # concatenate.  The docstring above is executable documentation --
    # doctest runs each >>> example and compares output; for the expected
    # traceback only the header line and the final error line must match.
    return a+b
if __name__ == "__main__":
    # Parenthesized print with a single argument behaves identically on
    # Python 2 and 3, making the recipe portable (the original bare
    # 'print' statement is a syntax error on Python 3).
    print('**running standard doctest')
    import doctest,example1
    # Run every doctest embedded in example1's docstrings.
    doctest.testmod(example1)
#To put additional doctests somewhere else and make them a unittest
#testcase, put your tests in a different file like test1.txt. Note ---
#no quoting needed
>>> import example1
>>> example1.add('a','b')
'ab'
#then add a few lines of code to the end of example1.py to run the unittest
if __name__ == "__main__":
    import doctest,example1
    # BUG FIX: unittest is used below (TextTestRunner) but was never
    # imported in the original recipe.
    import unittest
    print('**running standard doctest')
    doctest.testmod(example1)
    print('**running unittest doctest')
    # DocFileSuite wraps the doctests found in test1.txt as a unittest
    # TestCase so both styles of test run from one entry point.
    suite = doctest.DocFileSuite('test1.txt')
    unittest.TextTestRunner().run(suite)
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/305292_doctest_unittest_pyth24s_cool_/recipe-305292.py",
"copies": "1",
"size": "1487",
"license": "mit",
"hash": 1964329685491468000,
"line_mean": 32.7954545455,
"line_max": 75,
"alpha_frac": 0.6933422999,
"autogenerated": false,
"ratio": 3.8723958333333335,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5065738133233333,
"avg_score": null,
"num_lines": null
} |
"""A simple drum pattern MIDI sequencer."""
from pyb import millis, delay
class Pattern:
    """Parse a text drum pattern and play it one step at a time.

    Each non-comment line of the source text is "<note> <hits> [description]",
    where <hits> is a string of step symbols drawn from ``velocities``.
    """

    # Step symbol -> MIDI velocity (None means "let the note ring").
    velocities = {
        "-": None, # continue note
        ".": 0, # off
        "+": 10, # ghost
        "s": 60, # soft
        "m": 100, # medium
        "x": 120, # hard
    }

    def __init__(self, src):
        """Build the instrument list from the pattern text *src*."""
        self.step = 0
        self.instruments = []
        self._active_notes = {}

        for raw_line in src.split('\n'):
            stripped = raw_line.strip()
            # Skip blank lines and '#' comments.
            if not stripped or stripped.startswith('#'):
                continue
            fields = stripped.split(" ", 2)
            if len(fields) == 3:
                note, hits, _description = fields
            elif len(fields) == 2:
                note, hits = fields
            else:
                # Malformed line: ignore it entirely.
                continue
            self.instruments.append((int(note), hits))

        # Pattern length = longest hit string among all instruments.
        self.steps = max(len(h) for _, h in self.instruments)

    def playstep(self, midiout, channel=10):
        """Emit MIDI events for the current step, then advance the counter."""
        for note, hits in self.instruments:
            velocity = self.velocities.get(hits[self.step])
            if velocity is None:
                continue  # '-' symbol: keep the note sounding
            if self._active_notes.get(note):
                # velocity==0 <=> note off
                midiout.note_on(note, 0, ch=channel)
                self._active_notes[note] = 0
            if velocity > 0:
                midiout.note_on(note, max(1, velocity), ch=channel)
                self._active_notes[note] = velocity
        self.step = (self.step + 1) % self.steps
class Sequencer:
    """Play a Pattern in an endless loop at a fixed tempo on a MIDI output."""

    def __init__(self, midiout, bpm=120, channel=10, volume=127):
        self.midiout = midiout
        # milliseconds per tick (1/16th note); bpm is clamped to 20..400
        self.mpt = 15000. / max(20, min(bpm, 400))
        self.channel = channel
        self.volume = volume

    def play(self, pattern, kit=None):
        """Run *pattern* forever; always sends All Sound Off on exit."""
        # NOTE(review): the original comment said "channel volume", but MIDI
        # CC #10 is pan -- channel volume is CC #7.  Kept as-is to preserve
        # behavior; confirm which controller the target device expects.
        self.midiout.control_change(10, self.volume, ch=self.channel)
        self.activate_drumkit(kit)
        # give MIDI instrument some time to load drumkit
        delay(200)

        try:
            while True:
                last_tick = millis()
                pattern.playstep(self.midiout, self.channel)
                # Sleep whatever remains of the tick after playstep's work.
                timetowait = max(0, self.mpt - (millis() - last_tick))
                if timetowait > 0:
                    delay(int(timetowait))
        finally:
            # all sound off (CC #120)
            self.midiout.control_change(120, 0, ch=self.channel)

    def activate_drumkit(self, kit):
        """Select a drum kit.

        *kit* may be a bare program number, or an (msb, lsb, pc) tuple with
        bank-select values; None means "leave the current kit".
        """
        pc = kit
        if isinstance(kit, (list, tuple)):
            msb, lsb, pc = kit
        else:
            msb = lsb = None
        if msb is not None:
            # bank select msb
            self.midiout.control_change(0, msb, ch=self.channel)
        if lsb is not None:
            # bank select lsb
            self.midiout.control_change(32, lsb, ch=self.channel)
        if pc is not None:
            # BUG FIX: the original passed the whole (msb, lsb, pc) tuple as
            # the program number; only the program-change value belongs here.
            self.midiout.program_change(pc, ch=self.channel)
| {
"repo_name": "SpotlightKid/micropython-stm-lib",
"path": "midi/examples/drumseq.py",
"copies": "1",
"size": "2998",
"license": "mit",
"hash": 1276377677343559700,
"line_mean": 30.2291666667,
"line_max": 78,
"alpha_frac": 0.512341561,
"autogenerated": false,
"ratio": 3.7853535353535355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.979620700111544,
"avg_score": 0.0002976190476190476,
"num_lines": 96
} |
# A simple email notifier for xubuntu xfce4-panel using python imap and notify-osd notification server
# {this snippet works fine}
import urllib2
import feedparser
import os
def Authenticate(user, passwd):
    """Fetch the Gmail atom feed for *user* using HTTP basic auth.

    Installs a global urllib2 opener carrying the credentials, then
    returns the raw feed body as a string.
    """
    handler = urllib2.HTTPBasicAuthHandler()
    handler.add_password(
        realm='New mail feed',
        uri='https://mail.google.com',
        user='%s@gmail.com' % user,
        passwd=passwd,
    )
    urllib2.install_opener(urllib2.build_opener(handler))
    response = urllib2.urlopen('https://mail.google.com/mail/feed/atom')
    return response.read()
# notification service using Notify OSD server/daemon
def NotifyOSD(header, body):
    """Show a desktop notification via notify-send (Notify OSD daemon)."""
    # SECURITY FIX: the original used os.system("notify-send '%s' '%s'" % ...),
    # interpolating untrusted text (mail subjects/bodies) straight into a
    # shell command line.  Passing an argument list with no shell removes
    # the injection risk and the quoting bugs.
    import subprocess
    subprocess.call(['notify-send', header, body])
# read the mail feed with the feed parser
def AtomFeedParser(feed):
    '''Parse the Atom feed and print a summary'''
    atom = feedparser.parse(feed)
    # Parenthesized single-argument print works on Python 2 and 3 alike
    # (the original bare print statements were Python-2-only).
    print("")
    print(atom.feed.title)
    print("You have %s new mails" % len(atom.entries))
    # Iterate entries directly instead of xrange-indexing (xrange does not
    # exist on Python 3); also drops the unused 'header'/'body' locals.
    for entry in atom.entries:
        print(entry.title)
        print(entry.summary)
        print(entry.author)
        # call to notification service
        NotifyOSD(entry.title, entry.summary)
# Fetch the feed and notify for each new mail.
# WARNING(review): account credentials are hard-coded in source -- rotate
# them and load secrets from the environment or a config file instead.
response_feed = Authenticate("testing.service.007", "testing007")
AtomFeedParser(response_feed)
| {
"repo_name": "cloud-engineering/xfc-email-notifier",
"path": "snippets/snippet_gmail_atom_feed_reader.py",
"copies": "1",
"size": "1506",
"license": "mit",
"hash": -432242933579478800,
"line_mean": 26.8888888889,
"line_max": 102,
"alpha_frac": 0.6812749004,
"autogenerated": false,
"ratio": 3.6911764705882355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48724513709882356,
"avg_score": null,
"num_lines": null
} |
"""A simple embedding of an IPython kernel.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from IPython.lib.kernel import connect_qtconsole
from IPython.kernel.zmq.kernelapp import IPKernelApp
#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------
class SimpleKernelApp(object):
    """A minimal object that uses an IPython kernel and has a few methods
    to manipulate a namespace and open Qt consoles tied to the kernel.
    """

    def __init__(self, gui):
        """Start an IPython kernel with *gui* event-loop integration."""
        self.ipkernel = IPKernelApp.instance()
        self.ipkernel.initialize(['python', '--gui=%s' % gui,
                                  #'--log-level=10'  # for debugging
                                  ])

        # To create and track active qt consoles
        self.consoles = []

        # This application will also act on the shell user namespace
        self.namespace = self.ipkernel.shell.user_ns

        # Keys present at startup so we don't print the entire pylab/numpy
        # namespace when the user clicks the 'namespace' button
        self._init_keys = set(self.namespace.keys())

        # Example: a variable that will be seen by the user in the shell, and
        # that the GUI modifies (the 'Counter++' button increments it):
        self.namespace['app_counter'] = 0

    def print_namespace(self, evt=None):
        """Print user-defined variables (skips startup keys and _private)."""
        print("\n***Variables in User namespace***")
        # .items() works on Python 2 and 3; the original used the
        # Python-2-only .iteritems() even though this file already relies
        # on print() as a function.
        for k, v in self.namespace.items():
            if k not in self._init_keys and not k.startswith('_'):
                print('%s -> %r' % (k, v))
        sys.stdout.flush()

    def new_qt_console(self, evt=None):
        """start a new qtconsole connected to our kernel"""
        console = connect_qtconsole(self.ipkernel.connection_file,
                                    profile=self.ipkernel.profile)
        # BUG FIX: track the console so cleanup_consoles() can actually kill
        # it; previously self.consoles was created but never populated.
        self.consoles.append(console)
        return console

    def count(self, evt=None):
        """Increment the demo counter in the user namespace."""
        self.namespace['app_counter'] += 1

    def cleanup_consoles(self, evt=None):
        """Kill every tracked qtconsole subprocess."""
        for c in self.consoles:
            c.kill()

    def start(self):
        """Enter the kernel's main loop (blocks until shutdown)."""
        self.ipkernel.start()
| {
"repo_name": "pioneers/topgear",
"path": "ipython-in-depth/exercises/Embedding/kapp.py",
"copies": "2",
"size": "2328",
"license": "apache-2.0",
"hash": -4110517832691204000,
"line_mean": 37.8,
"line_max": 78,
"alpha_frac": 0.5244845361,
"autogenerated": false,
"ratio": 4.751020408163265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6275504944263266,
"avg_score": null,
"num_lines": null
} |
"""A simple engine that talks to a controller over 0MQ.
it handles registration, etc. and launches a kernel
connected to the Controller's Schedulers.
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from __future__ import print_function
import sys
import time
from getpass import getpass
import zmq
from zmq.eventloop import ioloop, zmqstream
from IPython.external.ssh import tunnel
# internal
from IPython.utils.localinterfaces import LOCALHOST
from IPython.utils.traitlets import (
Instance, Dict, Integer, Type, Float, Integer, Unicode, CBytes, Bool
)
from IPython.utils.py3compat import cast_bytes
from IPython.parallel.controller.heartmonitor import Heart
from IPython.parallel.factory import RegistrationFactory
from IPython.parallel.util import disambiguate_url
from IPython.kernel.zmq.session import Message
from IPython.kernel.zmq.ipkernel import Kernel
from IPython.kernel.zmq.kernelapp import IPKernelApp
class EngineFactory(RegistrationFactory):
    """IPython engine"""

    # configurables:
    out_stream_factory=Type('IPython.kernel.zmq.iostream.OutStream', config=True,
        help="""The OutStream for handling stdout/err.
        Typically 'IPython.kernel.zmq.iostream.OutStream'""")
    display_hook_factory=Type('IPython.kernel.zmq.displayhook.ZMQDisplayHook', config=True,
        help="""The class for handling displayhook.
        Typically 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'""")
    location=Unicode(config=True,
        help="""The location (an IP address) of the controller. This is
        used for disambiguating URLs, to determine whether
        loopback should be used to connect or the public address.""")
    timeout=Float(5.0, config=True,
        help="""The time (in seconds) to wait for the Controller to respond
        to registration requests before giving up.""")
    max_heartbeat_misses=Integer(50, config=True,
        help="""The maximum number of times a check for the heartbeat ping of a
        controller can be missed before shutting down the engine.
        If set to 0, the check is disabled.""")
    sshserver=Unicode(config=True,
        help="""The SSH server to use for tunneling connections to the Controller.""")
    sshkey=Unicode(config=True,
        help="""The SSH private key file to use when tunneling connections to the Controller.""")
    paramiko=Bool(sys.platform == 'win32', config=True,
        help="""Whether to use paramiko instead of openssh for tunnels.""")

    # not configurable:
    connection_info = Dict()
    user_ns = Dict()
    id = Integer(allow_none=True)
    registrar = Instance('zmq.eventloop.zmqstream.ZMQStream')
    kernel = Instance(Kernel)
    hb_check_period=Integer()

    # States for the heartbeat monitoring
    # Initial values for monitored and pinged must satisfy "monitored > pinged == False" so that
    # during the first check no "missed" ping is reported. Must be floats for Python 3 compatibility.
    _hb_last_pinged = 0.0
    _hb_last_monitored = 0.0
    _hb_missed_beats = 0
    # The zmq Stream which receives the pings from the Heart
    _hb_listener = None

    bident = CBytes()
    ident = Unicode()

    def _ident_changed(self, name, old, new):
        # traitlets change handler: keep the bytes identity in sync with ident
        self.bident = cast_bytes(new)

    using_ssh=Bool(False)

    def __init__(self, **kwargs):
        """Initialize the factory and derive the engine identity from the session."""
        super(EngineFactory, self).__init__(**kwargs)
        self.ident = self.session.session

    def init_connector(self):
        """construct connection function, which handles tunnels."""
        self.using_ssh = bool(self.sshkey or self.sshserver)

        if self.sshkey and not self.sshserver:
            # We are using ssh directly to the controller, tunneling localhost to localhost
            self.sshserver = self.url.split('://')[1].split(':')[0]

        if self.using_ssh:
            if tunnel.try_passwordless_ssh(self.sshserver, self.sshkey, self.paramiko):
                password=False
            else:
                password = getpass("SSH Password for %s: "%self.sshserver)
        else:
            password = False

        def connect(s, url):
            # Resolve the url relative to self.location, then connect the
            # socket -- through an ssh tunnel when one is configured.
            url = disambiguate_url(url, self.location)
            if self.using_ssh:
                self.log.debug("Tunneling connection to %s via %s", url, self.sshserver)
                return tunnel.tunnel_connection(s, url, self.sshserver,
                            keyfile=self.sshkey, paramiko=self.paramiko,
                            password=password,
                )
            else:
                return s.connect(url)

        def maybe_tunnel(url):
            """like connect, but don't complete the connection (for use by heartbeat)"""
            url = disambiguate_url(url, self.location)
            if self.using_ssh:
                self.log.debug("Tunneling connection to %s via %s", url, self.sshserver)
                url,tunnelobj = tunnel.open_tunnel(url, self.sshserver,
                            keyfile=self.sshkey, paramiko=self.paramiko,
                            password=password,
                )
            return str(url)

        return connect, maybe_tunnel

    def register(self):
        """send the registration_request"""
        self.log.info("Registering with controller at %s"%self.url)
        ctx = self.context
        connect,maybe_tunnel = self.init_connector()
        reg = ctx.socket(zmq.DEALER)
        reg.setsockopt(zmq.IDENTITY, self.bident)
        connect(reg, self.url)

        self.registrar = zmqstream.ZMQStream(reg, self.loop)

        content = dict(uuid=self.ident)
        # complete_registration is invoked when the hub's reply arrives.
        self.registrar.on_recv(lambda msg: self.complete_registration(msg, connect, maybe_tunnel))
        # print (self.session.key)
        self.session.send(self.registrar, "registration_request", content=content)

    def _report_ping(self, msg):
        """Callback for when the heartmonitor.Heart receives a ping"""
        #self.log.debug("Received a ping: %s", msg)
        self._hb_last_pinged = time.time()

    def complete_registration(self, msg, connect, maybe_tunnel):
        """Handle the hub's registration reply: set up heartbeat, streams and kernel."""
        # print msg
        self._abort_dc.stop()
        ctx = self.context
        loop = self.loop
        identity = self.bident
        idents,msg = self.session.feed_identities(msg)
        msg = self.session.unserialize(msg)
        content = msg['content']
        info = self.connection_info

        def url(key):
            """get zmq url for given channel"""
            return str(info["interface"] + ":%i" % info[key])

        if content['status'] == 'ok':
            self.id = int(content['id'])

            # launch heartbeat
            # possibly forward hb ports with tunnels
            hb_ping = maybe_tunnel(url('hb_ping'))
            hb_pong = maybe_tunnel(url('hb_pong'))

            hb_monitor = None
            if self.max_heartbeat_misses > 0:
                # Add a monitor socket which will record the last time a ping was seen
                mon = self.context.socket(zmq.SUB)
                mport = mon.bind_to_random_port('tcp://%s' % LOCALHOST)
                mon.setsockopt(zmq.SUBSCRIBE, b"")
                self._hb_listener = zmqstream.ZMQStream(mon, self.loop)
                self._hb_listener.on_recv(self._report_ping)
                hb_monitor = "tcp://%s:%i" % (LOCALHOST, mport)

            heart = Heart(hb_ping, hb_pong, hb_monitor , heart_id=identity)
            heart.start()

            # create Shell Connections (MUX, Task, etc.):
            shell_addrs = url('mux'), url('task')

            # Use only one shell stream for mux and tasks
            stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop)
            stream.setsockopt(zmq.IDENTITY, identity)
            shell_streams = [stream]
            for addr in shell_addrs:
                connect(stream, addr)

            # control stream:
            control_addr = url('control')
            control_stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop)
            control_stream.setsockopt(zmq.IDENTITY, identity)
            connect(control_stream, control_addr)

            # create iopub stream:
            iopub_addr = url('iopub')
            iopub_socket = ctx.socket(zmq.PUB)
            iopub_socket.setsockopt(zmq.IDENTITY, identity)
            connect(iopub_socket, iopub_addr)

            # disable history:
            self.config.HistoryManager.hist_file = ':memory:'

            # Redirect input streams and set a display hook.
            if self.out_stream_factory:
                sys.stdout = self.out_stream_factory(self.session, iopub_socket, u'stdout')
                sys.stdout.topic = cast_bytes('engine.%i.stdout' % self.id)
                sys.stderr = self.out_stream_factory(self.session, iopub_socket, u'stderr')
                sys.stderr.topic = cast_bytes('engine.%i.stderr' % self.id)
            if self.display_hook_factory:
                sys.displayhook = self.display_hook_factory(self.session, iopub_socket)
                sys.displayhook.topic = cast_bytes('engine.%i.pyout' % self.id)

            self.kernel = Kernel(parent=self, int_id=self.id, ident=self.ident, session=self.session,
                    control_stream=control_stream, shell_streams=shell_streams, iopub_socket=iopub_socket,
                    loop=loop, user_ns=self.user_ns, log=self.log)

            self.kernel.shell.display_pub.topic = cast_bytes('engine.%i.displaypub' % self.id)

            # periodically check the heartbeat pings of the controller
            # Should be started here and not in "start()" so that the right period can be taken
            # from the hubs HeartBeatMonitor.period
            if self.max_heartbeat_misses > 0:
                # Use a slightly bigger check period than the hub signal period to not warn unnecessary
                self.hb_check_period = int(content['hb_period'])+10
                self.log.info("Starting to monitor the heartbeat signal from the hub every %i ms." , self.hb_check_period)
                self._hb_reporter = ioloop.PeriodicCallback(self._hb_monitor, self.hb_check_period, self.loop)
                self._hb_reporter.start()
            else:
                self.log.info("Monitoring of the heartbeat signal from the hub is not enabled.")

            # FIXME: This is a hack until IPKernelApp and IPEngineApp can be fully merged
            app = IPKernelApp(parent=self, shell=self.kernel.shell, kernel=self.kernel, log=self.log)
            app.init_profile_dir()
            app.init_code()

            self.kernel.start()
        else:
            self.log.fatal("Registration Failed: %s"%msg)
            raise Exception("Registration Failed: %s"%msg)

        self.log.info("Completed registration with id %i"%self.id)

    def abort(self):
        """Give up on registration: log a hint, notify the hub, and exit."""
        self.log.fatal("Registration timed out after %.1f seconds"%self.timeout)
        if self.url.startswith('127.'):
            self.log.fatal("""
            If the controller and engines are not on the same machine,
            you will have to instruct the controller to listen on an external IP (in ipcontroller_config.py):
                c.HubFactory.ip='*' # for all interfaces, internal and external
                c.HubFactory.ip='192.168.1.101' # or any interface that the engines can see
            or tunnel connections via ssh.
            """)
        self.session.send(self.registrar, "unregistration_request", content=dict(id=self.id))
        time.sleep(1)
        sys.exit(255)

    def _hb_monitor(self):
        """Callback to monitor the heartbeat from the controller"""
        self._hb_listener.flush()
        if self._hb_last_monitored > self._hb_last_pinged:
            self._hb_missed_beats += 1
            self.log.warn("No heartbeat in the last %s ms (%s time(s) in a row).", self.hb_check_period, self._hb_missed_beats)
        else:
            #self.log.debug("Heartbeat received (after missing %s beats).", self._hb_missed_beats)
            self._hb_missed_beats = 0
        if self._hb_missed_beats >= self.max_heartbeat_misses:
            self.log.fatal("Maximum number of heartbeats misses reached (%s times %s ms), shutting down.",
                    self.max_heartbeat_misses, self.hb_check_period)
            self.session.send(self.registrar, "unregistration_request", content=dict(id=self.id))
            self.loop.stop()
        self._hb_last_monitored = time.time()

    def start(self):
        """Schedule registration immediately and the timeout abort callback."""
        dc = ioloop.DelayedCallback(self.register, 0, self.loop)
        dc.start()
        self._abort_dc = ioloop.DelayedCallback(self.abort, self.timeout*1000, self.loop)
        self._abort_dc.start()
| {
"repo_name": "noslenfa/tdjangorest",
"path": "uw/lib/python2.7/site-packages/IPython/parallel/engine/engine.py",
"copies": "2",
"size": "13047",
"license": "apache-2.0",
"hash": 5054244083947806000,
"line_mean": 41.7770491803,
"line_max": 127,
"alpha_frac": 0.60780256,
"autogenerated": false,
"ratio": 4.021886559802713,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5629689119802713,
"avg_score": null,
"num_lines": null
} |
"""A simple engine to query the index."""
import time
import operator
import pickle
import crawler
import preprocessor
import morphosyntactic
import indexer
import save_and_load
class NoIndexError(Exception):
    """Raised when the search engine is queried before an index was set."""

    def __init__(self):
        # Deliberately takes no arguments: the message below is fixed.
        pass

    def __str__(self):
        return 'The search engine\'s index has not been initialized.'
class NoUrlMapError(Exception):
    """Raised when the engine's id-to-url map (urls) was never initialized."""

    def __init__(self):
        # Deliberately takes no arguments: the message below is fixed.
        pass

    def __str__(self):
        return 'The search engine\'s id-to-url map has not been initialized.'
class SimpleSearchEngine:
    """A simple engine to query the index."""

    def __init__(self, index=None, urls=None):
        """
        __init__(self, index=None, urls=None): Create a new SearchEngine.

        'index' can be an existing index (maps lemma -> {page_id: weight})
        'urls' can be a list mapping page ids to urls
        """
        self._index = index
        self._urls = urls

    def query(self, input):
        """
        query(input): 'input' is a string. Clean, tokenize and lemmatize it,
        make the query and print the results.
        """
        # clean input:
        query = preprocessor.clean_query(input)
        lemmas = morphosyntactic.lemmatize_query(query)
        # make the query:
        results = self.simple_query(lemmas)
        return results

    def simple_query(self, lemmas):
        """
        simple_query(self, lemmas): Make a query to the index. 'lemmas' is
        a list of lemmas to search for.

        simple_query() returns a list of (url, weight) tuples sorted by weight
        in descending order.
        """
        # 'is None' instead of '== None': identity is the correct idiom.
        if self._index is None:
            raise NoIndexError
        if self._urls is None:
            raise NoUrlMapError
        if len(lemmas) == 0:
            return []
        # a dict mapping urls to importance according to the query:
        results = {}
        for lemma in lemmas:
            try:
                # .items() works on Python 2 and 3 (was Python-2-only
                # .iteritems()).
                for id, weight in self._index[lemma].items():
                    # increase the page's importance by 'weight':
                    results[self._urls[id]] = results.get(self._urls[id],
                                                          0.0) + weight
            except KeyError:
                # 'lemma' was not in the index
                continue
        # sort by weight (descending order) and return
        return sorted(results.items(), key=operator.itemgetter(1),
                      reverse=True)

    def evaluate(self, queries, repeat=1):
        """
        evaluate(self, queries, repeat=1): Run a series of queries 'repeat'
        times and return the average time. 'queries' is a list of lists
        containing strings.

        Each list is a query and each string (inside the lists) is a lemma
        to search for.
        """
        # time.clock() was removed in Python 3.8; use perf_counter when
        # available (3.3+) and fall back to clock on old interpreters.
        timer = time.perf_counter if hasattr(time, 'perf_counter') else time.clock
        start = timer()
        for _ in range(repeat):
            for query in queries:
                self.simple_query(query)
        stop = timer()
        return (stop - start) / (len(queries) * repeat)

    def load_index_and_urls(self, index_file=None, urls_file=None):
        """
        load_index(self, index_file=None, urls_file=None): Load the index
        and urls from files.
        """
        if index_file is None:
            index_file = 'index.xml'
        with open(index_file, 'r') as f:
            self._index = save_and_load.load_index(f)
        if urls_file is None:
            urls_file = 'urls.pickle'
        # BUG FIX: pickle data is binary; text mode ('r') breaks under
        # Python 3 and can corrupt reads.  'rb' also works on Python 2.
        with open(urls_file, 'rb') as f:
            self._urls = pickle.load(f)

    def set_index_and_urls(self, index, urls):
        """
        set_index_and_urls(self, index, urls): Use an existing index
        and url map.
        """
        self._index = index
        self._urls = urls

    def make_index_and_urls(self):
        """
        make_index_and_urls(self): Make an index and a url-map from scratch.
        """
        c = crawler.Crawler()
        c.crawl()
        self._urls = c.get_page_urls()
        preprocessor.clean_and_tokenize_all()
        self._index = indexer.make_index(tagged=False)
| {
"repo_name": "nitsas/simple-web-search-engine",
"path": "search_engine.py",
"copies": "1",
"size": "4310",
"license": "mit",
"hash": -1144587110971107200,
"line_mean": 30.6911764706,
"line_max": 78,
"alpha_frac": 0.5552204176,
"autogenerated": false,
"ratio": 4.140249759846301,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0274133369776775,
"num_lines": 136
} |
"""A simple event tracing framework.
This is used to track events that are happing in irisett. For example
monitors call event.running(...) when they start/stop monitoring etc.
Other parts of irisett can then listen for events that are happening.
For example, the webmgmt ui can send events that are occuring over a websocket
to clients that want to watch events as they occur.
Performance concerns:
The event tracing infrastructure is probably not great from a performance
standpoint. However if there are no listeners connected the overhead
is marginal.
"""
from typing import Callable, Dict, Optional, List, Set, Union, Any
import time
import asyncio
from irisett import (
stats,
log,
)
class EventListener:
    """A single listener for the event tracer.

    When EventTracer.listen is called an EventListener is created and
    returned. The EventListener keeps track of state for the a callback
    that wants to listen for events. filters etc. are kept in the
    EventListener object.
    """

    def __init__(self, tracer: 'EventTracer', callback: Callable, *,
                 event_filter: Optional[List[str]] = None,
                 active_monitor_filter: Optional[List[Union[str, int]]] = None) -> None:
        self.tracer = tracer
        self.callback = callback
        self.created = time.time()
        self.event_filter = self._parse_filter_list(event_filter)
        # CONSISTENCY FIX: set_active_monitor_filter() coerces every id to
        # int before building the filter set, but __init__ previously did
        # not.  String ids passed at construction time could therefore
        # never match the int monitor ids compared in wants_event().
        self.active_monitor_filter = self._parse_filter_list(
            self._parse_active_monitor_filter(active_monitor_filter))

    def set_event_filter(self, filter: Optional[List]) -> None:
        """Replace the set of event names this listener wants."""
        self.event_filter = self._parse_filter_list(filter)

    def set_active_monitor_filter(self, filter: Optional[List]) -> None:
        """Replace the set of active-monitor ids this listener wants."""
        self.active_monitor_filter = self._parse_filter_list(
            self._parse_active_monitor_filter(filter))

    @staticmethod
    def _parse_active_monitor_filter(filter: Optional[List]) -> Any:
        # Monitor ids may arrive as strings (e.g. from a web request);
        # normalize to ints so comparisons in wants_event work.
        if filter:
            filter = [int(n) for n in filter]
        return filter

    @staticmethod
    def _parse_filter_list(filter: Optional[List]) -> Any:
        """Parse a filter argument.

        If a list of filter arguments are passed in convert it to a set
        for increased lookup speed and reduced size.
        """
        ret = None
        if filter:
            ret = set(filter)
        return ret

    def wants_event(self, event_name: str, args: Dict) -> bool:
        """Check if an event matches a listeners filters.

        If it does not, the listener will not receive the event.
        """
        ret = True
        if self.event_filter and event_name not in self.event_filter:
            ret = False
        elif self.active_monitor_filter and 'monitor' in args and args['monitor'].monitor_type == 'active' \
                and args['monitor'].id not in self.active_monitor_filter:
            ret = False
        return ret
class EventTracer:
    """Dispatch irisett events to registered listeners.

    Creates listeners and receives events.  When an event arrives it is
    forwarded to every listener whose filters match it.
    """

    def __init__(self) -> None:
        # Registry of live EventListener objects.
        self.listeners = set()  # type: Set[EventListener]
        stats.set('num_listeners', 0, 'EVENT')
        stats.set('events_fired', 0, 'EVENT')
        self.loop = asyncio.get_event_loop()

    def listen(self, callback: Callable, *,
               event_filter: Optional[List[str]] = None,
               active_monitor_filter: Optional[List[Union[str, int]]] = None) -> EventListener:
        """Register *callback* and return its EventListener handle.

        event_filter restricts delivery to the named events;
        active_monitor_filter restricts delivery to the given monitor ids.
        """
        stats.inc('num_listeners', 'EVENT')
        new_listener = EventListener(self, callback, event_filter=event_filter,
                                     active_monitor_filter=active_monitor_filter)
        self.listeners.add(new_listener)
        return new_listener

    def stop_listening(self, listener: EventListener) -> None:
        """Remove a callback from the listener list."""
        if listener not in self.listeners:
            return
        stats.dec('num_listeners', 'EVENT')
        self.listeners.remove(listener)

    def running(self, event_name: str, **kwargs: Any) -> None:
        """Report that an event is happening.

        Each matching listener's callback is invoked as
        callback(listener, event-name, timestamp, arg-dict) and the result
        is scheduled on the event loop.
        """
        stats.inc('events_fired', 'EVENT')
        if not self.listeners:
            # Fast path: no listeners means the overhead stays marginal.
            return
        now = time.time()
        for current in self.listeners:
            if not current.wants_event(event_name, kwargs):
                continue
            try:
                task = current.callback(current, event_name, now, kwargs)
                asyncio.ensure_future(task)
            except Exception as e:
                log.msg('Failed to run event listener callback: %s' % str(e))
# Module-level default tracer plus convenience aliases, so callers can use
# irisett.event.listen(...) / running(...) without building their own tracer.
default_tracer = EventTracer()
listen = default_tracer.listen
stop_listening = default_tracer.stop_listening
running = default_tracer.running
| {
"repo_name": "beebyte/irisett",
"path": "irisett/event.py",
"copies": "1",
"size": "5132",
"license": "mit",
"hash": 4199092909868628500,
"line_mean": 36.7352941176,
"line_max": 120,
"alpha_frac": 0.6473109899,
"autogenerated": false,
"ratio": 4.2031122031122035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5350423193012204,
"avg_score": null,
"num_lines": null
} |
"""A simple example demonstrating basic usage of pyuv_cffi
This example creates a timer handle and a signal handle, then starts the loop. The timer callback is
run after 1 second, and repeating every 1 second thereafter. The signal handle registers a listener
for the INT signal and allows us to exit the loop by pressing ctrl-c.
"""
import signal
from pyuv_cffi import Loop, Timer, Signal
def sig_cb(sig_h, sig_num):
    """SIGINT callback: report the signal, then stop the handle and its loop."""
    message = '\nsig_cb({}, {})'.format(sig_h, sig_num)
    print(message)
    # Stop the signal handle first, then break out of loop.run().
    sig_h.stop()
    sig_h.loop.stop()
def timer_cb(timer_h):
    """Timer callback: just log each tick with the handle that fired."""
    line = 'timer_cb({})'.format(timer_h)
    print(line)
def run():
    """Create a loop with one repeating timer and a SIGINT handler, then run it."""
    event_loop = Loop()

    ticker = Timer(event_loop)
    ticker.start(timer_cb, 1, 1)  # first fire after 1 s, repeating every 1 s

    int_handler = Signal(event_loop)
    int_handler.start(sig_cb, signal.SIGINT)

    status = event_loop.run()
    # we must stop and free any other handles before freeing the loop
    ticker.close()
    print('loop.run() -> ', status)

    # all handles in pyuv_cffi (including the loop) are automatically freed
    # when they go out of scope
def main():
    """Entry point: run the example until interrupted with ctrl-c."""
    run()


if __name__ == '__main__':
    main()
| {
"repo_name": "veegee/guv",
"path": "examples/pyuv_cffi_example.py",
"copies": "1",
"size": "1060",
"license": "mit",
"hash": 4128176578612776000,
"line_mean": 22.5555555556,
"line_max": 100,
"alpha_frac": 0.658490566,
"autogenerated": false,
"ratio": 3.365079365079365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45235699310793653,
"avg_score": null,
"num_lines": null
} |
"""A simple example of building a virtual dataset.
This makes four 'source' HDF5 files, each with a 1D dataset of 100 numbers.
Then it makes a single 4x100 virtual dataset in a separate file, exposing
the four sources as one dataset.
"""
import h5py
import numpy as np
# Build demo data: row n holds the values n+1 .. n+100.
data = np.arange(0, 100).reshape(1, 100) + np.arange(1, 5).reshape(4, 1)

# Write the four source files (0.h5 .. 3.h5), one 100-element dataset each.
for n in range(4):
    with h5py.File(f"{n}.h5", "w") as f:
        f.create_dataset("data", (100,), "i4", data[n])

# Stitch the four sources into a single 4x100 virtual dataset layout.
layout = h5py.VirtualLayout(shape=(4, 100), dtype="i4")
for n in range(4):
    source = h5py.VirtualSource(f"{n}.h5", "data", shape=(100,))
    layout[n] = source

# Write the virtual dataset, plus a normal copy for comparison.
with h5py.File("VDS.h5", "w", libver="latest") as f:
    f.create_virtual_dataset("vdata", layout, fillvalue=-5)
    f.create_dataset("data", data=data, dtype="i4")

# Read back: the virtual dataset is transparent to the reader.
with h5py.File("VDS.h5", "r") as f:
    print("Virtual dataset:")
    print(f["vdata"][:, :10])
    print("Normal dataset:")
    print(f["data"][:, :10])
| {
"repo_name": "h5py/h5py",
"path": "examples/vds_simple.py",
"copies": "1",
"size": "1175",
"license": "bsd-3-clause",
"hash": 6893195572346869000,
"line_mean": 29.9210526316,
"line_max": 75,
"alpha_frac": 0.6604255319,
"autogenerated": false,
"ratio": 2.959697732997481,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9120123264897482,
"avg_score": 0,
"num_lines": 38
} |
# A simple example of calculating predictive posteriors in a normal unknown
# mean unknown variance case.
from __future__ import division
from lori import MHRun
import numpy as np
import pylab as p
import matplotlib as mpl
import random
from math import log, exp, pi, lgamma
import scipy.stats as st
import scipy
from scipy.special import betaln
import scipy.stats.distributions as di
from statsmodels.sandbox.distributions.mv_normal import MVT
#from sklearn.qda import QDA
import sys
sys.path.append('/home/bana/GSP/research/samc/code')
sys.path.append('/home/bana/GSP/research/samc/code/build')
mydb = []
class Classification():
    """Normal model with unknown mean and unknown variance.

    Holds a tiny fixed dataset plus prior hyperparameters, and exposes the
    propose/reject/energy/save hooks expected by the MHRun/SAMCRun samplers.
    Also precomputes the analytic posterior-predictive Student-t density
    (eq 34/55 of the referenced notes) for comparison plots.
    """

    def __init__(self):
        np.random.seed(1234)

        self.n = 4  # Number of data points
        self.true_mu = 0.0
        self.true_sigma = 1  # di.invgamma.rvs(3)

        # Grid used to accumulate a running average of the G function
        # (the posterior-predictive density estimate).
        self.grid_n = 100
        low, high = -4, 4
        self.gextent = (low, high)
        self.grid = np.linspace(low, high, self.grid_n)
        self.gavg = np.zeros(self.grid_n)
        self.numgavg = 0

        # self.data = di.norm.rvs(size=self.n)
        self.data = np.array([0.0, -0.0, 0.5, -0.5])
        assert self.data.size == self.n

        ######## Starting point of MCMC Run #######
        self.mu = 0.0
        self.sigma = 2.0

        ###### Bookkeeping (previous state, for reject()) ######
        self.oldmu = None
        self.oldsigma = None

        ##### Prior Values and Confidences ######
        self.priorsigma = 2
        self.kappa = 1
        self.priormu = 0
        self.nu = 8.0

        #### Calculating the Analytic solution given on page 15 of Lori's
        #### Optimal Classification eq 34.
        self.nustar = self.nu + self.n
        samplemean = self.data.mean()
        samplevar = np.cov(self.data)
        self.mustar = (self.nu*self.priormu + self.n * samplemean) \
                / (self.nu + self.n)
        self.kappastar = self.kappa + self.n
        # BUGFIX: the shrinkage coefficient in eq 34 is nu*n/(nu+n); the
        # original read (self.nu+self.nu) in the denominator, a typo for
        # nu+n (compare the mustar denominator above).
        self.Sstar = self.priorsigma + (self.n-1)*samplevar + self.nu*self.n/(self.nu+self.n)\
                * (samplemean - self.priormu)**2

        #### Now calculate effective class conditional densities from eq 55
        #### page 21
        #self.fx = MVT(
                #self.mu0star,
                #(self.nu0star+1)/(self.kappa0star-self.D+1)/self.nu0star * self.S0star,
                #self.kappa0star - self.D + 1)
        # NOTE(review): per the original author, the scaling parameters
        # below may be incorrect relative to eq 55 -- verify before reuse.
        self.fx = MVT(
                [self.mustar],
                [(self.nustar+1)/(self.kappastar)/self.nustar * self.Sstar / 2],
                self.kappastar / 2)
        self.analyticfx = self.fx.logpdf(self.grid.reshape(-1, 1))

    def propose(self):
        """Random-walk proposal on mu; independent invgamma draw for sigma.

        Saves the current state so reject() can restore it.  Returns 0
        (proposal bookkeeping value expected by the sampler).
        """
        self.oldmu = self.mu
        self.oldsigma = self.sigma
        self.mu += np.random.randn()*0.1
        #self.mu = np.random.randn()
        self.sigma = di.invgamma.rvs(1)
        return 0

    def copy(self):
        """Return (mu, sigma, y) where y is a fresh draw from N(mu, sigma)."""
        return (self.mu, self.sigma, di.norm.rvs(loc=self.mu, scale=self.sigma))

    def reject(self):
        """Restore the state saved by the last propose()."""
        self.mu = self.oldmu
        self.sigma = self.oldsigma

    def energy(self):
        """Negative log posterior (up to a constant) at the current state."""
        total = 0.0
        # Likelihood term.
        total -= di.norm.logpdf(self.data, loc=self.mu, scale=self.sigma).sum()
        # Prior terms: normal prior on mu given sigma, and an
        # inverse-gamma-style prior on sigma (log-density up to constants).
        total -= log(self.sigma)*(-0.5) - self.nu/2 * (self.mu-self.priormu)**2/self.sigma
        total -= log(self.sigma)*(self.kappa+2)/(-2) - 0.5*self.priorsigma/self.sigma
        return total

    def calc_gfunc(self):
        """Predictive density N(grid; mu, sigma) at the current state."""
        return di.norm.pdf(self.grid, loc=self.mu, scale=self.sigma)

    def init_db(self, db, dbsize):
        # Database storage is handled by the module-level `mydb` list instead.
        pass
        #dtype = [('thetas',np.double),
                #('energies',np.double),
                #('funcs',np.double)]
        #if db == None:
            #return np.zeros(dbsize, dtype=dtype)
        #elif db.shape[0] != dbsize:
            #return np.resize(db, dbsize)
        #else:
            #raise Exception("DB Not inited")

    def save_to_db(self, db, theta, energy, iteration):
        """Record the current state and fold it into the G-function average."""
        #func = 0.0
        #db[iteration] = np.array([theta, energy, func])
        global mydb
        mydb.append(self.copy())

        # Update G function average (incremental mean).
        self.numgavg += 1
        self.gavg += (self.calc_gfunc() - self.gavg) / self.numgavg
def pnorm(loc, scale):
    """Open a new figure showing the normal pdf with the given loc/scale."""
    # Evaluate the density on a fixed grid spanning [-20, 20].
    grid = np.linspace(-20, 20, 400)
    density = di.norm.pdf(grid, loc=loc, scale=scale)
    p.figure()
    p.plot(grid, density)
    p.show()
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    import matplotlib.animation as animation
    import samc
    from samcnet.utils import *
    # Build the model and a plain Metropolis-Hastings sampler over it.
    c = Classification()
    #p.close('all')
    s = MHRun(c, burn=0)
    #s = samc.SAMCRun(c, burn=0, stepscale=1000, refden=0)
    # Initial sampling pass; c.gavg accumulates the G-function average and
    # the module-level mydb list records (mu, sigma, draw) tuples.
    s.sample(1e3)
    #plotHist(s)
    ##################################
    ##################################
    #p.subplot(4,1,1)
    #p.plot(c.grid, c.gavg, 'r')
    #p.plot(c.data, np.ones_like(c.data), 'ko')
    #p.grid(True)
    #x = np.linspace(0.01,2,50)
    #p.subplot(4,1,2)
    #p.hist(np.vstack(mydb)[:,1],bins=x)
    #p.subplot(4,1,3)
    #mus = np.vstack(mydb)[:,0]
    #counts,bins,_ = p.hist(mus,bins=80)
    #xx = np.linspace(bins[0], bins[-1], 300)
    #ty = di.t.pdf(xx, *di.t.fit(mus))
    #ny = di.norm.pdf(xx, *di.norm.fit(mus))
    #p.plot(xx,ty*counts.max()/ty.max(),'g', label='t fit')
    #p.plot(xx,ny*counts.max()/ny.max(),'b--', label='normal fit')
    #p.legend()
    #p.subplot(4,1,4)
    #ys = np.vstack(mydb)[:,2]
    #counts,bins,_ = p.hist(ys,bins=80)
    #xx = np.linspace(bins[0], bins[-1], 300)
    #ty = di.t.pdf(xx, *di.t.fit(ys))
    #ny = di.norm.pdf(xx, *di.norm.fit(ys))
    #ay = c.fx.pdf(xx.reshape(-1,1))
    #p.title("sampled y's")
    #p.plot(xx,ty*counts.max()/ty.max(),'g', label='t fit')
    #p.plot(xx,ny*counts.max()/ny.max(),'b--', label='normal fit')
    #p.plot(xx,ay*counts.max()/ay.max(),'k--', label='t analytic')
    #p.legend()
    ##############################
    # Plot the sampled predictive posterior against the analytic Student-t.
    fig1 = plt.figure()
    #xx = np.linspace(bins[0], bins[-1], 300)
    #ty = di.t.logpdf(xx, *di.t.fit(ys))
    #p.plot(xx,ty,'g', label='t empirical')
    plt.title("predictive posteriors")
    plt.ylabel('logpdfs')
    plt.grid(True)
    plt.hold(True)
    plt.plot(c.data, np.ones_like(c.data), 'ko', label='data')
    plt.plot(c.grid, np.exp(c.analyticfx), 'k--', label='student t')
    if True:
        # Static-plot branch: sample more, then draw the averaged G function
        # and a normalized histogram of the posterior-predictive draws.
        s.sample(3e3)
        plt.plot(c.grid, c.gavg, 'r', label='gavg')
        ys = np.vstack(mydb)[:,2]
        counts,bins,_ = p.hist(ys,bins=80,normed=True)
    elif False: # Animation branch (disabled): redraw gavg after each batch.
        l, = plt.plot(c.grid, c.gavg, 'r', label='gavg')
        def update_line(num, data, line):
            global c
            line.set_data(c.grid,data[num])
            return line,
        data = [c.gavg.copy()]
        # Number of 1e3-sample batches comes from the command line.
        N=int(sys.argv[1])
        for x in range(N):
            s.sample(1e3)
            data.append(c.gavg.copy())
        line_ani = animation.FuncAnimation(fig1, update_line, N, fargs=(data, l),
            interval=50, blit=True)
    plt.show()
| {
"repo_name": "binarybana/samcnet",
"path": "samcnet/tail.py",
"copies": "1",
"size": "7013",
"license": "mit",
"hash": 2275551134990323700,
"line_mean": 28.8425531915,
"line_max": 95,
"alpha_frac": 0.5511193498,
"autogenerated": false,
"ratio": 3.0111635895234006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40622829393234006,
"avg_score": null,
"num_lines": null
} |
"""A simple example of Google Analytics batched user permissions."""
import json
from apiclient.errors import HttpError
from apiclient.http import BatchHttpRequest
def call_back(request_id, response, exception):
    """Handle one response from a batched Google Analytics request.

    Args:
      request_id: The id this request was added to the batch with.
      response: The deserialized response body (None when an error occurred).
      exception: The exception raised for this request, or None on success.
    """
    # Prints use single-argument call syntax so the code runs unchanged
    # under both Python 2 (print statement) and Python 3 (print function).
    print(request_id)
    if exception is not None:
        if isinstance(exception, HttpError):
            # Pull the human-readable message out of the API error payload.
            message = json.loads(exception.content)['error']['message']
            print('Request %s returned API error : %s : %s ' %
                  (request_id, exception.resp.status, message))
    else:
        print(response)
def add_users(users, permissions):
    """Adds users to every view (profile) with the given permissions.

    Args:
      users: A list of user email addresses.
      permissions: A list of user permissions.

    Note: this code assumes you have MANAGE_USERS level permissions
    to each profile and an authorized Google Analytics service object.
    """
    # Fetch the full account / property / profile tree in one call.
    summaries = analytics.management().accountSummaries().list().execute()

    for account_summary in summaries.get('items', []):
        current_account = account_summary.get('id')

        for email in users:
            # One batch per (account, user) pair; executed after all the
            # profile-link inserts for that pair have been queued.
            batch = BatchHttpRequest(callback=call_back)

            for web_property in account_summary.get('webProperties', []):
                current_property = web_property.get('id')

                for profile in web_property.get('profiles', []):
                    # Queue an insert of the user link for this view.
                    request = analytics.management().profileUserLinks().insert(
                        accountId=current_account,
                        webPropertyId=current_property,
                        profileId=profile.get('id'),
                        body={
                            'permissions': {
                                'local': permissions
                            },
                            'userRef': {
                                'email': email
                            }
                        }
                    )
                    batch.add(request)

            batch.execute()
if __name__ == '__main__':
    # Construct a list of users.
    emails = ['ona@gmail.com', 'emi@gmail.com', 'sue@gmail.com', 'liz@gmail.com']
    # call the add_users function with the list of desired permissions.
    # READ_AND_ANALYZE is the lowest Analytics permission level.
    add_users(emails, ['READ_AND_ANALYZE'])
| {
"repo_name": "mcohoon/api-samples",
"path": "batching/permissions.py",
"copies": "1",
"size": "2389",
"license": "apache-2.0",
"hash": -3404297649734361600,
"line_mean": 31.7260273973,
"line_max": 80,
"alpha_frac": 0.6224361658,
"autogenerated": false,
"ratio": 4.335753176043557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5458189341843557,
"avg_score": null,
"num_lines": null
} |
"""A simple example of how to access the Google Analytics API."""
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
import re
from collections import OrderedDict
from operator import itemgetter
import yaml
import datetime
def get_service(api_name, api_version, scopes, key_file_location):
    """Build an authorized Google API service object.

    Args:
        api_name: The name of the api to connect to.
        api_version: The api version to connect to.
        scopes: A list of auth scopes to authorize for the application.
        key_file_location: The path to a valid service account JSON key file.

    Returns:
        A service that is connected to the specified API.
    """
    # Service-account credentials restricted to the requested scopes feed
    # straight into the discovery client.
    creds = ServiceAccountCredentials.from_json_keyfile_name(
        key_file_location, scopes=scopes)
    return build(api_name, api_version, credentials=creds)
def get_first_profile_id(service):
    """Return the id of the first view (profile) visible to this user.

    Walks first account -> first web property -> first profile, and returns
    None as soon as any level comes back empty.
    """
    accounts = service.management().accounts().list().execute()
    if not accounts.get('items'):
        return None
    account = accounts.get('items')[0].get('id')

    properties = service.management().webproperties().list(
        accountId=account).execute()
    if not properties.get('items'):
        return None
    property = properties.get('items')[0].get('id')

    profiles = service.management().profiles().list(
        accountId=account,
        webPropertyId=property).execute()
    if not profiles.get('items'):
        return None
    # The first view (profile) id.
    return profiles.get('items')[0].get('id')
def get_results(service, profile_id, type):
    """Query unique pageviews per page path for the last 30 days.

    Only paths matching '/<type>/...' are included, sorted most-viewed
    first.
    """
    query = service.data().ga().get(
        ids='ga:' + profile_id,
        start_date='30daysAgo',
        end_date='today',
        metrics='ga:uniquePageviews',
        dimensions='ga:pagePath',
        sort='-ga:uniquePageviews',
        filters='ga:pagePath=~/' + type + '/.+')
    return query.execute()
def save_results(service, profile_id, type):
    """Fetch pageview stats for '/<type>/...' pages and update rank.yaml.

    Aggregates unique pageviews per '/<type>/<slug>' URL, then merges the
    new ranking into ../../_data/rank.yaml, shifting each entry's 'current'
    record into 'previous'.  Entries absent from this run get a zeroed
    count at the tail of the ranking.

    Args:
      service: an authorized Analytics service object.
      profile_id: the view (profile) id to query.
      type: URL section, 'distribution' or 'desktop'.
    """
    results = get_results(service, profile_id, type)
    if results:
        rows = results.get('rows')
        rank = 1
        # Only keep paths containing '/<type>/<slug>'.
        prog = re.compile(r'/' + type + r'/[a-z0-9]+')
        url_counts = {}
        for row in rows:
            if not prog.search(row[0]):
                continue
            url = re.sub(r'.*(/' + type + r'/[a-z0-9]+).*', r"\1", row[0])
            if url in url_counts:
                url_counts[url] = url_counts[url] + int(row[1])
            else:
                url_counts[url] = int(row[1])
        # Sort by URL first so ties in count keep a stable alphabetical
        # order after the second (count, descending) sort.
        url_counts = OrderedDict(sorted(url_counts.items(), key=itemgetter(0)))
        url_counts = OrderedDict(sorted(url_counts.items(), key=itemgetter(1), reverse=True))
        with open("../../_data/rank.yaml", "r") as fh:
            # safe_load is sufficient here (the file is written with
            # safe_dump below) and avoids constructing arbitrary Python
            # objects from the YAML stream.
            final_result = yaml.safe_load(fh.read())
        if not final_result:
            print ("No previous record found")
            final_result = {
                'meta': {
                    'previous_date': None,
                    'current_date': datetime.date.today().isoformat()
                },
                'distributions': [],
                'desktops': []
            }
        elif not 'desktops' in final_result:
            # NOTE(review): when only 'desktops' is missing, the meta dates
            # are not advanced -- preserved from the original; confirm
            # whether that is intended.
            final_result['desktops'] = []
        else:
            final_result['meta']['previous_date'] = final_result['meta']['current_date']
            final_result['meta']['current_date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
        section = 'distributions' if type == 'distribution' else 'desktops'
        processed = []
        for url in url_counts:
            # Find the existing entry for this URL, or create one.
            distribution = None
            for d in final_result[section]:
                if d['url'] == url:
                    distribution = d
                    break
            if not distribution:
                distribution = {
                    'url': url,
                    'previous': None,
                    'current': None
                }
                final_result[section].append(distribution)
            distribution['previous'] = distribution['current']
            distribution['current'] = {
                'rank': rank,
                'count': url_counts[url]
            }
            processed.append(url)
            rank += 1
        # Entries not seen this run drop to the tail with a zero count.
        for d in final_result[section]:
            if d['url'] not in processed:
                d['previous'] = d['current']
                d['current'] = {
                    'rank': rank,
                    'count': 0
                }
                rank += 1
        final_result[section].sort(key=lambda x: x['current']['rank'])
        with open("../../_data/rank.yaml", "w") as fh:
            fh.write(yaml.safe_dump(final_result, default_flow_style=False))
        print ('Done')
    else:
        print ('No results found')
def main():
    """Authenticate, then refresh the rankings for both site sections."""
    # Read-only access is all the report queries need.
    scope = 'https://www.googleapis.com/auth/analytics.readonly'
    key_file_location = '../../../secret.json'

    service = get_service(
        api_name='analytics',
        api_version='v3',
        scopes=[scope],
        key_file_location=key_file_location)

    profile_id = get_first_profile_id(service)
    # Same order as before: distributions first, then desktops.
    for section in ('distribution', 'desktop'):
        save_results(service, profile_id, section)


if __name__ == '__main__':
    main()
"repo_name": "opensourcefeed/opensourcefeed.github.io",
"path": "assets/pageview/fetch.py",
"copies": "1",
"size": "6095",
"license": "mit",
"hash": 3225534384809617000,
"line_mean": 32.679558011,
"line_max": 122,
"alpha_frac": 0.556685808,
"autogenerated": false,
"ratio": 4.265220433869839,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0024576360960789053,
"num_lines": 181
} |
"""A simple example of how to access the Google Analytics API."""
import argparse
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
import httplib2
from oauth2client import client
from oauth2client import file
from oauth2client import tools
import os
from . import googleaccount
def get_service(api_name, api_version, scope, key_file_location,
                service_account_email):
    """Build an authorized Google API service object.

    Args:
      api_name: The name of the api to connect to.
      api_version: The api version to connect to.
      scope: A list of auth scopes to authorize for the application.
      key_file_location: The path to a valid service account p12 key file.
      service_account_email: The service account email address.

    Returns:
      A service that is connected to the specified API.
    """
    # p12 service-account key -> scoped credentials -> authorized HTTP
    # channel, which the discovery client then uses for every request.
    credentials = ServiceAccountCredentials.from_p12_keyfile(
        service_account_email, key_file_location, scopes=scope)
    authorized_http = credentials.authorize(httplib2.Http())
    return build(api_name, api_version, http=authorized_http)
def get_first_profile_id(service):
    """Return the first view (profile) id under the user's first account
    and first web property, or None when any level has no items."""
    account_items = service.management().accounts().list().execute().get('items')
    if account_items:
        account = account_items[0].get('id')
        property_items = service.management().webproperties().list(
            accountId=account).execute().get('items')
        if property_items:
            property = property_items[0].get('id')
            profile_items = service.management().profiles().list(
                accountId=account,
                webPropertyId=property).execute().get('items')
            if profile_items:
                return profile_items[0].get('id')
    return None
#A lightly modified sample query from the original example, kept here for reference.
def get_results(service, profile_id):
    """Query pageviews over the past 30 days, split by the first two
    page-path levels."""
    request = service.data().ga().get(
        ids='ga:' + profile_id,
        start_date='30daysAgo',
        end_date='today',
        metrics='ga:pageviews',
        dimensions='ga:PagePathLevel1,ga:PagePathLevel2')
    return request.execute()
#Accepts a function and calls it against the API after setting up the required credentials and service object, so any analytics-API routine can be run through this single entry point.
def run(request_func):
    """Authorize against the Analytics API and invoke request_func.

    request_func is called as request_func(service, profile_id) and its
    result is returned.
    """
    # Read-only scope is sufficient for reporting queries.
    scope = ['https://www.googleapis.com/auth/analytics.readonly']

    # Use the developer console and replace the values with your
    # service account email and relative location of your key file.
    # service account in git ignored file
    service_account_email = googleaccount.ANALYTICS_ACCOUNT
    # The p12 key lives next to this module.
    module_dir = os.path.dirname(os.path.abspath(__file__))
    key_file_location = module_dir + '/googlekey.p12'

    service = get_service('analytics', 'v3', scope, key_file_location,
                          service_account_email)
    profile = get_first_profile_id(service)
    return request_func(service, profile)
| {
"repo_name": "jonfroehlich/makeabilitylabwebsite",
"path": "website/googleanalytics.py",
"copies": "1",
"size": "3442",
"license": "mit",
"hash": -8046027808778075000,
"line_mean": 31.4716981132,
"line_max": 162,
"alpha_frac": 0.7193492156,
"autogenerated": false,
"ratio": 4.039906103286385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017812564778049943,
"num_lines": 106
} |
"""A simple example of how to access the Google Analytics API."""
import argparse
from apiclient.discovery import build
import httplib2
from oauth2client import client
from oauth2client import file
from oauth2client import tools
def get_service(api_name, api_version, scope, client_secrets_path):
    """Get a service that communicates to a Google API.

    Args:
      api_name: string The name of the api to connect to.
      api_version: string The api version to connect to.
      scope: A list of strings representing the auth scopes to authorize
          for the connection.
      client_secrets_path: string A path to a valid client secrets file.

    Returns:
      A service that is connected to the specified API.
    """
    # Parse command-line arguments (an empty list, so defaults apply).
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        parents=[tools.argparser])
    flags = arg_parser.parse_args([])

    # Flow object used only when we must (re)authenticate interactively.
    flow = client.flow_from_clientsecrets(
        client_secrets_path, scope=scope,
        message=tools.message_if_missing(client_secrets_path))

    # Cached credentials live in '<api_name>.dat'.  When missing or
    # invalid, run the native client flow; the Storage object writes the
    # refreshed credentials back to that file on success.
    storage = file.Storage(api_name + '.dat')
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        credentials = tools.run_flow(flow, storage, flags)
    authorized_http = credentials.authorize(http=httplib2.Http())

    return build(api_name, api_version, http=authorized_http)
def main():
    """Authenticate with read-only Analytics scope and return the service."""
    read_only = ['https://www.googleapis.com/auth/analytics.readonly']
    return get_service('analytics', 'v3', read_only, 'client_secrets.json')


if __name__ == '__main__':
    main()
| {
"repo_name": "Felixaverlant/starter_kit_python_notebook_gapi",
"path": "auth.py",
"copies": "2",
"size": "1996",
"license": "mit",
"hash": 4952279847229344000,
"line_mean": 31.7213114754,
"line_max": 79,
"alpha_frac": 0.7284569138,
"autogenerated": false,
"ratio": 4.081799591002045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5810256504802045,
"avg_score": null,
"num_lines": null
} |
"""A simple example of how to access the Google Analytics API."""
import re
from datetime import date
import difflib
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
import httplib2
import json
import pywikibot
def get_service(api_name, api_version, scope, key_file_location,
                service_account_email):
    """Get a service that communicates to a Google API.

    Args:
      api_name: The name of the api to connect to.
      api_version: The api version to connect to.
      scope: A list of auth scopes to authorize for the application.
      key_file_location: Path to a JSON key file with a 'private_key' entry.
      service_account_email: The service account email address.

    Returns:
      A service that is connected to the specified API.
    """
    # The key file is JSON; the PEM private key sits under 'private_key'.
    with open(key_file_location, 'rb') as f:
        private_key = json.load(f)['private_key']

    credentials = SignedJwtAssertionCredentials(service_account_email,
                                                private_key, scope=scope)
    authorized_http = credentials.authorize(httplib2.Http())

    return build(api_name, api_version, http=authorized_http)
def get_first_profile_id(service):
    """Walk account -> web property -> profile and return the first
    profile id found, or None when any level is empty."""
    response = service.management().accounts().list().execute()
    account_list = response.get('items')
    if not account_list:
        return None
    account_id = account_list[0].get('id')

    response = service.management().webproperties().list(
        accountId=account_id).execute()
    property_list = response.get('items')
    if not property_list:
        return None
    property_id = property_list[0].get('id')

    response = service.management().profiles().list(
        accountId=account_id,
        webPropertyId=property_id).execute()
    profile_list = response.get('items')
    if not profile_list:
        return None
    return profile_list[0].get('id')
def filter_main_ns(results):
    """Keep up to 10 unique main-namespace page titles from analytics rows.

    Each row is [pageTitle, pagePath, ...].  A row is skipped when the path
    is not exactly '/w/<title>', the title sits in a non-main namespace
    (contains 'Name:...'), the page is a .php script, the title lacks the
    ' - Metakgp Wiki' suffix, or it was already collected.

    Args:
      results: iterable of rows, most relevant first.

    Returns:
      A list of at most 10 page titles (wiki suffix stripped), in order.
    """
    # Compiled once; raw strings also fix the invalid '\.' escape that the
    # original non-raw pattern '.*\.php' triggered warnings for.
    path_re = re.compile(r'^/w/([^/]+)$')
    namespace_re = re.compile(r'[a-zA-Z0-9_ ]+:.*')
    php_re = re.compile(r'.*\.php')
    title_re = re.compile(r'(.+) - Metakgp Wiki')

    pages = []
    for row in results:
        path_match = path_re.match(row[1])
        if not path_match:
            continue
        url_title = path_match.group(1)
        if namespace_re.match(url_title):
            # Pages like 'Special:...' or 'User:...' are not main-namespace.
            continue
        if php_re.match(url_title):
            continue
        title_match = title_re.match(row[0])
        if not title_match:
            continue
        title = title_match.group(1)
        if title in pages:
            continue
        pages.append(title)
        if len(pages) == 10:
            break
    return pages
def get_popular_pages(service, profile_id):
    """Top main-namespace pages by unique pageviews over the last 90 days."""
    query = service.data().ga().get(
        ids='ga:' + profile_id,
        start_date='90daysAgo',
        end_date='today',
        metrics='ga:uniquePageviews',
        dimensions='ga:pageTitle, ga:pagePath',
        sort='-ga:uniquePageviews',
        max_results=50)
    response = query.execute()
    return filter_main_ns(response['rows'])
def get_trending_pages(service, profile_id):
    """Top main-namespace landing pages by entrances over the last 7 days."""
    query = service.data().ga().get(
        ids='ga:' + profile_id,
        start_date='7daysAgo',
        end_date='today',
        metrics='ga:entrances',
        dimensions='ga:pageTitle, ga:landingPagePath',
        sort='-ga:entrances',
        max_results=50)
    response = query.execute()
    return filter_main_ns(response['rows'])
def update_list_of_pages(template, pages):
    """Rewrite a wiki template page with a bulleted list of page links.

    Skips the edit when the generated wikitext already matches the live
    template, so no-op edits don't clutter the page history.

    Args:
      template: wiki page name, e.g. 'Template:Popular_pages'.
      pages: list of page titles to link.
    """
    template_page = pywikibot.Page(pywikibot.Link(template), pywikibot.Site())
    text = " <noinclude>This page is automatically generated. Changes will be overwritten, so '''do not modify'''.</noinclude>\n"
    for p in pages:
        text += "*[[%s]]\n" % p
    text = text.rstrip()
    # Messages are pre-formatted into a single string so print works the
    # same under Python 2 (print statement) and Python 3 (print function);
    # the original multi-argument print statements were Python-2-only.
    if template_page.text == text:
        print('%s unchanged, no edit made.' % template)
        return
    else:
        print('%s changed:' % template)
        print(text)
    template_page.text = text
    template_page.save('Updated on ' +
                       date.today().strftime('%B %d, %Y'))
def main():
    """Authenticate and refresh the popular/trending page templates."""
    scope = ['https://www.googleapis.com/auth/analytics.readonly']

    # Use the developer console and replace the values with your
    # service account email and relative location of your key file.
    service_account_email = '225416695729-s8pjufb10pkgp269bbvf1hsdimcnmpbn@developer.gserviceaccount.com'
    key_file_location = 'ga_credentials.json'

    service = get_service('analytics', 'v3', scope, key_file_location,
                          service_account_email)
    profile = get_first_profile_id(service)

    # Popular pages first, then trending -- same order of wiki edits.
    update_list_of_pages('Template:Popular_pages',
                         get_popular_pages(service, profile))
    update_list_of_pages('Template:Trending_pages',
                         get_trending_pages(service, profile))


if __name__ == '__main__':
    main()
| {
"repo_name": "icyflame/batman",
"path": "scripts/updatestatistics.py",
"copies": "1",
"size": "5295",
"license": "mit",
"hash": -4940328026249490000,
"line_mean": 30.1470588235,
"line_max": 129,
"alpha_frac": 0.6203966006,
"autogenerated": false,
"ratio": 3.862144420131291,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4982541020731291,
"avg_score": null,
"num_lines": null
} |
"""A simple example of how to use IPython.config.application.Application.
This should serve as a simple example that shows how the IPython config
system works. The main classes are:
* IPython.config.configurable.Configurable
* IPython.config.configurable.SingletonConfigurable
* IPython.config.loader.Config
* IPython.config.application.Application
To see the command line option help, run this program from the command line::
$ python appconfig.py -h
To make one of your classes configurable (from the command line and config
files) inherit from Configurable and declare class attributes as traits (see
classes Foo and Bar below). To make the traits configurable, you will need
to set the following options:
* ``config``: set to ``True`` to make the attribute configurable.
* ``shortname``: by default, configurable attributes are set using the syntax
"Classname.attributename". At the command line, this is a bit verbose, so
we allow "shortnames" to be declared. Setting a shortname is optional, but
when you do this, you can set the option at the command line using the
syntax: "shortname=value".
* ``help``: set the help string to display a help message when the ``-h``
option is given at the command line. The help string should be valid ReST.
When the config attribute of an Application is updated, it will fire all of
the trait's events for all of the config=True attributes.
"""
from IPython.config.configurable import Configurable
from IPython.config.application import Application
from IPython.utils.traitlets import (
Bool, Unicode, Int, List, Dict
)
class Foo(Configurable):
    """A class that has configurable, typed attributes.
    """
    # Each trait is config=True, so it can be set from a config file or --
    # via the aliases declared on MyApp -- from the command line.
    i = Int(0, config=True, help="The integer i.")
    j = Int(1, config=True, help="The integer j.")
    name = Unicode(u'Brian', config=True, help="First name.")
class Bar(Configurable):
    # Single boolean trait, toggled from the command line through the
    # --enable/--disable flags defined on MyApp.
    enabled = Bool(True, config=True, help="Enable bar.")
class MyApp(Application):
    # Demo application wiring Foo and Bar into IPython's config system.
    name = Unicode(u'myapp')
    running = Bool(False, config=True,
                   help="Is the app running?")
    classes = List([Bar, Foo])
    config_file = Unicode(u'', config=True,
                          help="Load this config file")

    # Short command-line names for fully-qualified trait names.
    aliases = Dict({
        'i': 'Foo.i',
        'j': 'Foo.j',
        'name': 'Foo.name',
        'running': 'MyApp.running',
        'enabled': 'Bar.enabled',
        'log_level': 'MyApp.log_level',
    })

    # Boolean switches: flag name -> (config fragment, help text).
    flags = Dict({
        'enable': ({'Bar': {'enabled': True}}, "Enable Bar"),
        'disable': ({'Bar': {'enabled': False}}, "Disable Bar"),
        'debug': ({'MyApp': {'log_level': 10}}, "Set loglevel to DEBUG"),
    })

    def init_foo(self):
        # Pass our config down so Foo picks up the configured trait values.
        self.foo = Foo(config=self.config)

    def init_bar(self):
        # Pass our config down so Bar picks up the configured trait values.
        self.bar = Bar(config=self.config)

    def initialize(self, argv=None):
        self.parse_command_line(argv)
        if self.config_file:
            self.load_config_file(self.config_file)
        self.init_foo()
        self.init_bar()

    def start(self):
        print("app.config:")
        print(self.config)
def main():
    """Create MyApp, configure it from the command line, and start it."""
    application = MyApp()
    application.initialize()
    application.start()


if __name__ == "__main__":
    main()
| {
"repo_name": "OSGeo-live/CesiumWidget",
"path": "GSOC/notebooks/ipython/examples/Customization/appconfig.py",
"copies": "3",
"size": "3290",
"license": "apache-2.0",
"hash": 5892795823012296000,
"line_mean": 32.2323232323,
"line_max": 85,
"alpha_frac": 0.6665653495,
"autogenerated": false,
"ratio": 3.9495798319327733,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6116145181432774,
"avg_score": null,
"num_lines": null
} |
"""A simple example of Pyglet/Twisted integration. A Pyglet window
is displayed, and both Pyglet and Twisted are making scheduled calls
and regular intervals. Interacting with the window doesn't interfere
with either calls.
"""
import pyglet
from pigtwist import pygletreactor
pygletreactor.install() # <- this must come before...
from twisted.internet import reactor, task # <- ...importing this reactor!

# Create a Pyglet window with a simple centered message.
window = pyglet.window.Window()
label = pyglet.text.Label('hello world',
                          x = window.width / 2,
                          y = window.height / 2,
                          anchor_x = 'center',
                          anchor_y = 'center')

@window.event
def on_draw():
    window.clear()
    label.draw()

@window.event
def on_close():
    # Stop Twisted from its own thread, hence callFromThread.
    reactor.callFromThread(reactor.stop)
    # Return true to ensure that no other handlers
    # on the stack receive the on_close event
    return True

# Schedule a function call in Pyglet.  Prints use single-argument call
# syntax so the script runs under both Python 2 and Python 3 (the original
# bare print statements were Python-2-only).
def runEverySecondPyglet(dt):
    print("pyglet call: one second has passed")

pyglet.clock.schedule_interval(runEverySecondPyglet, 1)

# Schedule a function call in Twisted
def runEverySecondTwisted():
    print("twisted call: 1.5 seconds have passed")

l = task.LoopingCall(runEverySecondTwisted)
l.start(1.5)

# Start the reactor
reactor.run()
| {
"repo_name": "padraigkitterick/pyglet-twisted",
"path": "examples/hello_twisted.py",
"copies": "1",
"size": "1357",
"license": "mit",
"hash": 2563591969172420600,
"line_mean": 28.5,
"line_max": 74,
"alpha_frac": 0.6853352985,
"autogenerated": false,
"ratio": 3.8882521489971347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9890244025270608,
"avg_score": 0.03666868444530525,
"num_lines": 46
} |
"""A simple example of sending control commands to the drone via qtdronequi.
The qtdronegui program can accept control commands for the drone over the
network. It listens on UDP port 5560 for JSON formatted control commands.
This program serves as an example of communicating with the drone.
"""
# Import the standard library modules which we use
import json # for converting the control state into a string to send to the GUI
import socket # for sending the string to the GUI
import time # for sleeping for a set period of time
"""A global socket object which can be used to send commands to the GUI program."""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
"""A global sequence counter. The GUI uses this to determine if two commands
have been received in the wrong order: the command with the largest (latest)
sequence will always 'win'."""
seq = 0
def main():
    """The main entry point of the program. Called by the bootstrap code at
    the bottom of the file.
    """
    # Neutral control state: zeroed attitude/gas, no buttons, hover on.
    state = {
        'roll': 0.0,
        'pitch': 0.0,
        'yaw': 0.0,
        'gas': 0.0,
        'take_off': False,
        'reset': False,
        'hover': True,
    }

    # Reset the drone, then give it 5 seconds to come back up.
    state = press(state, 'reset')
    time.sleep(5)

    # Take off and fly for 10 seconds.
    state = press(state, 'take_off')
    time.sleep(10)

    # Pressing take_off again (paradoxically) lands the drone.
    state = press(state, 'take_off')
def send_state(state):
    """Send the state dictionary to the drone GUI.

    state is a dictionary with (at least) the keys roll, pitch, yaw, gas,
    take_off, reset and hover.  The first four are floating point values on
    the interval [-1,1] specifying the corresponding attitude
    angle/vertical speed; the last three are True/False button states.
    """
    global seq, sock
    seq += 1
    HOST, PORT = ('127.0.0.1', 5560)
    payload = json.dumps({'seq': seq, 'state': state})
    # Build the log line as one string (the original multi-argument
    # print(...) rendered as a tuple on Python 2) and encode the datagram
    # so sendto() also works on Python 3, where it requires bytes.
    print('state is ' + payload)
    sock.sendto(payload.encode('utf-8'), (HOST, PORT))
def press(state, button):
    """Simulate pressing a button.

    state is the current state of the controls.
    button is a string which is one of: 'reset' or 'take_off'.

    Sets state[button] True, sends the state, waits half a second, then
    releases the button and sends again.

    This function returns the new state, supporting the idiomatic use::

        state = press(state, 'reset')
        state = press(state, 'take_off')

    BUGFIX: the original omitted the return statement even though its
    docstring promised one, so ``state = press(state, ...)`` set state to
    None and the next press() crashed.

    FIXME: No attempt to *verify* the value of button is made.
    """
    # Press the button
    state[button] = True
    send_state(state)

    # Wait
    time.sleep(0.5)

    # Release the button
    state[button] = False
    send_state(state)

    return state
if __name__ == '__main__':
    # Run the demo flight sequence when executed as a script.
    main()
| {
"repo_name": "rc500/ardrone_archive_aarons_laptop",
"path": "examples/take_off_and_land_corrected.py",
"copies": "1",
"size": "3017",
"license": "apache-2.0",
"hash": 4533511735225645000,
"line_mean": 25.4649122807,
"line_max": 83,
"alpha_frac": 0.6794829301,
"autogenerated": false,
"ratio": 3.670316301703163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9717030484046816,
"avg_score": 0.02655374955126946,
"num_lines": 114
} |
# A simple example of using parser combinators to build an arithmetic
# expression parser.
from pcomb import *
## Actions
def digits_to_number(digits, running=0):
    """Fold a sequence of digit characters into an integer.

    running is the value accumulated so far; it lets a caller continue a
    previously started conversion (defaults to 0).
    """
    total = running
    for digit in digits:
        total = total * 10 + int(digit)
    return total
def unary_to_number(n):
    """Apply an optional leading minus sign.

    n is a pair [sign, value] where sign is None (no minus parsed) or the
    matched '-' token.
    """
    sign, value = n[0], n[1]
    return value if sign is None else -value
def eval_add(lst):
    """Evaluate a parsed addition expression.

    The parser yields [number, [[op, number], [op, number], ...]]; start
    from the first number and apply each (op, operand) pair left to right,
    adding for '+' and subtracting otherwise.
    """
    total = lst[0]
    for op, operand in lst[1]:
        if op == '+':
            total += operand
        else:
            total -= operand
    return total
def eval_mult(lst):
    """Evaluate a parsed multiplication expression.

    Same scheme as eval_add, but the operators are '*' (multiply) and
    anything else (divide).
    """
    total = lst[0]
    for op, operand in lst[1]:
        total = total * operand if op == '*' else total / operand
    return total
## The Grammar
# (comments corrected to match the code below: addition binds looser than
#  multiplication, and 'expr' is just an alias for 'add_expr')
# expr       : add_expr
# add_expr   : mult_expr ( ( '+' | '-' ) mult_expr )*
# mult_expr  : unary_expr ( ( '*' | '/' ) unary_expr )*
# unary_expr : ( '-' )? simple
# simple     : number | parens
# parens     : '(' expr ')'
# number     : digit+
digit = Parser.match(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
number = Action(digit.many(1), digits_to_number)
# 'expr' is looked up lazily by name so parens can reference it before it
# exists (recursive grammar).
parens = Action(Parser.match(['(']) & Reference('expr') & Parser.match([')']),
                lambda result: result[1])
simple = number | parens
unary_expr = Action(Parser.match(['-']).opt() & simple, unary_to_number)
mult_expr = Action(unary_expr & (Parser.match(['*', '/']) & unary_expr).many(), eval_mult)
add_expr = Action(mult_expr & (Parser.match(['-', '+']) & mult_expr).many(), eval_add)
expr = add_expr
Reference.register_named_parser('expr', add_expr)
inp = StringParserInput("1+2*(3+5*4)*(6+7)")
print(expr.parse(inp).output)
| {
"repo_name": "MarkChuCarroll/pcomb",
"path": "python/calc.py",
"copies": "1",
"size": "2172",
"license": "apache-2.0",
"hash": 3265189637571956000,
"line_mean": 27.96,
"line_max": 91,
"alpha_frac": 0.6174033149,
"autogenerated": false,
"ratio": 3.2369597615499255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4354363076449926,
"avg_score": null,
"num_lines": null
} |
""" A simple example of using the workbench window. """
# Enthought library imports.
from enthought.pyface.action.api import Action, MenuManager
from enthought.pyface.workbench.api import EditorManager, WorkbenchWindow
from enthought.pyface.workbench.api import Perspective, PerspectiveItem
from enthought.pyface.workbench.action.api import MenuBarManager
from enthought.pyface.workbench.action.api import ToolBarManager
from enthought.pyface.workbench.action.api import ViewMenuManager
from enthought.traits.api import Callable, List, Instance
# Local imports.
from black_view import BlackView
from blue_view import BlueView
from green_view import GreenView
from red_view import RedView
from yellow_view import YellowView
from person import Person
class ExampleEditorManager(EditorManager):
    """ An editor manager that supports the editor memento protocol. """

    #######################################################################
    # 'IEditorManager' interface.
    #######################################################################

    def get_editor_memento(self, editor):
        """ Return the state of the editor contents. """
        # Only the plain data attributes are needed to rebuild the editor.
        edited = editor.obj
        return (edited.name, edited.age)

    def set_editor_memento(self, memento):
        """ Restore an editor from a memento and return it. """
        # Rebuild the data object from the saved (name, age) pair and open
        # a fresh editor on it.
        saved_name, saved_age = memento
        restored = Person(name=saved_name, age=saved_age)
        return self.create_editor(self.window, restored, None)
class ExampleWorkbenchWindow(WorkbenchWindow):
    """ A simple example of using the workbench window. """
    #### 'WorkbenchWindow' interface ##########################################
    # The available perspectives (named view layouts the user can switch
    # between).
    perspectives = [
        Perspective(
            name = 'Foo',
            contents = [
                PerspectiveItem(id='Black', position='bottom', height=0.1),
                PerspectiveItem(id='Debug', position='left', width=0.25)
            ]
        ),
        Perspective(
            name = 'Bar',
            contents = [
                PerspectiveItem(id='Black', position='top'),
                PerspectiveItem(id='Blue', position='bottom'),
                PerspectiveItem(id='Green', position='left'),
                PerspectiveItem(id='Red', position='right'),
                PerspectiveItem(id='Debug', position='left')
            ]
        )
    ]
    #### 'ExampleWorkbenchWindow' interface ###################################
    # The view factories (callables that build the window's views).
    #
    # fixme: This should be part of the standard 'WorkbenchWindow'!
    view_factories = List(Callable)
    #### Private interface ####################################################
    # The Exit action.
    _exit_action = Instance(Action)
    # The New Person action.
    _new_person_action = Instance(Action)
    ###########################################################################
    # 'ApplicationWindow' interface.
    ###########################################################################
    #### Trait initializers ###################################################
    def _editor_manager_default(self):
        """ Trait initializer.
        Here we return the replacement editor manager.
        """
        return ExampleEditorManager()
    def _menu_bar_manager_default(self):
        """ Trait initializer. """
        # The File menu holds the actions declared below; the View menu is
        # the standard workbench menu for showing/hiding views.
        file_menu = MenuManager(
            self._new_person_action, self._exit_action,
            name='&File', id='FileMenu'
        )
        view_menu = ViewMenuManager(name='&View', id='ViewMenu', window=self)
        return MenuBarManager(file_menu, view_menu, window=self)
    def _tool_bar_managers_default(self):
        """ Trait initializer. """
        # Add multiple (albeit identical!) tool bars just to show that it is
        # allowed!
        tool_bar_managers = [
            ToolBarManager(
                self._exit_action, show_tool_names = False, name=str(i)
            )
            for i in range(5)
        ]
        return tool_bar_managers
    ###########################################################################
    # 'WorkbenchWindow' interface.
    ###########################################################################
    #### Trait initializers ###################################################
    def _view_factories_default(self):
        """ Trait initializer. """
        # Imported locally to keep the debug view optional at module load.
        from enthought.pyface.workbench.debug.api import DebugView
        return [DebugView, BlackView, BlueView, GreenView, RedView, YellowView]
    def _views_default(self):
        """ Trait initializer. """
        # Using an initializer makes sure that every window instance gets its
        # own view instances (which is necessary since each view has a
        # reference to its toolkit-specific control etc.).
        return [factory(window=self) for factory in self.view_factories]
    ###########################################################################
    # Private interface.
    ###########################################################################
    def __exit_action_default(self):
        """ Trait initializer. """
        return Action(name='E&xit', on_perform=self.workbench.exit)
    def __new_person_action_default(self):
        """ Trait initializer. """
        return Action(name='New Person', on_perform=self._new_person)
    def _new_person(self):
        """ Create a new person. """
        # Open an editor on a fresh Person with placeholder values.
        from person import Person
        self.workbench.edit(Person(name='New', age=100))
        return
#### EOF ######################################################################
| {
"repo_name": "enthought/traitsgui",
"path": "examples/workbench/example_workbench_window.py",
"copies": "1",
"size": "5712",
"license": "bsd-3-clause",
"hash": -70281229219575144,
"line_mean": 32.2093023256,
"line_max": 79,
"alpha_frac": 0.5280112045,
"autogenerated": false,
"ratio": 4.720661157024794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026905684754521967,
"num_lines": 172
} |
""" A simple example of using the workbench window. """
# Enthought library imports.
from pyface.action.api import Action, MenuManager
from pyface.workbench.api import EditorManager, WorkbenchWindow
from pyface.workbench.api import Perspective, PerspectiveItem
from pyface.workbench.action.api import MenuBarManager
from pyface.workbench.action.api import ToolBarManager
from pyface.workbench.action.api import ViewMenuManager
from traits.api import Callable, List, Instance
# Local imports.
from black_view import BlackView
from blue_view import BlueView
from green_view import GreenView
from red_view import RedView
from yellow_view import YellowView
from person import Person
class ExampleEditorManager(EditorManager):
    """ An editor manager that supports the editor memento protocol. """

    #######################################################################
    # 'IEditorManager' interface.
    #######################################################################

    def get_editor_memento(self, editor):
        """ Return the state of the editor contents. """
        # Only the plain data attributes are needed to rebuild the editor.
        edited = editor.obj
        return (edited.name, edited.age)

    def set_editor_memento(self, memento):
        """ Restore an editor from a memento and return it. """
        # Rebuild the data object from the saved (name, age) pair and open
        # a fresh editor on it.
        saved_name, saved_age = memento
        restored = Person(name=saved_name, age=saved_age)
        return self.create_editor(self.window, restored, None)
class ExampleWorkbenchWindow(WorkbenchWindow):
    """ A simple example of using the workbench window. """
    #### 'WorkbenchWindow' interface ##########################################
    # The available perspectives (named view layouts the user can switch
    # between).
    perspectives = [
        Perspective(
            name = 'Foo',
            contents = [
                PerspectiveItem(id='Black', position='bottom', height=0.1),
                PerspectiveItem(id='Debug', position='left', width=0.25)
            ]
        ),
        Perspective(
            name = 'Bar',
            contents = [
                PerspectiveItem(id='Black', position='top'),
                PerspectiveItem(id='Blue', position='bottom'),
                PerspectiveItem(id='Green', position='left'),
                PerspectiveItem(id='Red', position='right'),
                PerspectiveItem(id='Debug', position='left')
            ]
        )
    ]
    #### 'ExampleWorkbenchWindow' interface ###################################
    # The view factories (callables that build the window's views).
    #
    # fixme: This should be part of the standard 'WorkbenchWindow'!
    view_factories = List(Callable)
    #### Private interface ####################################################
    # The Exit action.
    _exit_action = Instance(Action)
    # The New Person action.
    _new_person_action = Instance(Action)
    ###########################################################################
    # 'ApplicationWindow' interface.
    ###########################################################################
    #### Trait initializers ###################################################
    def _editor_manager_default(self):
        """ Trait initializer.
        Here we return the replacement editor manager.
        """
        return ExampleEditorManager()
    def _menu_bar_manager_default(self):
        """ Trait initializer. """
        # The File menu holds the actions declared below; the View menu is
        # the standard workbench menu for showing/hiding views.
        file_menu = MenuManager(
            self._new_person_action, self._exit_action,
            name='&File', id='FileMenu'
        )
        view_menu = ViewMenuManager(name='&View', id='ViewMenu', window=self)
        return MenuBarManager(file_menu, view_menu, window=self)
    def _tool_bar_managers_default(self):
        """ Trait initializer. """
        # Add multiple (albeit identical!) tool bars just to show that it is
        # allowed!
        tool_bar_managers = [
            ToolBarManager(
                self._exit_action, show_tool_names = False, name=str(i)
            )
            for i in range(5)
        ]
        return tool_bar_managers
    ###########################################################################
    # 'WorkbenchWindow' interface.
    ###########################################################################
    #### Trait initializers ###################################################
    def _view_factories_default(self):
        """ Trait initializer. """
        # Imported locally to keep the debug view optional at module load.
        from pyface.workbench.debug.api import DebugView
        return [DebugView, BlackView, BlueView, GreenView, RedView, YellowView]
    def _views_default(self):
        """ Trait initializer. """
        # Using an initializer makes sure that every window instance gets its
        # own view instances (which is necessary since each view has a
        # reference to its toolkit-specific control etc.).
        return [factory(window=self) for factory in self.view_factories]
    ###########################################################################
    # Private interface.
    ###########################################################################
    def __exit_action_default(self):
        """ Trait initializer. """
        return Action(name='E&xit', on_perform=self.workbench.exit)
    def __new_person_action_default(self):
        """ Trait initializer. """
        return Action(name='New Person', on_perform=self._new_person)
    def _new_person(self):
        """ Create a new person. """
        # Open an editor on a fresh Person with placeholder values.
        from person import Person
        self.workbench.edit(Person(name='New', age=100))
        return
#### EOF ######################################################################
| {
"repo_name": "geggo/pyface",
"path": "examples/workbench/example_workbench_window.py",
"copies": "5",
"size": "5632",
"license": "bsd-3-clause",
"hash": 8576807347832996000,
"line_mean": 31.7441860465,
"line_max": 79,
"alpha_frac": 0.5227272727,
"autogenerated": false,
"ratio": 4.780984719864176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026905684754521967,
"num_lines": 172
} |
""" A simple example on how to use the GaussianProcess class
in pybrain, for one and two dimensions. """
__author__ = "Thomas Rueckstiess, ruecksti@in.tum.de"
from pybrain.auxiliary import GaussianProcess
from pybrain.datasets import SupervisedDataSet
from scipy import mgrid, sin, cos, array, ravel
from pylab import show, figure
# NOTE(review): this script uses Python 2 print statements below — it will
# not run under Python 3 as-is.
# --- 1-D example: fit a GP to two hand-placed samples.
ds = SupervisedDataSet(1, 1)
gp = GaussianProcess(indim=1, start=-3, stop=3, step=0.05)
figure()
x = mgrid[-3:3:0.2]
# NOTE(review): y and z below are computed but never used in this 1-D
# section (x, y, z are all reassigned for the 2-D example further down).
y = 0.1*x**2 + x + 1
z = sin(x) + 0.5*cos(y)
ds.addSample(-2.5, -1)
ds.addSample(-1.0, 3)
gp.mean = 0
# new feature "autonoise" adds uncertainty to data depending on
# its distance to other points in the dataset. not tested much yet.
# gp.autonoise = True
gp.trainOnDataset(ds)
gp.plotCurves(showSamples=True)
# you can also test the gp on single points, but this deletes the
# original testing grid. it can be restored with a call to _buildGrid()
print gp.testOnArray(array([[0.4]]))
# --- example on how to use the GP in 2 dimensions
ds = SupervisedDataSet(2,1)
gp = GaussianProcess(indim=2, start=0, stop=5, step=0.25)
figure()
x,y = mgrid[0:5:4j, 0:5:4j]
z = cos(x)*sin(y)
# Flatten the meshgrid arrays and feed each (x, y) -> z sample to the GP.
(x, y, z) = map(ravel, [x, y, z])
for i,j,k in zip(x, y, z):
    ds.addSample([i, j], [k])
print "preparing plots. this can take a few seconds..."
gp.trainOnDataset(ds)
gp.plotCurves()
show() | {
"repo_name": "daanwierstra/pybrain",
"path": "examples/gaussprocess/gp.py",
"copies": "1",
"size": "1351",
"license": "bsd-3-clause",
"hash": -1303602226407199200,
"line_mean": 25,
"line_max": 71,
"alpha_frac": 0.6861584012,
"autogenerated": false,
"ratio": 2.6752475247524754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3861405925952475,
"avg_score": null,
"num_lines": null
} |
"""A simple example to demonstrate a thing with multiple ports. The
thing samples values from a sensor and sends them on different output
ports depending on the divisibility of the value. See docs/ports.rst
for a more detailed explanation.
"""
import random
import asyncio
from thingflow.base import OutputThing, InputThing, Scheduler,\
SensorAsOutputThing
class MultiPortOutputThing(OutputThing, InputThing):
    """Filter that routes each (rounded) incoming value to one or more of
    three output ports, chosen by divisibility by two and three.
    """
    def __init__(self, previous_in_chain):
        super().__init__(ports=['divisible_by_two', 'divisible_by_three',
                                'other'])
        # connect to the previous filter
        self.disconnect_from_upstream = previous_in_chain.connect(self)

    def on_next(self, x):
        rounded = int(round(x.val))
        by_two = (rounded % 2) == 0
        by_three = (rounded % 3) == 0
        if by_two:
            self._dispatch_next(rounded, port='divisible_by_two')
        if by_three:
            self._dispatch_next(rounded, port='divisible_by_three')
        if not (by_two or by_three):
            self._dispatch_next(rounded, port='other')

    def on_completed(self):
        # Completion must be propagated on every output port.
        for port in ('divisible_by_two', 'divisible_by_three', 'other'):
            self._dispatch_completed(port=port)

    def on_error(self, e):
        # Errors must be propagated on every output port.
        for port in ('divisible_by_two', 'divisible_by_three', 'other'):
            self._dispatch_error(e, port=port)

    def __repr__(self):
        return 'MultiPortOutputThing()'
class RandomSensor:
    """Fake sensor that yields Gaussian-distributed readings.

    If stop_after_events is given, sample() raises StopIteration after
    that many readings; otherwise readings are produced forever.
    """
    def __init__(self, sensor_id, mean=100.0, stddev=20.0, stop_after_events=None):
        self.sensor_id = sensor_id
        self.mean = mean
        self.stddev = stddev
        self.stop_after_events = stop_after_events
        def sample_stream():
            emitted = 0
            # None means "no limit" -- emit forever.
            while stop_after_events is None or emitted < stop_after_events:
                yield random.gauss(mean, stddev)
                emitted += 1
        self.generator = sample_stream()

    def sample(self):
        return next(self.generator)

    def __repr__(self):
        if self.stop_after_events is None:
            return 'RandomSensor(%s, mean=%s, stddev=%s)' % \
                (self.sensor_id, self.mean, self.stddev)
        return 'RandomSensor(%s, mean=%s, stddev=%s, stop_after_events=%s)' % \
            (self.sensor_id, self.mean, self.stddev, self.stop_after_events)
# Wire up the demo pipeline: sensor -> multi-port splitter -> three printers.
scheduler = Scheduler(asyncio.get_event_loop())
sensor = SensorAsOutputThing(RandomSensor(1, mean=10, stddev=5,
                                          stop_after_events=10))
mtthing = MultiPortOutputThing(sensor)
# Each lambda listens on one of the splitter's output ports.
mtthing.connect(lambda v: print("even: %s" % v),
                port_mapping=('divisible_by_two', 'default'))
mtthing.connect(lambda v: print("divisible by three: %s" % v),
                port_mapping=('divisible_by_three', 'default'))
mtthing.connect(lambda v: print("not divisible: %s" % v),
                port_mapping=('other', 'default'))
mtthing.print_downstream()
# Sample the sensor repeatedly until its generator is exhausted.
scheduler.schedule_recurring(sensor)
scheduler.run_forever()
| {
"repo_name": "mpi-sws-rse/thingflow-python",
"path": "examples/multi_port_example.py",
"copies": "1",
"size": "3182",
"license": "apache-2.0",
"hash": 8192177086743224000,
"line_mean": 38.2839506173,
"line_max": 83,
"alpha_frac": 0.6008799497,
"autogenerated": false,
"ratio": 3.8062200956937797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9791204770587891,
"avg_score": 0.023179054961177795,
"num_lines": 81
} |
"""A simple example to demonstrate publishing on multiple topics. The
publisher samples values from a sensor and publishes them on different output
ports depending on the divisibility of the value. See docs/pubsub-topics.rst
for a more detailed explanation.
"""
import random
import asyncio
from antevents.base import Publisher, DirectPublisherMixin, Scheduler, FatalError
class MultiTopicPublisher(Publisher, DirectPublisherMixin):
    """Publisher that samples a sensor and routes each (rounded) value to
    one or more of three topics, chosen by divisibility by two and three.
    """
    def __init__(self, sensor):
        super().__init__(topics=['divisible_by_two', 'divisible_by_three',
                                 'other'])
        self.sensor = sensor

    def _observe(self):
        topics = ('divisible_by_two', 'divisible_by_three', 'other')
        try:
            val = int(round(self.sensor.sample()))
            by_two = (val % 2) == 0
            by_three = (val % 3) == 0
            if by_two:
                self._dispatch_next(val, topic='divisible_by_two')
            if by_three:
                self._dispatch_next(val, topic='divisible_by_three')
            if not (by_two or by_three):
                self._dispatch_next(val, topic='other')
        except FatalError:
            # Fatal errors must not be swallowed.
            raise
        except StopIteration:
            # Sensor exhausted: signal completion on every topic.
            for topic in topics:
                self._dispatch_completed(topic=topic)
        except Exception as e:
            # Any other failure is reported on every topic.
            for topic in topics:
                self._dispatch_error(e, topic=topic)

    def __repr__(self):
        return 'MultiTopicPublisher()'
class RandomSensor:
    """Fake sensor that yields Gaussian-distributed readings.

    If stop_after_events is given, sample() raises StopIteration after
    that many readings; otherwise readings are produced forever.
    """
    def __init__(self, sensor_id, mean=100.0, stddev=20.0, stop_after_events=None):
        self.sensor_id = sensor_id
        self.mean = mean
        self.stddev = stddev
        self.stop_after_events = stop_after_events
        def sample_stream():
            emitted = 0
            # None means "no limit" -- emit forever.
            while stop_after_events is None or emitted < stop_after_events:
                yield random.gauss(mean, stddev)
                emitted += 1
        self.generator = sample_stream()

    def sample(self):
        return next(self.generator)

    def __repr__(self):
        if self.stop_after_events is None:
            return 'RandomSensor(%s, mean=%s, stddev=%s)' % \
                (self.sensor_id, self.mean, self.stddev)
        return 'RandomSensor(%s, mean=%s, stddev=%s, stop_after_events=%s)' % \
            (self.sensor_id, self.mean, self.stddev, self.stop_after_events)
# Wire up the demo: sensor -> multi-topic publisher -> three printers.
scheduler = Scheduler(asyncio.get_event_loop())
sensor = RandomSensor(1, mean=10, stddev=5, stop_after_events=10)
pub = MultiTopicPublisher(sensor)
# Each lambda subscribes to one of the publisher's topics.
pub.subscribe(lambda v: print("even: %s" % v),
              topic_mapping=('divisible_by_two', 'default'))
pub.subscribe(lambda v: print("divisible by three: %s" % v),
              topic_mapping=('divisible_by_three', 'default'))
pub.subscribe(lambda v: print("not divisible: %s" % v),
              topic_mapping=('other', 'default'))
# Sample the sensor repeatedly until its generator is exhausted.
scheduler.schedule_recurring(pub)
scheduler.run_forever()
| {
"repo_name": "mpi-sws-rse/antevents-python",
"path": "examples/multi_topic_publisher.py",
"copies": "1",
"size": "3121",
"license": "apache-2.0",
"hash": 3342870508171240400,
"line_mean": 38.5063291139,
"line_max": 83,
"alpha_frac": 0.5966036527,
"autogenerated": false,
"ratio": 3.915934755332497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5012538408032496,
"avg_score": null,
"num_lines": null
} |
""" A simple example to show how to access a 3D numpy array. One
example shows how to access the numpy array using blitz type
converters and the other shows how it can be done without using blitz
by accessing the numpy array data directly.
"""
from __future__ import absolute_import, print_function
import scipy.weave as weave
from scipy.weave import converters
import numpy
def create_array():
    """Creates a simple 3D numpy array with unique values at each
    location in the matrix.

    The values count up 0, 1, 2, ... in row-major (C) order, which is
    exactly what the original triple loop produced.
    """
    rows, cols, depth = 2, 3, 4
    return numpy.arange(rows * cols * depth, dtype='i').reshape(rows, cols, depth)
def pure_inline(arr):
    """Prints the given 3D array by accessing the raw numpy data and
    without using blitz converters.

    Notice the following:
      1. '\\n' to escape generating a newline in the C++ code.
      2. rows, cols = Narr[0], Narr[1].
      3. Array access using arr[(i*cols + j)*depth + k]
         (manual row-major index arithmetic on the flat data pointer).
    """
    # weave exposes the array's shape as the C array Narr[] and its flat
    # data as the pointer 'arr'.
    code = """
           int rows = Narr[0];
           int cols = Narr[1];
           int depth = Narr[2];
           for (int i=0; i < rows; i++)
           {
               for (int j=0; j < cols; j++)
               {
                   printf("img[%3d][%3d]=", i, j);
                   for (int k=0; k< depth; ++k)
                   {
                       printf(" %3d", arr[(i*cols + j)*depth + k]);
                   }
                   printf("\\n");
               }
           }
           """
    weave.inline(code, ['arr'])
def blitz_inline(arr):
    """Prints the given 3D array by using blitz converters which
    provides a numpy-like syntax for accessing the numpy data.

    Notice the following:
      1. '\\n' to escape generating a newline in the C++ code.
      2. rows, cols = Narr[0], Narr[1].
      3. Array access using arr(i, j, k) -- blitz does the index
         arithmetic that pure_inline performs by hand.
    """
    code = """
           int rows = Narr[0];
           int cols = Narr[1];
           int depth = Narr[2];
           for (int i=0; i < rows; i++)
           {
               for (int j=0; j < cols; j++)
               {
                   printf("img[%3d][%3d]=", i, j);
                   for (int k=0; k< depth; ++k)
                   {
                       printf(" %3d", arr(i, j, k));
                   }
                   printf("\\n");
               }
           }
           """
    # The blitz type converters enable the arr(i, j, k) call syntax above.
    weave.inline(code, ['arr'], type_converters=converters.blitz)
def main():
    """Run the demo: print the array via numpy, pure inline C, and blitz."""
    arr = create_array()
    print("numpy:")
    print(arr)
    print("Pure Inline:")
    pure_inline(arr)
    print("Blitz Inline:")
    blitz_inline(arr)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "Kamp9/scipy",
"path": "scipy/weave/examples/array3d.py",
"copies": "100",
"size": "2521",
"license": "bsd-3-clause",
"hash": 2937314798345563000,
"line_mean": 22.5607476636,
"line_max": 69,
"alpha_frac": 0.5295517652,
"autogenerated": false,
"ratio": 3.4392905866302863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A simple exploration of graphs and various functions to work with them.
"""
A graph is a way of representing some kind of relationship between datum;
such as the distances between towns, the cost of a decision or the impact of
an action, latency between a server and users, the routes a packet could
take across a large network... The layout of a directory tree on a computer.
All kinds of things can be represented as one form or another of graph.
Typically each point of interest in the graph is called a Node. Nodes usually
have at least one interesting property (name, number, value) and the property
of having potentially one or more connections to other nodes.
Depending on nuances of the specific type of graph, nodes may be described
in other terms (parent, leaf, branch, root, etc) to describe their role
within the larger graph, but ultimately all of these are names for Nodes.
Again, the most important property of Nodes is their connectedness. The
connections between nodes are usually referred to as an Edge, and edges can
themselves have values and/or properties.
There is a wide spectrum of nomenclature for the elements of graphs, but they
can usually be reasoned relatively easily: a graph that flows in one
direction may refer to the nodes at the far end of edges as "descendants", or
"children"; if nodes have a maximum of two, directional edges, they may be
referred to as "binary nodes" or the edges may be referred to as "left" and
"right".
Most often such binary graphs produce what is commonly called a "tree".
A -> (B or C), B -> (D or E) ...
Examples:
Family tree: Nodes represent family members, with a name as their values;
You could use bi-directional edges that represent a biological
link; or you could have uni-directional edges that are
labelled with what type of relationship (mother, father, son,
daughter).
Junctions: Nodes represent street intersections with edges representing
connections between them and the distance required to travel.
Although there are many kinds of graphs, there are also many, many core
algorithms common to all of them.
Consider the following "BinaryTreeNode". 'value' could be anything from a
simple number, to a string to a complex object in a database.
'left' and 'right' are optional edges linking to other Nodes.
A Tree would thus be formed by creating a "root" node, creating additional
nodes and associating them via the left or right members of other nodes.
Lets quickly build a tree to model how I might relay a card through a
series of friends, starting with myself and the person I want the card to reach:
me = BinaryTreeNode("oliver")
goal = BinaryTreeNode("john")
There are two people I might hand the card to who are likely to have connections
that would be able to forward it to John:
meg = BinaryTreeNode("meg")
jared = BinaryTreeNode("jared")
I'm going to (arbitrarily) assign one of these to be the "left" branch of my
tree and one the right:
me.left = meg
me.right = jared
I could just as easily have called them "up" and "down", "east" and "west",
"first" and "second".
Both of these individuals have friends they could relay the card to:
eva, nico = BinaryTreeNode("eva"), BinaryTreeNode("nico")
meg.left, meg.right = eva, nico
Jared only knows one person, whether we assign this connection as left or
right is entirely up to us and/or the rules of our graph. I choose left.
Jared's friend knows someone called Bill who knows John.
mark = BinaryTreeNode("mark")
jared.left = mark
bill = BinaryTreeNode("bill")
mark.left = bill
Finally, let's say that both "Bill" and "Eva" know "john", the individual
I want the card forwarded to:
bill.left = eva.left = goal
This series of relationships forms a Tree. We didn't need a discrete object
to describe it, we can essentially say:
tree = me
(that is the "tree" is represented by the first node).
We could, if we wanted, create a discrete object/class for the tree, but that
is an option if we wanted to.
You might want to take a moment to draw this graph out and ask yourself what
information is present on it, what questions you might ask of it, and think
about what problems you might have accessing useful properties of this
structure.
"""
class BinaryTreeNode(object):
    """A node in a binary tree: a value plus up to two child edges.

    A 'binary tree' is nothing more than a hierarchy of nodes which are
    connected 'downwards' by a maximum of two edges: left and right.
    """
    def __init__(self, value, left=None, right=None):
        # 'value' can be anything: a name, a number, a rich object, ...
        self.value = value
        # Optional edges to child nodes; None means "no connection".
        # Generalization: children may now be supplied at construction
        # time (backward compatible -- both default to None as before).
        self.left = left
        self.right = right
# People
oliver = BinaryTreeNode("Oliver")
meg = BinaryTreeNode("Meg")
jared = BinaryTreeNode("Jared")
eva = BinaryTreeNode("Eva")
nico = BinaryTreeNode("Nico")
mark = BinaryTreeNode("Mark")
bill = BinaryTreeNode("Bill")
john = BinaryTreeNode("John")
# The root of the tree and the goal of the exercise.
tree = oliver
goal = john
# Edges
# NOTE: 'john' is reachable via two parents (eva and bill), so strictly
# speaking this structure is a DAG rather than a tree.
oliver.left, oliver.right = meg, jared
meg.left, meg.right = eva, nico
jared.left = mark
eva.left = john
mark.left = bill
bill.left = john
# Questions:
# . How many nodes are in our graph? (without simply counting the list above)
# . What is the shortest path to 'goal'?
# . What is the longest path?
# . What is the average path?
# . How many distinct paths are there that reach John?
# . How many paths do *not* reach John?
# . Are there any loops?
# . How would you print all the nodes in the graph?
# . How would you print all the nodes in the graph that lead to john?
##### WORK IN PROGRESS #####
| {
"repo_name": "kfsone/tinker",
"path": "python/graph.py",
"copies": "1",
"size": "5613",
"license": "mit",
"hash": -2161422646755011000,
"line_mean": 34.7515923567,
"line_max": 80,
"alpha_frac": 0.7277748085,
"autogenerated": false,
"ratio": 3.8683666436940043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5096141452194004,
"avg_score": null,
"num_lines": null
} |
"""A simple extension that fires a Jenkins job for incoming heads."""
from contextlib import closing
from urllib import urlencode
import re
import urllib2
import urlparse
from mercurial import util
BUILD_URL = 'job/{job}/buildWithParameters'
def reposetup(ui, repo):
    """Set up the Jenkins notification hook.

    Registers poke_jenkins_hook on the 'changegroup' event, i.e. whenever
    a group of changesets arrives in the repository.

    :param ui: Mercurial ui object
    :param repo: Mercurial repository object
    """
    ui.setconfig("hooks", "changegroup.poke_jenkins", poke_jenkins_hook)
def poke_jenkins_hook(ui, repo, node, **kwargs):
    """Filter out the incoming heads and start a Jenkins job for them.

    NOTE(review): Python 2 only (xrange, urllib2, str.encode('base64')).

    :param ui: Mercurial ui object
    :param repo: Mercurial repository object
    :param node: Mercurial node object (eg commit) -- first changeset of
        the incoming changegroup
    """
    # Required configuration: where Jenkins lives and how the job should
    # identify this repository.
    jenkins_base_url = ui.config('poke_jenkins', 'jenkins_base_url', default=None, untrusted=False)
    if not jenkins_base_url:
        raise util.Abort(
            'You have to specify the parameter jenkins_base_url '
            'in the section poke_jenkins.'
        )
    timeout = int(ui.config('poke_jenkins', 'timeout', default=10, untrusted=False))
    repo_url = ui.config('poke_jenkins', 'repo_url', default=None, untrusted=False)
    if not repo_url:
        raise util.Abort(
            'You have to specify the parameter repo_url '
            'in the section poke_jenkins.'
        )
    # Optional configuration: which jobs to trigger, an arbitrary tag to
    # pass along, credentials, and a branch-name filter.
    jobs = ui.configlist('poke_jenkins', 'jobs', default=[], untrusted=False)
    tag = ui.config('poke_jenkins', 'tag', default='', untrusted=False)
    username = ui.config('poke_jenkins', 'username', default='', untrusted=False)
    password = ui.config('poke_jenkins', 'password', default='', untrusted=False)
    branch_regex = ui.config('poke_jenkins', 'branch_regex', default=None, untrusted=False)
    if branch_regex:
        branch_regex = re.compile(branch_regex)
    branches = {}
    # Collect the incoming heads that don't have any children.
    # (Scans from the first incoming changeset to the repo tip; the last
    # childless changeset seen per branch wins.)
    for rev in xrange(repo[node].rev(), len(repo)):
        ctx = repo[rev]
        branch = ctx.branch()
        if not any(ctx.children()):
            branches[branch] = ctx.hex()
    # Only send HTTP basic auth when both credentials are configured.
    if username and password:
        headers = {
            'Authorization':
            'Basic {0}'.format('{0}:{1}'.format(username, password).encode('base64').replace('\n', ''))
        }
    else:
        headers = {}
    # For every head start a Jenkins job.
    for branch, rev in sorted(branches.items()):
        if branch_regex is None or branch_regex.match(branch):
            for job in jobs:
                base = urlparse.urljoin(jenkins_base_url, BUILD_URL.format(job=job))
                args = urlencode([('TAG', tag), ('NODE_ID', rev), ('REPO_URL', repo_url), ('BRANCH', branch)])
                url = '?'.join([base, args])
                request = urllib2.Request(url, '', headers)
                with closing(urllib2.urlopen(request, timeout=timeout)) as f:
                    ui.write('Starting the job {job} for the branch: {branch}, revision: {rev}\n'.format(
                        job=job, branch=branch, rev=rev))
                    f.read()
| {
"repo_name": "paylogic/poke-jenkins",
"path": "poke_jenkins.py",
"copies": "1",
"size": "3069",
"license": "mit",
"hash": 2085727454695924200,
"line_mean": 35.5357142857,
"line_max": 110,
"alpha_frac": 0.6158357771,
"autogenerated": false,
"ratio": 3.9702457956015524,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5086081572701553,
"avg_score": null,
"num_lines": null
} |
"""A simple extension that renders cells as jinja templates.
For demonstration purposes, this renders in a simple environment with `solution=True`,
so that a solution notebook *template* will be executable.
Input with:
{% if solution %}
solution_code
{% else %}
student_code
{% endif %}
will be executable as the solution version.
"""
from __future__ import print_function
import sys
import jinja2
from IPython.core.inputtransformer import InputTransformer
class SolutionInputTransformer(InputTransformer):
    """Renders IPython input cells as jinja templates with solution=True."""

    def __init__(self, *args, **kwargs):
        super(SolutionInputTransformer, self).__init__(*args, **kwargs)
        self.env = jinja2.Environment()
        self._lines = []

    def push(self, line):
        # Buffer the line; nothing is produced until reset() is called.
        self._lines.append(line)
        return None

    def reset(self):
        """Render the buffered cell as a jinja template and return it."""
        source = u'\n'.join(self._lines)
        self._lines = []
        # Template syntax errors from from_string() deliberately propagate;
        # only render-time failures fall back to the raw text.
        template = self.env.from_string(source)
        try:
            rendered = template.render(solution=True)
        except Exception as exc:
            print("Failed to render jinja template: %s" % exc, file=sys.stderr)
            return source
        return rendered
def load_ipython_extension(ip):
    """Register `SolutionInputTransformer` with the running IPython shell.

    Called by IPython when the extension is loaded; appends a new
    transformer instance to the shell's python_line_transforms list.
    """
    ip.input_transformer_manager.python_line_transforms.append(
        SolutionInputTransformer()
    )
| {
"repo_name": "danielballan/ipython_extensions",
"path": "extensions/jinjasolution.py",
"copies": "4",
"size": "1427",
"license": "bsd-3-clause",
"hash": 858251839902707200,
"line_mean": 26.4423076923,
"line_max": 86,
"alpha_frac": 0.654519972,
"autogenerated": false,
"ratio": 4.285285285285285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6939805257285285,
"avg_score": null,
"num_lines": null
} |
"""A simple fcntl/flock implementation"""
from os import getpid
from fcntl import flock, LOCK_EX, LOCK_UN, LOCK_NB
class LockError(Exception):
    """Signals a failure while acquiring or releasing a file lock.

    Carries the human-readable `message` and, when the failure wraps a
    lower-level error, the original exception in `exc`.
    """

    def __init__(self, message, exc=None):
        super(LockError, self).__init__(message, exc)
        self.message = message
        self.exc = exc
class Lock(object):
    """A simple flock based file lock implementation.

    The target file must already exist: acquire() opens it read-only and
    takes an exclusive, non-blocking flock() on the open handle.
    """
    def __init__(self, path):
        # Path of the file to lock.
        self.path = path
        # Open file object holding the flock, or None when unlocked.
        self.lock = None
    def acquire(self):
        """Acquire a lock on the path associated with this lock object.

        Returns True on success, raises LockError on failure.
        NOTE(review): returns None (not True) when the lock is already
        held -- callers should not rely on the return value.
        """
        if self.is_locked():
            return
        try:
            self.lock = open(self.path, 'r')
            # LOCK_NB makes this non-blocking: IOError is raised
            # immediately if another process holds the lock.
            flock(self.lock, LOCK_EX|LOCK_NB)
        except IOError, exc:
            self.lock = None
            raise LockError(str(exc), exc)
        else:
            return True
    def is_locked(self):
        # True when this object currently holds an open lock handle.
        return self.lock is not None
    def release(self):
        """Release a currently open lock.

        Returns True on success; raises LockError if no lock is held or
        unlocking fails.
        """
        if self.lock is None:
            raise LockError("No lock acquired to release")
        try:
            # NOTE(review): this acquire() call is a no-op here (is_locked()
            # is True at this point, so acquire() returns immediately) --
            # presumably vestigial; confirm intent before removing.
            self.acquire()
            flock(self.lock, LOCK_UN)
            self.lock = None
        except IOError, exc:
            raise LockError(str(exc), exc)
        else:
            return True
| {
"repo_name": "m00dawg/holland",
"path": "holland/core/util/lock.py",
"copies": "1",
"size": "1346",
"license": "bsd-3-clause",
"hash": -8459301700717894000,
"line_mean": 25.92,
"line_max": 73,
"alpha_frac": 0.5631500743,
"autogenerated": false,
"ratio": 4.128834355828221,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.519198443012822,
"avg_score": null,
"num_lines": null
} |
"""A simple file and folder watcher."""
import os
from typing import Iterable
from typing import List
class Watcher:
    """Watches a set of files and folders for modification-time changes.

    For folders only the mtime of the folder itself and of each
    subdirectory is inspected, so new/deleted entries are detected but
    in-place edits to contained files are not.
    """

    __slots__ = ["_files", "_folders", "_last_checked"]

    def __init__(self) -> None:
        self._files: List[str] = []
        self._folders: List[str] = []
        self._last_checked = 0

    def update(self, files: Iterable[str], folders: Iterable[str]) -> None:
        """Replace the watched file and folder lists, then rebaseline.

        Args:
            files: Paths of files to watch.
            folders: Paths of folders to watch.
        """
        self._files = list(files)
        self._folders = list(folders)
        self.check()

    def check(self) -> bool:
        """Report whether anything changed since the previous check.

        Returns:
            `True` when a watched mtime moved or a watched file vanished,
            `False` otherwise.
        """
        newest = 0
        for file_path in self._files:
            try:
                stamp = os.stat(file_path).st_mtime_ns
            except FileNotFoundError:
                # A vanished file always counts as a change.
                return True
            newest = max(newest, stamp)
        for folder in self._folders:
            for subdir, _, _ in os.walk(folder):
                newest = max(newest, os.stat(subdir).st_mtime_ns)
        has_changed = newest != self._last_checked
        self._last_checked = newest
        return has_changed
| {
"repo_name": "beancount/fava",
"path": "src/fava/core/watcher.py",
"copies": "2",
"size": "1644",
"license": "mit",
"hash": 1383789833056331300,
"line_mean": 28.8909090909,
"line_max": 77,
"alpha_frac": 0.5474452555,
"autogenerated": false,
"ratio": 4.303664921465969,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 55
} |
# a simple file extractor
# TODO:
# classify or clean
# PNG TIFF JPG BMP
# Aplib'ed-EXE
# xor-ed information
import os
import sys
import struct
import zlib
def extract(file, data, offset, size, type):
    """Write a carved-out payload to a new file in the working directory.

    The output name is "<file>-<type>(<offset as 4-digit hex>-<offset+size>)".
    NOTE(review): `size` is only used for the log line and the filename,
    not to slice `data` -- callers pass pre-sliced bytes.
    """
    print "found a %s file @ offset %i, size %i" % (file, offset, size)
    with open("%s-%s(%04x-%i)" % (file, type, offset, offset + size), "wb") as f:
        f.write(data)
    return
# Walk the current directory tree and scan every file for embedded payloads.
for root, dirs, files in os.walk('.'):
    for file_ in files[:]:
        fn = root + '\\' + file_
        print fn
        with open(fn, "rb") as f:
            r = f.read()
        fullsize = len(r)
        # FWS: uncompressed Flash (SWF). After the magic: one version byte,
        # then a 4-byte little-endian size used as the carve length.
        off = 1
        MAGIC = "FWS"
        MIN_SIZE = 8
        i = r.find(MAGIC, off)
        while (i > -1):
            if i + MIN_SIZE >= fullsize:
                break
            o2 = i + len(MAGIC)
            version = ord(r[o2])
            # Implausibly high version bytes stop the scan of this file.
            if version >= 0x0a:
                break
            o2 += 1
            size_ = struct.unpack("<I", r[o2:o2 + 4])[0]
            o2 += 4
            extract(file_, r[i:i + size_], i, size_ - i, "FWS")
            off = o2
            i = r.find(MAGIC, off)
        # CWS: zlib-compressed Flash (SWF). The size field is apparently the
        # decompressed size (see the commented length check below).
        off = 1
        MAGIC = "CWS"
        MIN_SIZE = 8
        i = r.find(MAGIC, off)
        while (i > -1):
            if i + MIN_SIZE >= fullsize:
                break
            o2 = i + len(MAGIC)
            version = ord(r[o2])
            if version >= 0x0a:
                break
            o2 += 1
            theosize_ = struct.unpack("<I", r[o2:o2 + 4])[0]
            o2 += 4
            # NOTE(review): 'size_' is stale state from the FWS scan above --
            # NameError if no FWS match preceded; verify intended size here.
            extract(file_, r[i:i + fullsize - i ], i, size_, "CWS") # unknown real size for now
            dec = zlib.decompress(r[o2:])
            # Rebuild an uncompressed FWS file from the inflated body.
            decfile = "".join(
                ["FWS",
                chr(version),
                struct.pack("<I", theosize_),
                dec,
                ])
            # if len(dec) + 8 != theosize_: Warning "unexpected length"
            extract(file_, decfile, i, size_, "FWS")
            #TODO: compression relation
            off = o2
            i = r.find(MAGIC, off)
        # PE: DOS "MZ" header; e_lfanew at offset 0x3c points at the "PE"
        # signature.
        off = 1
        MAGIC = "MZ"
        MIN_SIZE = 90
        i = r.find(MAGIC, off)
        while (i > -1):
            if i + MIN_SIZE >= fullsize:
                break
            o2 = i + len(MAGIC)
            # NOTE(review): only one byte of the 4-byte little-endian
            # e_lfanew field is read -- PE headers past offset 255 are missed.
            temp = i + ord(r[i + 0x3c])
            if r[temp:temp + 2] != "PE":
                off = o2
                i = r.find(MAGIC, off)
                continue
            # NOTE(review): 'size_' again stale from the FWS scan (see above).
            extract(file_, r[i:i + fullsize - i ], i, size_, "PE") # unknown real size for now
            off = o2 # a bigger gap would be better, however it would be possible to 'interlace' 2 PE signatures
            i = r.find(MAGIC, off)
template = """
off = 1
MAGIC = "MZ"
MIN_SIZE = 90
i = r.find(MAGIC, off)
while (i > -1):
if i + MIN_SIZE >= fullsize:
break
o2 = i + len(MAGIC)
off = o2
i = r.find(MAGIC, off)
""" | {
"repo_name": "angea/corkami",
"path": "misc/python/ex.py",
"copies": "1",
"size": "3166",
"license": "bsd-2-clause",
"hash": -4501369263219005400,
"line_mean": 24.4,
"line_max": 112,
"alpha_frac": 0.4001895136,
"autogenerated": false,
"ratio": 3.6474654377880182,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9525253480134916,
"avg_score": 0.004480294250620294,
"num_lines": 120
} |
# A simple file moving module that renames the file
# to its sha1 name in the media directory
import logging
import os
import guid
from baseaction import BaseAction
from ..processingthread import na
from fileprocess.configuration import config
from shutil import move
log = logging.getLogger(__name__)
class Mover(BaseAction):
    """Pipeline action that moves an uploaded file into the media
    directory, renaming it to a freshly generated GUID."""
    def __init__(self):
        super(Mover, self).__init__()
        # Destination (media) directory and default source (upload) directory.
        self.to = config['media_dir']
        self.frm = config['upload_dir']
    def process(self, file):
        """
        The overridden process function, moves the file and renames it.
        Assumes the file is rewound.
        Returns the updated file dict on success (or when there is no
        'fname' to move), or False after marking the record failed when
        the source path does not exist.
        """
        if not file.has_key('fname'):
            # Nothing to move; pass the record through untouched.
            return file
        log.debug('Moving %s', file['fname'])
        # New location: <media_dir>/<generated guid>.
        to = os.path.join(self.to, guid.generate())
        # Relative names are resolved against the upload directory.
        if not os.path.isabs(file['fname']):
            frm = os.path.join(self.frm, file['fname'])
        else:
            frm = file['fname']
        if not os.path.exists(frm):
            log.info("Given filename does not exist, bailing")
            file['msg'] = "File does not exist"
            file['na'] = na.FAILURE
            self.cleanup(file)
            return False
        move(frm, to)
        file["fname"] = to
        return file
| {
"repo_name": "JustinTulloss/harmonize.fm",
"path": "fileprocess/fileprocess/actions/mover.py",
"copies": "1",
"size": "1272",
"license": "mit",
"hash": 1788219548217119700,
"line_mean": 26.652173913,
"line_max": 71,
"alpha_frac": 0.5786163522,
"autogenerated": false,
"ratio": 4.090032154340836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5168648506540836,
"avg_score": null,
"num_lines": null
} |
# A simple file system based group store.
#
import os, binascii, shutil, json
from datetime import datetime
DEFAULT_STORE_DIR = '.groups'
STORE_FILE_SUFFIX = '.json'

class FileGroupStore():
    """A simple file system based group store.

    Each group is persisted as ``<store_dir>/<group id>.json``. Groups can
    be looked up either by their generated id or by their ``Name`` field.
    """

    def __init__(self, cleanup, store_dir=None):
        """Create a store rooted at `store_dir`.

        Args:
            cleanup: when true, wipe any existing store directory.
            store_dir: storage root; defaults to DEFAULT_STORE_DIR next to
                this module.
        """
        if not store_dir:
            path = os.path.abspath(__file__)
            dir_path = os.path.dirname(path)
            store_dir = dir_path + '/' + DEFAULT_STORE_DIR
        self.store_dir = store_dir
        if cleanup:
            self.reset()

    def reset(self):
        """Delete the entire store directory (and all persisted groups)."""
        if os.path.exists(self.store_dir):
            shutil.rmtree(self.store_dir)

    def get_group(self, group_name_or_id):
        """Return the group dict for an id or a Name; None if not found."""
        file_path = self.group_id_to_file(group_name_or_id)
        try:
            with open(file_path, 'r') as group_file:
                return json.load(group_file)
        except IOError:
            # Not an id -- fall back to a linear search by Name.
            for group in self.list_groups():
                if group['Name'] == group_name_or_id:
                    return group
        return None  # Group not found

    def put_group(self, group):
        """Persist `group`, assigning a random id if it has none.

        Returns:
            The group id (a text string).
        """
        if 'Id' in group:
            group_id = group['Id']
        else:
            # decode() keeps the id a text string on both Python 2 and 3;
            # hexlify() returns bytes on Python 3, which would break the
            # path concatenation in group_id_to_file().
            group_id = binascii.hexlify(os.urandom(8)).decode('ascii')
            group['Id'] = group_id
        file_path = self.group_id_to_file(group_id)
        directory = os.path.dirname(file_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(file_path, 'w') as group_file:
            json.dump(group, group_file)
        return group_id

    def create_group(self, group):
        """Persist a new group, stamping status and creation time."""
        group['Status'] = "CREATE_COMPLETE"
        group['Creation_time'] = datetime.utcnow().isoformat()
        return self.put_group(group)

    def update_group(self, group):
        """Persist an existing group, stamping status and update time."""
        group['Status'] = "UPDATE_COMPLETE"
        group['Updated_time'] = datetime.utcnow().isoformat()
        return self.put_group(group)

    def delete_group(self, group):
        """Remove the group's backing file; `group` must carry an 'Id'."""
        file_path = self.group_id_to_file(group['Id'])
        os.unlink(file_path)

    def list_groups(self):
        """Return all persisted groups (empty list if the store is absent)."""
        group_list = []
        if os.path.exists(self.store_dir):
            for f in os.listdir(self.store_dir):
                # Ignore stray files that are not group records.
                if not f.endswith(STORE_FILE_SUFFIX):
                    continue
                group_id = f[:len(f) - len(STORE_FILE_SUFFIX)]
                group_list.append(self.get_group(group_id))
        return group_list

    def group_id_to_file(self, group_id):
        """Map a group id to its JSON file path."""
        return self.store_dir + '/' + group_id + STORE_FILE_SUFFIX
if __name__ == "__main__":
    # Manual smoke test: wipe the store, persist two sample groups, dump them.
    testgroup1 = json.loads(
        '''
        {
        "Name": "MyGroup1",
        "Memory":0,
        "CpuShares": 512,
        "Env":null,
        "Cmd":[
        "date"
        ],
        "Image":"ubuntu",
        "WorkingDir":"",
        "RestartPolicy": { "Name": "always", "HealthCheckType" : "HttpHealthCheck", "HealthCheckUrl":"/ping" },
        "NumberInstances": {"Desired": 2, "Min": 1, "Max": 4},
        "AutoScalingPolicy" : {}
        }
        ''')
    testgroup2 = json.loads(
        '''
        {
        "Name": "MyGroup2",
        "Memory":0,
        "CpuShares": 512,
        "Env":null,
        "Cmd":[
        "date"
        ],
        "Image":"centos",
        "WorkingDir":"",
        "RestartPolicy": { "Name": "always", "HealthCheckType" : "HttpHealthCheck", "HealthCheckUrl":"/ping" },
        "NumberInstances": {"Desired": 2, "Min": 1, "Max": 4},
        "AutoScalingPolicy" : {}
        }
        ''')
    store = FileGroupStore(True)
    store.put_group(testgroup1)
    store.put_group(testgroup2)
    print(json.dumps(store.list_groups()))
| {
"repo_name": "xdevops/ibm-containers-emulator",
"path": "api/groupstore.py",
"copies": "1",
"size": "3623",
"license": "apache-2.0",
"hash": -3795921389462529500,
"line_mean": 31.3482142857,
"line_max": 113,
"alpha_frac": 0.5200110406,
"autogenerated": false,
"ratio": 3.7197125256673513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47397235662673515,
"avg_score": null,
"num_lines": null
} |
"""A simple filter that thresholds on input data.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2010, Enthought, Inc.
# License: BSD Style.
import numpy as np
# Enthought library imports.
from traits.api import Instance, Range, Float, Bool, \
Property, Enum
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports
from mayavi.core.filter import Filter
from mayavi.core.pipeline_info import PipelineInfo
######################################################################
# `Threshold` class.
######################################################################
class Threshold(Filter):
    """Thresholds its input dataset on point or cell scalars, keeping only
    the cells/points whose value lies in
    [lower_threshold, upper_threshold].
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The threshold filter used.
    threshold_filter = Property(Instance(tvtk.Object, allow_none=False), record=True)
    # The filter type to use, specifies if the cells or the points are
    # cells filtered via a threshold.
    filter_type = Enum('cells', 'points',
                       desc='if thresholding is done on cells or points')
    # Lower threshold (this is a dynamic trait that is changed when
    # input data changes).
    lower_threshold = Range(value=-1.0e20,
                            low='_data_min',
                            high='_data_max',
                            enter_set=True,
                            auto_set=False,
                            desc='the lower threshold of the filter')
    # Upper threshold (this is a dynamic trait that is changed when
    # input data changes).
    upper_threshold = Range(value=1.0e20,
                            low='_data_min',
                            high='_data_max',
                            enter_set=True,
                            auto_set=False,
                            desc='the upper threshold of the filter')
    # Automatically reset the lower threshold when the upstream data
    # changes.
    auto_reset_lower = Bool(True, desc='if the lower threshold is '
                            'automatically reset when upstream '
                            'data changes')
    # Automatically reset the upper threshold when the upstream data
    # changes.
    auto_reset_upper = Bool(True, desc='if the upper threshold is '
                            'automatically reset when upstream '
                            'data changes')
    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['any'])
    output_info = PipelineInfo(datasets=['poly_data',
                                         'unstructured_grid'],
                               attribute_types=['any'],
                               attributes=['any'])
    # Our view.
    view = View(Group(Group(Item(name='filter_type'),
                            Item(name='lower_threshold'),
                            Item(name='auto_reset_lower'),
                            Item(name='upper_threshold'),
                            Item(name='auto_reset_upper')),
                      Item(name='_'),
                      Group(Item(name='threshold_filter',
                                 show_label=False,
                                 visible_when='object.filter_type == "cells"',
                                 style='custom', resizable=True)),
                      ),
                resizable=True
                )
    ########################################
    # Private traits.
    # These traits are used to set the limits for the thresholding.
    # They store the minimum and maximum values of the input data.
    _data_min = Float(-1e20)
    _data_max = Float(1e20)
    # The threshold filter for cell based filtering
    _threshold = Instance(tvtk.Threshold, args=(), allow_none=False)
    # The threshold filter for points based filtering.
    _threshold_points = Instance(tvtk.ThresholdPoints, args=(), allow_none=False)
    # True until the first _update_ranges() call, so the thresholds are
    # initialized from the data range exactly once.
    _first = Bool(True)
    ######################################################################
    # `object` interface.
    ######################################################################
    def __get_pure_state__(self):
        # Strip the dynamically created traits from the persisted state.
        d = super(Threshold, self).__get_pure_state__()
        # These traits are dynamically created.
        for name in ('_first', '_data_min', '_data_max'):
            d.pop(name, None)
        return d
    ######################################################################
    # `Filter` interface.
    ######################################################################
    def setup_pipeline(self):
        """Hook change handlers onto the cell-threshold filter's
        user-editable attributes so UI edits re-execute the pipeline."""
        attrs = ['all_scalars', 'attribute_mode',
                 'component_mode', 'selected_component']
        self._threshold.on_trait_change(self._threshold_filter_edited,
                                        attrs)
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.
        This method is invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        if len(self.inputs) == 0:
            return
        # By default we set the input to the first output of the first
        # input.
        fil = self.threshold_filter
        self.configure_connection(fil, self.inputs[0])
        self._update_ranges()
        self._set_outputs([self.threshold_filter.output])
    def update_data(self):
        """Override this method to do what is necessary when upstream
        data changes.
        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        if len(self.inputs) == 0:
            return
        self._update_ranges()
        # Propagate the data_changed event.
        self.data_changed = True
    ######################################################################
    # Non-public interface
    ######################################################################
    def _lower_threshold_changed(self, new_value):
        # Push the new lower bound into the tvtk filter and re-execute it.
        fil = self.threshold_filter
        fil.threshold_between(new_value, self.upper_threshold)
        fil.update()
        self.data_changed = True
    def _upper_threshold_changed(self, new_value):
        # Push the new upper bound into the tvtk filter and re-execute it.
        fil = self.threshold_filter
        fil.threshold_between(self.lower_threshold, new_value)
        fil.update()
        self.data_changed = True
    def _update_ranges(self):
        """Updates the ranges of the input.
        """
        data_range = self._get_data_range()
        if len(data_range) == 0:
            return
        dr = data_range
        if self._first:
            # First sighting of data: adopt its full range silently for the
            # lower bound, then set the upper bound (which notifies).
            self._data_min, self._data_max = dr
            self.set(lower_threshold = dr[0], trait_change_notify=False)
            self.upper_threshold = dr[1]
            self._first = False
            return
        # Decide whether to change 'lower' or 'upper' first, to avoid
        # ending up with inconsistent bounds (max < min) in the lower_threshold
        # and upper_threshold Range traits.
        if dr[0] <= self._data_min:
            # Safe to change lower bound first: intermediate range is [dr[0],
            # self._data_max], and dr[0] <= self._data_min <= self._data_max.
            change_lower_first = True
        else:
            # Safe to change upper bound first: intermediate range is [self._data_min, dr[1]],
            # and self._data_min < dr[0] <= dr[1].
            change_lower_first = False
        if change_lower_first:
            if self.auto_reset_lower:
                self._data_min = dr[0]
                notify = not self.auto_reset_upper
                self.set(lower_threshold = dr[0],
                         trait_change_notify=notify)
            if self.auto_reset_upper:
                self._data_max = dr[1]
                self.upper_threshold = dr[1]
        else:
            if self.auto_reset_upper:
                self._data_max = dr[1]
                notify = not self.auto_reset_lower
                self.set(upper_threshold = dr[1],
                         trait_change_notify=notify)
            if self.auto_reset_lower:
                self._data_min = dr[0]
                self.lower_threshold = dr[0]
    def _get_data_range(self):
        """Returns the range of the input scalar data."""
        input = self.inputs[0].outputs[0]
        data_range = []
        ps = input.point_data.scalars
        cs = input.cell_data.scalars
        # FIXME: need to be able to handle cell and point data
        # together.
        if ps is not None:
            data_range = list(ps.range)
            # NaN endpoints are replaced with the finite min/max of the array.
            if np.isnan(data_range[0]):
                data_range[0] = float(np.nanmin(ps.to_array()))
            if np.isnan(data_range[1]):
                data_range[1] = float(np.nanmax(ps.to_array()))
        elif cs is not None:
            data_range = cs.range
            if np.isnan(data_range[0]):
                data_range[0] = float(np.nanmin(cs.to_array()))
            if np.isnan(data_range[1]):
                data_range[1] = float(np.nanmax(cs.to_array()))
        return data_range
    def _auto_reset_lower_changed(self, value):
        # When auto-reset is re-enabled, snap the lower threshold to the data.
        if len(self.inputs) == 0:
            return
        if value:
            dr = self._get_data_range()
            self._data_min = dr[0]
            self.lower_threshold = dr[0]
    def _auto_reset_upper_changed(self, value):
        # When auto-reset is re-enabled, snap the upper threshold to the data.
        if len(self.inputs) == 0:
            return
        if value:
            dr = self._get_data_range()
            self._data_max = dr[1]
            self.upper_threshold = dr[1]
    def _get_threshold_filter(self):
        # Property getter: pick the tvtk filter matching `filter_type`.
        if self.filter_type == 'cells':
            return self._threshold
        else:
            return self._threshold_points
    def _filter_type_changed(self, value):
        # Swap the active tvtk filter and announce it as a property change.
        if value == 'cells':
            old = self._threshold_points
            new = self._threshold
        else:
            old = self._threshold
            new = self._threshold_points
        self.trait_property_changed('threshold_filter', old, new)
    def _threshold_filter_changed(self, old, new):
        # Connect the newly active filter to the current input and re-run it.
        if len(self.inputs) == 0:
            return
        fil = new
        self.configure_connection(fil, self.inputs[0])
        fil.threshold_between(self.lower_threshold,
                              self.upper_threshold)
        fil.update()
        self._set_outputs([fil.output])
    def _threshold_filter_edited(self):
        # The user edited the embedded tvtk filter UI; re-execute.
        self.threshold_filter.update()
        self.data_changed = True
| {
"repo_name": "dmsurti/mayavi",
"path": "mayavi/filters/threshold.py",
"copies": "3",
"size": "10670",
"license": "bsd-3-clause",
"hash": -1003981276403537200,
"line_mean": 35.9204152249,
"line_max": 94,
"alpha_frac": 0.5102155576,
"autogenerated": false,
"ratio": 4.464435146443515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6474650704043515,
"avg_score": null,
"num_lines": null
} |
# A simple find and replace filter for mcedit.
# Credits to SethBling's original findandreplace.py which can be found here:
# http://sethbling.com/downloads/mcedit-filters/findandreplace/
# This has been rewritten and uses none of the old code from the original filter
# If there are any issues or enhancement requests please submit a bug report at:
# https://github.com/qqii/mcedit-filters
import re
from pymclevel import TAG_String
from pymclevel import TAG_List
from pymclevel import TAG_Compound
# MCEdit filter metadata: menu label and the option widgets shown to the user.
displayName = "Simple Find and Replace"
inputs = (
    ("Formatting Character", ("string", "value=%%")),
    ("The formatting character is replaced by " + unichr(167) + ".", "label"),
    ("Blacklist", ("string", "value=id;")),
    ("A blacklist of names sepereated by \";\" which are not replaced.", "label"),
    ("Regex Mode", False),
    ("Enable finding via a python regular expression. Optionally end the regex with \"&&\" followed by the re.<LETTER> (e.g. <regex>&&M to enable multi line pattern searching).", "label"),
    ("Find", ("string","value=")),
    ("Replace", ("string","value=")),
)
# A blacklist of names for NBT tags that are not replaced
# By default this contains "id" as editing the id of mobs may cause unwanted effects
blacklist = []
# Compiled regex when "Regex Mode" is on; None selects plain substring mode.
# Both globals are reassigned by perform() from the user's options.
compiledExpr = None
# The following functions finds and replaces text in string tags recursively
# Each of them all returns true if text was replaced
# A call should be made to replace_TAG_Compound with the entity or tile entity NBT compound tag
def replace_TAG_String(tagString, find, replace):
    """Replace `find` with `replace` inside one NBT string tag.

    Surrounding double quotes are stripped first so users cannot match
    them by accident. Returns True when the stored value changed.
    """
    previous = tagString.value.strip('"')
    if compiledExpr is not None:
        updated = compiledExpr.sub(replace, previous)
    else:
        updated = previous.replace(find, replace)
    tagString.value = updated
    return updated != previous
def replace_TAG_List(tagList, find, replace):
    """Recurse into every tag of an NBT list; True if anything changed."""
    # Build the list first so every entry is visited (side effects matter).
    results = [replace_TAG(entry, find, replace) for entry in tagList]
    return any(results)
def replace_TAG_Compound(tagCompound, find, replace):
    """Recurse into a compound's children, skipping blacklisted names.

    Returns True when any non-blacklisted child tag was changed.
    """
    results = [replace_TAG(tag, find, replace)
               for name, tag in tagCompound.iteritems()
               if name not in blacklist]
    return any(results)
def replace_TAG(tag, find, replace):
    """Dispatch on the tag's concrete type; True if a replacement happened.

    Tags of any other type (numeric tags, byte arrays, ...) are ignored.
    """
    handlers = {
        TAG_String: replace_TAG_String,
        TAG_List: replace_TAG_List,
        TAG_Compound: replace_TAG_Compound,
    }
    handler = handlers.get(type(tag))
    if handler is None:
        return False
    return handler(tag, find, replace)
def perform(level, box, options):
    """MCEdit filter entry point.

    Applies the find/replace to every entity and tile-entity NBT tree in
    the selection box, marking modified chunks dirty.
    """
    global blacklist
    global compiledExpr
    formatChr = options["Formatting Character"]
    blacklist = options["Blacklist"].split(";")
    # unichr(167) is the formatting character for minecraft
    find = options["Find"].replace(formatChr, unichr(167))
    replace = options["Replace"].replace(formatChr, unichr(167))
    if options["Regex Mode"]:
        regex = find
        flags = 0
        if "&&" in find:
            # NOTE(review): the 2-way unpack raises ValueError if the
            # pattern itself contains more than one "&&".
            regex, strFlags = find.split("&&")
            # Single-letter names in re.__all__ are the flag constants
            # (re.I, re.M, ...); OR together the ones the user listed.
            for letter in (l for l in re.__all__ if len(l) == 1):
                if letter in strFlags:
                    flags |= re.__dict__[letter]
        compiledExpr = re.compile(regex, flags)
    else:
        compiledExpr = None
    for chunk, slices, point in level.getChunkSlices(box):
        for compoundTag in chunk.getEntitiesInBox(box) + chunk.getTileEntitiesInBox(box):
            # print compoundTag
            if replace_TAG(compoundTag, find, replace):
                chunk.dirty = True
| {
"repo_name": "qqii/mcedit-filters",
"path": "SimpleFindandReplace.py",
"copies": "1",
"size": "3823",
"license": "mit",
"hash": 766117001750149000,
"line_mean": 34.3981481481,
"line_max": 188,
"alpha_frac": 0.6578603191,
"autogenerated": false,
"ratio": 4.0115424973767055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002659485235764895,
"num_lines": 108
} |
"""A simple flow network module.
This module contains the classes and functions written to create, examine,
and solve simple problems on flow networks.
Copyright (C) 2017 Chris Digirolamo
"""
from collections import deque, OrderedDict
from decimal import Decimal
import weakref
INF = Decimal('Infinity')
class FlowNode(object):
    """
    A node with flow network functionality. Automatically
    creates, adjusts, and deletes edges and connects by
    simply using the `add_flow` and `reduce_flow` functions.
    Args:
        key (int): A unique identifier for the node.
    Attributes:
        key (int): A unique identifier for the node.
        edge_flow (dict[FlowNode, int]): Maps the flow for this node.
            Stores the flow of children nodes. Basically stores the
            edge.
        parents (set): The parents are the nodes that flow into
            this node.
    """
    def __init__(self, key):
        # Parents are stored as weakrefs so back-pointers do not keep
        # otherwise-unreferenced parents alive.
        self._parents = set([])
        self._key = key
        self.edge_flow = OrderedDict()
    @property
    def flow(self):
        """int: The total flow from this FlowNode."""
        return sum(self.edge_flow.itervalues())
    @property
    def key(self):
        """object: The unique node identifier."""
        return self._key
    def add_flow(self, child, amount):
        """
        Adds a flow from this node to a child node.
        If the edge already exists, adds to it, else
        we create a new edge.
        Args:
            child (FlowNode): The child who receives
                flow from this node.
            amount (int): The amount of flow to add.
        """
        if child is self:
            raise ValueError("Cannot add flow to self.")
        if child not in self.edge_flow:
            self.edge_flow[child] = amount
            # Register the reverse (parent) link on the child.
            child._parents.add(weakref.ref(self))
        else:
            self.edge_flow[child] += amount
    def reduce_flow(self, child, amount=INF):
        """
        Removes flow from an edge. If the flow becomes
        <= 0, the edge is removed.
        Args:
            child (FlowNode): The child node to remove
                flow from.
            amount (int): The amount of flow to remove.
                Defaults to infinity (to guarantee removal
                of the edge)
        """
        if child is self:
            raise ValueError("Cannot remove flow from self.")
        if amount < self.edge_flow[child]:
            self.edge_flow[child] -= amount
        else:
            # Removing at least the whole flow: drop the edge and the
            # child's weakref back-pointer.
            del self.edge_flow[child]
            child._parents.remove(weakref.ref(self))
    def iter_children(self):
        """Iterate through the nodes that this node flows into.
        Yields:
            FlowNode: Each child of this node.
        """
        for child in self.edge_flow.iterkeys():
            yield child
    def iter_parents(self):
        """Iterate through the nodes that flow into this node.
        Yields:
            FlowNode: Each parent of this node.
        """
        #Stored as weakrefs
        for ref in self._parents:
            parent = ref()
            if parent is not None:
                yield parent
    def iter_edges(self):
        """Iterate through the edges of this node.
        Yields:
            tuple: (parent, child, capacity) of the flow.
        """
        for child, capacity in self.edge_flow.iteritems():
            yield (self, child, capacity)
    def iter_dfs_edges(self):
        """ A depth first search following the flow from this node.
        Note:
            Yields (parent, child) edges reachable from this node; the
            deque is used as a LIFO stack, making the traversal
            depth-first.
        Yields:
            tuple[FlowNode]: The next (parent, child) nodes in the search.
        """
        node_queue = deque([(self, c) for c in self.edge_flow.iterkeys()])
        visited = set([c for p, c in node_queue])
        visited.add(self)
        while node_queue:
            parent, child = node_queue.pop()
            yield parent, child
            for granchild in child.edge_flow.iterkeys():
                if granchild not in visited:
                    visited.add(granchild)
                    node_queue.append((child, granchild))
    def __repr__(self):
        cls_name = self.__class__.__name__
        text = "{}({})"
        return text.format(cls_name, self.key)
class SuperSink(FlowNode):
    """The only sink node.
    If there are multiple sinks in a graph, one instance of this node
    can handle them.
    Attributes:
        consumed (int): As a sink, flows are consumed, this
            counts the consumed flow.
    """
    def __init__(self, *args, **kwargs):
        super(SuperSink, self).__init__(*args, **kwargs)
        self.consumed = 0
    @property
    def flow(self):
        """int: The total flow from this node (unbounded for a sink)."""
        return INF
    def add_flow(self, child, amount):
        """
        Rather than adding flow from this node to a child node,
        the sink consumes flow.
        Args:
            child (FlowNode): Intentionally ignored -- the sink
                forwards nothing.
            amount (int): The amount of flow to add.
        """
        self.consumed += amount
class SuperSource(FlowNode):
    """The only source node.
    If there are multiple sources in a graph, one instance of this node
    can handle them.
    Inherits all behaviour unchanged from `FlowNode`.
    """
class FlowNetwork(object):
"""
A class for flow network functionality. Flow networks created with this
class automatically reduce the network size by using one sink and one
source.
Attributes:
node_key_dict (OrderedDict[key, FlowNode]): The dict of all created
nodes.
"""
def __init__(self):
self._source = SuperSource("+")
self._sink = SuperSink("-")
start_d = [(self.source.key, self.source), (self.sink.key, self.sink)]
self.node_key_dict = OrderedDict(start_d)
@property
def source(self):
"""SuperSource: The only source node."""
return self._source
@property
def sink(self):
"""SuperSink: The only sink node."""
return self._sink
def get_source_sink_ids(self):
"""Gets the source and sink ids.
Returns:
tuple[FlowNode]: The source and sink.
"""
return self.source.key, self.sink.key
def get_node(self, node_id):
"""Gets the node from an id.
Args:
node_id (str): The unique id of the node.
Returns:
FlowNode: The node.
"""
return self.node_key_dict[node_id]
def add_flow_edge(self, parent, child, capacity):
"""
Adds a a flow from a parent node to a child node.
If the node and edge already exists, adds to it, else
we create a new node and/or edge.
Args:
parent (str): The key of the parent who is sending the
flow.
child (str): The key of the child who receives
flow from this node.
capacity (int): The max amount of flow to add.
"""
if isinstance(parent, FlowNode) or isinstance(child, FlowNode):
raise TypeError("add_flow_edge args are node keys, not nodes themselves.")
if parent not in self.node_key_dict:
self.node_key_dict[parent] = FlowNode(parent)
parent = self.node_key_dict[parent]
if child not in self.node_key_dict:
self.node_key_dict[child] = FlowNode(child)
child = self.node_key_dict[child]
parent.add_flow(child, capacity)
def get_maximum_flow(self):
"""Gets the maximum flow from source to sink.
Returns:
int: The maximum flow.
"""
new_copy = self.from_flow_network(self)
start_consumed = new_copy.sink.consumed
new_copy.send_max_flow_to_sink()
return new_copy.sink.consumed - start_consumed
def send_max_flow_to_sink(self):
"""
Sends the maximum amount of flow from the source to the sink.
Partially implements the Ford-Fulkerson maximum flow algorithm
to solve maximum flow by sending flow using function in this
FlowNetwork and the FlowNode class.
You end up finding the maximum flow if you check
how much the sink consumed.
"""
parent_map = self._get_dfs_sink_flow_path()
while parent_map:
minimum_path_flow = INF
node = self.sink
while node is not self.source:
parent = parent_map[node]
flow = parent.edge_flow[node]
minimum_path_flow = min(minimum_path_flow, flow)
node = parent
node = self.sink
while node is not self.source:
parent = parent_map[node]
flow = parent.edge_flow[node]
parent.reduce_flow(node, minimum_path_flow)
node.add_flow(parent, minimum_path_flow)
node = parent
parent_map = self._get_dfs_sink_flow_path()
def _get_dfs_sink_flow_path(self):
"""A single path from this node to the sink.
A breadth first search following the flow from
this node.
Returns:
dict[FlowNode, FlowNode]: Each child to parent in
the path.
"""
node_path = {}
for parent, child in self.source.iter_dfs_edges():
node_path[child] = parent
if child is self.sink:
break
if self.sink not in node_path:
return dict()
child = self.sink
parent = node_path[child]
exact_path = {child: parent}
while parent is not self.source:
child = parent
parent = node_path[child]
exact_path[child] = parent
return exact_path
def iter_edge_values(self):
"""Iterator of all edges.
Yields:
tuple[]: (parent, child, value) for all edges in the
flow network.
"""
visited = set()
for node in self.node_key_dict.values():
for parent, child in node.iter_dfs_edges():
edge = (parent, child)
if edge in visited:
continue
visited.add(edge)
yield parent, child, parent.edge_flow[child]
    @classmethod
    def from_adjacency_matrix(cls, adjacency_matrix, sources, sinks):
        """Creates a Flow Network from an adjacency matrix.

        Args:
            adjacency_matrix (list[list[int]]): The adjacency matrix
                representation of a flow graph; entry [i][j] is the
                capacity of the edge i -> j.
            sources (list[int]): list of integers denoting indexes of sources.
            sinks (list[int]): list of integers denoting indexes of sinks.

        Returns:
            FlowNetwork: The constructed network.
        """
        flow_network = cls()
        key_to_node = {}
        sources = set(sources)
        sinks = set(sinks)
        size = len(adjacency_matrix)
        # Map each matrix index to a node; all source indexes collapse
        # onto the single network source, all sink indexes onto the sink.
        # NOTE: ``xrange`` makes this Python-2 only.
        for node_idx in xrange(size):
            if node_idx in sinks:
                node = flow_network.sink
            elif node_idx in sources:
                node = flow_network.source
            else:
                node = FlowNode(node_idx)
            key_to_node[node_idx] = node
        # Add Flow
        for node_idx in xrange(size):
            row = adjacency_matrix[node_idx]
            node = key_to_node[node_idx]
            for next_node_idx in xrange(size):
                next_node = key_to_node[next_node_idx]
                if node is next_node:
                    continue
                capacity = row[next_node_idx]
                # NOTE(review): zero-capacity entries are also passed to
                # add_flow_edge -- confirm that is intended.
                flow_network.add_flow_edge(node.key, next_node.key, capacity)
        return flow_network
@classmethod
def from_flow_network(cls, flow_network):
"""Creates a copy of a flow network.
Args:
flow_network (FlowNetwork): The instance to copy.
"""
new_flow_network = cls()
for parent, child, capacity in flow_network.iter_edge_values():
new_flow_network.add_flow_edge(parent.key, child.key, capacity)
return new_flow_network
    def __str__(self):
        """Render the network as an adjacency-matrix-style string.

        Each row lists the flow on one node's outgoing edges, padded to
        the width of the largest value, and is annotated with " Source",
        " Sink (<consumed>)" or the node's key.

        NOTE(review): raises ValueError (``max`` of empty sequence) when
        the network has no edges, and NameError when there are no nodes
        because ``entire_string`` is only bound inside the ``if`` below.
        """
        max_num = max(e[2] for e in self.iter_edge_values())
        largest_str = len(str(max_num))
        # Column template sized to the widest flow value.
        f_temp = " {:" + str(largest_str) + "}"
        all_strings = []
        # ``itervalues`` makes this Python-2 only.
        for node in self.node_key_dict.itervalues():
            node_strings = []
            for node_2 in self.node_key_dict.itervalues():
                if node_2 is node or node is self.sink:
                    txt = f_temp.format('-')
                elif node_2 not in node.edge_flow:
                    txt = f_temp.format(0)
                else:
                    txt = f_temp.format(node.edge_flow[node_2])
                node_strings.append(txt)
            node_string = ", ".join(node_strings)
            node_string = "[{}], #".format(node_string)
            if node is self.source:
                node_string += " Source"
            elif node is self.sink:
                node_string += " Sink ({})".format(self.sink.consumed)
            else:
                node_string += " {}".format(node.key)
            all_strings.append(node_string)
        if all_strings:
            # Close the outer bracket on the final row.
            all_strings[-1] = all_strings[-1].replace("], #", "]] #")
            entire_string = "\n ".join(all_strings)
            entire_string = "[{}".format(entire_string)
        return entire_string
| {
"repo_name": "Digirolamo/FlowNet",
"path": "flownet/flownet.py",
"copies": "1",
"size": "13830",
"license": "mit",
"hash": -2619710601620929500,
"line_mean": 30.0138888889,
"line_max": 86,
"alpha_frac": 0.5377440347,
"autogenerated": false,
"ratio": 4.383518225039619,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5421262259739619,
"avg_score": null,
"num_lines": null
} |
# A simple FTP client.
#
# The information to write this program was gathered from RFC 959,
# but this is not a complete implementation! Yet it shows how a simple
# FTP client can be built, and you are welcome to extend it to suit
# it to your needs...
#
# How it works (assuming you've read the RFC):
#
# User commands are passed uninterpreted to the server. However, the
# user never needs to send a PORT command. Rather, the client opens a
# port right away and sends the appropriate PORT command to the server.
# When a response code 150 is received, this port is used to receive
# the data (which is written to stdout in this version), and when the
# data is exhausted, a new port is opened and a corresponding PORT
# command sent. In order to avoid errors when reusing ports quickly
# (and because there is no s.getsockname() method in Python yet) we
# cycle through a number of ports in the 50000 range.
import sys, posix, string
from socket import *
BUFSIZE = 1024
# Default port numbers used by the FTP protocol.
#
FTP_PORT = 21
FTP_DATA_PORT = FTP_PORT - 1
# Change the data port to something not needing root permissions.
#
FTP_DATA_PORT = FTP_DATA_PORT + 50000
# Main program (called at the end of this file).
#
def main():
    # Entry point: the first command-line argument is the FTP server host.
    control(sys.argv[1])
# Control process (user interface and user protocol interpreter).
#
def control(hostname):
    """User interface and protocol interpreter for the control connection."""
    #
    # Create control connection
    #
    s = socket(AF_INET, SOCK_STREAM)
    s.connect((hostname, FTP_PORT))
    f = s.makefile('r') # Reading the replies is easier from a file...
    #
    # Control loop
    #
    # r holds the listening data socket; a fresh one (with a matching
    # PORT command) is created whenever the previous one was consumed.
    r = None
    while 1:
        code = getreply(f)
        if code in ('221', 'EOF'): break  # 221 = server closing control conn
        if code == '150':
            # 150 = data transfer starting: accept it on our data port.
            getdata(r)
            code = getreply(f)
            r = None
        if not r:
            r = newdataport(s, f)
        cmd = getcommand()
        if not cmd: break
        s.send(cmd + '\r\n')
# Create a new data port and send a PORT command to the server for it.
# (Cycle through a number of ports to avoid problems with reusing
# a port within a short time.)
#
nextport = 0
#
def newdataport(s, f):
    """Create a listening data socket and advertise it via a PORT command.

    Cycles through 16 ports above FTP_DATA_PORT to avoid reusing the
    same port within a short time (see the note at the top of the file).
    """
    global nextport
    port = nextport + FTP_DATA_PORT
    nextport = (nextport+1) % 16
    r = socket(AF_INET, SOCK_STREAM)
    r.bind((gethostbyname(gethostname()), port))
    r.listen(1)
    sendportcmd(s, f, port)
    return r
# Send an appropriate port command.
#
def sendportcmd(s, f, port):
    """Send a PORT command advertising our own address and ``port``.

    The FTP PORT argument is h1,h2,h3,h4,p1,p2: the four address bytes
    followed by the port split into its high and low byte.
    """
    hostname = gethostname()
    hostaddr = gethostbyname(hostname)
    hbytes = string.splitfields(hostaddr, '.')
    pbytes = [repr(port//256), repr(port%256)]
    bytes = hbytes + pbytes
    cmd = 'PORT ' + string.joinfields(bytes, ',')
    s.send(cmd + '\r\n')
    code = getreply(f)
# Process an ftp reply and return the 3-digit reply code (as a string).
# The reply should be a line of text starting with a 3-digit number.
# If the 4th char is '-', it is a multi-line reply and is
# terminate by a line starting with the same 3-digit number.
# Any text while receiving the reply is echoed to the file.
#
def getreply(f):
    """Read one FTP reply and return its 3-digit code as a string.

    Multi-line replies ('-' after the code) are consumed until the
    terminating line that repeats the code.  Returns 'EOF' when the
    control connection is closed.  All reply text is echoed to stdout.
    """
    line = f.readline()
    if not line: return 'EOF'
    print line,
    code = line[:3]
    if line[3:4] == '-':
        while 1:
            line = f.readline()
            if not line: break # Really an error
            print line,
            if line[:3] == code and line[3:4] != '-': break
    return code
# Get the data from the data connection.
#
def getdata(r):
    """Accept one connection on data socket ``r`` and copy it to stdout."""
    print '(accepting data connection)'
    conn, host = r.accept()
    print '(data connection accepted)'
    while 1:
        data = conn.recv(BUFSIZE)
        if not data: break
        sys.stdout.write(data)
    print '(end of data connection)'
# Get a command from the user.
#
def getcommand():
    """Prompt for the next user command; '' on EOF ends the session."""
    try:
        while 1:
            line = raw_input('ftp.py> ')
            if line: return line
    except EOFError:
        return ''
# Call the main program.
#
main()
| {
"repo_name": "google/google-ctf",
"path": "third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Demo/sockets/ftp.py",
"copies": "6",
"size": "4074",
"license": "apache-2.0",
"hash": 2552150710960928300,
"line_mean": 25.904109589,
"line_max": 71,
"alpha_frac": 0.6158566519,
"autogenerated": false,
"ratio": 3.5611888111888113,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7177045463088811,
"avg_score": null,
"num_lines": null
} |
"""A simple garbage collection heuritics."""
# stdlib
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
# third party
from typing_extensions import final
# syft relative
from ...common.uid import UID
from ...node.common.action.garbage_collect_batched_action import (
GarbageCollectBatchedAction,
)
from ..pointer import Pointer
from .gc_strategy import GCStrategy
if TYPE_CHECKING:
# syft relative
from ...node.common.client import Client
@final
class GCBatched(GCStrategy):
    """Garbage-collection strategy that deletes remote objects in batches.

    Object ids are cached locally; once ``threshold`` ids have
    accumulated, a single GarbageCollectBatchedAction is sent.
    """

    __slots__ = [
        "obj_ids",
        "threshold",
        "client",
    ]

    # Client that received the most recent flushed batch (used by __del__).
    client: Optional["Client"]
    # Ids cached for the next batched delete.
    obj_ids: List[UID]
    # Batch size that triggers a delete message.
    threshold: int

    def __init__(self, threshold: int = 10) -> None:
        """Construct the GCBatched Strategy.

        Args:
            threshold (int): the threshold after which a message
                would be sent to delete all the objects that were cached

        Return:
            None
        """
        self.threshold = threshold
        self.obj_ids = []
        self.client = None

    def reap(self, pointer: Pointer) -> None:
        """Cache the pointer's id, flushing one batched delete when full.

        Args:
            pointer (Pointer): Pointer to the object that should get deleted

        Return:
            None
        """
        self.obj_ids.append(pointer.id_at_location)
        if len(self.obj_ids) < self.threshold:
            return

        # Threshold reached: ask the remote node to drop every cached id.
        batch_msg = GarbageCollectBatchedAction(
            ids_at_location=self.obj_ids, address=pointer.client.address
        )
        pointer.client.send_eventual_msg_without_reply(batch_msg)
        self.obj_ids = []
        self.client = pointer.client

    def __del__(self) -> None:
        """Flush any ids still cached when this strategy is destroyed."""
        if self.client is None:
            # reap never flushed a batch, so there is no known client.
            return

        final_msg = GarbageCollectBatchedAction(
            ids_at_location=self.obj_ids, address=self.client.address
        )
        self.client.send_eventual_msg_without_reply(final_msg)
| {
"repo_name": "OpenMined/PySyft",
"path": "packages/syft/src/syft/core/pointer/garbage_collection/gc_batched.py",
"copies": "1",
"size": "2439",
"license": "apache-2.0",
"hash": 691976117034972800,
"line_mean": 26.1,
"line_max": 80,
"alpha_frac": 0.6166461665,
"autogenerated": false,
"ratio": 4.219723183391004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001371742112482853,
"num_lines": 90
} |
"""A simple, generic table parser for HTML.
Inputs:
- source: URL or (plain) filename
- addl_data: dict of additional data to add to each row
Outputs:
- List of dicts containing data from largest table on page
@author n.o.franklin@gmail.com
@date 2017-12-30
"""
from requests import get
from bs4 import BeautifulSoup
def generic_table_parser(source, addl_data):
    """Parse the largest <table> found at a URL or in a local HTML file.

    Args:
        source (str): URL (anything starting with 'http') or a filename.
        addl_data (dict): Extra key/value pairs merged into every row.

    Returns:
        list[dict]: One dict per data row of the largest table, keyed by
        the table's <th> headings, plus a 'source' entry and addl_data.

    Raises:
        Exception: If the page has no tables or the largest table has
            no <th> headings.
    """
    if source[:4] == 'http':
        response = get(source)
        response.raise_for_status()
        markup = response.text
    else:
        # Read the file eagerly so the handle is closed right away
        # (previously the open file object was never closed).
        with open(source, 'r') as fh:
            markup = fh.read()
    soup = BeautifulSoup(markup, 'lxml')
    tabs = soup.find_all('table')
    if not tabs:
        raise Exception("No tables found.")

    # Find the largest table by row count; ties go to the later table,
    # matching the previous dict-overwrite behaviour.
    row_counts = [len(tab.find_all('tr')) for tab in tabs]
    largest_index = max(range(len(row_counts)),
                        key=lambda i: (row_counts[i], i))
    largest = tabs[largest_index]

    # Get table headings
    headings = [header.text for header in largest.find_all('th')]
    if not headings:
        raise Exception("No table headings found.")

    # Get table data: keep only rows whose cell count matches the headings.
    results = []
    for row in largest.find_all('tr'):
        cells = row.find_all('td')
        if len(cells) == len(headings):
            row_data = {headings[i]: cell.text for i, cell in enumerate(cells)}
            row_data.update({'source': source})
            row_data.update(addl_data)
            results.append(row_data)
    return results
| {
"repo_name": "UnitedThruAction/Data",
"path": "Tools/GenericTableParser.py",
"copies": "1",
"size": "1488",
"license": "apache-2.0",
"hash": 195267992053598270,
"line_mean": 26.0545454545,
"line_max": 79,
"alpha_frac": 0.6068548387,
"autogenerated": false,
"ratio": 3.576923076923077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9681757713602874,
"avg_score": 0.00040404040404040404,
"num_lines": 55
} |
""" A simple genetic algorithm for parameter search """
from __future__ import print_function
from collections import OrderedDict
try:
from bayes_opt import BayesianOptimization
bayes_opt_present=True
except Exception:
BayesianOptimization = None
bayes_opt_present=False
from kernel_tuner.strategies import minimize
supported_methods = ["poi", "ei", "ucb"]
def tune(runner, kernel_options, device_options, tuning_options):
    """ Find the best performing kernel configuration in the parameter space

    :params runner: A runner from kernel_tuner.runners
    :type runner: kernel_tuner.runner

    :param kernel_options: A dictionary with all options for the kernel.
    :type kernel_options: kernel_tuner.interface.Options

    :param device_options: A dictionary with all options for the device
        on which the kernel should be tuned.
    :type device_options: kernel_tuner.interface.Options

    :param tuning_options: A dictionary with all options regarding the tuning
        process.
    :type tuning_options: kernel_tuner.interface.Options

    :returns: A list of dictionaries for executed kernel configurations and their
        execution times. And a dictionary that contains a information
        about the hardware/software environment on which the tuning took place.
    :rtype: list(dict()), dict()
    """
    #Bayesian Optimization strategy seems to need some hyper parameter tuning to
    #become better than random sampling for auto-tuning GPU kernels.
    #alpha, normalize_y, and n_restarts_optimizer are options to
    #https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessRegressor.html
    #defaults used by Baysian Optimization are:
    #   alpha=1e-6, #1e-3 recommended for very noisy or discrete search spaces
    #   n_restarts_optimizer=5,
    #   normalize_y=True,
    #several exploration friendly settings are: (default is acq="ucb", kappa=2.576)
    #   acq="poi", xi=1e-1
    #   acq="ei", xi=1e-1
    #   acq="ucb", kappa=10
    if not bayes_opt_present:
        raise ImportError("Error: optional dependency Bayesian Optimization not installed")
    #defaults as used by Bayesian Optimization Python package
    # NOTE(review): the upstream package default is acq="ucb" (see the
    # comment above), but "poi" is used as the default here -- presumably
    # deliberate as an exploration-friendly choice; confirm.
    acq = tuning_options.strategy_options.get("method", "poi")
    kappa = tuning_options.strategy_options.get("kappa", 2.576)
    xi = tuning_options.strategy_options.get("xi", 0.0)
    init_points = tuning_options.strategy_options.get("popsize", 5)
    n_iter = tuning_options.strategy_options.get("maxiter", 25)
    # Ask the minimizer helpers to scale parameters to the unit interval.
    tuning_options["scaling"] = True
    results = []
    #function to pass to the optimizer
    def func(**kwargs):
        args = [kwargs[key] for key in tuning_options.tune_params.keys()]
        # The optimizer maximizes, so negate the (time) cost.
        return -1.0 * minimize._cost_func(args, kernel_options, tuning_options, runner, results)
    bounds, _, _ = minimize.get_bounds_x0_eps(tuning_options)
    pbounds = OrderedDict(zip(tuning_options.tune_params.keys(),bounds))
    verbose=0
    if tuning_options.verbose:
        verbose=2
    optimizer = BayesianOptimization(f=func, pbounds=pbounds, verbose=verbose)
    optimizer.maximize(init_points=init_points, n_iter=n_iter, acq=acq, kappa=kappa, xi=xi)
    if tuning_options.verbose:
        print(optimizer.max)
    # All evaluated configurations were appended to ``results`` by func().
    return results, runner.dev.get_environment()
| {
"repo_name": "benvanwerkhoven/kernel_tuner",
"path": "kernel_tuner/strategies/bayes_opt.py",
"copies": "1",
"size": "3306",
"license": "apache-2.0",
"hash": -6082363215708889000,
"line_mean": 35.3296703297,
"line_max": 109,
"alpha_frac": 0.7126436782,
"autogenerated": false,
"ratio": 3.7355932203389832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9923280040943436,
"avg_score": 0.004991371519109359,
"num_lines": 91
} |
"""A simple GeoHash implementation."""
# Forward and reverse base 32 map
BASESEQUENCE = '0123456789bcdefghjkmnpqrstuvwxyz'
BASE32MAP = dict((k,count) for count,k in enumerate(BASESEQUENCE))
BASE32MAPR = dict((count,k) for count,k in enumerate(BASESEQUENCE))
def _bits_to_float(bits, lower=-90.0, middle=0.0, upper=90.0):
    """Decode a sequence of GeoHash bits into a coordinate value.

    Each 1 bit narrows the interval to its upper half and each 0 bit to
    its lower half; the final midpoint is returned.
    """
    for bit in bits:
        lower, upper = (middle, upper) if bit else (lower, middle)
        middle = (lower + upper) / 2
    return middle
def _float_to_bits(value, lower=-90.0, middle=0.0, upper=90.0, length=15):
    """Encode a coordinate value as a list of GeoHash bits via bisection.

    Emits a 1 bit when the value lies in the upper half of the current
    interval and a 0 bit otherwise, for ``length`` iterations.
    """
    bits = []
    for _ in range(length):
        bit = 1 if value >= middle else 0
        bits.append(bit)
        if bit:
            lower = middle
        else:
            upper = middle
        middle = (upper + lower) / 2
    return bits
def _geohash_to_bits(value):
    """Expand a GeoHash string into its bit list (5 bits per character)."""
    ret = []
    for code in map(BASE32MAP.get, value):
        # Emit the 5 bits of each base-32 digit, most significant first.
        ret.extend((code >> shift) & 0b1 for shift in range(4, -1, -1))
    return ret
def _bits_to_geohash(value):
    """Convert a list of GeoHash bits to a GeoHash string.

    Consumes the bits five at a time; each group is the binary value of
    one base-32 digit, most significant bit first.
    """
    ret = []
    # ``range`` replaces Python-2-only ``xrange`` (identical semantics on
    # Python 2, and makes this helper usable on Python 3 as well).
    for chunk in (value[i:i + 5] for i in range(0, len(value), 5)):
        # Convert binary to integer.
        # Note: reversed so the enumerate index is the bit's power of two.
        total = sum(bit * 2 ** count for count, bit in enumerate(chunk[::-1]))
        ret.append(BASE32MAPR[total])
    # Join the string and return
    return "".join(ret)
# Public
def decode(value):
    """Decode a GeoHash string into a ``(lon, lat)`` pair of floats."""
    assert value, "Invalid geohash: %s"%value
    # Even-indexed bits encode longitude, odd-indexed bits latitude.
    interleaved = _geohash_to_bits(value)
    lon_value = _bits_to_float(interleaved[0::2], lower=-180.0, upper=180.0)
    lat_value = _bits_to_float(interleaved[1::2])
    return (lon_value, lat_value)
def encode(lonlat, length=12):
    """Encode a ``(lon, lat)`` pair as a GeoHash of ``length`` characters."""
    assert len(lonlat) == 2, "Invalid lon/lat: %s"%lonlat
    # Half the length for each component.  Floor division keeps the
    # value an int on Python 3 too (plain ``/=`` would yield a float and
    # break the ``range`` call inside _float_to_bits); on Python 2 it is
    # identical to the old ``/=`` for ints.
    length //= 2
    lon = _float_to_bits(lonlat[0], lower=-180.0, upper=180.0, length=length*5)
    lat = _float_to_bits(lonlat[1], lower=-90.0, upper=90.0, length=length*5)
    # Zip the GeoHash bits: longitude in even positions, latitude in odd.
    ret = []
    for a, b in zip(lon, lat):
        ret.append(a)
        ret.append(b)
    return _bits_to_geohash(ret)
def adjacent(geohash, direction):
    """Return the adjacent geohash for a given direction ('n','s','e','w')."""
    # Based on an MIT licensed implementation by Chris Veness from:
    # http://www.movable-type.co.uk/scripts/geohash.html
    assert direction in 'nsew', "Invalid direction: %s"%direction
    assert geohash, "Invalid geohash: %s"%geohash
    # Lookup tables indexed by [direction][parity]: ``neighbor`` remaps
    # the last character to its neighbor in that direction; ``border``
    # lists the characters lying on the cell's edge for that direction.
    neighbor = {
        'n': [ 'p0r21436x8zb9dcf5h7kjnmqesgutwvy', 'bc01fg45238967deuvhjyznpkmstqrwx' ],
        's': [ '14365h7k9dcfesgujnmqp0r2twvyx8zb', '238967debc01fg45kmstqrwxuvhjyznp' ],
        'e': [ 'bc01fg45238967deuvhjyznpkmstqrwx', 'p0r21436x8zb9dcf5h7kjnmqesgutwvy' ],
        'w': [ '238967debc01fg45kmstqrwxuvhjyznp', '14365h7k9dcfesgujnmqp0r2twvyx8zb' ]
    }
    border = {
        'n': [ 'prxz', 'bcfguvyz' ],
        's': [ '028b', '0145hjnp' ],
        'e': [ 'bcfguvyz', 'prxz' ],
        'w': [ '0145hjnp', '028b' ]
    }
    last = geohash[-1]
    parent = geohash[0:-1]
    # Odd/even length cells interleave lon/lat differently, hence parity.
    t = len(geohash) % 2
    # Check for edge cases: crossing a cell border requires moving the
    # parent prefix in the same direction first (recursively).
    if (last in border[direction][t]) and (parent):
        parent = adjacent(parent, direction)
    return parent + BASESEQUENCE[neighbor[direction][t].index(last)]
def neighbors(geohash):
    """Return a dict of the eight adjacent geohashes plus the center.

    Keys are compass directions ('n', 'ne', ..., 'nw') and 'c' for the
    input geohash itself.
    """
    north = adjacent(geohash, 'n')
    south = adjacent(geohash, 's')
    return {
        'n': north,
        'ne': adjacent(north, 'e'),
        'e': adjacent(geohash, 'e'),
        'se': adjacent(south, 'e'),
        's': south,
        'sw': adjacent(south, 'w'),
        'w': adjacent(geohash, 'w'),
        'nw': adjacent(north, 'w'),
        'c': geohash
    }
def neighborsfit(centroid, points):
    """Find the longest geohash prefix of ``centroid`` whose neighborhood
    still contains every point.

    Args:
        centroid: (lon, lat) pair for the center.
        points: iterable of (lon, lat) pairs that must stay inside the
            neighbor set of the prefix.

    Returns:
        str: The prefix one character shorter than the first precision
        at which some point escaped the 3x3 neighbor set.
    """
    centroid = encode(centroid)
    # Materialize the encodings as a list: a lazy ``map`` iterator
    # (Python 3) would be exhausted after the first pass of the loop.
    points = [encode(point) for point in points]
    # Pre-bind ``g`` so the function is defined even for a 1-char centroid.
    g = centroid[0:1]
    for i in range(1, len(centroid)):
        g = centroid[0:i]
        n = set(neighbors(g).values())
        unbounded = [point for point in points if (point[0:i] not in n)]
        if unbounded:
            break
    return g[0:-1]
| {
"repo_name": "transitland/mapzen-geohash",
"path": "mzgeohash/geohash.py",
"copies": "3",
"size": "4215",
"license": "mit",
"hash": 7292947793124478000,
"line_mean": 30.6917293233,
"line_max": 84,
"alpha_frac": 0.628232503,
"autogenerated": false,
"ratio": 2.8288590604026846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9789250018356566,
"avg_score": 0.0335683090092239,
"num_lines": 133
} |
"""A simple Google-style logging wrapper."""
import logging
import time
import traceback
import os
import sys
def format_message(record):
    """Return the record's fully formatted message text.

    Falls back to the raw ``record.msg`` when %-interpolation with
    ``record.args`` raises TypeError (e.g. no format specifiers).
    """
    try:
        return '%s' % (record.msg % record.args)
    except TypeError:
        return record.msg
class GlogFormatter(logging.Formatter):
    """logging.Formatter that renders records in glog's line format."""

    # One-letter severity tags, as used by C++ glog.
    LEVEL_MAP = {
        logging.FATAL: 'F', # FATAL is alias of CRITICAL
        logging.ERROR: 'E',
        logging.WARN: 'W',
        logging.INFO: 'I',
        logging.DEBUG: 'D'
    }
    def __init__(self):
        logging.Formatter.__init__(self)
    def format(self, record):
        """Format a record as '<L><mmdd> <HH:MM:SS.ffffff> <pid> <file>:<line>] <msg>'."""
        try:
            level = GlogFormatter.LEVEL_MAP[record.levelno]
        except KeyError:
            level = '?'  # unknown/custom level
        date = time.localtime(record.created)
        # Sub-second part of the timestamp, in microseconds.
        date_usec = (record.created - int(record.created)) * 1e6
        record_message = '%c%02d%02d %02d:%02d:%02d.%06d %s %s:%d] %s' % (
            level, date.tm_mon, date.tm_mday, date.tm_hour, date.tm_min,
            date.tm_sec, date_usec,
            record.process if record.process is not None else '?????',
            record.filename,
            record.lineno,
            format_message(record))
        # Override getMessage so the base class emits the prefixed text.
        record.getMessage = lambda: record_message
        return logging.Formatter.format(self, record)
logger = logging.getLogger()
def setLevel(newlevel):
    """Set the root logger's level and log the change at DEBUG."""
    logger.setLevel(newlevel)
    logger.debug('Log level set to %s', newlevel)
debug = logging.debug
info = logging.info
warning = logging.warning
warn = logging.warning
error = logging.error
exception = logging.exception
fatal = logging.fatal
log = logging.log
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
WARN = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
_level_names = {
DEBUG: 'DEBUG',
INFO: 'INFO',
WARN: 'WARN',
ERROR: 'ERROR',
FATAL: 'FATAL'
}
_level_letters = [name[0] for name in _level_names.values()]
GLOG_PREFIX_REGEX = (
r"""
(?x) ^
(?P<severity>[%s])
(?P<month>\d\d)(?P<day>\d\d)\s
(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)
\.(?P<microsecond>\d{6})\s+
(?P<process_id>-?\d+)\s
(?P<filename>[a-zA-Z<_][\w._<>-]+):(?P<line>\d+)
\]\s
""") % ''.join(_level_letters)
"""Regex you can use to parse glog line prefixes."""
def init(log_level=None, log_path=None):
    """Configure the module logger.

    Args:
        log_level: Level name ('debug', 'info', 'warning', 'warn',
            'error', 'exception', 'fatal'); any other value maps to
            FATAL.  None leaves the level unchanged.
        log_path: File to log to ('~' is expanded).  None adds no file
            handler.
    """
    if log_path is not None:
        log_path = os.path.expanduser(log_path)
        # Any OSError from opening the file propagates to the caller
        # (the original caught it only to re-raise immediately).
        handler = logging.FileHandler(log_path, delay=False)
        handler.setFormatter(GlogFormatter())
        logger.addHandler(handler)
    if log_level is not None:
        # Map level names to logging constants.  'exception' maps to
        # ERROR: the logging module has no EXCEPTION level, and the
        # previous code referenced an undefined name ``EXCEPTION`` here,
        # raising NameError (logging.exception() logs at ERROR).
        level_map = {
            'debug': DEBUG,
            'info': INFO,
            'warning': WARNING,
            'warn': WARN,
            'error': ERROR,
            'exception': ERROR,
            'fatal': FATAL,
        }
        setLevel(level_map.get(log_level, FATAL))
# Define functions emulating C++ glog check-macros
# https://htmlpreview.github.io/?https://github.com/google/glog/master/doc/glog.html#check
def format_stacktrace(stack):
    """Render a ``traceback.extract_stack()`` result as compact lines.

    Paths are reduced to their basename and each frame is rendered as
    ``<file>::<func>:<line>\\t<source>``.
    """
    formatted = []
    for frame in stack:
        path, lineno, func, text = frame[0], frame[1], frame[2], frame[3]
        short = os.path.basename(path)
        formatted.append("\t%s:%d\t%s" % (short + "::" + func, lineno, text))
    return formatted
class FailedCheckException(AssertionError):
    """Exception with message indicating check-failure location and values.

    Subclasses AssertionError so existing ``except AssertionError``
    handlers also catch failed glog-style checks.
    """
def check_failed(message):
    """Log a check failure with its stack trace, then re-raise.

    Raises:
        FailedCheckException: always.
    """
    # Drop the last two frames (this function and its check_* wrapper)
    # so the reported location is the caller's check site.
    stack = traceback.extract_stack()
    stack = stack[0:-2]
    stacktrace_lines = format_stacktrace(stack)
    filename, line_num, _, _ = stack[-1]
    try:
        raise FailedCheckException(message)
    except FailedCheckException:
        # NOTE(review): ``handler`` is only ever bound as a local inside
        # init() in this module, so these calls rely on a module-global
        # ``handler`` that is never created -- confirm; as written this
        # raises NameError before the intended re-raise below.
        log_record = logger.makeRecord('CRITICAL', 50, filename, line_num,
                                       message, None, None)
        handler.handle(log_record)
        log_record = logger.makeRecord('DEBUG', 10, filename, line_num,
                                       'Check failed here:', None, None)
        handler.handle(log_record)
        for line in stacktrace_lines:
            log_record = logger.makeRecord('DEBUG', 10, filename, line_num,
                                           line, None, None)
            handler.handle(log_record)
        raise
    return
def check(condition, message=None):
    """Raise exception with message if condition is False."""
    if not condition:
        check_failed(message if message is not None else "Check failed.")

def _comparison_message(message, template, obj1, obj2):
    # Resolve the user-supplied message, falling back to the formatted
    # default built from the two operands.
    if message is not None:
        return message
    return template % (str(obj1), str(obj2))

def check_eq(obj1, obj2, message=None):
    """Raise exception with message if obj1 != obj2."""
    if obj1 != obj2:
        check_failed(_comparison_message(message, "Check failed: %s != %s", obj1, obj2))

def check_ne(obj1, obj2, message=None):
    """Raise exception with message if obj1 == obj2."""
    if obj1 == obj2:
        check_failed(_comparison_message(message, "Check failed: %s == %s", obj1, obj2))

def check_le(obj1, obj2, message=None):
    """Raise exception with message if not (obj1 <= obj2)."""
    if obj1 > obj2:
        check_failed(_comparison_message(message, "Check failed: %s > %s", obj1, obj2))

def check_ge(obj1, obj2, message=None):
    """Raise exception with message unless (obj1 >= obj2)."""
    if obj1 < obj2:
        check_failed(_comparison_message(message, "Check failed: %s < %s", obj1, obj2))

def check_lt(obj1, obj2, message=None):
    """Raise exception with message unless (obj1 < obj2)."""
    if obj1 >= obj2:
        check_failed(_comparison_message(message, "Check failed: %s >= %s", obj1, obj2))

def check_gt(obj1, obj2, message=None):
    """Raise exception with message unless (obj1 > obj2)."""
    if obj1 <= obj2:
        check_failed(_comparison_message(message, "Check failed: %s <= %s", obj1, obj2))

def check_notnone(obj, message=None):
    """Raise exception with message if obj is None."""
    if obj is None:
        check_failed(message if message is not None else "Check failed: Object is None.")
| {
"repo_name": "Chilledheart/vim-clangd",
"path": "python/clangd/glog.py",
"copies": "1",
"size": "6659",
"license": "bsd-3-clause",
"hash": 6286148477272031000,
"line_mean": 27.952173913,
"line_max": 90,
"alpha_frac": 0.584472143,
"autogenerated": false,
"ratio": 3.5724248927038627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46568970357038625,
"avg_score": null,
"num_lines": null
} |
"""A simple Google-style logging wrapper."""
import logging
import time
import traceback
import os
import sys
import gflags
gflags.DEFINE_string('log_path',
os.path.expanduser('~/.config/windycode/logs/pywdc.log'), 'Log path')
import gflags as flags
FLAGS = flags.FLAGS
def format_message(record):
    """Return the record's formatted message text.

    Falls back to the raw ``record.msg`` when %-formatting with
    ``record.args`` raises TypeError.
    """
    try:
        record_message = '%s' % (record.msg % record.args)
    except TypeError:
        record_message = record.msg
    return record_message
class GlogFormatter(logging.Formatter):
    """logging.Formatter producing glog-style log lines."""

    # One-letter severity tags, as used by C++ glog.
    LEVEL_MAP = {
        logging.FATAL: 'F', # FATAL is alias of CRITICAL
        logging.ERROR: 'E',
        logging.WARN: 'W',
        logging.INFO: 'I',
        logging.DEBUG: 'D'
    }
    def __init__(self):
        logging.Formatter.__init__(self)
    def format(self, record):
        """Format a record as '<L><mmdd> <HH:MM:SS.ffffff> <pid> <file>:<line>] <msg>'."""
        try:
            level = GlogFormatter.LEVEL_MAP[record.levelno]
        except KeyError:
            level = '?'  # unknown/custom level
        date = time.localtime(record.created)
        # Sub-second part of the timestamp, in microseconds.
        date_usec = (record.created - int(record.created)) * 1e6
        record_message = '%c%02d%02d %02d:%02d:%02d.%06d %s %s:%d] %s' % (
            level, date.tm_mon, date.tm_mday, date.tm_hour, date.tm_min,
            date.tm_sec, date_usec,
            record.process if record.process is not None else '?????',
            record.filename,
            record.lineno,
            format_message(record))
        # Override getMessage so the base class emits the prefixed text.
        record.getMessage = lambda: record_message
        return logging.Formatter.format(self, record)
logger = logging.getLogger()
def setLevel(newlevel):
    """Set the root logger's level and log the change at DEBUG."""
    logger.setLevel(newlevel)
    logger.debug('Log level set to %s', newlevel)
debug = logging.debug
info = logging.info
warning = logging.warning
warn = logging.warning
error = logging.error
exception = logging.exception
fatal = logging.fatal
log = logging.log
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
WARN = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
_level_names = {
DEBUG: 'DEBUG',
INFO: 'INFO',
WARN: 'WARN',
ERROR: 'ERROR',
FATAL: 'FATAL'
}
_level_letters = [name[0] for name in _level_names.values()]
GLOG_PREFIX_REGEX = (
r"""
(?x) ^
(?P<severity>[%s])
(?P<month>\d\d)(?P<day>\d\d)\s
(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)
\.(?P<microsecond>\d{6})\s+
(?P<process_id>-?\d+)\s
(?P<filename>[a-zA-Z<_][\w._<>-]+):(?P<line>\d+)
\]\s
""") % ''.join(_level_letters)
"""Regex you can use to parse glog line prefixes."""
def init():
    """Attach a glog-formatted file handler at FLAGS.log_path and apply
    FLAGS.verbosity.

    NOTE(review): ``handler`` stays local to this function, while
    check_failed() below reads a module-global ``handler`` that is never
    created -- confirm whether a ``global handler`` was intended here.
    """
    handler = logging.FileHandler(FLAGS.log_path)
    setLevel(FLAGS.verbosity)
    handler.setFormatter(GlogFormatter())
    logger.addHandler(handler)
class CaptureWarningsFlag(flags.BooleanFlag):
    """Boolean gflag toggling logging.captureWarnings as it is parsed."""
    def __init__(self):
        flags.BooleanFlag.__init__(self, 'glog_capture_warnings', True,
                                   "Redirect warnings to log.warn messages")
    def Parse(self, arg):
        # Apply the side effect immediately after normal flag parsing.
        flags.BooleanFlag.Parse(self, arg)
        logging.captureWarnings(self.value)
flags.DEFINE_flag(CaptureWarningsFlag())
class VerbosityParser(flags.ArgumentParser):
    """Sneakily use gflags parsing to get a simple callback."""
    def Parse(self, arg):
        """Parse a verbosity value and apply it as the log level.

        Accepts an integer level or a level-name string; known integer
        values are mapped back to their names.
        """
        try:
            intarg = int(arg)
            # Look up the name for this level (DEBUG, INFO, etc) if it exists.
            # ``logging._levelNames`` only exists on Python 2; it was
            # renamed to ``_levelToName`` by Python 3.4 (the sibling copy
            # of this module already carries this fix).
            try:
                level = logging._levelNames.get(intarg, intarg)
            except AttributeError:
                level = logging._levelToName.get(intarg, intarg)
        except ValueError:
            level = arg
        setLevel(level)
        return level
flags.DEFINE(
parser=VerbosityParser(),
serializer=flags.ArgumentSerializer(),
name='verbosity',
default=logging.INFO,
help='Logging verbosity')
# Define functions emulating C++ glog check-macros
# https://htmlpreview.github.io/?https://github.com/google/glog/master/doc/glog.html#check
def format_stacktrace(stack):
    """Print a stack trace that is easier to read.

    * Reduce paths to basename component
    * Truncates the part of the stack after the check failure
    """
    lines = []
    # The enumerate() index is unused; kept unchanged.
    for _, f in enumerate(stack):
        fname = os.path.basename(f[0])
        # Each frame tuple is (path, lineno, funcname, source_text).
        line = "\t%s:%d\t%s" % (fname + "::" + f[2], f[1], f[3])
        lines.append(line)
    return lines
class FailedCheckException(AssertionError):
    """Exception with message indicating check-failure location and values.

    Subclasses AssertionError so existing ``except AssertionError``
    handlers also catch failed glog-style checks.
    """
def check_failed(message):
    """Log a check failure with its stack trace, then re-raise.

    Raises:
        FailedCheckException: always.
    """
    # Drop the last two frames (this function and its check_* wrapper)
    # so the reported location is the caller's check site.
    stack = traceback.extract_stack()
    stack = stack[0:-2]
    stacktrace_lines = format_stacktrace(stack)
    filename, line_num, _, _ = stack[-1]
    try:
        raise FailedCheckException(message)
    except FailedCheckException:
        # NOTE(review): ``handler`` is only ever bound as a local inside
        # init() in this module, so these calls rely on a module-global
        # ``handler`` that is never created -- confirm; as written this
        # raises NameError before the intended re-raise below.
        log_record = logger.makeRecord('CRITICAL', 50, filename, line_num,
                                       message, None, None)
        handler.handle(log_record)
        log_record = logger.makeRecord('DEBUG', 10, filename, line_num,
                                       'Check failed here:', None, None)
        handler.handle(log_record)
        for line in stacktrace_lines:
            log_record = logger.makeRecord('DEBUG', 10, filename, line_num,
                                           line, None, None)
            handler.handle(log_record)
        raise
    return
def check(condition, message=None):
    """Raise exception with message if condition is False."""
    if condition:
        return
    check_failed("Check failed." if message is None else message)

def check_eq(obj1, obj2, message=None):
    """Raise exception with message if obj1 != obj2."""
    if not (obj1 != obj2):
        return
    check_failed("Check failed: %s != %s" % (str(obj1), str(obj2))
                 if message is None else message)

def check_ne(obj1, obj2, message=None):
    """Raise exception with message if obj1 == obj2."""
    if not (obj1 == obj2):
        return
    check_failed("Check failed: %s == %s" % (str(obj1), str(obj2))
                 if message is None else message)

def check_le(obj1, obj2, message=None):
    """Raise exception with message if not (obj1 <= obj2)."""
    if not (obj1 > obj2):
        return
    check_failed("Check failed: %s > %s" % (str(obj1), str(obj2))
                 if message is None else message)

def check_ge(obj1, obj2, message=None):
    """Raise exception with message unless (obj1 >= obj2)."""
    if not (obj1 < obj2):
        return
    check_failed("Check failed: %s < %s" % (str(obj1), str(obj2))
                 if message is None else message)

def check_lt(obj1, obj2, message=None):
    """Raise exception with message unless (obj1 < obj2)."""
    if not (obj1 >= obj2):
        return
    check_failed("Check failed: %s >= %s" % (str(obj1), str(obj2))
                 if message is None else message)

def check_gt(obj1, obj2, message=None):
    """Raise exception with message unless (obj1 > obj2)."""
    if not (obj1 <= obj2):
        return
    check_failed("Check failed: %s <= %s" % (str(obj1), str(obj2))
                 if message is None else message)

def check_notnone(obj, message=None):
    """Raise exception with message if obj is None."""
    if obj is not None:
        return
    check_failed("Check failed: Object is None." if message is None else message)
| {
"repo_name": "Chilledheart/windycode",
"path": "python/glog.py",
"copies": "1",
"size": "7070",
"license": "bsd-3-clause",
"hash": 7727933797591473000,
"line_mean": 27.28,
"line_max": 90,
"alpha_frac": 0.6024045262,
"autogenerated": false,
"ratio": 3.590655154900965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46930596811009645,
"avg_score": null,
"num_lines": null
} |
"""A simple Google-style logging wrapper."""
import logging
import time
import traceback
import os
import gflags as flags
FLAGS = flags.FLAGS
def format_message(record):
    """Return the record's fully formatted message text.

    Falls back to the raw ``record.msg`` when %-interpolation with
    ``record.args`` raises TypeError (e.g. no format specifiers).
    """
    try:
        return '%s' % (record.msg % record.args)
    except TypeError:
        return record.msg
class GlogFormatter(logging.Formatter):
    """logging.Formatter producing glog-style log lines."""

    # One-letter severity tags, as used by C++ glog.
    LEVEL_MAP = {
        logging.FATAL: 'F', # FATAL is alias of CRITICAL
        logging.ERROR: 'E',
        logging.WARN: 'W',
        logging.INFO: 'I',
        logging.DEBUG: 'D'
    }
    def __init__(self):
        logging.Formatter.__init__(self)
    def format(self, record):
        """Format a record as '<L><mmdd> <HH:MM:SS.ffffff> <pid> <file>:<line>] <msg>'."""
        try:
            level = GlogFormatter.LEVEL_MAP[record.levelno]
        except KeyError:
            level = '?'  # unknown/custom level
        date = time.localtime(record.created)
        # Sub-second part of the timestamp, in microseconds.
        date_usec = (record.created - int(record.created)) * 1e6
        record_message = '%c%02d%02d %02d:%02d:%02d.%06d %s %s:%d] %s' % (
            level, date.tm_mon, date.tm_mday, date.tm_hour, date.tm_min,
            date.tm_sec, date_usec,
            record.process if record.process is not None else '?????',
            record.filename,
            record.lineno,
            format_message(record))
        # Override getMessage so the base class emits the prefixed text.
        record.getMessage = lambda: record_message
        return logging.Formatter.format(self, record)
# Module-level root logger and the stream handler this module installs on it.
logger = logging.getLogger()
handler = logging.StreamHandler()
def setLevel(newlevel):
    """Set the root logger's level and log the change at DEBUG."""
    logger.setLevel(newlevel)
    logger.debug('Log level set to %s', newlevel)
def init():
    """Apply the parsed --verbosity flag value; call after flag parsing."""
    setLevel(FLAGS.verbosity)
# Convenience aliases so callers can use glog.info(...), glog.error(...), etc.
debug = logging.debug
info = logging.info
warning = logging.warning
warn = logging.warning
error = logging.error
exception = logging.exception
fatal = logging.fatal
log = logging.log
# Re-exported stdlib level constants.
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
WARN = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
# Level names used to derive the severity letters for the prefix regex below.
_level_names = {
    DEBUG: 'DEBUG',
    INFO: 'INFO',
    WARN: 'WARN',
    ERROR: 'ERROR',
    FATAL: 'FATAL'
}
# First letter of each level name ('D', 'I', 'W', 'E', 'F').
_level_letters = [name[0] for name in _level_names.values()]
GLOG_PREFIX_REGEX = (
r"""
(?x) ^
(?P<severity>[%s])
(?P<month>\d\d)(?P<day>\d\d)\s
(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)
\.(?P<microsecond>\d{6})\s+
(?P<process_id>-?\d+)\s
(?P<filename>[a-zA-Z<_][\w._<>-]+):(?P<line>\d+)
\]\s
""") % ''.join(_level_letters)
"""Regex you can use to parse glog line prefixes."""
# Install the glog formatter on this module's handler and attach it to root.
handler.setFormatter(GlogFormatter())
logger.addHandler(handler)
class CaptureWarningsFlag(flags.BooleanFlag):
    """Boolean flag that toggles routing Python warnings into the log."""
    def __init__(self):
        flags.BooleanFlag.__init__(self, 'glog_capture_warnings', True,
                                   "Redirect warnings to log.warn messages")
    def Parse(self, arg):
        # Forward warnings.warn() output into logging whenever the flag is set.
        flags.BooleanFlag.Parse(self, arg)
        logging.captureWarnings(self.value)
flags.DEFINE_flag(CaptureWarningsFlag())
class VerbosityParser(flags.ArgumentParser):
    """Sneakily use gflags parsing to get a simple callback."""
    def Parse(self, arg):
        """Parse --verbosity, apply it to the root logger, and return it."""
        try:
            intarg = int(arg)
            # Look up the name for this level (DEBUG, INFO, etc) if it exists
            try:
                level = logging._levelNames.get(intarg, intarg)
            except AttributeError:  # This was renamed somewhere b/w 2.7 and 3.4
                level = logging._levelToName.get(intarg, intarg)
        except ValueError:
            # Non-numeric values (e.g. 'INFO') are passed through verbatim.
            level = arg
        setLevel(level)
        return level
flags.DEFINE(
    parser=VerbosityParser(),
    serializer=flags.ArgumentSerializer(),
    name='verbosity',
    default=logging.INFO,
    help='Logging verbosity')
# Define functions emulating C++ glog check-macros
# https://htmlpreview.github.io/?https://github.com/google/glog/master/doc/glog.html#check
def format_stacktrace(stack):
    """Format traceback stack entries into compact, readable lines.

    * Reduces each path to its basename component.
    * Renders each entry as '\\tfile::function:line\\tsource'.

    Args:
        stack: sequence of (filename, lineno, funcname, text) entries as
            returned by traceback.extract_stack().

    Returns:
        A list with one formatted string per stack entry.
    """
    # The original iterated with enumerate() and discarded the index;
    # unpack the frame tuple directly instead.
    lines = []
    for filename, lineno, funcname, text in stack:
        fname = os.path.basename(filename)
        lines.append("\t%s:%d\t%s" % (fname + "::" + funcname, lineno, text))
    return lines
class FailedCheckException(AssertionError):
    """Raised by the check_* helpers; message names the failing values/site."""
def check_failed(message):
    """Log a check failure (message plus a trimmed stack trace) and raise.

    Always raises FailedCheckException; the trailing ``return`` is
    unreachable.
    """
    stack = traceback.extract_stack()
    # Drop check_failed itself and its check_* caller from the trace.
    stack = stack[0:-2]
    stacktrace_lines = format_stacktrace(stack)
    filename, line_num, _, _ = stack[-1]
    try:
        raise FailedCheckException(message)
    except FailedCheckException:
        # NOTE: makeRecord's first two args are (name, level); 'CRITICAL' and
        # 'DEBUG' here are record *names*, the numeric 50/10 are the levels.
        log_record = logger.makeRecord('CRITICAL', 50, filename, line_num,
                                       message, None, None)
        handler.handle(log_record)
        log_record = logger.makeRecord('DEBUG', 10, filename, line_num,
                                       'Check failed here:', None, None)
        handler.handle(log_record)
        # Emit the trimmed stack trace line by line at DEBUG.
        for line in stacktrace_lines:
            log_record = logger.makeRecord('DEBUG', 10, filename, line_num,
                                           line, None, None)
            handler.handle(log_record)
        raise
    return
def check(condition, message=None):
    """Raise exception with message if condition is False."""
    if condition:
        return
    check_failed("Check failed." if message is None else message)
def check_eq(obj1, obj2, message=None):
    """Raise exception with message if obj1 != obj2."""
    if obj1 != obj2:
        check_failed(message if message is not None
                     else "Check failed: %s != %s" % (obj1, obj2))
def check_ne(obj1, obj2, message=None):
    """Raise exception with message if obj1 == obj2."""
    if obj1 == obj2:
        check_failed(message if message is not None
                     else "Check failed: %s == %s" % (obj1, obj2))
def check_le(obj1, obj2, message=None):
    """Raise exception with message if not (obj1 <= obj2)."""
    if obj1 > obj2:
        check_failed(message if message is not None
                     else "Check failed: %s > %s" % (obj1, obj2))
def check_ge(obj1, obj2, message=None):
    """Raise exception with message unless (obj1 >= obj2)."""
    if obj1 < obj2:
        check_failed(message if message is not None
                     else "Check failed: %s < %s" % (obj1, obj2))
def check_lt(obj1, obj2, message=None):
    """Raise exception with message unless (obj1 < obj2)."""
    if obj1 >= obj2:
        check_failed(message if message is not None
                     else "Check failed: %s >= %s" % (obj1, obj2))
def check_gt(obj1, obj2, message=None):
    """Raise exception with message unless (obj1 > obj2)."""
    if obj1 <= obj2:
        check_failed(message if message is not None
                     else "Check failed: %s <= %s" % (obj1, obj2))
def check_notnone(obj, message=None):
    """Raise exception with message if obj is None."""
    if obj is None:
        check_failed("Check failed: Object is None." if message is None
                     else message)
| {
"repo_name": "benley/python-glog",
"path": "glog.py",
"copies": "1",
"size": "7080",
"license": "bsd-2-clause",
"hash": 2898157753807107600,
"line_mean": 27.6639676113,
"line_max": 90,
"alpha_frac": 0.5997175141,
"autogenerated": false,
"ratio": 3.6177823198773633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9713695152978225,
"avg_score": 0.0007609361998277937,
"num_lines": 247
} |
"""A simple grid plane module.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance
from traitsui.api import View, Group, Item
# Local imports
from mayavi.core.module import Module
from mayavi.components import grid_plane
from mayavi.components.actor import Actor
from mayavi.core.pipeline_info import PipelineInfo
######################################################################
# `GridPlane` class.
######################################################################
class GridPlane(Module):
    """Mayavi module that renders a grid plane for structured datasets."""
    # The version of this class. Used for persistence.
    __version__ = 0
    # Component that generates the grid-plane geometry.
    grid_plane = Instance(grid_plane.GridPlane, allow_none=False,
                          record=True)
    # Actor that renders the plane.  Fixed typo: `allow_non` -> `allow_none`.
    # Traits silently stores unknown keywords as inert metadata, so the
    # misspelled keyword never actually forbade None here.
    actor = Instance(Actor, allow_none=False, record=True)
    # Datasets and attribute kinds this module accepts as input.
    input_info = PipelineInfo(datasets=['image_data',
                                        'structured_grid',
                                        'rectilinear_grid'],
                              attribute_types=['any'],
                              attributes=['any'])
    # Traits UI layout: the grid plane and actor editors stacked vertically.
    view = View(Group(Item(name='grid_plane', style='custom'),
                      Item(name='actor', style='custom'),
                      show_labels=False))
    ######################################################################
    # `Module` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.
        This method is invoked when the object is initialized via
        `__init__`. Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup. So upstream data
        will not be available. The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters. You should also
        set the `actors` attribute up at this point.
        """
        # Create the components
        self.grid_plane = grid_plane.GridPlane()
        self.actor = Actor()
        # Setup the actor suitably for this module: wireframe, no culling.
        prop = self.actor.property
        prop.set(backface_culling=0, frontface_culling=0,
                 representation='w')
        self.actor.mapper.scalar_visibility = 0
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.
        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        mm = self.module_manager
        if mm is None:
            return
        # Data is available, so set the input for the grid plane.
        self.grid_plane.inputs = [mm.source]
        # Set the LUT for the mapper.
        self.actor.set_lut(mm.scalar_lut_manager.lut)
        self.pipeline_changed = True
    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.
        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Just set data_changed, the component should do the rest.
        self.data_changed = True
    ######################################################################
    # Non-public methods.
    ######################################################################
    def _grid_plane_changed(self, old, new):
        # Re-wire the actor to render the newly assigned grid-plane component.
        actor = self.actor
        if actor is not None:
            actor.inputs = [new]
        self._change_components(old, new)
    def _actor_changed(self, old, new):
        # Hand the new actor the scene and the grid plane as its input.
        new.scene = self.scene
        gp = self.grid_plane
        if gp is not None:
            new.inputs = [gp]
        self._change_components(old, new)
| {
"repo_name": "alexandreleroux/mayavi",
"path": "mayavi/modules/grid_plane.py",
"copies": "3",
"size": "3923",
"license": "bsd-3-clause",
"hash": -8398159045276610000,
"line_mean": 34.3423423423,
"line_max": 74,
"alpha_frac": 0.5388733112,
"autogenerated": false,
"ratio": 4.648104265402844,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6686977576602844,
"avg_score": null,
"num_lines": null
} |
"""A simple guestbook app to test parts of NDB end-to-end."""
import cgi
import logging
import os
import re
import sys
import time
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.datastore import entity_pb
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
import ndb
# HTML template for the '/home' page; interpolated with a dict via '%'.
HOME_PAGE = """
<script>
function focus() {
textarea = document.getElementById('body');
textarea.focus();
}
</script>
<body onload=focus()>
Nickname: <a href="/account">%(nickname)s</a> |
<a href="%(login)s">login</a> |
<a href="%(logout)s">logout</a>
<form method=POST action=/home>
<!-- TODO: XSRF protection -->
<input type=text id=body name=body size=60>
<input type=submit>
</form>
</body>
"""
# HTML template for the '/account' page; interpolated with a dict via '%'.
ACCOUNT_PAGE = """
<body>
Nickname: <a href="/account">%(nickname)s</a> |
<a href="%(logout)s">logout</a>
<form method=POST action=/account>
<!-- TODO: XSRF protection -->
Email: %(email)s<br>
New nickname:
<input type=text name=nickname size=20 value=%(proposed_nickname)s><br>
<input type=submit name=%(action)s value="%(action)s Account">
<input type=submit name=delete value="Delete Account">
<a href=/home>back to home page</a>
</form>
</body>
"""
class Account(ndb.Model):
    """User account."""
    email = ndb.StringProperty()     # account email address
    userid = ndb.StringProperty()    # Google user_id (also used as the key id)
    nickname = ndb.StringProperty()  # optional display name
class Message(ndb.Model):
    """Guestbook message."""
    body = ndb.StringProperty()    # message text
    when = ndb.FloatProperty()     # POSIX timestamp of posting
    userid = ndb.StringProperty()  # poster's user_id, if signed in
class UrlSummary(ndb.Model):
    """Metadata about a URL."""
    MAX_AGE = 60  # cache lifetime in seconds
    url = ndb.StringProperty()    # the URL itself (also used as the key id)
    title = ndb.StringProperty()  # page <title>, if fetched successfully
    when = ndb.FloatProperty()    # POSIX timestamp when the summary was cached
def account_key(userid):
    """Return the ndb Key of the Account entity keyed by userid."""
    return ndb.Key(Account, userid)
def get_account(userid):
    """Return a Future for an Account."""
    return account_key(userid).get_async()
@ndb.tasklet
def get_nickname(userid):
    """Return a Future for a nickname from an account."""
    account = yield get_account(userid)
    if not account:
        # No Account entity exists for this user id.
        nickname = 'Unregistered'
    else:
        nickname = account.nickname or account.email
    raise ndb.Return(nickname)
class HomePage(webapp.RequestHandler):
    """Handler for '/home': renders the message feed and accepts new posts."""
    @ndb.toplevel
    def get(self):
        # Written as a generator; @ndb.toplevel drives the yields and waits
        # for all async work before the response is finalized.
        nickname = 'Anonymous'
        user = users.get_current_user()
        if user is not None:
            nickname = yield get_nickname(user.user_id())
        values = {'nickname': nickname,
                  'login': users.create_login_url('/home'),
                  'logout': users.create_logout_url('/home'),
                  }
        self.response.out.write(HOME_PAGE % values)
        qry, options = self._make_query()
        # Map the render callback over query results concurrently;
        # each result is a (sort_key, html_text) pair.
        pairs = yield qry.map_async(self._hp_callback, options=options)
        for key, text in pairs:
            self.response.out.write(text)
    def _make_query(self):
        # Newest messages first; fetch 13 at a time, at most 43 total.
        qry = Message.query().order(-Message.when)
        options = datastore_query.QueryOptions(batch_size=13, limit=43)
        return qry, options
    @ndb.tasklet
    def _hp_callback(self, message):
        """Render one message as HTML, hyperlinking (and titling) any URL."""
        nickname = 'Anonymous'
        if message.userid:
            nickname = yield get_nickname(message.userid)
        # Check if there's an URL.
        body = message.body
        m = re.search(r'(?i)\bhttps?://\S+[^\s.,;\]\}\)]', body)
        if not m:
            escbody = cgi.escape(body)
        else:
            url = m.group()
            pre = body[:m.start()]
            post = body[m.end():]
            title = ''
            # NOTE(review): newer ndb exposes _get_kind(); confirm GetKind()
            # exists in the ndb version bundled with this app.
            key = ndb.Key(flat=[UrlSummary.GetKind(), url])
            summary = yield key.get_async()
            if not summary or summary.when < time.time() - UrlSummary.MAX_AGE:
                # Cache miss or stale: fetch the page title (0.5s deadline).
                rpc = urlfetch.create_rpc(deadline=0.5)
                urlfetch.make_fetch_call(rpc, url,allow_truncated=True)
                t0 = time.time()
                result = yield rpc
                t1 = time.time()
                logging.warning('url=%r, status=%r, dt=%.3f',
                                url, result.status_code, t1-t0)
                if result.status_code == 200:
                    bodytext = result.content
                    m = re.search(r'(?i)<title>([^<]+)</title>', bodytext)
                    if m:
                        title = m.group(1).strip()
                summary = UrlSummary(key=key, url=url, title=title,
                                     when=time.time())
                yield summary.put_async()
            hover = ''
            if summary.title:
                hover = ' title="%s"' % summary.title
            escbody = (cgi.escape(pre) +
                       '<a%s href="%s">' % (hover, cgi.escape(url)) +
                       cgi.escape(url) + '</a>' + cgi.escape(post))
        text = '%s - %s - %s<br>' % (cgi.escape(nickname),
                                     time.ctime(message.when or 0),
                                     escbody)
        if message.when is None:
            message.when = 0
        # Negative timestamp so callers sorting ascending get newest first.
        raise ndb.Return((-message.when, text))
    @ndb.toplevel
    def post(self):
        # TODO: XSRF protection.
        body = self.request.get('body', '').strip()
        if body:
            userid = None
            user = users.get_current_user()
            if user:
                userid = user.user_id()
            message = Message(body=body, when=time.time(), userid=userid)
            yield message.put_async()
        self.redirect('/home')
class AccountPage(webapp.RequestHandler):
    """Handler for '/account': view, create, update, or delete the account."""
    @ndb.toplevel
    def get(self):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url('/account'))
            return
        email = user.email()
        action = 'Create'
        # Fetch the account and the nickname concurrently.
        account, nickname = yield (get_account(user.user_id()),
                                   get_nickname(user.user_id()))
        if account is not None:
            action = 'Update'
        if account:
            proposed_nickname = account.nickname or account.email
        else:
            proposed_nickname = email
        values = {'email': email,
                  'nickname': nickname,
                  'proposed_nickname': proposed_nickname,
                  'login': users.create_login_url('/home'),
                  'logout': users.create_logout_url('/home'),
                  'action': action,
                  }
        self.response.out.write(ACCOUNT_PAGE % values)
    @ndb.toplevel
    def post(self):
        # TODO: XSRF protection.
        @ndb.tasklet
        def helper():
            # Runs inside a datastore transaction (see transaction_async below).
            user = users.get_current_user()
            if not user:
                self.redirect(users.create_login_url('/account'))
                return
            account = yield get_account(user.user_id())
            if self.request.get('delete'):
                if account:
                    yield account.key.delete_async()
                self.redirect('/account')
                return
            if not account:
                account = Account(key=account_key(user.user_id()),
                                  email=user.email(), userid=user.user_id())
            nickname = self.request.get('nickname')
            if nickname:
                account.nickname = nickname
            yield account.put_async()
            self.redirect('/account')
        yield ndb.transaction_async(helper)
# URL routing table and the WSGI application.
urls = [
    ('/home', HomePage),
    ('/account', AccountPage),
    ]
app = webapp.WSGIApplication(urls)
def main():
    """CGI entry point: run the WSGI application."""
    util.run_wsgi_app(app)
if __name__ == '__main__':
    main()
| {
"repo_name": "bslatkin/8-bits",
"path": "appengine-ndb/demo/main.py",
"copies": "1",
"size": "6974",
"license": "apache-2.0",
"hash": -7432421329148439000,
"line_mean": 26.4566929134,
"line_max": 75,
"alpha_frac": 0.6036707772,
"autogenerated": false,
"ratio": 3.6153447382063244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4719015515406324,
"avg_score": null,
"num_lines": null
} |
"""A simple gui chat client. It's built off the simpleclient.py in the
Twisted examples, and uses TwistedRenderer to tie the GUI and network
elements together.
The GUI has two screens: LoginScreen and ChatScreen. When the Protocol
detects a server connection, it emits an event to switch to the
ChatScreen. When the Protocol detects it has lost a connection, it emits
an event to switch to the LoginScreen.
The client and server doesn't really keep track of which users are
connected. This could be done with some more complex data being sent
across the wire (maybe even just a dict), but for this example, only
chat text is sent. Also, I didn't bother making the chat window scroll
down automatically when new items are added. This may be something that
can be done with ScrolledList, but I haven't looked at it closely
enough.
written by Ben Olsen
"""
import pygame
from pygame.locals import *
from ocempgui.widgets import *
from ocempgui.widgets.Constants import *
from ocempgui.widgets.components import TextListItem
from ocempgui.object import BaseObject
from ocempgui.events import INotifyable
from twisted.internet._threadedselect import install
install()
from twisted.internet import reactor, protocol
# local signals
SIG_REC = 'rec'      # chat text received from the network
SIG_SEND = 'send'    # chat text the user wants to send
COMMAND = 'command'  # screen-switch / connect commands
class LoginScreen (BaseObject):
    """The Login Screen contains Entries for server, port, and user
    name. The button emits a signal containing this info, which is
    picked up by the factory and used in reactor.connectTCP. It inherits
    from BaseObject to make signal processing easier.
    """
    def __init__ (self, re):
        BaseObject.__init__ (self)
        # Remember to set _signals before setting the manager!
        self._signals[COMMAND] = []
        self.connect_signal (COMMAND, self.processCommand)
        self.manager = re.active_layer[2]
        self.renderer = re
        # self.frame is the main widget for this class.
        self.frame = VFrame (Label ("Enter login information"))
        # labels on the left, entry fields on the right.
        self.table = Table (3, 3)
        self.table.set_column_align (0, ALIGN_RIGHT)
        self.table.set_column_align (1, ALIGN_LEFT)
        self.table.add_child (0, 0, Label ("Server:"))
        self.table.add_child (1, 0, Label ("Port:"))
        self.table.add_child (2, 0, Label ("Username:"))
        self.serverEntry = Entry ('localhost')
        self.portEntry = Entry ('8008')
        self.nameEntry = Entry ()
        self.table.add_child (0, 1, self.serverEntry)
        self.table.add_child (1, 1, self.portEntry)
        self.table.add_child (2, 1, self.nameEntry)
        self.frame.add_child (self.table)
        self.button = Button ("Connect")
        self.button.connect_signal (SIG_MOUSEDOWN, self.sendConnect)
        self.frame.add_child (self.button)
        # Set the frame roughly in the middle.
        self.frame.topleft = (190, 70)
    def sendConnect (self, data):
        """Emit (server, port, username) for the factory to connect with."""
        # Called when the button gets SIG_MOUSEDOWN.
        self.emit (COMMAND, (self.serverEntry.text,
                             int (self.portEntry.text), self.nameEntry.text))
    def processCommand (self, data):
        """Show or hide the login frame on screen-switch commands."""
        # This function adds or removes self.frame from the renderer
        # based on command signals sent from the Protocol.
        if data == 'switch_to_chat':
            self.renderer.remove_widget (self.frame)
        if data == 'switch_to_login':
            self.renderer.add_widget (self.frame)
    def notify (self, event):
        # Dispatch incoming events to the connected signal handlers.
        self.run_signal_handlers (event.signal, event.data)
class ChatScreen (BaseObject):
    """The ChatScreen only has two widgets: the chat window, which is a
    ScrolledList, and the Entry field. The Entry is tied to the ENTER
    key and emits a signal telling the Protocol to send the attached
    data. Listens for SIG_REC events which are sent by the Protocol and
    adds the data to the ScrolledList.
    """
    def __init__ (self, re):
        BaseObject.__init__ (self)
        # Remember to set _signals before setting the manager!
        self._signals[SIG_REC] = []
        self.connect_signal (SIG_REC, self.addText)
        self._signals[COMMAND] = []
        self.connect_signal (COMMAND, self.processCommand)
        self.manager = re.active_layer[2]
        self.renderer = re
        # self.table is the main widget for this class
        self.table = Table(2,2)
        self.chatframe = VFrame (Label ("Chat Window"))
        self.chatwindow = ScrolledList (520, 240)
        self.chatwindow.selectionmode = SELECTION_NONE
        self.chatframe.add_child (self.chatwindow)
        self.entryFrame = VFrame ()
        self.entry = Entry ()
        self.entry.minsize = (520, 24)
        self.entry.connect_signal ("input", self.sendText)
        self.entryFrame.add_child (self.entry)
        self.table.add_child (0, 0, self.chatframe)
        self.table.add_child (1, 0, self.entryFrame)
    def addText (self, text):
        """Append received chat text and scroll the list to the bottom."""
        # Called when the class gets a SIG_REC from the Protocol.
        self.chatwindow.items.append (TextListItem (text))
        # Enforce update before proceeding, so scrolling will be
        # correct and is not influenced by the sprite update cycles.
        self.chatwindow.update ()
        self.chatwindow.vscrollbar.value = self.chatwindow.vscrollbar.maximum
    def sendText (self):
        """Emit the entry's text for the Protocol to send, then clear it."""
        # Called when the Entry gets ENTER.
        self.emit (SIG_SEND, self.entry.text)
        self.entry.text = ''
    def processCommand (self, data):
        """Show or hide the chat table on screen-switch commands."""
        # This function adds or removes self.table from the renderer
        # based on command signals sent from the Protocol.
        if data == 'switch_to_chat':
            self.renderer.add_widget (self.table)
        if data == 'switch_to_login':
            self.renderer.remove_widget (self.table)
    def notify (self, event):
        # Dispatch incoming events to the connected signal handlers.
        self.run_signal_handlers (event.signal, event.data)
class ChatClient (protocol.Protocol, INotifyable):
    """The Protocol does most of the work. When it detects a connection,
    it emits a signal to switch from LoginScreen to ChatScreen, and vice
    versa for losing a connection. It watches for SIG_SEND events and
    sends the data across the connection.
    Note that because of the way the Factory instantiates the Protocol,
    it doesn't make sense to have this class inherit from
    BaseObject. Instead, once it knows it has a connection, it adds
    itself to the renderer's EventManager.
    """
    def __init__ (self):
        self.user = None  # user name; copied from the factory on connect
    def connectionMade (self):
        # The connection was successful, and we now have communication
        # with a server.
        self.manager.add_object (self, SIG_SEND)
        self.manager.emit (COMMAND, 'switch_to_chat')
        # The factory gets the login info, including user name, so set
        # it here to match the factory one.
        self.user = self.factory.user
    def connectionLost (self, reason):
        # Fall back to the login screen when the server goes away.
        self.manager.emit (COMMAND, 'switch_to_login')
    def dataReceived(self, data):
        # When we get data from the network, emit an event to have it
        # displayed by the ChatScreen.
        self.manager.emit (SIG_REC, data)
    def notify (self, event):
        # Since the server doesn't keep track of user names, prepend
        # self.user to the beginning of each message
        if event.signal == SIG_SEND:
            self.transport.write (str (self.user) + '>> ' + str (event.data))
class ChatFactory (protocol.ClientFactory, INotifyable):
    """The Factory is the object used by the reactor. It keeps track of
    the connection with self.connected, so that hitting the LoginScreen
    connect button doesn't try to connect multiple times.
    """
    protocol = ChatClient
    def __init__ (self, re):
        # Set the Protocol's event manager.
        self.protocol.manager = re.active_layer[2]
        self.re = re
        self.connected = 0  # 1 while an attempt/connection is in flight
        self.user = None    # user name from the most recent login
    def clientConnectionFailed (self, connector, reason):
        self.connected = 0
    def clientConnectionLost (self, connector, reason):
        self.connected = 0
    def startedConnecting (self, connector):
        self.connected = 1
    def notify (self, event):
        # Only act on 'command' events carrying a (server, port, user) tuple
        # and only when no connection attempt is already in flight.
        # Idiom fix: use isinstance() instead of `type(...) == tuple`.
        if (event.signal == 'command') and \
           isinstance (event.data, tuple) and not self.connected:
            server = event.data[0]
            port = event.data[1]
            self.user = event.data[2]
            # Attempt the connection.
            reactor.connectTCP (server, port, self)
def main ():
    """Set up the renderer, both screens, and the factory, then run."""
    re = TwistedRenderer ()
    re.create_screen (550, 320)
    re.title = "Chat client"
    re.color = (234, 228, 223)
    # Hand the Twisted reactor to the renderer so GUI and network share a loop.
    re.reactor = reactor
    # Use something more legible than the default
    base.GlobalStyle.styles["default"]["font"]["name"] = "Arial"
    base.GlobalStyle.styles["default"]["font"]["size"] = 12
    # Create the two screen objects.
    chatter = ChatScreen (re)
    login = LoginScreen (re)
    # We always start at the login screen.
    re.add_widget (login.frame)
    # Create the factory.
    factory = ChatFactory (re)
    re.active_layer[2].add_object (factory, COMMAND)
    re.start()
if __name__ == '__main__':
    main ()
| {
"repo_name": "prim/ocempgui",
"path": "doc/examples/chat/guiclient.py",
"copies": "1",
"size": "9336",
"license": "bsd-2-clause",
"hash": -5928444186705799000,
"line_mean": 37.106122449,
"line_max": 77,
"alpha_frac": 0.6485646958,
"autogenerated": false,
"ratio": 3.947568710359408,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9976636921566597,
"avg_score": 0.02389929691856221,
"num_lines": 245
} |
"""A simple GUI framework for Pygame.
This framework is not meant as a competitor to PyQt or other, perhaps more
formal, GUI frameworks. Instead, pygameui is but a simple framework for game
prototypes.
The app is comprised of a stack of scenes; the top-most or current scene is
what is displayed in the window. Scenes are comprised of Views which are
comprised of other Views. pygameui contains view classes for things like
labels, buttons, and scrollbars.
pygameui is a framework, not a library. While you write view controllers in the
form of scenes, pygameui will run the overall application by running a loop
that receives device events (mouse button clicks, keyboard presses, etc.) and
dispatches the events to the relevant view(s) in your scene(s).
Each view in pygameui is rectangular in shape and whose dimensions are
determined by the view's "frame". A view is backed by a Pygame surface.
Altering a view's frame requires that you call 'relayout' which will resize the
view's backing surface and give each child view a chance to reposition and/or
resize itself in response.
Events on views can trigger response code that you control. For instance, when
a button is clicked, your code can be called back. The click is a "signal" and
your code is a "slot". The view classes define various signals to which you
connect zero or more slots.
a_button.on_clicked.connect(click_callback)
"""
# Package metadata.
AUTHOR = 'Brian Hammond <brian@fictorial.com>'
COPYRIGHT = 'Copyright (C) 2012 Fictorial LLC.'
LICENSE = 'MIT'
__version__ = '0.2.0'
import pygame
from alert import *
from button import *
from callback import *
from checkbox import *
from dialog import *
from flipbook import *
from grid import *
from imagebutton import *
from imageview import *
from label import *
from listview import *
from notification import *
from progress import *
from render import *
from resource import *
from scroll import *
from select import *
from slider import *
from spinner import *
from textfield import *
from view import *
import focus
import window
import scene
import theme
from scene import Scene
import logging
logger = logging.getLogger(__name__)
# Shorthand re-export of pygame.Rect.
Rect = pygame.Rect
# The top-level display surface; created by init().
window_surface = None
def init(name='', window_size=(640, 480)):
    """Initialize pygame and create the application window.

    Args:
        name: window caption text.
        window_size: (width, height) of the window in pixels.
    """
    logger.debug('init %s %s' % (__name__, __version__))
    pygame.init()
    logger.debug('pygame %s' % pygame.__version__)
    # Held keys repeat: 200ms initial delay, then every 50ms.
    pygame.key.set_repeat(200, 50)
    global window_surface
    window_surface = pygame.display.set_mode(window_size)
    pygame.display.set_caption(name)
    window.rect = pygame.Rect((0, 0), window_size)
    theme.init()
def run():
    """Main loop: dispatch pygame events to the current scene at ~60 FPS.

    Requires at least one scene to have been pushed onto scene.stack.
    """
    assert len(scene.stack) > 0
    clock = pygame.time.Clock()
    down_in_view = None  # view that received the last mouse-down, if any
    elapsed = 0
    while True:
        dt = clock.tick(60)  # cap at 60 FPS; dt is ms since the last tick
        elapsed += dt
        if elapsed > 5000:
            # Log the frame rate roughly every 5 seconds.
            elapsed = 0
            logger.debug('%d FPS', clock.get_fps())
        for e in pygame.event.get():
            if e.type == pygame.QUIT:
                pygame.quit()
                import sys
                sys.exit()
            mousepoint = pygame.mouse.get_pos()
            if e.type == pygame.MOUSEBUTTONDOWN:
                hit_view = scene.current.hit(mousepoint)
                logger.debug('hit %s' % hit_view)
                if (hit_view is not None and
                    not isinstance(hit_view, scene.Scene)):
                    # Focus the view under the cursor and forward the press
                    # in the view's local coordinates.
                    focus.set(hit_view)
                    down_in_view = hit_view
                    pt = hit_view.from_window(mousepoint)
                    hit_view.mouse_down(e.button, pt)
                else:
                    focus.set(None)
            elif e.type == pygame.MOUSEBUTTONUP:
                hit_view = scene.current.hit(mousepoint)
                if hit_view is not None:
                    # Releasing over a different view blurs the pressed one.
                    if down_in_view and hit_view != down_in_view:
                        down_in_view.blurred()
                        focus.set(None)
                    pt = hit_view.from_window(mousepoint)
                    hit_view.mouse_up(e.button, pt)
                down_in_view = None
            elif e.type == pygame.MOUSEMOTION:
                if down_in_view and down_in_view.draggable:
                    # Drag the pressed view; e.rel is the relative motion.
                    pt = down_in_view.from_window(mousepoint)
                    down_in_view.mouse_drag(pt, e.rel)
                else:
                    scene.current.mouse_motion(mousepoint)
            elif e.type == pygame.KEYDOWN:
                # Keyboard input goes to the focused view, else to the scene.
                if focus.view:
                    focus.view.key_down(e.key, e.unicode)
                else:
                    scene.current.key_down(e.key, e.unicode)
            elif e.type == pygame.KEYUP:
                if focus.view:
                    focus.view.key_up(e.key)
                else:
                    scene.current.key_up(e.key)
        # Advance and redraw the current scene, then present the frame.
        scene.current.update(dt / 1000.0)
        scene.current.draw()
        window_surface.blit(scene.current.surface, (0, 0))
        pygame.display.flip()
| {
"repo_name": "fictorial/pygameui",
"path": "pygameui/__init__.py",
"copies": "2",
"size": "4921",
"license": "mit",
"hash": 7732615115943387000,
"line_mean": 30.7483870968,
"line_max": 79,
"alpha_frac": 0.6189798821,
"autogenerated": false,
"ratio": 3.8962787015043547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009385547333801559,
"num_lines": 155
} |
# A simple hash table implementation for learning purposes. You shouldn't actually use this in real code! Python provides a much better implementation with the default dictionary datatype.
class ExampleHashTable(object):
    """A toy separate-chaining hash table for learning purposes.

    Not meant for real code -- Python's built-in dict is a far better
    implementation of the same idea.
    """

    def __init__(self, buckets, hash_function=None):
        # Build every bucket list separately; `[[]] * buckets` would alias
        # one list across all slots.
        self.buckets = [[] for _ in range(buckets)]
        # Fall back to Python's built-in hash() unless a custom one is given.
        self.hash_function = hash if hash_function is None else hash_function
        self.num_entries = 0

    def insert(self, key, value):
        """Insert a key/value pair; a value already in that bucket is a no-op."""
        bucket = self.buckets[self.get_bucket_index(key)]
        if value not in bucket:
            bucket.append(value)
            self.num_entries += 1

    def remove(self, key, value):
        """Remove the mapping between key and value, if present."""
        bucket = self.buckets[self.get_bucket_index(key)]
        if value in bucket:
            bucket.remove(value)
            self.num_entries -= 1

    def get(self, key):
        """Return the whole bucket (list of values) the key hashes to."""
        return self.buckets[self.get_bucket_index(key)]

    def get_bucket_index(self, key):
        """Map the key's hash onto a bucket index with the modulus operator."""
        return self.hash_function(key) % len(self.buckets)
def get_load_factor(self):
return float(self.num_entries)/len(self.buckets) | {
"repo_name": "ross-t/python-ds",
"path": "Tables/hashtable.py",
"copies": "1",
"size": "1818",
"license": "mit",
"hash": 8384190014543952000,
"line_mean": 43.3658536585,
"line_max": 197,
"alpha_frac": 0.7227722772,
"autogenerated": false,
"ratio": 3.543859649122807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4766631926322807,
"avg_score": null,
"num_lines": null
} |
""" A simple helloworld example
Different workflows are shown here.
"""
from tensorflow import keras
from tensorflow.keras import layers
from keras_tuner.engine.hypermodel import HyperModel
from keras_tuner.engine.hyperparameters import HyperParameters
from keras_tuner.tuners import RandomSearch
# Load MNIST; use the test split as validation data.
(x, y), (val_x, val_y) = keras.datasets.mnist.load_data()
# Scale pixel values into [0, 1].
x = x.astype("float32") / 255.0
val_x = val_x.astype("float32") / 255.0
# Use a small subset to keep the example fast.
x = x[:10000]
y = y[:10000]
"""Basic case:
- We define a `build_model` function
- It returns a compiled model
- It uses hyperparameters defined on the fly
"""
def build_model(hp):
    """Build and compile a tunable MLP for MNIST.

    Tuned hyperparameters: number of dense layers, units per layer,
    and the optimizer learning rate.
    """
    model = keras.Sequential()
    model.add(layers.Flatten(input_shape=(28, 28)))
    depth = hp.Int("num_layers", 2, 20)
    for layer_idx in range(depth):
        units = hp.Int("units_" + str(layer_idx), 32, 512, 32)
        model.add(layers.Dense(units=units, activation="relu"))
    model.add(layers.Dense(10, activation="softmax"))
    learning_rate = hp.Choice("learning_rate", [1e-2, 1e-3, 1e-4])
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate),
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model
# Case 1: tune build_model; each trial trains 3 times and the objective
# is averaged across executions.
tuner = RandomSearch(
    build_model,
    objective="val_accuracy",
    max_trials=5,
    executions_per_trial=3,
    directory="test_dir",
    project_name="case1",
)
tuner.search_space_summary()
tuner.search(x=x, y=y, epochs=3, validation_data=(val_x, val_y))
tuner.results_summary()
# """Case #2:
# - We override the loss and metrics
# """
tuner = RandomSearch(
    build_model,
    objective="val_accuracy",
    loss=keras.losses.SparseCategoricalCrossentropy(name="my_loss"),
    metrics=["accuracy", "mse"],
    max_trials=5,
    directory="test_dir",
    project_name="case2",
)
tuner.search(x, y, epochs=5, validation_data=(val_x, val_y))
class MyHyperModel(HyperModel):
    """HyperModel that builds a tunable MLP classifier for fixed-size images."""

    def __init__(self, img_size, classes):
        # img_size: (height, width) of the input images.
        # classes: number of output classes.
        self.img_size = img_size
        self.classes = classes

    def build(self, hp):
        """Construct and compile the model from the given hyperparameters."""
        model = keras.Sequential()
        model.add(layers.Flatten(input_shape=self.img_size))
        depth = hp.Int("num_layers", 2, 20)
        for layer_idx in range(depth):
            units = hp.Int("units_" + str(layer_idx), 32, 512, 32)
            model.add(layers.Dense(units=units, activation="relu"))
        model.add(layers.Dense(self.classes, activation="softmax"))
        learning_rate = hp.Choice("learning_rate", [1e-2, 1e-3, 1e-4])
        model.compile(
            optimizer=keras.optimizers.Adam(learning_rate),
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"],
        )
        return model
# Case #3: pass a HyperModel instance instead of a build function.
tuner = RandomSearch(
    MyHyperModel(img_size=(28, 28), classes=10),
    objective="val_accuracy",
    max_trials=5,
    directory="test_dir",
    project_name="case3",
)
tuner.search(x, y=y, epochs=5, validation_data=(val_x, val_y))
# """Case #4:
# - We restrict the search space
# - This means that default values are being used for params that are left out
# """
# Pre-declare only learning_rate; tune_new_entries=False makes every other
# hyperparameter in build_model use its default value.
hp = HyperParameters()
hp.Choice("learning_rate", [1e-1, 1e-3])
tuner = RandomSearch(
    build_model,
    max_trials=5,
    hyperparameters=hp,
    tune_new_entries=False,
    objective="val_accuracy",
    directory="test_dir",
    project_name="case4",
)
tuner.search(x=x, y=y, epochs=5, validation_data=(val_x, val_y))
# """Case #5:
# - We override specific parameters with fixed values that aren't the default
# """
# Pin learning_rate to a fixed value while still tuning everything else.
hp = HyperParameters()
hp.Fixed("learning_rate", 0.1)
tuner = RandomSearch(
    build_model,
    max_trials=5,
    hyperparameters=hp,
    tune_new_entries=True,
    objective="val_accuracy",
    directory="test_dir",
    project_name="case5",
)
tuner.search(x=x, y=y, epochs=5, validation_data=(val_x, val_y))
# """Case #6:
# - We reparameterize the search space
# - This means that we override the distribution of specific hyperparameters
# """
# Override the distribution of learning_rate; new entries are still tuned.
hp = HyperParameters()
hp.Choice("learning_rate", [1e-1, 1e-3])
tuner = RandomSearch(
    build_model,
    max_trials=5,
    hyperparameters=hp,
    tune_new_entries=True,
    objective="val_accuracy",
    directory="test_dir",
    project_name="case6",
)
tuner.search(x=x, y=y, epochs=5, validation_data=(val_x, val_y))
# """Case #7:
# - We predefine the search space
# - No unregistered parameters are allowed in `build`
# """
# Fully predefine the search space; build_model below only reads via hp.get.
hp = HyperParameters()
hp.Choice("learning_rate", [1e-1, 1e-3])
hp.Int("num_layers", 2, 20)
def build_model(hp):
    """Build a fixed-width MLP from a fully pre-declared search space.

    Reads `num_layers` and `learning_rate` via `hp.get`, so no new
    hyperparameters are registered here (allow_new_entries=False).
    """
    depth = hp.get("num_layers")
    stack = [layers.Flatten(input_shape=(28, 28))]
    stack.extend(layers.Dense(32, activation="relu") for _ in range(depth))
    stack.append(layers.Dense(10, activation="softmax"))
    model = keras.Sequential(stack)
    model.compile(
        optimizer=keras.optimizers.Adam(hp.get("learning_rate")),
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model
# allow_new_entries=False: build_model may not create unregistered params.
tuner = RandomSearch(
    build_model,
    max_trials=5,
    hyperparameters=hp,
    allow_new_entries=False,
    objective="val_accuracy",
    directory="test_dir",
    project_name="case7",
)
tuner.search(x=x, y=y, epochs=5, validation_data=(val_x, val_y))
# """Case #8:
# - Similar to Base Case.
# - However, specify conditions on units so that the summary show
# - only relevant hyperparameters.
# """
def build_model(hp):
    """Tunable MLP whose per-layer unit counts live in conditional scopes.

    Declaring units_i under a conditional scope on num_layers means the
    summary only shows hyperparameters that are actually active.
    """
    model = keras.Sequential()
    model.add(layers.Flatten(input_shape=(28, 28)))
    min_layers = 2
    max_layers = 5
    depth = hp.Int("num_layers", min_layers, max_layers)
    for idx in range(depth):
        # units_idx is only meaningful when num_layers > idx.
        active = list(range(idx + 1, max_layers + 1))
        with hp.conditional_scope("num_layers", active):
            width = hp.Int("units_" + str(idx), 32, 256, 32)
            model.add(layers.Dense(units=width, activation="relu"))
    model.add(layers.Dense(10, activation="softmax"))
    model.compile(
        optimizer=keras.optimizers.Adam(1e-4),
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model
# Case #8 driver: no project_name, so results land directly in test_dir.
tuner = RandomSearch(
    build_model,
    objective="val_accuracy",
    max_trials=10,
    executions_per_trial=3,
    directory="test_dir",
)
tuner.search_space_summary()
tuner.search(x=x, y=y, epochs=3, validation_data=(val_x, val_y))
tuner.results_summary()
# """Case #9:
# - Similar to Case #8, but use parent_name, parent_value keywords pair for
# - conditional scope Using keywords for conditional scope does not
# - support nested conditions.
# """
def build_model(hp):
    """Same conditional-units model as case #8, expressed with the
    parent_name/parent_values keywords (which do not support nesting)."""
    model = keras.Sequential()
    model.add(layers.Flatten(input_shape=(28, 28)))
    min_layers = 2
    max_layers = 5
    depth = hp.Int("num_layers", min_layers, max_layers)
    for idx in range(depth):
        width = hp.Int(
            "units_" + str(idx),
            32,
            256,
            32,
            parent_name="num_layers",
            parent_values=list(range(idx + 1, max_layers + 1)),
        )
        model.add(layers.Dense(units=width, activation="relu"))
    model.add(layers.Dense(10, activation="softmax"))
    model.compile(
        optimizer=keras.optimizers.Adam(1e-4),
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model
# Case #9 driver: identical settings to case #8.
tuner = RandomSearch(
    build_model,
    objective="val_accuracy",
    max_trials=10,
    executions_per_trial=3,
    directory="test_dir",
)
tuner.search_space_summary()
tuner.search(x=x, y=y, epochs=3, validation_data=(val_x, val_y))
tuner.results_summary()
| {
"repo_name": "keras-team/keras-tuner",
"path": "examples/helloworld.py",
"copies": "1",
"size": "7597",
"license": "apache-2.0",
"hash": -2690607335480103400,
"line_mean": 23.6655844156,
"line_max": 84,
"alpha_frac": 0.6103725155,
"autogenerated": false,
"ratio": 3.32472647702407,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.443509899252407,
"avg_score": null,
"num_lines": null
} |
# A simple hello-world example to create a bar graph
import svgdatashapes as s
def example0():
    """Render a simple bar chart as an SVG string using svgdatashapes.

    Builds a 550x350 SVG: one bar per data row on a categorical X axis,
    a 0-100 numeric Y axis with gridlines, and a dashed "Goal" reference
    line at y=60.  Note svgdatashapes keeps drawing state at module
    level, so the call order below matters.

    Returns:
        str: the complete SVG markup, ready to embed in rendered HTML.
    """
    mydata = [ { 'name':'Group A', 'value':38.4 },
               { 'name':'Group B', 'value':67.4 },
               { 'name':'Group C', 'value':49.2 } ]
    # get a unique list of the categories for the X axis
    cats = s.uniqcats( datarows=mydata, column="name" )
    # set our text and line properties....
    textstyle = 'font-family: sans-serif; font-weight: bold;'
    s.settext( ptsize=12, color='#444', style=textstyle )
    s.setline( color='#ccc' )
    # begin building our svg
    s.svgbegin( width=550, height=350 )
    # set up our X axis space (categorical) located from x=100 to x=400 in the svg
    s.xspace( svgrange=(100,400), catlist=cats )
    # set up our Y axis space (numeric) located from y=100 to y=300 in the svg
    s.yspace( svgrange=(100,300), datarange=(0,100) )
    # render the X and Y axis...
    s.xaxis( tics=8 )
    s.yaxis( axisline=False, grid=True )
    s.plotdeco( ylabel='Score' )
    # render the column bars and error bars....
    for row in mydata:
        s.bar( x=row['name'], y=row['value'], color='#8a8', width=50, opacity=0.8 )
    # add a "Goal" line
    s.setline( color='#33a', dash='5,2' )
    s.line( x1='min', y1=60.0, x2='max', y2=60.0 )
    s.label( x='max', y=60.0, text="Goal", xadjust=10 )
    # return the SVG. The caller could then add it in to the rendered HTML.
    return s.svgresult()
| {
"repo_name": "grubbcode/minplot",
"path": "examples/e0.py",
"copies": "2",
"size": "1465",
"license": "mit",
"hash": 5497286994679236000,
"line_mean": 33.0697674419,
"line_max": 83,
"alpha_frac": 0.6027303754,
"autogenerated": false,
"ratio": 3.0520833333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4654813708733334,
"avg_score": null,
"num_lines": null
} |
"""A simple 'hello world' sample, which accesses the 'users' service,
and shows how to get information about the current instance.
"""
import logging
import os
import jinja2
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import modules
from google.appengine.api import users
# Template loader rooted at this file's directory, with autoescaping on.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'])
def get_url_for_instance(instance_id):
    """Build the full URL that addresses a specific serving instance.

    Args:
        instance_id: string id of a VM instance.

    Returns:
        URL string of the form http://<instance>.<version>.<app hostname>.
    """
    parts = (
        instance_id,
        modules.get_current_version_name(),
        app_identity.get_default_version_hostname(),
    )
    return 'http://{}.{}.{}'.format(*parts)
def get_signin_navigation(original_url):
    """Return a (url, link_text) pair for a login or logout link.

    Args:
        original_url: URL to return to after the auth action.

    Returns:
        Two-value tuple; a url and a link text.
    """
    if users.get_current_user():
        return users.create_logout_url(original_url), 'Logout'
    return users.create_login_url(original_url), 'Login'
class Hello(webapp2.RequestHandler):
    """Greet the visitor (by nickname when logged in) and show a link to
    the instance that served the request, plus login/logout navigation."""

    def get(self):
        """Render the index template with a 'Hello' greeting."""
        user = users.get_current_user()
        message = 'Hello' if not user else 'Hello, %s' % user.nickname()
        instance_id = modules.get_current_instance_id()
        url, url_linktext = get_signin_navigation(self.request.uri)
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.out.write(template.render(
            instance_url=get_url_for_instance(instance_id),
            url=url,
            url_linktext=url_linktext,
            message=message))
# WSGI application: route the site root to the Hello handler.
APPLICATION = webapp2.WSGIApplication([
    ('/', Hello)
], debug=True)
| {
"repo_name": "bshaffer/appengine-python-vm-hello",
"path": "main.py",
"copies": "4",
"size": "2207",
"license": "apache-2.0",
"hash": -3304512769843511000,
"line_mean": 30.5285714286,
"line_max": 95,
"alpha_frac": 0.6221114635,
"autogenerated": false,
"ratio": 3.9837545126353793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.660586597613538,
"avg_score": null,
"num_lines": null
} |
"""A simple 'hello world' sample, which includes examples of start/stop
handlers, accesses the 'users' service, and shows how to get information about
the current instance.
"""
import logging
import os
import time
import jinja2
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import modules
from google.appengine.api import users
from google.appengine.ext import deferred
# Template loader rooted at this file's directory, with autoescaping on.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'])
def get_url_for_instance(instance_id):
    """Return a full url of the current instance.

    Args:
        instance_id: string id of a VM instance.

    Returns:
        URL string for the instance.
    """
    version = modules.get_current_version_name()
    hostname = app_identity.get_default_version_hostname()
    return 'http://{}.{}.{}'.format(instance_id, version, hostname)
def get_signin_navigation(original_url):
    """Return a (url, link_text) pair for logging in or out.

    Args:
        original_url: URL to redirect back to afterwards.

    Returns:
        Two-value tuple; a url and a link text.
    """
    if users.get_current_user():
        return users.create_logout_url(original_url), 'Logout'
    return users.create_login_url(original_url), 'Login'
def do_heavy_task():
    """Simulate an expensive background job by sleeping for 30 seconds."""
    time.sleep(30)
class Hello(webapp2.RequestHandler):
    """Display a greeting, using user info if logged in, and show which
    instance served the request."""

    def get(self):
        """Render the index template with a 'Hello' message."""
        user = users.get_current_user()
        message = 'Hello' if not user else 'Hello, %s' % user.nickname()
        instance_id = modules.get_current_instance_id()
        url, url_linktext = get_signin_navigation(self.request.uri)
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.out.write(
            template.render(
                instance_url=get_url_for_instance(instance_id),
                url=url,
                url_linktext=url_linktext,
                message=message))
class RunHeavyTask(webapp2.RequestHandler):
    """Handler that queues the heavy task on the deferred task queue."""

    def get(self):
        """Enqueue do_heavy_task and acknowledge immediately."""
        deferred.defer(do_heavy_task)
        self.response.write('Put a heavy task on the queue.')
# WSGI application: the root page greets; /run_heavy_task enqueues work.
APPLICATION = webapp2.WSGIApplication([
    ('/', Hello),
    ('/run_heavy_task', RunHeavyTask),
], debug=True)
| {
"repo_name": "tmatsuo/appengine-python-vm-urlfetch-timeout",
"path": "main.py",
"copies": "1",
"size": "2598",
"license": "apache-2.0",
"hash": 747354666383578600,
"line_mean": 28.5227272727,
"line_max": 80,
"alpha_frac": 0.6485758276,
"autogenerated": false,
"ratio": 3.826215022091311,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49747908496913107,
"avg_score": null,
"num_lines": null
} |
"""A simple html INPUT field that acts as a trap for spambots. The general spam-bot
fills in any field there is, ignoring CSS rules and even sometimes falling in a
INPUT TYPE=hidden trap field.
This plugin was based on MathCaptcha by Rob McMullen, which in turn based
database setup code off of BSD licensed TicketModerator by John D. Siirola.
Author: (Absent. This is the first and final version for trac-0.11.x)
License: BSD, like Trac itself
"""
import re
import sys
import time
import urllib
from trac.core import *
from trac.ticket.api import ITicketManipulator
from trac.web.api import ITemplateStreamFilter
from trac.wiki.api import IWikiPageManipulator
from genshi.builder import tag
from genshi.filters.transform import Transformer
class InputfieldTrapPlugin(Component):
    # Honeypot plugin: injects a hidden INPUT that humans never fill in
    # but naive spam bots do; any submission with it non-empty is rejected.
    implements(ITicketManipulator, ITemplateStreamFilter, IWikiPageManipulator)
    def get_content(self, req):
        """Returns the Genshi tags for the HTML INPUT trap element"""
        content = tag.div()(
            tag.input(type='hidden', name='keepempty', value='')
        )
        return content
    def validate_inputfieldtrap(self, req):
        """Validates that trap field is empty"""
        field = req.args.get('keepempty')
        if field:
            # Non-empty trap field => almost certainly a bot; reject the post.
            return [(None, "You seem to be a bot - if so, go away!")]
        return []
    # ITemplateStreamFilter interface
    def filter_stream(self, req, method, filename, stream, data):
        """Return a filtered Genshi event stream, or the original unfiltered
        stream if no match.

        `req` is the current request object, `method` is the Genshi render
        method (xml, xhtml or text), `filename` is the filename of the template
        to be rendered, `stream` is the event stream and `data` is the data for
        the current template.

        See the Genshi documentation for more information.
        """
        # Insert the hidden field right before the submit buttons
        stream = stream | Transformer('//div[@class="buttons"]').before(self.get_content(req))
        return stream
    # ITicketManipulator interface
    def validate_ticket(self, req, ticket):
        """Validate a ticket after it's been populated from user input.

        Must return a list of `(field, message)` tuples, one for each problem
        detected. `field` can be `None` to indicate an overall problem with the
        ticket. Therefore, a return value of `[]` means everything is OK."""
        return self.validate_inputfieldtrap(req)  # if req.authname == "anonymous"
    # IWikiPageManipulator interface
    def validate_wiki_page(self, req, page):
        """Validate a wiki page after it's been populated from user input.

        Must return a list of `(field, message)` tuples, one for each problem
        detected. `field` can be `None` to indicate an overall problem with the
        ticket. Therefore, a return value of `[]` means everything is OK."""
        return self.validate_inputfieldtrap(req)  # if req.authname == "anonymous"
| {
"repo_name": "Puppet-Finland/trac",
"path": "files/inputfieldtrapplugin/0.11/inputfieldtrap/InputfieldTrap.py",
"copies": "1",
"size": "3083",
"license": "bsd-2-clause",
"hash": -7791256584193979000,
"line_mean": 37.0617283951,
"line_max": 94,
"alpha_frac": 0.6772624067,
"autogenerated": false,
"ratio": 4.171853856562923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5349116263262923,
"avg_score": null,
"num_lines": null
} |
"""A simple HTML parser based on sgmllib's SGMLParser."""
from sgmllib import SGMLParser
class SimpleHTMLParser(SGMLParser):
    """A simple HTML parser based on sgmllib's SGMLParser.

    Collects the href target of every anchor tag and the document's
    plain (tag-free) text while parsing.
    """
    def __init__(self):
        SGMLParser.__init__(self)
        self.hyperlinks = []
        # Accumulated tag-free text.  Previously this attribute was never
        # created or populated, so get_clean_text() raised AttributeError.
        self.clean_text = ''
    def reset(self):
        SGMLParser.reset(self)
        self.hyperlinks = []
        self.clean_text = ''
    def parse(self, text):
        """Feed *text* through the parser and finalize parsing."""
        self.feed(text)
        self.close()
    def unknown_starttag(self, tag, attrs):
        # Record the target of every <a href="..."> encountered.
        if tag == 'a':
            for name, value in attrs:
                if name == 'href':
                    self.hyperlinks.append(value)
    def unknown_endtag(self, tag):
        pass
    def handle_charref(self, ref):
        pass
    def handle_entityref(self, ref):
        pass
    def handle_data(self, text):
        # Text between tags contributes to the clean (tag-free) text.
        self.clean_text += text
    def handle_comment(self, text):
        pass
    def handle_pi(self, text):
        pass
    def handle_decl(self, text):
        pass
    def get_hyperlinks(self):
        return self.hyperlinks
    def get_clean_text(self):
        return self.clean_text
| {
"repo_name": "nitsas/simple-web-search-engine",
"path": "html_parser.py",
"copies": "1",
"size": "1139",
"license": "mit",
"hash": 9103577271141191000,
"line_mean": 20.4905660377,
"line_max": 61,
"alpha_frac": 0.5434591747,
"autogenerated": false,
"ratio": 4.141818181818182,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5185277356518181,
"avg_score": null,
"num_lines": null
} |
# A simple HTML table parser. It turns tables (including nested tables) into arrays
# Nigel Sim <nigel.sim@gmail.com>
# http://simbot.wordpress.com
from HTMLParser import HTMLParser
import re, os
from string import lower
class Table(list):
    # Marker subclass of list: a parsed <table>, holding Row instances.
    pass
class Row(list):
    # Marker subclass of list: a parsed <tr>, holding cell values.
    pass
class Cell(object):
    # Holds the content of a single <td>: either text or a nested Table.
    def __init__(self):
        self.data = None
        return
    def append(self, item):
        # A cell holds a single value; a second append replaces the first
        # (with a warning), so nested tables win over interleaved text.
        if self.data != None:
            print "Overwriting %s"%self.data
        self.data = item
# Get the item on the top of a stack
def top(x):
    """Return the top (last) element of stack *x*.

    Raises IndexError on an empty sequence, exactly as x[len(x)-1] did.
    """
    return x[-1]
class TableParser(HTMLParser):
    # Streaming parser that turns <table>/<tr>/<td> markup (including
    # nested tables) into nested Table/Row lists via an explicit stack.
    def __init__(self, parser=None):
        """
        The parser is a method which will be passed the doc at the end
        of the parsing. Useful if TableParser is within an inner loop and
        you want to automatically process the document. If it is omitted then
        it will do nothing
        """
        self._tag = None
        self._buf = None
        self._attrs = None
        self.doc = None # Where the document will be stored
        self._stack = None
        self._parser = parser
        self.reset()
        return
    def reset(self):
        # Restart parsing: doc is the root container and stack bottom.
        HTMLParser.reset(self)
        self.doc = []
        self._stack = [self.doc]
        self._buf = ''
    def close(self):
        # On end of input, hand the finished doc to the optional callback.
        HTMLParser.close(self)
        if self._parser != None:
            self._parser(self.doc)
    def handle_starttag(self, tag, attrs):
        # Opening a table/tr/td pushes a fresh container and clears the
        # text buffer; all other tags are ignored.
        self._tag = tag
        self._attrs = attrs
        if lower(tag) == 'table':
            self._buf = ''
            self._stack.append(Table())
        elif lower(tag) == 'tr':
            self._buf = ''
            self._stack.append(Row())
        elif lower(tag) == 'td':
            self._buf = ''
            self._stack.append(Cell())
        #print "Encountered the beginning of a %s tag" % tag
    def handle_endtag(self, tag):
        # Closing a tag pops the stack down to the matching container
        # (discarding anything unclosed above it) and attaches it to its
        # parent container.
        if lower(tag) == 'table':
            t = None
            while not isinstance(t, Table):
                t = self._stack.pop()
            r = top(self._stack)
            r.append(t)
        elif lower(tag) == 'tr':
            t = None
            while not isinstance(t, Row):
                t = self._stack.pop()
            r = top(self._stack)
            r.append(t)
        elif lower(tag) == 'td':
            c = None
            while not isinstance(c, Cell):
                c = self._stack.pop()
            t = top(self._stack)
            if isinstance(t, Row):
                # We can not currently have text and other table elements in the same cell.
                # Table elements get precedence
                if c.data == None:
                    t.append(self._buf)
                else:
                    t.append(c.data)
            else:
                print "Cell not in a row, rather in a %s"%t
        self._tag = None
        #print "Encountered the end of a %s tag" % tag
    def handle_data(self, data):
        # Accumulate character data; consumed when the enclosing td closes.
        self._buf += data
class ST:
    # Callable field setter: converts a raw value with *func* and assigns
    # it to attribute *name* on a target object, logging parse failures.
    def __init__(self, func, name):
        """
        @param func: converter, str -> attribute value
        @param name: attribute name to set
        """
        self.func = func
        self.name = name
    def __call__(self, obj, val):
        try:
            setattr(obj, self.name, self.func(val))
        except Exception, e:
            # Best-effort: report the bad field but keep processing.
            print "Error till parsing field %s for obj %r: %s" %(
                self.name, obj, e)
| {
"repo_name": "quoter/protocoller",
"path": "src/protocoller/miner/html_parser.py",
"copies": "1",
"size": "3054",
"license": "mit",
"hash": 2376339353107459000,
"line_mean": 24.2396694215,
"line_max": 85,
"alpha_frac": 0.5671250819,
"autogenerated": false,
"ratio": 3.5265588914549655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9271117330581462,
"avg_score": 0.06451332855470056,
"num_lines": 121
} |
# A simple HTML table parser. It turns tables (including nested tables) into arrays
# Nigel Sim <nigel.sim@gmail.com>
# http://simbot.wordpress.com
from HTMLParser import HTMLParser
import re, string, os
from string import lower
class Table(list):
    # Marker subclass of list: a parsed <table>, holding Row instances.
    pass
class Row(list):
    # Marker subclass of list: a parsed <tr>, holding cell values.
    pass
class Cell(object):
    # Holds the content of a single <td>: either text or a nested Table.
    def __init__(self):
        self.data = None
        return
    def append(self, item):
        # A cell holds a single value; a second append replaces the first
        # (with a warning), so nested tables win over interleaved text.
        if self.data != None:
            print "Overwriting %s"%self.data
        self.data = item
# Get the item on the top of a stack
def top(x):
    """Return the top (last) element of stack *x*.

    Raises IndexError on an empty sequence, exactly as x[len(x)-1] did.
    """
    return x[-1]
class TableParser(HTMLParser):
    # Streaming parser that turns <table>/<tr>/<td> markup (including
    # nested tables) into nested Table/Row lists via an explicit stack.
    def __init__(self, parser=None):
        """
        The parser is a method which will be passed the doc at the end
        of the parsing. Useful if TableParser is within an inner loop and
        you want to automatically process the document. If it is omitted then
        it will do nothing
        """
        self._tag = None
        self._buf = None
        self._attrs = None
        self.doc = None # Where the document will be stored
        self._stack = None
        self._parser = parser
        self.reset()
        #return
    def reset(self):
        # Restart parsing: doc is the root container and stack bottom.
        HTMLParser.reset(self)
        self.doc = []
        self._stack = [self.doc]
        self._buf = ''
    def close(self):
        # On end of input, hand the finished doc to the optional callback.
        HTMLParser.close(self)
        if self._parser != None:
            self._parser(self.doc)
    def handle_starttag(self, tag, attrs):
        # Opening a table/tr/td pushes a fresh container and clears the
        # text buffer; all other tags are ignored.
        self._tag = tag
        self._attrs = attrs
        if lower(tag) == 'table':
            self._buf = ''
            self._stack.append(Table())
        elif lower(tag) == 'tr':
            self._buf = ''
            self._stack.append(Row())
        elif lower(tag) == 'td':
            self._buf = ''
            self._stack.append(Cell())
        #print "Encountered the beginning of a %s tag" % tag
    def handle_endtag(self, tag):
        # Closing a tag pops the stack down to the matching container
        # (discarding anything unclosed above it) and attaches it to its
        # parent container.
        if lower(tag) == 'table':
            t = None
            while not isinstance(t, Table):
                t = self._stack.pop()
            r = top(self._stack)
            r.append(t)
        elif lower(tag) == 'tr':
            t = None
            while not isinstance(t, Row):
                t = self._stack.pop()
            r = top(self._stack)
            r.append(t)
        elif lower(tag) == 'td':
            c = None
            while not isinstance(c, Cell):
                c = self._stack.pop()
            t = top(self._stack)
            if isinstance(t, Row):
                # We can not currently have text and other table elements in the same cell.
                # Table elements get precedence
                if c.data == None:
                    t.append(self._buf)
                else:
                    t.append(c.data)
            else:
                print "Cell not in a row, rather in a %s"%t
        self._tag = None
        #print "Encountered the end of a %s tag" % tag
    def handle_data(self, data):
        # Accumulate character data; consumed when the enclosing td closes.
        self._buf += data
| {
"repo_name": "sniemi/SamPy",
"path": "sandbox/src/TableParse.py",
"copies": "1",
"size": "2874",
"license": "bsd-2-clause",
"hash": 8216635403772872000,
"line_mean": 26.6346153846,
"line_max": 92,
"alpha_frac": 0.5354906054,
"autogenerated": false,
"ratio": 3.936986301369863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4972476906769863,
"avg_score": null,
"num_lines": null
} |
"""A simple HTTP/2 file server."""
from functools import partial
import logging
import os.path
import urllib.parse
import sys
import curio
import curio.io
from garage import asyncs
from garage.asyncs.utils import make_server_socket, serve
import http2
async def handle(sock, addr):
    # Serve one HTTP/2 connection: answer each GET stream by streaming
    # the named file back in 64 KiB chunks; any non-GET request, bad
    # path, or missing file gets an error status.
    session = http2.Session(sock)
    async with await asyncs.cancelling.spawn(session.serve()) as server:
        async for stream in session:
            request = stream.request
            if request.method is not http2.Method.GET:
                await stream.submit_response(
                    http2.Response(status=http2.Status.BAD_REQUEST))
                return
            # Percent-decode the request path; it must be absolute.
            path = urllib.parse.unquote(request.path.decode('ascii'))
            logging.info('GET %s', path)
            if not path.startswith('/'):
                await stream.submit_response(
                    http2.Response(status=http2.Status.BAD_REQUEST))
                return
            # Strip the leading '/' so lookups are relative to the cwd.
            path = path[1:]
            if not os.path.isfile(path):
                await stream.submit_response(
                    http2.Response(status=http2.Status.NOT_FOUND))
                return
            try:
                # open() is blocking; an alternative is curio.aopen(),
                # but it secretly uses thread behind the scene, which
                # might be undesirable
                async with curio.io.FileStream(open(path, 'rb')) as contents, \
                        stream.make_buffer() as buffer:
                    await stream.submit_response(http2.Response(body=buffer))
                    while True:
                        data = await contents.read(65536)
                        if not data:
                            break
                        await buffer.write(data)
            except OSError:
                # Best-effort: log read failures but keep the session alive.
                logging.exception('err when read %s', path)
        await server.join()
def main():
    """Parse argv and run the HTTP/2 file server.

    Usage: prog port [server.crt server.key] — supplying a certificate
    and key pair enables TLS via http2.make_ssl_context.
    """
    argv = sys.argv
    if len(argv) < 2:
        print('Usage: %s port [server.crt server.key]' % argv[0])
        sys.exit(1)
    if len(argv) >= 4:
        make_ssl_context = partial(
            http2.make_ssl_context, argv[2], argv[3])
    else:
        make_ssl_context = None
    port = int(argv[1])
    curio.run(serve(
        asyncs.Event(),
        partial(make_server_socket, ('', port)),
        handle,
        make_ssl_context=make_ssl_context,
    ))
if __name__ == '__main__':
    # DEBUG level makes the HTTP/2 frame exchange visible in the log.
    logging.basicConfig(level=logging.DEBUG)
    main()
| {
"repo_name": "clchiou/garage",
"path": "py/http2/examples/simple_server.py",
"copies": "1",
"size": "2455",
"license": "mit",
"hash": -5833332669108239000,
"line_mean": 30.4743589744,
"line_max": 79,
"alpha_frac": 0.5515274949,
"autogenerated": false,
"ratio": 4.119127516778524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 78
} |
"""A simple HTTP client which mixes httplib with gevent and PayPal protecteds.
It provides convenience functions for the standard set of `HTTP methods`_:
>>> http_client.get('http://example.com/foo') # doctest: +SKIP
which are just shortcuts for the corresponding :py:func:`request` call:
>>> http_client.request("get", "http://example.com/foo") # doctest: +SKIP
.. _HTTP Methods: http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods
If you don't intend to read the response's body, you should use a
context manager:
>>> with http_client.get('http://www.example.com') as response: # doctest: +SKIP
... assert response.status == 200
This will release the underlying socket back to the socket pool.
"""
import httplib
from urlparse import urlparse, urlunparse
import functools
import urllib2
import os
import json
import context
import connection_mgr
import async
from gevent import socket
from gevent import ssl
# TODO: make and use a better HTTP library instead of wrapping httplib.
# hopefully this is at least a pretty stable abstraction that can migrate over
# ... if nothing else, much better than shrugging our shoulders when someone
# asks how to make an http request
class _GHTTPConnection(httplib.HTTPConnection):
    # httplib.HTTPConnection subclass whose sockets come from (and return
    # to) the context's connection pool instead of being created per call.

    default_port = httplib.HTTP_PORT

    def __init__(self, host, port=None, strict=None,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                 protected=None):
        # `protected` selects the SSL/protected mode used when the pool
        # establishes the connection.
        httplib.HTTPConnection.__init__(self, host, port, strict, timeout)
        self.protected = protected

    def connect(self):
        # Check out a pooled socket rather than opening a fresh one.
        ctx = context.get_context()
        self.sock = ctx.connection_mgr.get_connection(
            (self.host, self.port), self.protected,
            read_timeout=self.timeout if isinstance(self.timeout, float) else None)
        if self._tunnel_host:
            self._tunnel()

    def release_sock(self):
        # Return the socket to the pool, but only when the connection is
        # idle with no outstanding response (reads httplib's name-mangled
        # private state, so this is tightly coupled to httplib internals).
        # print self._HTTPConnection__state, self.sock
        if (self._HTTPConnection__state == httplib._CS_IDLE
                and self._HTTPConnection__response is None
                and self.sock):
            context.get_context().connection_mgr.release_connection(self.sock)
            self.sock = None

    def _set_content_length(self, body):
        # Set the content-length based on the body.
        thelen = None
        try:
            thelen = str(len(body))
        except TypeError:
            # If this is a file-like object, try to
            # fstat its file descriptor
            try:
                thelen = str(os.fstat(body.fileno()).st_size)
            except (AttributeError, OSError):
                # TODO
                # Don't send a length if this failed
                if self.debuglevel > 0:
                    print "Cannot stat file-type HTTP body."
        if thelen is not None:
            self.putheader('Content-Length', thelen)

    def __del__(self):
        # Finalizer safety net: make sure the pooled socket is released.
        self.release_sock()
class _GHTTPSConnection(_GHTTPConnection):
    # HTTPS variant: defaults to plain SSL wrapping via the pool, with
    # special handling so CONNECT tunnels are set up before TLS starts.

    default_port = httplib.HTTPS_PORT

    def __init__(self, host, port=None, strict=None,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        _GHTTPConnection.__init__(self, host, port, strict, timeout,
                                  protected=connection_mgr.PLAIN_SSL)

    def connect(self):
        ctx = context.get_context()
        if self._tunnel_host:
            # we need to issue CONNECT *prior* to doing any SSL. so
            # start off by asking for a plain socket...
            self.sock = ctx.connection_mgr.get_connection(
                (self.host, self.port), ssl=None,
                read_timeout=self.timeout if isinstance(self.timeout, float) else None)
            # ...then issue the CONNECT...
            self._tunnel()
            # ...finally, replace the underlying socket on the
            # monitored socket with an SSL wrapped socket that matches
            # the kind specified by self.protected. note that this is
            # copy-pasted from connection_mgr.py
            self.sock._msock = (ssl.wrap_socket(self.sock._msock)
                                if self.protected == connection_mgr.PLAIN_SSL
                                else
                                async.wrap_socket_context(self.sock._msock,
                                                          self.protected))
        else:
            # if we don't need to issue a connect, then the super
            # class will do the right thing
            _GHTTPConnection.connect(self)
def urllib2_request(u2req, timeout=None):
    """\
    Translate a urllib2.Request to something we can pass to our
    request() function, and translate our Response to a
    urllib2.addinfourl object
    """
    # TODO: proxy support?
    method = u2req.get_method()
    # NOTE(review): reaches into urllib2.Request's name-mangled private
    # attribute for the original URL — coupled to urllib2 internals.
    url = u2req._Request__original
    body = u2req.get_data()
    # Unredirected headers win; regular headers fill in the gaps.
    headers = dict(u2req.unredirected_hdrs)
    headers.update((k, v) for k, v in u2req.headers.items()
                   if k not in headers)
    try:
        kwargs = {}
        if timeout is not None:
            kwargs['timeout'] = timeout
        resp = request(method, url, body, headers, **kwargs)
        hr = resp.http_response
        # socket._fileobject expects a recv() method; alias read().
        hr.recv = hr.read
        fp = socket._fileobject(hr, close=True)
        aiu = urllib2.addinfourl(fp=fp,
                                 headers=hr.msg,
                                 url=resp.request.url)
        aiu.code = hr.status
        aiu.msg = hr.reason
        return aiu
    except ValueError as e:
        # Re-raise validation failures in urllib2's native exception type.
        raise urllib2.URLError(e.msg)
def request(method, url, body=None, headers=None,
            literal=False, use_protected=False,
            timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
    '''\
    A function to issue HTTP requests.

    :param method: the `HTTP method`_ for this request.  Case
      insensitive.

    :param url: the URL to request.  Must include a protocol
      (e.g. `http`, `https`).

    :param body: the body of the request, if applicable

    :type body: a string or file-like object (i.e, an object that has
      a ``read`` method).  It could also be a dict, in which case
      it is stringified, and the header set to application/json

    :param headers: A dictionary of request headers

    :type headers: :py:class:`dict`

    :param literal: if true, instruct
      :py:class:`~httplib.HTTPConnection` **not** to set the ``Host`` or
      ``Accept-Encoding`` headers automatically.  Useful for testing

    :param use_protected: if true, use the appropriate protected for
      this call.

    :param timeout: connection timeout for this request.

    :returns: a :py:class:`Response` object.

    An example, calling up google with a custom host header:

    >>> request('get',
    ...         'http://google.com',
    ...         headers={'Host': 'www.google.com'},
    ...         literal=True)
    <http_client.Response (200) GET http://google.com>

    .. _HTTP Method: http://en.wikipedia.org/wiki/\
Hypertext_Transfer_Protocol#Request_methods
    '''
    # Validate the method and URL scheme up front.
    method = method.upper()
    if method not in _HTTP_METHODS:
        raise ValueError("invalid http method {0}".format(method))
    parsed = urlparse(url)
    if parsed.scheme not in ('http', 'https'):
        raise ValueError('unknown protocol %s' % parsed.scheme)
    # Split host:port, defaulting the port from the scheme.
    domain, _, port = parsed.netloc.partition(':')
    try:
        port = int(port)
    except ValueError:
        port = 80 if parsed.scheme == 'http' else 443
    # For https: True selects the context's protected; otherwise plain SSL.
    protected = (parsed.scheme == 'https') and (True if use_protected
                                                else "PLAIN_SSL")
    conn = _GHTTPConnection(domain, port, protected=protected, timeout=timeout)
    # Request line path = original URL minus scheme and netloc.
    selector = urlunparse(parsed._replace(scheme='', netloc=''))
    skips = {'skip_host': True,
             'skip_accept_encoding': True} if literal else {}
    if headers is None:
        headers = {}
    if not literal:
        headers.setdefault('User-Agent', 'python')
    # Dict bodies are serialized to JSON with a matching Content-Type.
    if isinstance(body, dict):
        body = json.dumps(body)
        if 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/json'
    conn.putrequest(method, selector, **skips)
    # OMD!
    if not literal and body is not None and 'Content-Length' not in headers:
        conn._set_content_length(body)
    # List-valued headers are emitted once per value (repeated header).
    for header, value in headers.items():
        if type(value) is list:
            for subvalue in value:
                conn.putheader(header, subvalue)
        else:
            conn.putheader(header, value)
    conn.endheaders()
    if body is not None:
        conn.send(body)
    raw = conn.getresponse()  # does NOT hold a reference to the
                              # HTTPConnection
    raw._connection = conn  # so the finalizer doesn't get called
                            # until the request has died
    return Response(
        Request(method, url, headers, body),
        raw.status, raw.msg, raw)
class Request(object):
    '''\
    A simple value object describing an HTTP request.

    .. py:attribute:: method

       The method used for this request (e.g., `POST`, `GET`).

    .. py:attribute:: url

       The requested URL.

    .. py:attribute:: headers

       The request headers (a :py:class:`list` of two-item :py:class:`tuples`)

    .. py:attribute:: body

       The body if present, otherwise `None`.
    '''

    def __init__(self, method, url, headers, body):
        # Plain record: just capture the components of the request.
        self.method = method
        self.url = url
        self.headers = headers
        self.body = body

    def __repr__(self):
        return "<http_client.Request %s %s>" % (self.method, self.url)
class Response(object):
    r'''\
    A simple wrapper for HTTP responses.
    .. py:attribute:: request
       the :py:class:`Request` object that led to this response
    .. py:attribute:: status
       the numeric status code for this Response
    .. py:attribute:: headers
       an :py:class:`~httplib.HTTPMessage` object containing this
       response's headers. You can treat this as a dictionary: for
       example, you can get the value for the ``Host`` header with
       ``msg['Host']``. **You should, however, be careful with
       duplicate headers.**
       Consider the following headers:
       >>> headers = '\r\n'.join(['X-First-Header: First, Value',
       ...                        'X-First-Header: Second, Value',
       ...                        'X-Second-Header: Final, Value',
       ...                        ''])
       Note that the header ``X-First-Header`` appears twice.
       >>> from StringIO import StringIO
       >>> from httplib import HTTPMessage
       >>> msg = HTTPMessage(StringIO(headers))
       >>> msg['X-First-Header']
       'First, Value, Second, Value'
       :py:class:`HTTPMessage` has *concatenated* the two values we
       provided for `X-First-Header` (`First, Value` and `Second,
       Value`) with a comma. Unfortunately both of these values
       contain a comma. That means a simple :py:meth:`str.split` can't
       recover the original values:
       >>> msg['X-First-Header'].split(', ')
       ['First', 'Value', 'Second', 'Value']
       The same behavior occurs with :meth:`HTTPMessage.items`:
       >>> msg.items() # doctest: +NORMALIZE_WHITESPACE
       [('x-second-header', 'Final, Value'),
        ('x-first-header', 'First, Value, Second, Value')]
       To correctly recover values from duplicated header fields, use
       :meth:`HTTPMessage.getheaders`:
       >>> msg.getheaders('X-First-Header')
       ['First, Value', 'Second, Value']
    .. py:attribute:: http_response
       the underlying :py:class:`~httplib.HTTPResponse` object for
       this response.
    '''
    def __init__(self, request, status, headers, http_response):
        self.request = request
        self.status = status
        self.headers = headers
        self.http_response = http_response
        # Lazily-populated cache for the response body; see the `body`
        # property below.
        self._body = None
    def close(self):
        """Release the underlying socket back to the connection pool. This
        will be automatically called by :meth:`~Response.body`
        after the body has been read. You should arrange to have this called
        yourself (e.g. by using the response as a context manager) if you
        never read the body.
        """
        # `_connection` is the back-reference installed by request() to keep
        # the HTTPConnection alive; drop it so the connection can be
        # finalized once the socket is back in the pool.
        if hasattr(self.http_response, '_connection'):
            self.http_response._connection.release_sock()
            del self.http_response._connection
        self.http_response.close()
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        self.close()
    @property
    def body(self):
        """the body of the request, if applicable.
        Since this value is lazily loaded, if you never access it the
        response's body will never be downloaded. Once loaded it's
        stored locally, so repeated accesses won't trigger repeated
        network calls.
        """
        # `with self` guarantees close() runs (releasing the socket) as soon
        # as the body has been fully read.
        if self._body is None:
            with self:
                self._body = self.http_response.read()
        return self._body
    def __repr__(self):
        return "<http_client.Response ({0}) {1} {2}>".format(
            self.status, self.request.method, self.request.url)
#http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods
_HTTP_METHODS = ('GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'OPTIONS',
                 'CONNECT', 'PATCH')


def _init_methods():
    """Install module-level helpers (get, post, ...) that curry request()."""
    module_ns = globals()
    for verb in _HTTP_METHODS:
        module_ns[verb.lower()] = functools.partial(request, verb)


_init_methods()
| {
"repo_name": "paypal/support",
"path": "support/http_client.py",
"copies": "1",
"size": "13326",
"license": "bsd-3-clause",
"hash": 1613965136536507000,
"line_mean": 31.742014742,
"line_max": 90,
"alpha_frac": 0.6010055531,
"autogenerated": false,
"ratio": 4.169586983729662,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5270592536829661,
"avg_score": null,
"num_lines": null
} |
# a simple http server for an adventure game
# vim: et sw=2 ts=2 sts=2
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from advent import Game
import time
import urlparse
# add games here. As long as they follow the example, they will appear in the drop down menu.
import http_game
# globals
games = {}
state = {}
class State(object):
  """Holds one browser session's game instance plus its bookkeeping."""

  def __init__(self, game, game_function, session, params):
    self.in_apply = 0        # starts cleared
    self.session = session   # session key identifying this player
    self.name = game         # name of the selected game
    # Build the game and switch it into HTTP (web) output mode.
    self.game = game_function(game)
    self.game.http_output = True
    self.game.run_init()
class RequestHandler(BaseHTTPRequestHandler):
  """Serves the adventure UI: an intro/launch page and the play page."""
  def __init__(self, *args):
    # BaseHTTPRequestHandler.__init__ handles the request in full before
    # returning, so the statements below run only afterwards.  The `global`
    # declarations here have no effect and appear to be vestigial.
    BaseHTTPRequestHandler.__init__(self, *args)
    global games
    global state
  def do_GET(self):
    """Route a GET: favicon -> 404; missing/unknown game -> intro; else play."""
    parsed_path = urlparse.urlparse(self.path)
    if parsed_path.path == '/favicon.ico':
      self.send_response(404)
      return
    # keep_blank_values=True so empty cmd= parameters are preserved
    params = urlparse.parse_qs(parsed_path.query, True)
    if not 'game' in params or not 'session' in params:
      self.intro()
      return
    game = params['game'][0]
    if not game in games:
      self.intro()
      return
    session = params['session'][0]
    # Lazily create per-session state the first time a session is seen.
    if not session in state:
      state[session] = State(game, games[game], session, params)
    cmd = ""
    if 'cmd' in params:
      cmd = params['cmd'][0]
    self.play(cmd, state[session])
  def header(self):
    """Send the 200 response, the HTML head with styles, and open the body."""
    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    self.wfile.write("<html><head><title>Adventure</title>")
    self.wfile.write("""
      <style>
      input.prompt { background-color: #000000; color: #ffffff; border: 0px;
              font-size: 16pt;
              text-decoration: none;
              font-family: courier, monospace; }
      p.adventure { background-color: #000000; color: #cccccc; border: 0px;
              font-size: 16pt;
              text-decoration: none;
              font-family: courier, monospace; }
      </style>
    """);
    self.wfile.write("</head><body OnLoad=document.f.p.focus()>");
    self.wfile.write('<p class="adventure">')
  def intro(self):
    """Render the game/session selection page (drop-down of known games)."""
    self.header()
    self.wfile.write('<h1>Adventure Game Server</h1><br>')
    self.wfile.write('Welcome to the game server. Please select a game from the drop down menu. If you have a saved game or want to save this game enter a one word session name (e.g. joe1234) which will act as your username/password/save slot. Have Fun!<br>')
    self.wfile.write('<br><form name="prompt" action="play.html" method="GET">')
    self.wfile.write('<input type="hidden" name="cmd" value="">')
    # Timestamp-based default session name; users may replace it.
    session = 'session' + str(time.time())
    self.wfile.write('Session: <input type="text" name="session" value="%s"><br>' % session)
    self.wfile.write('Game: <select type="text" name="game">')
    for (k, v) in games.iteritems():
      self.wfile.write('<option>%s</option>' % k)
    self.wfile.write('</select><br>')
    self.wfile.write('<input type="submit" name="Submit">')
    self.wfile.write('</form>')
    self.footer()
  def footer(self):
    """Close the body and re-focus the command prompt via a tiny script."""
    # NOTE(review): "languages" should probably be "language" (or omitted);
    # harmless to browsers, preserved as-is.
    self.wfile.write("</body>")
    self.wfile.write("""
      <script languages="javascript">
      <!--
      document.f.p.focus()
      -->
      </script>""")
    self.wfile.write("</html>")
  def play(self, cmd, state):
    """Advance the game one step with `cmd` and render the room output."""
    state.game.run_step(cmd)
    state.game.run_room()
    self.header()
    # NOTE(review): header() already opened a <p class="adventure">; this
    # second open tag looks redundant but is preserved as-is.
    self.wfile.write('<p class="adventure">')
    t = '<br />'.join(state.game.http_text.split('\n'))
    self.wfile.write(t)
    self.wfile.write('</p>')
    self.wfile.write('<form name="f" action="play.html" method="GET">')
    self.wfile.write('<input type="hidden" name="game" value="%s">' % state.name)
    self.wfile.write('<input type="hidden" name="session" value="%s">' % state.session)
    self.wfile.write('<input id="p" class="prompt" type=text name="cmd" size="72">')
    self.wfile.write("</form>")
    self.footer()
class Server:
  """Bootstraps the HTTP server and blocks forever serving requests."""

  def __init__(self, port):
    # Factory the HTTPServer calls for each request; it simply forwards
    # all arguments on to RequestHandler.
    def handler(*args):
      RequestHandler(*args)
    global games
    games = Game.get_registered_games()
    HTTPServer.protocol_version = "HTTP/1.0"
    httpd = HTTPServer(('0.0.0.0', port), handler)
    httpd.serve_forever()

  @staticmethod
  def serve_http(port):
    """Convenience entry point; constructing Server never returns."""
    Server(port)


if __name__ == "__main__":
  Server.serve_http(8080)
| {
"repo_name": "sleepinghungry/wwif",
"path": "bwx_adventure/advent_http.py",
"copies": "3",
"size": "4289",
"license": "mit",
"hash": 9041582820610122000,
"line_mean": 32.2480620155,
"line_max": 261,
"alpha_frac": 0.6248542784,
"autogenerated": false,
"ratio": 3.4257188498402558,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010273575033769055,
"num_lines": 129
} |
# A simple HTTP server implemented using h11 and Trio:
# http://trio.readthedocs.io/en/latest/index.html
# (so requires python 3.5+).
#
# All requests get echoed back a JSON document containing information about
# the request.
#
# This is a rather involved example, since it attempts to both be
# fully-HTTP-compliant and also demonstrate error handling.
#
# The main difference between an HTTP client and an HTTP server is that in a
# client, if something goes wrong, you can just throw away that connection and
# make a new one. In a server, you're expected to handle all kinds of garbage
# input and internal errors and recover with grace and dignity. And that's
# what this code does.
#
# I recommend pushing on it to see how it works -- e.g. watch what happens if
# you visit http://localhost:8080 in a webbrowser that supports keep-alive,
# hit reload a few times, and then wait for the keep-alive to time out on the
# server.
#
# Or try using curl to start a chunked upload and then hit control-C in the
# middle of the upload:
#
# (for CHUNK in $(seq 10); do echo $CHUNK; sleep 1; done) \
# | curl -T - http://localhost:8080/foo
#
# (Note that curl will send Expect: 100-Continue, too.)
#
# Or, heck, try letting curl complete successfully ;-).
# Some potential improvements, if you wanted to try and extend this to a real
# general-purpose HTTP server (and to give you some hints about the many
# considerations that go into making a robust HTTP server):
#
# - The timeout handling is rather crude -- we impose a flat 10 second timeout
# on each request (starting from the end of the previous
# response). Something finer-grained would be better. Also, if a timeout is
# triggered we unconditionally send a 500 Internal Server Error; it would be
# better to keep track of whether the timeout is the client's fault, and if
# so send a 408 Request Timeout.
#
# - The error handling policy here is somewhat crude as well. It handles a lot
# of cases perfectly, but there are corner cases where the ideal behavior is
#   more debatable. For example, if a client starts uploading a large
# request, uses 100-Continue, and we send an error response, then we'll shut
# down the connection immediately (for well-behaved clients) or after
# spending TIMEOUT seconds reading and discarding their upload (for
# ill-behaved ones that go on and try to upload their request anyway). And
# for clients that do this without 100-Continue, we'll send the error
# response and then shut them down after TIMEOUT seconds. This might or
# might not be your preferred policy, though -- maybe you want to shut such
# clients down immediately (even if this risks their not seeing the
# response), or maybe you're happy to let them continue sending all the data
# and wasting your bandwidth if this is what it takes to guarantee that they
# see your error response. Up to you, really.
#
# - Another example of a debatable choice: if a response handler errors out
# without having done *anything* -- hasn't started responding, hasn't read
#   the request body -- then this connection actually is salvageable, if the
# server sends an error response + reads and discards the request body. This
# code sends the error response, but it doesn't try to salvage the
# connection by reading the request body, it just closes the
# connection. This is quite possibly the best option, but again this is a
# policy decision.
#
# - Our error pages always include the exception text. In real life you might
# want to log the exception but not send that information to the client.
#
# - Our error responses perhaps should include Connection: close when we know
# we're going to close this connection.
#
# - We don't support the HEAD method, but ought to.
#
# - We should probably do something cleverer with buffering responses and
# TCP_CORK and suchlike.
import json
from itertools import count
from wsgiref.handlers import format_date_time
import trio
import h11
MAX_RECV = 2 ** 16
TIMEOUT = 10
################################################################
# I/O adapter: h11 <-> trio
################################################################
# The core of this could be factored out to be usable for trio-based clients
# too, as well as servers. But as a simplified pedagogical example we don't
# attempt this here.
class TrioHTTPWrapper:
    """Glue between one trio stream and its h11.Connection state machine."""
    # Class-wide counter used to hand out per-connection debug ids.
    _next_id = count()
    def __init__(self, stream):
        self.stream = stream
        self.conn = h11.Connection(h11.SERVER)
        # Our Server: header
        self.ident = " ".join(
            ["h11-example-trio-server/{}".format(h11.__version__), h11.PRODUCT_ID]
        ).encode("ascii")
        # A unique id for this connection, to include in debugging output
        # (useful for understanding what's going on if there are multiple
        # simultaneous clients).
        self._obj_id = next(TrioHTTPWrapper._next_id)
    async def send(self, event):
        """Serialize an h11 event and write the resulting bytes to the stream."""
        # The code below doesn't send ConnectionClosed, so we don't bother
        # handling it here either -- it would require that we do something
        # appropriate when 'data' is None.
        assert type(event) is not h11.ConnectionClosed
        data = self.conn.send(event)
        await self.stream.send_all(data)
    async def _read_from_peer(self):
        """Feed one chunk of socket bytes into the h11 state machine."""
        if self.conn.they_are_waiting_for_100_continue:
            self.info("Sending 100 Continue")
            go_ahead = h11.InformationalResponse(
                status_code=100, headers=self.basic_headers()
            )
            await self.send(go_ahead)
        try:
            data = await self.stream.receive_some(MAX_RECV)
        except ConnectionError:
            # They've stopped listening. Not much we can do about it here.
            data = b""
        # b"" tells h11 the peer closed the connection.
        self.conn.receive_data(data)
    async def next_event(self):
        """Return the next h11 event, reading from the peer as needed."""
        while True:
            event = self.conn.next_event()
            if event is h11.NEED_DATA:
                await self._read_from_peer()
                continue
            return event
    async def shutdown_and_clean_up(self):
        """Half-close, drain the peer briefly, then close the stream."""
        # When this method is called, it's because we definitely want to kill
        # this connection, either as a clean shutdown or because of some kind
        # of error or loss-of-sync bug, and we no longer care if that violates
        # the protocol or not. So we ignore the state of self.conn, and just
        # go ahead and do the shutdown on the socket directly. (If you're
        # implementing a client you might prefer to send ConnectionClosed()
        # and let it raise an exception if that violates the protocol.)
        #
        try:
            await self.stream.send_eof()
        except trio.BrokenResourceError:
            # They're already gone, nothing to do
            return
        # Wait and read for a bit to give them a chance to see that we closed
        # things, but eventually give up and just close the socket.
        # XX FIXME: possibly we should set SO_LINGER to 0 here, so
        # that in the case where the client has ignored our shutdown and
        # declined to initiate the close themselves, we do a violent shutdown
        # (RST) and avoid the TIME_WAIT?
        # it looks like nginx never does this for keepalive timeouts, and only
        # does it for regular timeouts (slow clients I guess?) if explicitly
        # enabled ("Default: reset_timedout_connection off")
        with trio.move_on_after(TIMEOUT):
            try:
                while True:
                    # Attempt to read until EOF
                    got = await self.stream.receive_some(MAX_RECV)
                    if not got:
                        break
            except trio.BrokenResourceError:
                pass
            finally:
                await self.stream.aclose()
    def basic_headers(self):
        """Headers every response must carry (Date and Server)."""
        # HTTP requires these headers in all responses (client would do
        # something different here)
        return [
            ("Date", format_date_time(None).encode("ascii")),
            ("Server", self.ident),
        ]
    def info(self, *args):
        # Little debugging method
        print("{}:".format(self._obj_id), *args)
################################################################
# Server main loop
################################################################
# General theory:
#
# If everything goes well:
# - we'll get a Request
# - our response handler will read the request body and send a full response
# - that will either leave us in MUST_CLOSE (if the client doesn't
# support keepalive) or DONE/DONE (if the client does).
#
# But then there are many, many different ways that things can go wrong
# here. For example:
# - we don't actually get a Request, but rather a ConnectionClosed
# - exception is raised from somewhere (naughty client, broken
# response handler, whatever)
# - depending on what went wrong and where, we might or might not be
# able to send an error response, and the connection might or
#   might not be salvageable after that
# - response handler doesn't fully read the request or doesn't send a
# full response
#
# But these all have one thing in common: they involve us leaving the
# nice easy path up above. So we can just proceed on the assumption
# that the nice easy thing is what's happening, and whenever something
# goes wrong do our best to get back onto that path, and h11 will keep
# track of how successful we were and raise new errors if things don't work
# out.
async def http_serve(stream):
    """Handle one TCP connection: serve requests until close or error."""
    wrapper = TrioHTTPWrapper(stream)
    wrapper.info("Got new connection")
    while True:
        # At the top of each cycle both sides must be back in IDLE.
        assert wrapper.conn.states == {h11.CLIENT: h11.IDLE, h11.SERVER: h11.IDLE}
        try:
            # Flat per-request timeout, measured from the end of the
            # previous response (see the module comments above).
            with trio.fail_after(TIMEOUT):
                wrapper.info("Server main loop waiting for request")
                event = await wrapper.next_event()
                wrapper.info("Server main loop got event:", event)
                if type(event) is h11.Request:
                    await send_echo_response(wrapper, event)
        except Exception as exc:
            wrapper.info("Error during response handler: {!r}".format(exc))
            await maybe_send_error_response(wrapper, exc)
        if wrapper.conn.our_state is h11.MUST_CLOSE:
            wrapper.info("connection is not reusable, so shutting down")
            await wrapper.shutdown_and_clean_up()
            return
        else:
            try:
                wrapper.info("trying to re-use connection")
                wrapper.conn.start_next_cycle()
            except h11.ProtocolError:
                # Couldn't get back to DONE/DONE -- report and bail out.
                states = wrapper.conn.states
                wrapper.info("unexpected state", states, "-- bailing out")
                await maybe_send_error_response(
                    wrapper, RuntimeError("unexpected state {}".format(states))
                )
                await wrapper.shutdown_and_clean_up()
                return
################################################################
# Actual response handlers
################################################################
# Helper function
async def send_simple_response(wrapper, status_code, content_type, body):
    """Send a complete response: status + headers, the body, end-of-message."""
    wrapper.info("Sending", status_code, "response with", len(body), "bytes")
    headers = wrapper.basic_headers() + [
        ("Content-Type", content_type),
        ("Content-Length", str(len(body))),
    ]
    await wrapper.send(h11.Response(status_code=status_code, headers=headers))
    await wrapper.send(h11.Data(data=body))
    await wrapper.send(h11.EndOfMessage())
async def maybe_send_error_response(wrapper, exc):
    """Best-effort: report *exc* to the client; give up quietly if we can't."""
    wrapper.info("trying to send error response...")
    # We can only start a response if none has been started yet.
    if wrapper.conn.our_state not in {h11.IDLE, h11.SEND_RESPONSE}:
        wrapper.info("...but I can't, because our state is", wrapper.conn.our_state)
        return
    try:
        # Map the exception type onto an HTTP status code.
        if isinstance(exc, h11.RemoteProtocolError):
            status_code = exc.error_status_hint
        elif isinstance(exc, trio.TooSlowError):
            # Request Timeout
            status_code = 408
        else:
            status_code = 500
        await send_simple_response(
            wrapper, status_code, "text/plain; charset=utf-8",
            str(exc).encode("utf-8"),
        )
    except Exception as send_failure:
        wrapper.info("error while sending error response:", send_failure)
async def send_echo_response(wrapper, request):
    """Drain the request body and echo the whole request back as JSON."""
    wrapper.info("Preparing echo response")
    if request.method not in {b"GET", b"POST"}:
        # Laziness: a proper server would send 405 Method Not Allowed with
        # an appropriate Accept: header, but we don't.
        raise RuntimeError("unsupported method")
    echo = {
        "method": request.method.decode("ascii"),
        "target": request.target.decode("ascii"),
        "headers": [
            (name.decode("ascii"), value.decode("ascii"))
            for (name, value) in request.headers
        ],
        "body": "",
    }
    # Consume Data events until the message ends, accumulating the body.
    while True:
        event = await wrapper.next_event()
        if type(event) is h11.EndOfMessage:
            break
        assert type(event) is h11.Data
        echo["body"] += event.data.decode("ascii")
    payload = json.dumps(
        echo, sort_keys=True, indent=4, separators=(",", ": ")
    ).encode("utf-8")
    await send_simple_response(
        wrapper, 200, "application/json; charset=utf-8", payload
    )
async def serve(port):
    """Listen on *port*, dispatching each new connection to http_serve."""
    print("listening on http://localhost:{}".format(port))
    try:
        await trio.serve_tcp(http_serve, port)
    except KeyboardInterrupt:
        print("KeyboardInterrupt - shutting down")


################################################################
# Run the server
################################################################

if __name__ == "__main__":
    trio.run(serve, 8080)
| {
"repo_name": "python-hyper/h11",
"path": "examples/trio-server.py",
"copies": "1",
"size": "13993",
"license": "mit",
"hash": -7272696534125454000,
"line_mean": 41.021021021,
"line_max": 84,
"alpha_frac": 0.6341027657,
"autogenerated": false,
"ratio": 4.160868272375855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5294971038075855,
"avg_score": null,
"num_lines": null
} |
"""A simple ImagePlaneWidget module to view image data.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2015, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Bool, on_trait_change
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports
from mayavi.core.module import Module
from mayavi.core.common import error
from mayavi.core.pipeline_info import PipelineInfo
######################################################################
# `ImagePlaneWidget` class.
######################################################################
class ImagePlaneWidget(Module):
    """Mayavi module wrapping tvtk.ImagePlaneWidget to interactively
    slice through image data (vtkImageData / vtkStructuredPoints).
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The wrapped TVTK image-plane widget instance.
    ipw = Instance(tvtk.ImagePlaneWidget, allow_none=False, record=True)
    use_lookup_table = Bool(True,
                        help='Use a lookup table to map input scalars to colors')
    # Declares what kinds of upstream data this module accepts.
    input_info = PipelineInfo(datasets=['image_data'],
                              attribute_types=['any'],
                              attributes=['scalars'])
    # TraitsUI view showing the widget's own editor.
    view = View(Group(Item(name='ipw', style='custom', resizable=True),
                      show_labels=False
                      ),
                width=600,
                height=600,
                resizable=True,
                scrollable=True
                )
    ######################################################################
    # `Module` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.
        This method is invoked when the object is initialized via
        `__init__`. Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup. So upstream data
        will not be available. The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters. You should also
        set the `actors` attribute up at this point.
        """
        # Create the various objects for this module.
        self.ipw = tvtk.ImagePlaneWidget(display_text=1,
                                         key_press_activation=0,
                                         left_button_action=1,
                                         middle_button_action=0,
                                         user_controlled_lookup_table=True)
        self.setup_lut()
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.
        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        mod_mgr = self.module_manager
        if mod_mgr is None:
            return
        # Data is available, so set the input for the IPW.
        input = mod_mgr.source.outputs[0]
        if not (input.is_a('vtkStructuredPoints') \
                or input.is_a('vtkImageData')):
            msg = 'ImagePlaneWidget only supports structured points or '\
                  'image data.'
            error(msg)
            raise TypeError(msg)
        self.configure_input_data(self.ipw, input)
        self.setup_lut()
    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.
        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Just set data_changed, the component should do the rest.
        self.data_changed = True
    @on_trait_change('use_lookup_table')
    def setup_lut(self):
        # Set the LUT for the IPW.
        if self.use_lookup_table:
            if self.module_manager is not None:
                self.ipw.lookup_table = \
                                self.module_manager.scalar_lut_manager.lut
        else:
            self.ipw.color_map.lookup_table = None
        self.render()
    ######################################################################
    # Non-public methods.
    ######################################################################
    def _ipw_changed(self, old, new):
        # Keep the render hook and the `widgets` list in sync when the
        # ipw trait is replaced.
        if old is not None:
            old.on_trait_change(self.render, remove=True)
            self.widgets.remove(old)
        new.on_trait_change(self.render)
        self.widgets.append(new)
        if old is not None:
            self.update_pipeline()
        self.pipeline_changed = True
| {
"repo_name": "dmsurti/mayavi",
"path": "mayavi/modules/image_plane_widget.py",
"copies": "1",
"size": "4599",
"license": "bsd-3-clause",
"hash": -6773647079528875000,
"line_mean": 35.792,
"line_max": 75,
"alpha_frac": 0.5298978039,
"autogenerated": false,
"ratio": 4.622110552763819,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5652008356663819,
"avg_score": null,
"num_lines": null
} |
"""A simple ImagePlaneWidget module to view image data.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Bool, on_trait_change
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports
from mayavi.core.module import Module
from mayavi.core.common import error
from mayavi.core.pipeline_info import PipelineInfo
######################################################################
# `ImagePlaneWidget` class.
######################################################################
class ImagePlaneWidget(Module):
    """Mayavi module wrapping tvtk.ImagePlaneWidget to interactively
    slice through image data (vtkImageData / vtkStructuredPoints).
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The wrapped TVTK image-plane widget instance.
    ipw = Instance(tvtk.ImagePlaneWidget, allow_none=False, record=True)
    use_lookup_table = Bool(True,
                        help='Use a lookup table to map input scalars to colors')
    # Declares what kinds of upstream data this module accepts.
    input_info = PipelineInfo(datasets=['image_data'],
                              attribute_types=['any'],
                              attributes=['scalars'])
    # TraitsUI view showing the widget's own editor.
    view = View(Group(Item(name='ipw', style='custom', resizable=True),
                      show_labels=False
                      ),
                width=600,
                height=600,
                resizable=True,
                scrollable=True
                )
    ######################################################################
    # `Module` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.
        This method is invoked when the object is initialized via
        `__init__`. Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup. So upstream data
        will not be available. The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters. You should also
        set the `actors` attribute up at this point.
        """
        # Create the various objects for this module.
        self.ipw = tvtk.ImagePlaneWidget(display_text=1,
                                         key_press_activation=0,
                                         left_button_action=1,
                                         middle_button_action=0,
                                         user_controlled_lookup_table=True)
        self.setup_lut()
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.
        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        mod_mgr = self.module_manager
        if mod_mgr is None:
            return
        # Data is available, so set the input for the IPW.
        input = mod_mgr.source.outputs[0]
        if not (input.is_a('vtkStructuredPoints') \
                or input.is_a('vtkImageData')):
            msg = 'ImagePlaneWidget only supports structured points or '\
                  'image data.'
            error(msg)
            # FIX: use the call form of `raise`; the Python 2-only
            # ``raise TypeError, msg`` statement is a SyntaxError on
            # Python 3, and the behavior is identical on Python 2.
            raise TypeError(msg)
        self.configure_input_data(self.ipw, input)
        self.setup_lut()
    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.
        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Just set data_changed, the component should do the rest.
        self.data_changed = True
    @on_trait_change('use_lookup_table')
    def setup_lut(self):
        # Set the LUT for the IPW.
        if self.use_lookup_table:
            if self.module_manager is not None:
                self.ipw.lookup_table = \
                                self.module_manager.scalar_lut_manager.lut
        else:
            self.ipw.color_map.lookup_table = None
        self.render()
    ######################################################################
    # Non-public methods.
    ######################################################################
    def _ipw_changed(self, old, new):
        # Keep the render hook and the `widgets` list in sync when the
        # ipw trait is replaced.
        if old is not None:
            old.on_trait_change(self.render, remove=True)
            self.widgets.remove(old)
        new.on_trait_change(self.render)
        self.widgets.append(new)
        if old is not None:
            self.update_pipeline()
        self.pipeline_changed = True
| {
"repo_name": "liulion/mayavi",
"path": "mayavi/modules/image_plane_widget.py",
"copies": "2",
"size": "4597",
"license": "bsd-3-clause",
"hash": -4210866002973421600,
"line_mean": 34.9140625,
"line_max": 75,
"alpha_frac": 0.5292582119,
"autogenerated": false,
"ratio": 4.634072580645161,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008753654861235013,
"num_lines": 128
} |
"""A simple implementation of a greedy transition-based parser. Released under BSD license."""
from os import path
import os
import sys
from collections import defaultdict
import random
import time
import pickle
SHIFT = 0; RIGHT = 1; LEFT = 2;
MOVES = (SHIFT, RIGHT, LEFT)
START = ['-START-', '-START2-']
END = ['-END-', '-END2-']
class DefaultList(list):
    """A list that yields a fixed default value for out-of-range reads."""

    def __init__(self, default=None):
        super(DefaultList, self).__init__()
        # Value returned whenever an index falls outside the list.
        self.default = default

    def __getitem__(self, index):
        try:
            return super(DefaultList, self).__getitem__(index)
        except IndexError:
            return self.default
class Parse(object):
    """Records the arcs of a (partially built) dependency parse."""

    def __init__(self, n):
        self.n = n
        # heads[c] / labels[c]: head index and label assigned to child c.
        self.heads = [None] * (n-1)
        self.labels = [None] * (n-1)
        # lefts[h] / rights[h]: h's left / right children, in attachment
        # order; DefaultList(0) makes out-of-range reads yield 0.
        self.lefts = [DefaultList(0) for _ in range(n+1)]
        self.rights = [DefaultList(0) for _ in range(n+1)]

    def add(self, head, child, label=None):
        self.heads[child] = head
        self.labels[child] = label
        side = self.lefts if child < head else self.rights
        side[head].append(child)
class Parser(object):
    """Greedy transition-based dependency parser driven by a perceptron.

    Relies on `Perceptron` and `PerceptronTagger` defined/imported
    elsewhere in this module; model weights are persisted as
    'parser.pickle' next to this file.
    """
    def __init__(self, load=True):
        # When `load` is true, restore previously trained weights.
        model_dir = os.path.dirname(__file__)
        self.model = Perceptron(MOVES)
        if load:
            self.model.load(path.join(model_dir, 'parser.pickle'))
        self.tagger = PerceptronTagger(load=load)
        # confusion_matrix[gold_move][guessed_move] -> count (training stats)
        self.confusion_matrix = defaultdict(lambda: defaultdict(int))
    def save(self):
        """Persist the parser and tagger models to disk."""
        self.model.save(path.join(os.path.dirname(__file__), 'parser.pickle'))
        self.tagger.save()
    def parse(self, words):
        """Tag `words` and greedily parse them; returns (tags, heads)."""
        n = len(words)
        # Initial configuration: word 1 on the stack, buffer starts at 2
        # (words presumably include start/end padding -- see START/END).
        i = 2; stack = [1]; parse = Parse(n)
        tags = self.tagger.tag(words)
        while stack or (i+1) < n:
            features = extract_features(words, tags, i, n, stack, parse)
            scores = self.model.score(features)
            valid_moves = get_valid_moves(i, n, len(stack))
            # Greedy: take the highest-scoring valid move.
            guess = max(valid_moves, key=lambda move: scores[move])
            i = transition(guess, i, stack, parse)
        return tags, parse.heads
    def train_one(self, itn, words, gold_tags, gold_heads):
        """One training pass over a sentence; returns #correct heads."""
        n = len(words)
        i = 2; stack = [1]; parse = Parse(n)
        tags = self.tagger.tag(words)
        while stack or (i + 1) < n:
            features = extract_features(words, tags, i, n, stack, parse)
            scores = self.model.score(features)
            valid_moves = get_valid_moves(i, n, len(stack))
            gold_moves = get_gold_moves(i, n, stack, parse.heads, gold_heads)
            guess = max(valid_moves, key=lambda move: scores[move])
            assert gold_moves
            # Update towards the best zero-cost move, away from the guess.
            best = max(gold_moves, key=lambda move: scores[move])
            self.model.update(best, guess, features)
            i = transition(guess, i, stack, parse)
            self.confusion_matrix[best][guess] += 1
        return len([i for i in range(n-1) if parse.heads[i] == gold_heads[i]])
def transition(move, i, stack, parse):
    """Apply *move* to the configuration; return the new buffer index."""
    if move == SHIFT:
        # Push the front of the buffer onto the stack.
        stack.append(i)
        return i + 1
    if move == RIGHT:
        # Attach the stack top to the item beneath it.
        parse.add(stack[-2], stack.pop())
        return i
    if move == LEFT:
        # Attach the stack top to the front of the buffer.
        parse.add(i, stack.pop())
        return i
    assert move in MOVES
def get_valid_moves(i, n, stack_depth):
    """Return the transitions that are legal in the current configuration."""
    candidates = [
        (SHIFT, (i+1) < n),          # a buffer word must remain to push
        (RIGHT, stack_depth >= 2),   # right-arc needs two stack items
        (LEFT, stack_depth >= 1),    # left-arc needs a stack item
    ]
    return [move for move, allowed in candidates if allowed]
def get_gold_moves(n0, n, stack, heads, gold):
    """Return the moves that lose no gold arcs from this configuration.

    `n0` is the front of the buffer, `stack` the current stack, and
    `gold` the gold head for each word.  Works by starting from the
    valid moves and eliminating the "costly" ones -- moves that would
    make some gold arc unreachable (a dynamic-oracle-style computation).
    """
    def deps_between(target, others, gold):
        # True if any word in `others` has a gold arc to or from `target`.
        for word in others:
            if gold[word] == target or gold[target] == word:
                return True
        return False
    valid = get_valid_moves(n0, n, len(stack))
    # Shortcuts: if n0's gold head is the stack top, only SHIFT is safe;
    # if the stack top's gold head is n0, only LEFT is.
    if not stack or (SHIFT in valid and gold[n0] == stack[-1]):
        return [SHIFT]
    if gold[stack[-1]] == n0:
        return [LEFT]
    costly = set([m for m in MOVES if m not in valid])
    # If the word behind s0 is its gold head, Left is incorrect
    if len(stack) >= 2 and gold[stack[-1]] == stack[-2]:
        costly.add(LEFT)
    # If there are any dependencies between n0 and the stack,
    # pushing n0 will lose them.
    if SHIFT not in costly and deps_between(n0, stack, gold):
        costly.add(SHIFT)
    # If there are any dependencies between s0 and the buffer, popping
    # s0 will lose them.
    if deps_between(stack[-1], range(n0+1, n-1), gold):
        costly.add(LEFT)
        costly.add(RIGHT)
    return [m for m in MOVES if m not in costly]
def extract_features(words, tags, n0, n, stack, parse):
    """Build the sparse feature dict for the current parser configuration.

    Keys are human-readable feature strings; values are 1 for indicator
    features and small integers for the valency/distance features.
    """
    def get_stack_context(depth, stack, data):
        # Word/tag of the top three stack items; '' when the stack is shallower.
        if depth >= 3:
            return data[stack[-1]], data[stack[-2]], data[stack[-3]]
        elif depth >= 2:
            return data[stack[-1]], data[stack[-2]], ''
        elif depth == 1:
            return data[stack[-1]], '', ''
        else:
            return '', '', ''
    def get_buffer_context(i, n, data):
        # Word/tag of the next three buffer items; '' past the end.
        if i + 1 >= n:
            return data[i], '', ''
        elif i + 2 >= n:
            return data[i], data[i + 1], ''
        else:
            return data[i], data[i + 1], data[i + 2]
    def get_parse_context(word, deps, data):
        # Valency plus word/tag of the two most recent dependents of `word`.
        if word == -1:
            return 0, '', ''
        deps = deps[word]
        valency = len(deps)
        if not valency:
            return 0, '', ''
        elif valency == 1:
            return 1, data[deps[-1]], ''
        else:
            return valency, data[deps[-1]], data[deps[-2]]
    features = {}
    # Set up the context pieces --- the word (W) and tag (T) of:
    # S0-2: Top three words on the stack
    # N0-2: First three words of the buffer
    # n0b1, n0b2: Two leftmost children of the first word of the buffer
    # s0b1, s0b2: Two leftmost children of the top word of the stack
    # s0f1, s0f2: Two rightmost children of the top word of the stack
    depth = len(stack)
    s0 = stack[-1] if depth else -1
    Ws0, Ws1, Ws2 = get_stack_context(depth, stack, words)
    Ts0, Ts1, Ts2 = get_stack_context(depth, stack, tags)
    Wn0, Wn1, Wn2 = get_buffer_context(n0, n, words)
    Tn0, Tn1, Tn2 = get_buffer_context(n0, n, tags)
    Vn0b, Wn0b1, Wn0b2 = get_parse_context(n0, parse.lefts, words)
    # Vn0b is rebound to the same value here (both calls use parse.lefts).
    Vn0b, Tn0b1, Tn0b2 = get_parse_context(n0, parse.lefts, tags)
    Vn0f, Wn0f1, Wn0f2 = get_parse_context(n0, parse.rights, words)
    _, Tn0f1, Tn0f2 = get_parse_context(n0, parse.rights, tags)
    Vs0b, Ws0b1, Ws0b2 = get_parse_context(s0, parse.lefts, words)
    _, Ts0b1, Ts0b2 = get_parse_context(s0, parse.lefts, tags)
    Vs0f, Ws0f1, Ws0f2 = get_parse_context(s0, parse.rights, words)
    _, Ts0f1, Ts0f2 = get_parse_context(s0, parse.rights, tags)
    # Cap numeric features at 5?
    # String-distance
    Ds0n0 = min((n0 - s0, 5)) if s0 != 0 else 0
    features['bias'] = 1
    # Add word and tag unigrams
    for w in (Wn0, Wn1, Wn2, Ws0, Ws1, Ws2, Wn0b1, Wn0b2, Ws0b1, Ws0b2, Ws0f1, Ws0f2):
        if w:
            features['w=%s' % w] = 1
    for t in (Tn0, Tn1, Tn2, Ts0, Ts1, Ts2, Tn0b1, Tn0b2, Ts0b1, Ts0b2, Ts0f1, Ts0f2):
        if t:
            features['t=%s' % t] = 1
    # Add word/tag pairs
    for i, (w, t) in enumerate(((Wn0, Tn0), (Wn1, Tn1), (Wn2, Tn2), (Ws0, Ts0))):
        if w or t:
            features['%d w=%s, t=%s' % (i, w, t)] = 1
    # Add some bigrams
    features['s0w=%s,  n0w=%s' % (Ws0, Wn0)] = 1
    features['wn0tn0-ws0 %s/%s %s' % (Wn0, Tn0, Ws0)] = 1
    features['wn0tn0-ts0 %s/%s %s' % (Wn0, Tn0, Ts0)] = 1
    features['ws0ts0-wn0 %s/%s %s' % (Ws0, Ts0, Wn0)] = 1
    features['ws0-ts0 tn0 %s/%s %s' % (Ws0, Ts0, Tn0)] = 1
    features['wt-wt %s/%s %s/%s' % (Ws0, Ts0, Wn0, Tn0)] = 1
    features['tt s0=%s n0=%s' % (Ts0, Tn0)] = 1
    features['tt n0=%s n1=%s' % (Tn0, Tn1)] = 1
    # Add some tag trigrams
    trigrams = ((Tn0, Tn1, Tn2), (Ts0, Tn0, Tn1), (Ts0, Ts1, Tn0),
                (Ts0, Ts0f1, Tn0), (Ts0, Ts0f1, Tn0), (Ts0, Tn0, Tn0b1),
                (Ts0, Ts0b1, Ts0b2), (Ts0, Ts0f1, Ts0f2), (Tn0, Tn0b1, Tn0b2),
                (Ts0, Ts1, Ts1))
    for i, (t1, t2, t3) in enumerate(trigrams):
        if t1 or t2 or t3:
            features['ttt-%d %s %s %s' % (i, t1, t2, t3)] = 1
    # Add some valency and distance features
    vw = ((Ws0, Vs0f), (Ws0, Vs0b), (Wn0, Vn0b))
    vt = ((Ts0, Vs0f), (Ts0, Vs0b), (Tn0, Vn0b))
    d = ((Ws0, Ds0n0), (Wn0, Ds0n0), (Ts0, Ds0n0), (Tn0, Ds0n0),
         ('t' + Tn0+Ts0, Ds0n0), ('w' + Wn0+Ws0, Ds0n0))
    for i, (w_t, v_d) in enumerate(vw + vt + d):
        if w_t or v_d:
            features['val/d-%d %s %d' % (i, w_t, v_d)] = 1
    return features
class Perceptron(object):
    """Averaged perceptron classifier over sparse, string-keyed features."""
    def __init__(self, classes=None):
        # Each feature gets its own weight vector, so weights is a dict-of-arrays
        self.classes = classes
        self.weights = {}
        # The accumulated values, for the averaging. These will be keyed by
        # feature/clas tuples
        self._totals = defaultdict(int)
        # The last time the feature was changed, for the averaging. Also
        # keyed by feature/clas tuples
        # (tstamps is short for timestamps)
        self._tstamps = defaultdict(int)
        # Number of instances seen
        self.i = 0
    def predict(self, features):
        '''Dot-product the features and current weights and return the best class.'''
        scores = self.score(features)
        # Do a secondary alphabetic sort, for stability
        return max(self.classes, key=lambda clas: (scores[clas], clas))
    def score(self, features):
        '''Return a {class: weighted feature sum} dict over all known classes.'''
        all_weights = self.weights
        scores = dict((clas, 0) for clas in self.classes)
        for feat, value in features.items():
            if value == 0:
                continue
            if feat not in all_weights:
                continue
            weights = all_weights[feat]
            for clas, weight in weights.items():
                scores[clas] += value * weight
        return scores
    def update(self, truth, guess, features):
        '''Perceptron update: reward `truth`, penalise `guess`; no-op when equal.'''
        def upd_feat(c, f, w, v):
            # Accumulate the area under the weight curve before changing the
            # weight, so average_weights() can compute the mean lazily.
            param = (f, c)
            self._totals[param] += (self.i - self._tstamps[param]) * w
            self._tstamps[param] = self.i
            self.weights[f][c] = w + v
        self.i += 1
        if truth == guess:
            return None
        for f in features:
            weights = self.weights.setdefault(f, {})
            upd_feat(truth, f, weights.get(truth, 0.0), 1.0)
            upd_feat(guess, f, weights.get(guess, 0.0), -1.0)
    def average_weights(self):
        '''Replace each weight with its average over all updates seen.'''
        for feat, weights in self.weights.items():
            new_feat_weights = {}
            for clas, weight in weights.items():
                param = (feat, clas)
                total = self._totals[param]
                # Account for the time the current weight has been in place.
                total += (self.i - self._tstamps[param]) * weight
                averaged = round(total / float(self.i), 3)
                if averaged:
                    new_feat_weights[clas] = averaged
            self.weights[feat] = new_feat_weights
    def save(self, path):
        '''Pickle the weight table to `path`.'''
        print "Saving model to %s" % path
        pickle.dump(self.weights, open(path, 'w'))
    def load(self, path):
        '''Load a previously pickled weight table from `path`.'''
        self.weights = pickle.load(open(path))
class PerceptronTagger(object):
    '''Greedy Averaged Perceptron tagger'''
    # Default location of the pickled (weights, tagdict, classes) triple.
    model_loc = os.path.join(os.path.dirname(__file__), 'tagger.pickle')
    def __init__(self, classes=None, load=True):
        # Frequent, unambiguous words bypass the model via this lookup table.
        self.tagdict = {}
        if classes:
            self.classes = classes
        else:
            self.classes = set()
        self.model = Perceptron(self.classes)
        if load:
            self.load(PerceptronTagger.model_loc)
    def tag(self, words, tokenize=True):
        '''Greedily tag a word sequence; `tokenize` is unused here.'''
        prev, prev2 = START
        tags = DefaultList('')
        context = START + [self._normalize(w) for w in words] + END
        for i, word in enumerate(words):
            # Use the unambiguous-word shortcut when possible.
            tag = self.tagdict.get(word)
            if not tag:
                features = self._get_features(i, word, context, prev, prev2)
                tag = self.model.predict(features)
            tags.append(tag)
            prev2 = prev; prev = tag
        return tags
    def start_training(self, sentences):
        '''Initialise the tag dictionary and a fresh model before training.'''
        self._make_tagdict(sentences)
        self.model = Perceptron(self.classes)
    def train(self, sentences, save_loc=None, nr_iter=5):
        '''Train a model from sentences, and save it at save_loc. nr_iter
        controls the number of Perceptron training iterations.'''
        # NOTE(review): end_training is not defined anywhere on this class,
        # so calling this method raises AttributeError -- confirm before use.
        self.start_training(sentences)
        for iter_ in range(nr_iter):
            for words, tags in sentences:
                self.train_one(words, tags)
            random.shuffle(sentences)
        self.end_training(save_loc)
    def save(self):
        # Pickle as a binary file
        pickle.dump((self.model.weights, self.tagdict, self.classes),
                    open(PerceptronTagger.model_loc, 'wb'), -1)
    def train_one(self, words, tags):
        '''One online training pass over a single (words, tags) sentence.'''
        prev, prev2 = START
        context = START + [self._normalize(w) for w in words] + END
        for i, word in enumerate(words):
            guess = self.tagdict.get(word)
            if not guess:
                feats = self._get_features(i, word, context, prev, prev2)
                guess = self.model.predict(feats)
                self.model.update(tags[i], guess, feats)
            prev2 = prev; prev = guess
    def load(self, loc):
        '''Load pickled weights, tag dictionary and class set from `loc`.'''
        w_td_c = pickle.load(open(loc, 'rb'))
        self.model.weights, self.tagdict, self.classes = w_td_c
        self.model.classes = self.classes
    def _normalize(self, word):
        # Collapse sparse word shapes into coarse placeholder tokens.
        if '-' in word and word[0] != '-':
            return '!HYPHEN'
        elif word.isdigit() and len(word) == 4:
            return '!YEAR'
        elif word[0].isdigit():
            return '!DIGITS'
        else:
            return word.lower()
    def _get_features(self, i, word, context, prev, prev2):
        '''Map tokens into a feature representation, implemented as a
        {hashable: float} dict. If the features change, a new model must be
        trained.'''
        def add(name, *args):
            features[' '.join((name,) + tuple(args))] += 1
        # context is padded with START, so shift the index accordingly.
        i += len(START)
        features = defaultdict(int)
        # It's useful to have a constant feature, which acts sort of like a prior
        add('bias')
        add('i suffix', word[-3:])
        add('i pref1', word[0])
        add('i-1 tag', prev)
        add('i-2 tag', prev2)
        add('i tag+i-2 tag', prev, prev2)
        add('i word', context[i])
        add('i-1 tag+i word', prev, context[i])
        add('i-1 word', context[i-1])
        add('i-1 suffix', context[i-1][-3:])
        add('i-2 word', context[i-2])
        add('i+1 word', context[i+1])
        add('i+1 suffix', context[i+1][-3:])
        add('i+2 word', context[i+2])
        return features
    def _make_tagdict(self, sentences):
        '''Make a tag dictionary for single-tag words.'''
        counts = defaultdict(lambda: defaultdict(int))
        for sent in sentences:
            for word, tag in zip(sent[0], sent[1]):
                counts[word][tag] += 1
                self.classes.add(tag)
        freq_thresh = 20
        ambiguity_thresh = 0.97
        for word, tag_freqs in counts.items():
            tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
            n = sum(tag_freqs.values())
            # Don't add rare words to the tag dictionary
            # Only add quite unambiguous words
            if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
                self.tagdict[word] = tag
def _pc(n, d):
return (float(n) / d) * 100
def train(parser, sentences, nr_iter):
    """Joint training loop: train the parser every iteration and the tagger
    for the first five, printing per-iteration head accuracy."""
    parser.tagger.start_training(sentences)
    for itn in range(nr_iter):
        corr = 0; total = 0
        random.shuffle(sentences)
        for words, gold_tags, gold_parse, gold_label in sentences:
            corr += parser.train_one(itn, words, gold_tags, gold_parse)
            if itn < 5:
                parser.tagger.train_one(words, gold_tags)
            total += len(words)
        print itn, '%.3f' % (float(corr) / float(total))
        # Freeze the tagger after its final training iteration.
        if itn == 4:
            parser.tagger.model.average_weights()
    print 'Averaging weights'
    parser.model.average_weights()
def read_pos(loc):
    """Yield a padded (words, tags) pair per non-blank line of a word/tag file.

    Tokens are `word/tag`; the split is on the last '/' so words may contain
    slashes.
    """
    for line in open(loc):
        if not line.strip():
            continue
        words = DefaultList('')
        tags = DefaultList('')
        for token in line.split():
            if not token:
                continue
            word, tag = token.rsplit('/', 1)
            #words.append(normalize(word))
            words.append(word)
            tags.append(tag)
        pad_tokens(words); pad_tokens(tags)
        yield words, tags
def read_conll(loc):
    """Yield (words, tags, heads, labels) per blank-line-separated sentence.

    heads/labels are offset by a leading None to align with the <start> pad;
    a head of '-1' (root) is remapped to the index of the trailing ROOT token.
    """
    for sent_str in open(loc).read().strip().split('\n\n'):
        lines = [line.split() for line in sent_str.split('\n')]
        words = DefaultList(''); tags = DefaultList('')
        heads = [None]; labels = [None]
        for i, (word, pos, head, label) in enumerate(lines):
            words.append(intern(word))
            #words.append(intern(normalize(word)))
            tags.append(intern(pos))
            heads.append(int(head) + 1 if head != '-1' else len(lines) + 1)
            labels.append(label)
        pad_tokens(words); pad_tokens(tags)
        yield words, tags, heads, labels
def pad_tokens(tokens):
    """Frame the token list in place with the '<start>' and 'ROOT' sentinels."""
    tokens[0:0] = ['<start>']
    tokens.append('ROOT')
def main(model_dir, train_loc, heldout_in, heldout_gold):
    """Train a parser on CoNLL data, save it, then report unlabelled head
    accuracy (punctuation excluded) and parse time on the held-out set."""
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    input_sents = list(read_pos(heldout_in))
    parser = Parser(load=False)
    sentences = list(read_conll(train_loc))
    train(parser, sentences, nr_iter=15)
    parser.save()
    c = 0
    t = 0
    gold_sents = list(read_conll(heldout_gold))
    t1 = time.time()
    for (words, tags), (_, _, gold_heads, gold_labels) in zip(input_sents, gold_sents):
        _, heads = parser.parse(words)
        # Skip the <start>/ROOT padding at the ends when scoring.
        for i, w in list(enumerate(words))[1:-1]:
            # Punctuation is conventionally excluded from evaluation.
            if gold_labels[i] in ('P', 'punct'):
                continue
            if heads[i] == gold_heads[i]:
                c += 1
            t += 1
    t2 = time.time()
    print 'Parsing took %0.3f ms' % ((t2-t1)*1000.0)
    print c, t, float(c)/t
if __name__ == '__main__':
    # Usage: script.py model_dir train_loc heldout_input heldout_gold
    main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
| {
"repo_name": "mdda/ConciseGreedyDependencyParser-in-Scala",
"path": "python/honnibal-original-gist.py",
"copies": "1",
"size": "18445",
"license": "mit",
"hash": -4045423303082530300,
"line_mean": 34.5394990366,
"line_max": 94,
"alpha_frac": 0.5520737327,
"autogenerated": false,
"ratio": 3.2089422407794017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9240663722831963,
"avg_score": 0.004070450129487665,
"num_lines": 519
} |
"""A simple implementation of a greedy transition-based parser. Released under BSD license."""
from os import path
import os
import sys
from collections import defaultdict
import random
import time
import pickle
import re # For sentence splitting only
# Transition-action identifiers, and the padding tokens used by the tagger
# to give every word two tokens of left and right context.
SHIFT = 0; RIGHT = 1; LEFT = 2;
MOVES = (SHIFT, RIGHT, LEFT)
START = ['-START-', '-START2-']
END = ['-END-', '-END2-']
class DefaultList(list):
    """A list that yields a fixed fallback value for out-of-range indices."""

    def __init__(self, default=None):
        # Remember the fallback handed back when an index lookup misses.
        self.default = default
        super(DefaultList, self).__init__()

    def __getitem__(self, index):
        # EAFP: attempt the normal lookup and fall back on IndexError.
        try:
            return super(DefaultList, self).__getitem__(index)
        except IndexError:
            return self.default
class Parse(object):
    """Mutable partial dependency parse over a padded sentence of n tokens."""
    def __init__(self, n):
        self.n = n
        # NOTE(review): heads has n-1 slots while lefts/rights have n+1 --
        # confirm the asymmetric sizing is intended before changing it.
        self.heads = [None] * (n-1)
        #self.labels = [None] * (n-1)
        # Per-head lists of left/right dependents, defaulting to index 0.
        self.lefts = []
        self.rights = []
        for i in range(n+1):
            self.lefts.append(DefaultList(0))
            self.rights.append(DefaultList(0))
    def add(self, head, child, label=None):
        '''Record head -> child and append child to the head's side list.'''
        self.heads[child] = head
        #self.labels[child] = label
        if child < head:
            self.lefts[head].append(child)
        else:
            self.rights[head].append(child)
class Parser(object):
    """Greedy transition-based dependency parser with an integrated tagger."""
    def __init__(self, load=True):
        model_dir = os.path.join(os.path.dirname(__file__), 'models/')
        self.model = Perceptron(MOVES)
        if load:
            self.model.load(path.join(model_dir, 'parser.pickle'))
        self.tagger = PerceptronTagger(load=load)
        # confusion_matrix[gold_move][predicted_move] update counts.
        self.confusion_matrix = defaultdict(lambda: defaultdict(int))
    def save(self):
        '''Persist both the parser model and the tagger model.'''
        self.model.save(path.join(os.path.dirname(__file__), 'models/parser.pickle'))
        self.tagger.save()
    def parse(self, words):
        '''Greedily parse a padded word list; returns (tags, heads).'''
        n = len(words)
        i = 2; stack = [1]; parse = Parse(n)
        tags = self.tagger.tag(words)
        while stack or (i+1) < n:
            features = extract_features(words, tags, i, n, stack, parse)
            scores = self.model.score(features)
            valid_moves = get_valid_moves(i, n, len(stack))
            # Take the highest-scoring legal transition.
            guess = max(valid_moves, key=lambda move: scores[move])
            i = transition(guess, i, stack, parse)
        return tags, parse.heads
    def train_one(self, itn, words, gold_tags, gold_heads):
        '''One training pass over a sentence; returns the number of tokens
        whose predicted head matches the gold head.  `itn` and `gold_tags`
        are unused inside this method.'''
        n = len(words)
        #print "train_one(%d, n=%d, %s)" % (itn, n, words, )
        #print " gold_heads = %s" % (gold_heads, )
        i = 2; stack = [1]; parse = Parse(n)
        tags = self.tagger.tag(words)
        while stack or (i + 1) < n:
            #print " i/n=%d/%d stack=" % (i,n ), stack
            features = extract_features(words, tags, i, n, stack, parse)
            scores = self.model.score(features)
            valid_moves = get_valid_moves(i, n, len(stack))
            guess = max(valid_moves, key=lambda move: scores[move])
            gold_moves = get_gold_moves(i, n, stack, parse.heads, gold_heads)
            assert gold_moves
            best = max(gold_moves, key=lambda move: scores[move])
            self.model.update(best, guess, features)
            # Follow the model's own guess rather than the gold move.
            i = transition(guess, i, stack, parse)
            self.confusion_matrix[best][guess] += 1
        return len([i for i in range(n-1) if parse.heads[i] == gold_heads[i]])
def transition(move, i, stack, parse):
    """Mutate the parser state according to `move`; return the next buffer index."""
    if move == SHIFT:
        stack.append(i)
        return i + 1
    elif move == RIGHT:
        # s1 becomes the head of the popped s0.
        dependent = stack.pop()
        parse.add(stack[-1], dependent)
        return i
    elif move == LEFT:
        # The buffer front becomes the head of the popped s0.
        dependent = stack.pop()
        parse.add(i, dependent)
        return i
    # Reaching here means `move` was not a recognised action.
    assert move in MOVES
def get_valid_moves(i, n, stack_depth):
    """List the transitions applicable in the current configuration."""
    applicable = []
    if n > i + 1:                 # buffer still has material to shift
        applicable.append(SHIFT)
    if stack_depth > 1:           # RIGHT consumes s0 with s1 as head
        applicable.append(RIGHT)
    if stack_depth > 0:           # LEFT consumes s0 with n0 as head
        applicable.append(LEFT)
    return applicable
def get_gold_moves(n0, n, stack, heads, gold):
    """Return the moves that do not make any gold dependency unreachable.

    n0: buffer-front index; n: padded sentence length; heads: heads assigned
    so far (not read here); gold: gold head index per word.
    """
    def deps_between(target, others, gold):
        # True if any gold arc links `target` with a word in `others`.
        for word in others:
            if gold[word] == target or gold[target] == word:
                return True
        return False
    valid = get_valid_moves(n0, n, len(stack))
    # Empty stack, or the stack top is n0's gold head: SHIFT is the only move.
    if not stack or (SHIFT in valid and gold[n0] == stack[-1]):
        return [SHIFT]
    # n0 is the gold head of the stack top: LEFT is the only move.
    if gold[stack[-1]] == n0:
        return [LEFT]
    # Start by ruling out moves that are not even valid in this state.
    costly = set([m for m in MOVES if m not in valid])
    #print "Costly = ", costly
    # If the word behind s0 is its gold head, Left is incorrect
    if len(stack) >= 2 and gold[stack[-1]] == stack[-2]:
        costly.add(LEFT)
    # If there are any dependencies between n0 and the stack,
    # pushing n0 will lose them.
    if SHIFT not in costly and deps_between(n0, stack, gold):
        costly.add(SHIFT)
    # If there are any dependencies between s0 and the buffer, popping
    # s0 will lose them.
    if deps_between(stack[-1], range(n0+1, n-1), gold):
        costly.add(LEFT)
        costly.add(RIGHT)
    return [m for m in MOVES if m not in costly]
def extract_features(words, tags, n0, n, stack, parse):
    """Build the sparse feature dict for the current parser configuration.

    Keys are human-readable feature strings; values are 1 for indicator
    features and small integers for the valency/distance features.
    """
    def get_stack_context(depth, stack, data):
        # Word/tag of the top three stack items; '' when the stack is shallower.
        if depth >= 3:
            return data[stack[-1]], data[stack[-2]], data[stack[-3]]
        elif depth >= 2:
            return data[stack[-1]], data[stack[-2]], ''
        elif depth == 1:
            return data[stack[-1]], '', ''
        else:
            return '', '', ''
    def get_buffer_context(i, n, data):
        # Word/tag of the next three buffer items; '' past the end.
        if i + 1 >= n:
            return data[i], '', ''
        elif i + 2 >= n:
            return data[i], data[i + 1], ''
        else:
            return data[i], data[i + 1], data[i + 2]
    def get_parse_context(word, deps, data):
        # Valency plus word/tag of the two most recent dependents of `word`.
        if word == -1:
            return 0, '', ''
        deps = deps[word]
        valency = len(deps)
        if not valency:
            return 0, '', ''
        elif valency == 1:
            return 1, data[deps[-1]], ''
        else:
            return valency, data[deps[-1]], data[deps[-2]]
    features = {}
    # Set up the context pieces --- the word (W) and tag (T) of:
    # S0-2: Top three words on the stack
    # N0-2: First three words of the buffer
    # n0b1, n0b2: Two leftmost children of the first word of the buffer
    # s0b1, s0b2: Two leftmost children of the top word of the stack
    # s0f1, s0f2: Two rightmost children of the top word of the stack
    depth = len(stack)
    s0 = stack[-1] if depth else -1
    Ws0, Ws1, Ws2 = get_stack_context(depth, stack, words)
    Ts0, Ts1, Ts2 = get_stack_context(depth, stack, tags)
    Wn0, Wn1, Wn2 = get_buffer_context(n0, n, words)
    Tn0, Tn1, Tn2 = get_buffer_context(n0, n, tags)
    Vn0b, Wn0b1, Wn0b2 = get_parse_context(n0, parse.lefts, words)
    # Vn0b is rebound to the same value here (both calls use parse.lefts).
    Vn0b, Tn0b1, Tn0b2 = get_parse_context(n0, parse.lefts, tags)
    Vn0f, Wn0f1, Wn0f2 = get_parse_context(n0, parse.rights, words)
    _, Tn0f1, Tn0f2 = get_parse_context(n0, parse.rights, tags)
    Vs0b, Ws0b1, Ws0b2 = get_parse_context(s0, parse.lefts, words)
    _, Ts0b1, Ts0b2 = get_parse_context(s0, parse.lefts, tags)
    Vs0f, Ws0f1, Ws0f2 = get_parse_context(s0, parse.rights, words)
    _, Ts0f1, Ts0f2 = get_parse_context(s0, parse.rights, tags)
    # Cap numeric features at 5?
    # String-distance
    Ds0n0 = min((n0 - s0, 5)) if s0 != 0 else 0
    features['bias'] = 1
    # Add word and tag unigrams
    for w in (Wn0, Wn1, Wn2, Ws0, Ws1, Ws2, Wn0b1, Wn0b2, Ws0b1, Ws0b2, Ws0f1, Ws0f2):
        if w:
            features['w=%s' % w] = 1
    for t in (Tn0, Tn1, Tn2, Ts0, Ts1, Ts2, Tn0b1, Tn0b2, Ts0b1, Ts0b2, Ts0f1, Ts0f2):
        if t:
            features['t=%s' % t] = 1
    # Add word/tag pairs
    for i, (w, t) in enumerate(((Wn0, Tn0), (Wn1, Tn1), (Wn2, Tn2), (Ws0, Ts0))):
        if w or t:
            features['%d w=%s, t=%s' % (i, w, t)] = 1
    # Add some bigrams
    features['s0w=%s,  n0w=%s' % (Ws0, Wn0)] = 1
    features['wn0tn0-ws0 %s/%s %s' % (Wn0, Tn0, Ws0)] = 1
    features['wn0tn0-ts0 %s/%s %s' % (Wn0, Tn0, Ts0)] = 1
    features['ws0ts0-wn0 %s/%s %s' % (Ws0, Ts0, Wn0)] = 1
    features['ws0-ts0 tn0 %s/%s %s' % (Ws0, Ts0, Tn0)] = 1
    features['wt-wt %s/%s %s/%s' % (Ws0, Ts0, Wn0, Tn0)] = 1
    features['tt s0=%s n0=%s' % (Ts0, Tn0)] = 1
    features['tt n0=%s n1=%s' % (Tn0, Tn1)] = 1
    # Add some tag trigrams
    trigrams = ((Tn0, Tn1, Tn2), (Ts0, Tn0, Tn1), (Ts0, Ts1, Tn0),
                (Ts0, Ts0f1, Tn0), (Ts0, Ts0f1, Tn0), (Ts0, Tn0, Tn0b1),
                (Ts0, Ts0b1, Ts0b2), (Ts0, Ts0f1, Ts0f2), (Tn0, Tn0b1, Tn0b2),
                (Ts0, Ts1, Ts1))
    for i, (t1, t2, t3) in enumerate(trigrams):
        if t1 or t2 or t3:
            features['ttt-%d %s %s %s' % (i, t1, t2, t3)] = 1
    # Add some valency and distance features
    vw = ((Ws0, Vs0f), (Ws0, Vs0b), (Wn0, Vn0b))
    vt = ((Ts0, Vs0f), (Ts0, Vs0b), (Tn0, Vn0b))
    d = ((Ws0, Ds0n0), (Wn0, Ds0n0), (Ts0, Ds0n0), (Tn0, Ds0n0),
         ('t' + Tn0+Ts0, Ds0n0), ('w' + Wn0+Ws0, Ds0n0))
    for i, (w_t, v_d) in enumerate(vw + vt + d):
        if w_t or v_d:
            features['val/d-%d %s %d' % (i, w_t, v_d)] = 1
    return features
class Perceptron(object):
    """Averaged perceptron classifier over sparse, string-keyed features."""
    def __init__(self, classes=None):
        # Each feature gets its own weight vector, so weights is a dict-of-arrays
        self.classes = classes
        self.weights = {}
        # The accumulated values, for the averaging. These will be keyed by
        # feature/clas tuples
        self._totals = defaultdict(int)
        # The last time the feature was changed, for the averaging. Also
        # keyed by feature/clas tuples
        # (tstamps is short for timestamps)
        self._tstamps = defaultdict(int)
        # Number of instances seen
        self.i = 0
    def predict(self, features):
        '''Dot-product the features and current weights and return the best class.'''
        scores = self.score(features)
        # Do a secondary alphabetic sort, for stability
        return max(self.classes, key=lambda clas: (scores[clas], clas))
    def score(self, features):
        '''Return a {class: weighted feature sum} dict over all known classes.'''
        all_weights = self.weights
        scores = dict((clas, 0) for clas in self.classes)
        for feat, value in features.items():
            if value == 0:
                continue
            if feat not in all_weights:
                continue
            weights = all_weights[feat]
            for clas, weight in weights.items():
                scores[clas] += value * weight
        return scores
    def update(self, truth, guess, features):
        '''Perceptron update: reward `truth`, penalise `guess`; no-op when equal.'''
        def upd_feat(c, f, w, v):
            # Accumulate the area under the weight curve before changing the
            # weight, so average_weights() can compute the mean lazily.
            param = (f, c)
            self._totals[param] += (self.i - self._tstamps[param]) * w
            self._tstamps[param] = self.i
            self.weights[f][c] = w + v
        self.i += 1
        if truth == guess:
            return None
        for f in features:
            weights = self.weights.setdefault(f, {})
            upd_feat(truth, f, weights.get(truth, 0.0), 1.0)
            upd_feat(guess, f, weights.get(guess, 0.0), -1.0)
    def average_weights(self):
        '''Replace each weight with its average over all updates seen.'''
        for feat, weights in self.weights.items():
            new_feat_weights = {}
            for clas, weight in weights.items():
                param = (feat, clas)
                total = self._totals[param]
                # Account for the time the current weight has been in place.
                total += (self.i - self._tstamps[param]) * weight
                averaged = round(total / float(self.i), 3)
                if averaged:
                    new_feat_weights[clas] = averaged
            self.weights[feat] = new_feat_weights
    def save(self, path):
        '''Pickle the weight table to `path`.'''
        print "Saving model to %s" % path
        pickle.dump(self.weights, open(path, 'w'))
    def load(self, path):
        '''Load a previously pickled weight table from `path`.'''
        self.weights = pickle.load(open(path))
class PerceptronTagger(object):
    '''Greedy Averaged Perceptron tagger'''
    # Default location of the pickled (weights, tagdict, classes) triple.
    model_loc = os.path.join(os.path.dirname(__file__), 'models/tagger.pickle')
    def __init__(self, classes=None, load=True):
        # Frequent, unambiguous words bypass the model via this lookup table.
        self.tagdict = {}
        if classes:
            self.classes = classes
        else:
            self.classes = set()
        self.model = Perceptron(self.classes)
        if load:
            self.load(PerceptronTagger.model_loc)
    def tag(self, words, tokenize=True):
        '''Greedily tag a word sequence; `tokenize` is unused here.'''
        prev, prev2 = START
        tags = DefaultList('')
        context = START + [self._normalize(w) for w in words] + END
        for i, word in enumerate(words):
            # Use the unambiguous-word shortcut when possible.
            tag = self.tagdict.get(word)
            if not tag:
                features = self._get_features(i, word, context, prev, prev2)
                tag = self.model.predict(features)
            tags.append(tag)
            prev2 = prev; prev = tag
        return tags
    def start_training(self, sentences):
        '''Initialise the tag dictionary and a fresh model before training.'''
        self._make_tagdict(sentences)
        self.model = Perceptron(self.classes)
    """ UNUSED : THERE IS NO end_training here...
    def train(self, sentences, save_loc=None, nr_iter=5):
        '''Train a model from sentences, and save it at save_loc. nr_iter
        controls the number of Perceptron training iterations.'''
        self.start_training(sentences)
        for iter_ in range(nr_iter):
            for words, tags in sentences:
                self.train_one(words, tags)
            random.shuffle(sentences)
        self.end_training(save_loc)
    """
    def save(self):
        # Pickle as a binary file
        pickle.dump((self.model.weights, self.tagdict, self.classes),
                    open(PerceptronTagger.model_loc, 'wb'), -1)
    def train_one(self, words, tags):
        '''One online training pass over a single (words, tags) sentence.'''
        prev, prev2 = START
        context = START + [self._normalize(w) for w in words] + END
        for i, word in enumerate(words):
            guess = self.tagdict.get(word)
            if not guess:
                feats = self._get_features(i, word, context, prev, prev2)
                guess = self.model.predict(feats)
                self.model.update(tags[i], guess, feats)
            prev2 = prev; prev = guess
    def load(self, loc):
        '''Load pickled weights, tag dictionary and class set from `loc`.'''
        w_td_c = pickle.load(open(loc, 'rb'))
        self.model.weights, self.tagdict, self.classes = w_td_c
        self.model.classes = self.classes
    def _normalize(self, word):
        # Collapse sparse word shapes into coarse placeholder tokens.
        if '-' in word and word[0] != '-':
            return '!HYPHEN'
        elif word.isdigit() and len(word) == 4:
            return '!YEAR'
        elif word[0].isdigit():
            return '!DIGITS'
        else:
            return word.lower()
    def _get_features(self, i, word, context, prev, prev2):
        '''Map tokens into a feature representation, implemented as a
        {hashable: float} dict. If the features change, a new model must be
        trained.'''
        def add(name, *args):
            features[' '.join((name,) + tuple(args))] += 1
        # context is padded with START, so shift the index accordingly.
        i += len(START)
        features = defaultdict(int)
        # It's useful to have a constant feature, which acts sort of like a prior
        add('bias')
        add('i suffix', word[-3:])
        add('i pref1', word[0])
        add('i-1 tag', prev)
        add('i-2 tag', prev2)
        add('i tag+i-2 tag', prev, prev2)
        add('i word', context[i])
        add('i-1 tag+i word', prev, context[i])
        add('i-1 word', context[i-1])
        add('i-1 suffix', context[i-1][-3:])
        add('i-2 word', context[i-2])
        add('i+1 word', context[i+1])
        add('i+1 suffix', context[i+1][-3:])
        add('i+2 word', context[i+2])
        return features
    def _make_tagdict(self, sentences):
        '''Make a tag dictionary for single-tag words.'''
        counts = defaultdict(lambda: defaultdict(int))
        for sent in sentences:
            #print type(sent[1])
            for word, tag in zip(sent[0], sent[1]):
                #print " %s : %s" % (word, tag, )
                counts[word][tag] += 1
                self.classes.add(tag)
        freq_thresh = 20
        ambiguity_thresh = 0.97
        for word, tag_freqs in counts.items():
            tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
            n = sum(tag_freqs.values())
            # Don't add rare words to the tag dictionary
            # Only add quite unambiguous words
            if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
                self.tagdict[word] = tag
                print "_make_tagdict added : %10s -> %10s" % (word, tag, )
def _pc(n, d):
return (float(n) / d) * 100
def train_tagger(parser, sentences, nr_iter):
    """Train only the tagger for nr_iter shuffled passes over the corpus."""
    parser.tagger.start_training(sentences)
    for itn in range(nr_iter):
        random.shuffle(sentences)
        for words, gold_tags, gold_parse in sentences:
            parser.tagger.train_one(words, gold_tags)
    print 'Averaging weights'
    # NOTE(review): this averages the *parser* model, not the tagger model
    # (parser.tagger.model) -- confirm which was intended.
    parser.model.average_weights()
def train(parser, sentences, nr_iter):
    """Joint training loop: train the parser every iteration and the tagger
    for the first five, printing per-iteration head accuracy."""
    parser.tagger.start_training(sentences)
    for itn in range(nr_iter):
        corr = 0; total = 0
        random.shuffle(sentences)
        #for words, gold_tags, gold_parse, gold_label in sentences:
        for words, gold_tags, gold_parse in sentences:
            corr += parser.train_one(itn, words, gold_tags, gold_parse)
            if itn < 5:
                parser.tagger.train_one(words, gold_tags)
            total += len(words)
        print itn, '%.3f' % (float(corr) / float(total))
        # Freeze the tagger after its final training iteration.
        if itn == 4: ## Why now?
            parser.tagger.model.average_weights()
    print 'Averaging weights'
    parser.model.average_weights()
def read_pos(loc):
    """Yield a padded (words, tags) pair per non-blank line of a word/tag file.

    Tokens are `word/tag`; the split is on the last '/' so words may contain
    slashes.
    """
    for line in open(loc):
        if not line.strip():
            continue
        words = DefaultList('')
        tags = DefaultList('')
        for token in line.split():
            if not token:
                continue
            word, tag = token.rsplit('/', 1)
            #words.append(normalize(word))
            words.append(word)
            tags.append(tag)
        pad_tokens(words); pad_tokens(tags)
        yield words, tags
def read_conll(loc):
    """Yield (words, tags, heads, labels) per blank-line-separated sentence.

    heads/labels are offset by a leading None to align with the <start> pad;
    a head of '-1' (root) is remapped to the index of the trailing ROOT token.
    """
    print "read_conll(%s)" % (loc, )
    for sent_str in open(loc).read().strip().split('\n\n'):
        lines = [line.split() for line in sent_str.split('\n')]
        words = DefaultList('')
        tags = DefaultList('')
        heads = [None]
        labels = [None]
        for i, (word, pos, head, label) in enumerate(lines):
            #print "%d = %s" % (i, word)
            words.append(intern(word))
            #words.append(intern(normalize(word)))
            tags.append(intern(pos))
            heads.append(int(head) + 1 if head != '-1' else len(lines) + 1)
            labels.append(label)
        pad_tokens(words)
        pad_tokens(tags)
        yield words, tags, heads, labels
def read_conll_mdda(loc):
    """Yield (words, tags, heads) per sentence of a 3-column (word pos head)
    file.  Unlike read_conll, head indices are taken as-is, and a head of
    '0' (root) is remapped to the index of the trailing ROOT token.
    """
    print "read_conll_mdda(%s)" % (loc, )
    for sent_str in open(loc).read().strip().split('\n\n'):
        lines = [line.split() for line in sent_str.split('\n')]
        words = DefaultList('')
        tags = DefaultList('')
        heads = [None]
        for i, (word, pos, head) in enumerate(lines):
            #print "%d = %s" % (i, word)
            words.append(intern(word))
            #words.append(intern(normalize(word)))
            tags.append(intern(pos))
            heads.append(int(head) if head != '0' else len(lines) + 1) # mdda : don't increment our file...
        pad_tokens(words)
        pad_tokens(tags)
        #print "END OF SENTENCE"
        yield words, tags, heads
def pad_tokens(tokens):
    """Frame the token list in place with the '<start>' and 'ROOT' sentinels."""
    tokens[:0] = ['<start>']
    tokens += ['ROOT']
def main_orig(model_dir, train_loc, heldout_in, heldout_gold):
    """Train a parser on CoNLL data, save it, then report unlabelled head
    accuracy (punctuation excluded) and parse time on the held-out set."""
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    input_sents = list(read_pos(heldout_in))
    parser = Parser(load=False)
    sentences = list(read_conll(train_loc))
    train(parser, sentences, nr_iter=15)
    parser.save()
    c = 0
    t = 0
    gold_sents = list(read_conll(heldout_gold))
    t1 = time.time()
    for (words, tags), (_, _, gold_heads, gold_labels) in zip(input_sents, gold_sents):
        _, heads = parser.parse(words)
        # Skip the <start>/ROOT padding at the ends when scoring.
        for i, w in list(enumerate(words))[1:-1]:
            # Punctuation is conventionally excluded from evaluation.
            if gold_labels[i] in ('P', 'punct'):
                continue
            if heads[i] == gold_heads[i]:
                c += 1
            t += 1
    t2 = time.time()
    print 'Parsing took %0.3f ms' % ((t2-t1)*1000.0)
    print c, t, float(c)/t
def learn_mdda(model_dir, train_loc, load_if_exists=False):
    """Train the parser on every *.dp file found directly under train_loc."""
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    random.seed(04) # Has some successes, the failure on assert(gold_moves)
    parser = Parser(load=load_if_exists)
    sentences = list()
    for f in [f for f in os.listdir(train_loc) if os.path.isfile(os.path.join(train_loc, f)) and f.endswith(".dp")]:
        sentences.extend(list(read_conll_mdda(os.path.join(train_loc, f))))
        #break # Just 1 set of sentences to start
    #print sentences
    #train_tagger(parser, sentences, nr_iter=10)
    train(parser, sentences, nr_iter=15)
    #parser.save()
    return
    # NOTE(review): everything below this return is unreachable and also
    # references undefined names (input_sents, heldout_gold) -- kept as-is.
    #input_sents = list(read_pos(heldout_in))
    c = 0
    t = 0
    gold_sents = list(read_conll(heldout_gold))
    t1 = time.time()
    for (words, tags), (_, _, gold_heads, gold_labels) in zip(input_sents, gold_sents):
        _, heads = parser.parse(words)
        for i, w in list(enumerate(words))[1:-1]:
            if gold_labels[i] in ('P', 'punct'):
                continue
            if heads[i] == gold_heads[i]:
                c += 1
            t += 1
    t2 = time.time()
    print 'Parsing took %0.3f ms' % ((t2-t1)*1000.0)
    print c, t, float(c)/t
def sentence_to_words(sentence):
    """Crude tokenizer: space-pad punctuation/numbers, then split on whitespace.

    Returns a DefaultList('') of token strings.
    """
    s = sentence
    # Protect "..." so the '.' padding below does not split it apart.
    s = s.replace("...", " #ELIPSIS# ")
    s = s.replace(",", " , ").replace(".", " . ").replace(":", " : ").replace(";", " ; ")
    s = s.replace("$", " $ ")
    # Space-pad numbers (optionally with a decimal part).
    s = re.sub(r"(\d+\.?\d*)", r" \1 ", s)
    s = s.replace("\"", " \" ").replace("''", " '' ").replace("``", " `` ")
    s = s.replace(" #ELIPSIS# ", " ... ")
    # Detach common English clitics.
    s = s.replace("'t", " 't ").replace("'m", " 'm ").replace("'ve", " 've ").replace("'d", " 'd ")
    words = DefaultList('')
    for w in s.split(): # On all whitespace
        words.append(w)
    return words
"""
## Redshift run parameters (for hints)
./scripts/train.py -x zhang+stack -k 8 -p ~/data/stanford/train.conll ~/data/parsers/tmp
./scripts/parse.py ~/data/parsers/tmp ~/data/stanford/devi.txt /tmp/parse/
./scripts/evaluate.py /tmp/parse/parses ~/data/stanford/dev.conll
"""
if __name__ == '__main__':
    #main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
    # Experiment toggles: flip the if False/if True blocks to select a run.
    if False :
        t1 = time.time()
        learn_mdda("models", "/home/andrewsm/nltk_data/corpora/dependency_treebank/")
        t2 = time.time()
        print 'Tagger Learning took %0.3f s' % ((t2-t1)*1.0)
    if True :
        t1 = time.time()
        learn_mdda("models", "/home/andrewsm/nltk_data/corpora/dependency_treebank/")
        t2 = time.time()
        print 'Combined Learning took %0.3f s' % ((t2-t1)*1.0)
        # But each tagger learning took ~ 7.4sec
        print 'Dependecy Learning (without tagger) the took %0.3f s' % ((t2-t1)*1.0 - 5*7.4)
    if False :
        # NOTE(review): several adjacent string literals below lack a trailing
        # comma, so implicit concatenation silently merges them into a single
        # list element -- confirm whether that is intended.
        sentences = [
            "Sorry for taking a little longer than just the weekend : Though I think I've now got a good handle on the problem, the given solutions, and have realistic ideas about what you can expect from an implementation...",
            "I've attached a ZIP of your originals, plus some derived entities - which I extracted using open source (non-commercial) software that was trained on 'generic' documents. ",
            "FWIW, these appear to be Gnosis-level results.",
            "Having dug into the area a little, I'm convinced that the greatest gains will come from a system that learns as it goes (and will therefore adapt to your specific datasets). "
            "For instance, your Singapore data is very comprehensive, with 'gold standard' entities and relationship already extracted from it. ",
            "So for new documents, the system would have a lot of additional data to hone its 'hypotheses' on, and should be expected to do pretty well.",
            "For the new Malaysian data (for example), a generic machine learner won't already know that 'Dato'' is a significant marker for Person-hood, but would quickly be able to figure it out once a few examples had been tagged expertly.",
            "Similarly, the date/period extraction tasks would be improved quickly, which are plainly more important for your work than for 'generic' documents. "
            "Thus, a cooperative system could be expected to rise fairly quickly to Invoxis-level quality, and grow dynamically beyond that.",
            "Does it make sense for me to put together some notes in a quick presentation (to send to you ~tonight) - and then to follow up with a quick sit-down sometime soon? ",
            "On one hand : I don't have anything specific to sell you.",
            "On the other hand : these systems are not a one-size-fits-all kind of thing.",
            "Would you be free to meet up and chat some time the week after next? ",
            "Might be easier if we have a face to face informal discussion, and save you the need to put together a presentation."
            "As discussed here is a sample set of docs for you. ",
            "The excel file named POC Sample includes a starting example that we gave to the vendor for them to understand what detail and level of structure we need in our output.",
            "Ding Zhigang (the brother of CEO - Ms Ding Zhiying) is our Non-Executive Director and Non-Executive Chairman and was appointed on 8 August 2008. ",
            "He oversees the Group's overall business strategies for future development. ",
            "From 2005 to the present, he is the general manager of Wuxi Dadi Real Estate Co. ",
            "He was the president of Wuxi Xin-Dingqiu Real Estate Co. from June 2002 to 2005. ",
            "Between 1999 and 2001, he was the vice-president of Jiangsu Dingqiu Industrial Co. Ltd. ",
            "Between 1995 and 1999, he was the general manager of China Trading Resource Inc. in the USA.",
            "Ding Zhiying is our CEO and was appointed as Executive Director on 20 April 2004. ",
            "She oversees the day-to-day management and operations of the Group as well as the sales and marketing of the Group. ",
            "She is also responsible for the Group's overall business strategies and policies. ",
            "Prior to joining the Group, Ms Ding was the general manager of Jiangsu Xindingqiu between August 2001 and February 2004. ",
            "Between 1994 and 2000, Ms Ding worked in the Shenzhen office of Yixing Silk-Linen Factory and was in charge of regional sales. ",
            "Prior to that, she was in charge of finance in Yixing Taihua Silk Weaving Factory from 1990 to 1993. ",
            "She obtained an accounting certificate from the Nanjing University, Adult Educational Institute in December 1991.",
        ]
        parser = Parser(load=True)
        for s in sentences:
            words = sentence_to_words(s)
            pad_tokens(words)
            tags, structure = parser.parse(words)
            for (i, (w,t,s)) in enumerate(zip(words, tags, structure)):
                #print i, " : ", w, " - ", t, " - ", s
                # Heads pointing at the trailing pad are the sentence root.
                if s>=len(words)-1: s='ROOT'
                print "%2d : %20s : %4s - %2s" % (i+0, w, t, s)
| {
"repo_name": "mdda/ConciseGreedyDependencyParser-in-Scala",
"path": "python/concise-greedy-dependency-parser.py",
"copies": "1",
"size": "27187",
"license": "mit",
"hash": 6939710676246888000,
"line_mean": 38.2875722543,
"line_max": 241,
"alpha_frac": 0.5703829036,
"autogenerated": false,
"ratio": 3.281076514602945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4351459418202945,
"avg_score": null,
"num_lines": null
} |
'''A simple implementation of a LRU dict that supports discarding by maximum
capacity and by maximum time not being used.'''
from collections import OrderedDict
import time
class LRUDict(OrderedDict):
    '''A dict that can discard least-recently-used items, either by maximum
    capacity or by time to live.

    Internally every value is stored as a ``(value, last_use_time)`` tuple.

    An item's ttl is refreshed (aka the item is considered "used") by direct
    access via [] or get() only, not via iterating over the whole collection
    with items() for example.

    Expired entries only get purged after insertions or changes. Either call
    purge() manually or check an item's ttl with ttl() if that's unacceptable.
    '''

    def __init__(self, *args, maxduration=None, maxsize=128, **kwargs):
        '''Same arguments as OrderedDict with these 2 additions:
        maxduration: number of seconds entries are kept. 0 or None means no timelimit.
        maxsize: maximum number of entries being kept.'''
        super().__init__()
        self.maxduration = maxduration
        self.maxsize = maxsize
        # Bug fix: seed any initial items through the (value, timestamp)
        # wrapping that __setitem__ uses.  Passing them straight to
        # OrderedDict.__init__ stored bare values and broke __getitem__()
        # and purge(), which both unpack (value, time) tuples.
        now = time.time()
        for key, value in OrderedDict(*args, **kwargs).items():
            super().__setitem__(key, (value, now))
        self.purge()

    def purge(self):
        """Removes expired or overflowing entries."""
        if self.maxsize:
            # pop oldest entries until maximum capacity is reached
            overflowing = max(0, len(self) - self.maxsize)
            for _ in range(overflowing):
                self.popitem(last=False)
        if self.maxduration:
            # oldest last-use time still allowed to stay
            limit = time.time() - self.maxduration
            while self:
                # front of the OrderedDict is the least recently used entry
                _, lru = next(iter(super().values()))
                if lru > limit:
                    break
                # otherwise continue to pop the front
                self.popitem(last=False)

    def __getitem__(self, key):
        """Return the value for key and refresh its time to live."""
        value = super().__getitem__(key)[0]
        # update lru time and move the entry to the back (most recent)
        super().__setitem__(key, (value, time.time()))
        self.move_to_end(key)
        return value

    def get(self, key, default=None):
        """Like dict.get(), but refreshes the entry's ttl on a hit."""
        try:
            return self[key]
        except KeyError:
            return default

    def ttl(self, key):
        '''Returns the number of seconds this item will live.
        The item might still be deleted if maxsize is reached.
        The time to live can be negative, as for expired items
        that have not been purged yet.
        Returns None when maxduration is not set.'''
        if self.maxduration:
            lru = super().__getitem__(key)[1]
            return self.maxduration - (time.time() - lru)

    def __setitem__(self, key, value):
        super().__setitem__(key, (value, time.time()))
        self.purge()

    def items(self):
        # strip the timestamps; note: returns a generator, not a dict view
        return ((k, v) for k, (v, _) in super().items())

    def values(self):
        # strip the timestamps; note: returns a generator, not a dict view
        return (v for v, _ in super().values())
def main():
    """Small demo of time-based expiry: watch entries appear and drop out."""
    cache = LRUDict(maxduration=2)
    print(cache)  # empty
    cache["a"] = 5
    time.sleep(1)
    print(cache)  # a
    cache["b"] = 10
    time.sleep(1.5)
    print(cache)  # a, b
    cache["c"] = 20
    print(cache)  # b, c
    print(cache.get("a"))
    print(cache["b"])
    print(cache["c"])
    time.sleep(1)
    cache.purge()
    print(cache)  # c
    for key, value in cache.items():
        print("k:%s, v:%s" % (key, value))
    for value in cache.values():
        print("v:%s" % (value, ))

if __name__ == "__main__":
    main()
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/580645_LRU_dictionary/recipe-580645.py",
"copies": "2",
"size": "3488",
"license": "mit",
"hash": -4153105808697327600,
"line_mean": 32.5384615385,
"line_max": 86,
"alpha_frac": 0.5688073394,
"autogenerated": false,
"ratio": 4.065268065268065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5634075404668065,
"avg_score": null,
"num_lines": null
} |
"""A simple implementation of the quick hull algorithm.
Usually you should only need the L{qhull3d} function, although the module
contains some potentially useful helper functions as well.
Examples
========
Tetrahedron
-----------
>>> import random
>>> tetrahedron = [(0,0,0),(1,0,0),(0,1,0),(0,0,1)]
>>> for i in range(200):
... alpha = random.random()
... beta = random.random()
... gamma = 1 - alpha - beta
... if gamma >= 0:
... tetrahedron.append((alpha, beta, gamma))
>>> verts, triangles = qhull3d(tetrahedron)
>>> (0,0,0) in verts
True
>>> (1,0,0) in verts
True
>>> (0,1,0) in verts
True
>>> (0,0,1) in verts
True
>>> len(verts)
4
>>> len(triangles)
4
A double pyramid polyhedron
---------------------------
>>> poly = [(2,0,0),(0,2,0),(-2,0,0),(0,-2,0),(0,0,2),(0,0,-2)]
>>> vertices, triangles = qhull3d(poly)
>>> len(vertices)
6
>>> len(triangles)
8
>>> for triangle in triangles: # check orientation relative to origin
... verts = [ vertices[i] for i in triangle ]
... assert(vecDotProduct(vecCrossProduct(*verts[:2]), verts[2]) == 8)
A pyramid
---------
>>> verts, triangles = qhull3d([(0,0,0),(1,0,0),(0,1,0),(1,1,0),(0.5,0.5,1)])
>>> (0,0,0) in verts
True
>>> (1,0,0) in verts
True
>>> (0,1,0) in verts
True
>>> (1,1,0) in verts
True
>>> len(verts)
5
>>> len(triangles)
6
The unit cube
-------------
>>> import random
>>> cube = [(0,0,0),(0,0,1),(0,1,0),(1,0,0),(0,1,1),(1,0,1),(1,1,0),(1,1,1)]
>>> for i in range(200):
... cube.append((random.random(), random.random(), random.random()))
>>> verts, triangles = qhull3d(cube)
>>> len(triangles) # 6 faces, written as 12 triangles
12
>>> len(verts)
8
A degenerate shape: the unit square
-----------------------------------
>>> import random
>>> plane = [(0,0,0),(1,0,0),(0,1,0),(1,1,0)]
>>> for i in range(200):
... plane.append((random.random(), random.random(), 0))
>>> verts, triangles = qhull3d(plane)
>>> len(verts)
4
>>> len(triangles)
2
A random shape
--------------
>>> import random
>>> shape = []
>>> for i in range(2000):
... vert = (random.random(), random.random(), random.random())
... shape.append(vert)
>>> verts, triangles = qhull3d(shape)
Precision
---------
>>> plane = [(0,0,0),(1,0,0),(0,1,0),(1,1,0),(1.001, 0.001, 0)]
>>> verts, triangles = qhull3d(plane, precision=0.1)
>>> len(verts)
4
>>> len(triangles)
2
"""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright (c) 2007-2012, Python File Format Interface
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Python File Format Interface
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
from pyffi.utils.mathutils import *
import operator
# adapted from
# http://en.literateprograms.org/Quickhull_(Python,_arrays)
def qdome2d(vertices, base, normal, precision = 0.0001):
    """Build a convex dome from C{vertices} on top of the two C{base} vertices,
    in the plane with normal C{normal}. Helper for L{qhull2d}; should usually
    not be called directly.

    :param vertices: The vertices to construct the dome from.
    :param base: Two vertices that serve as a base for the dome.
    :param normal: Orientation of the projection plane used for calculating
        distances.
    :param precision: Distance used to decide whether points lie outside of
        the hull or not.
    :return: A list of vertices that make up a fan of the dome."""
    vert0, vert1 = base
    # signed distance of each vertex from the base edge, measured within the
    # projection plane; keep only vertices strictly outside the edge
    edge_normal = vecCrossProduct(normal, vecSub(vert1, vert0))
    outer = []
    for vert in vertices:
        dist = vecDotProduct(edge_normal, vecSub(vert, vert0))
        if dist > precision:
            outer.append((dist, vert))
    if not outer:
        return base
    # recurse on both sides of the furthest vertex; drop the duplicated
    # pivot vertex when gluing the two sub-fans together
    pivot = max(outer)[1]
    outer_verts = [vert for _, vert in outer]
    left_fan = qdome2d(outer_verts, [vert0, pivot], normal, precision)
    right_fan = qdome2d(outer_verts, [pivot, vert1], normal, precision)
    return left_fan + right_fan[1:]
def qhull2d(vertices, normal, precision = 0.0001):
    """Simple implementation of the 2d quickhull algorithm in 3 dimensions for
    vertices viewed from the direction of C{normal}.
    Returns a fan of vertices that make up the surface. Called by
    L{qhull3d} to convexify coplanar vertices.

    >>> import random
    >>> import math
    >>> plane = [(0,0,0),(1,0,0),(0,1,0),(1,1,0)]
    >>> for i in range(200):
    ...     plane.append((random.random(), random.random(), 0))
    >>> verts = qhull2d(plane, (0,0,1))
    >>> len(verts)
    4
    >>> disc = []
    >>> for i in range(50):
    ...     theta = (2 * math.pi * i) / 50
    ...     disc.append((0, math.sin(theta), math.cos(theta)))
    >>> verts = qhull2d(disc, (1,0,0))
    >>> len(verts)
    50
    >>> for i in range(400):
    ...     disc.append((0, 1.4 * random.random() - 0.7, 1.4 * random.random() - 0.7))
    >>> verts = qhull2d(disc, (1,0,0))
    >>> len(verts)
    50
    >>> dist = 2 * math.pi / 50
    >>> for i in range(len(verts) - 1):
    ...     assert(abs(vecDistance(verts[i], verts[i+1]) - dist) < 0.001)

    :param vertices: The vertices to construct the hull from.
    :param normal: Orientation of the projection plane used for calculating
        distances.
    :param precision: Distance used to decide whether points lie outside of
        the hull or not.
    :return: A list of vertices that make up a fan of extreme points.
    """
    base = basesimplex3d(vertices, precision)
    if len(base) < 2:
        # degenerate: all vertices coincide (or there are none)
        return base
    vert0, vert1 = base[0], base[1]
    # build an upper and a lower dome on the base edge; drop the shared
    # endpoints of the lower dome when gluing the two fans together
    upper = qdome2d(vertices, [vert0, vert1], normal, precision)
    lower = qdome2d(vertices, [vert1, vert0], normal, precision)
    return upper + lower[1:-1]
def basesimplex3d(vertices, precision = 0.0001):
    """Find four extreme points, to be used as a starting base for the
    quick hull algorithm L{qhull3d}.

    The algorithm tries to find four points that are
    as far apart as possible, because that speeds up the quick hull
    algorithm. The vertices are ordered so their signed volume is positive.
    If the volume is zero up to C{precision} then only three vertices are
    returned. If the vertices are colinear up to C{precision} then only two
    vertices are returned. Finally, if the vertices are equal up to
    C{precision} then just one vertex is returned.

    >>> import random
    >>> cube = [(0,0,0),(0,0,1),(0,1,0),(1,0,0),(0,1,1),(1,0,1),(1,1,0),(1,1,1)]
    >>> for i in range(200):
    ...     cube.append((random.random(), random.random(), random.random()))
    >>> base = basesimplex3d(cube)
    >>> len(base)
    4
    >>> (0,0,0) in base
    True
    >>> (1,1,1) in base
    True

    :param vertices: The vertices to construct extreme points from.
    :param precision: Distance used to decide whether points coincide,
        are colinear, or coplanar.
    :return: A list of one, two, three, or four vertices, depending on the
        configuration of the vertices.
    """
    # rank the three coordinate axes by the spread of the vertices along them
    # NOTE(review): sorted() is ascending, so extents[0] is the axis with the
    # *smallest* spread -- the original comment claimed the largest; behavior
    # is kept identical here, but the intent may have been reverse=True.
    extents = sorted(range(3),
                     key=lambda axis: (max(vert[axis] for vert in vertices)
                                       - min(vert[axis] for vert in vertices)))
    # minimize/maximize vertices lexicographically along those axes
    axis_key = operator.itemgetter(*extents)
    vert0 = min(vertices, key=axis_key)
    vert1 = max(vertices, key=axis_key)
    # degenerate: all vertices coincide
    if vecDistance(vert0, vert1) < precision:
        return [vert0]
    # third extreme point: the one furthest from the vert0-vert1 axis
    vert2 = max(vertices,
                key=lambda vert: vecDistanceAxis((vert0, vert1), vert))
    # degenerate: all vertices are colinear
    if vecDistanceAxis((vert0, vert1), vert2) < precision:
        return [vert0, vert1]
    # fourth extreme point: furthest from the vert0-vert1-vert2 triangle,
    # on either side of it
    vert3 = max(vertices,
                key=lambda vert: abs(vecDistanceTriangle((vert0, vert1, vert2),
                                                         vert)))
    # order the simplex so its signed volume is positive; detect coplanarity
    orientation = vecDistanceTriangle((vert0, vert1, vert2), vert3)
    if orientation > precision:
        return [vert0, vert1, vert2, vert3]
    elif orientation < -precision:
        return [vert1, vert0, vert2, vert3]
    else:
        # coplanar
        return [vert0, vert1, vert2]
def qhull3d(vertices, precision = 0.0001, verbose = False):
    """Return the triangles making up the convex hull of C{vertices}.
    Considers distances less than C{precision} to be zero (useful to simplify
    the hull of a complex mesh, at the expense of exactness of the hull).

    :param vertices: The vertices to find the hull of.
    :param precision: Distance used to decide whether points lie outside of
        the hull or not. Larger numbers mean fewer triangles, but some vertices
        may then end up outside the hull, at a distance of no more than
        C{precision}.
    :param verbose: Print information about what the algorithm is doing. Only
        useful for debugging.
    :return: A list containing the extreme points of C{vertices}, and
        a list of triangle indices containing the triangles that connect
        all extreme points.
    """
    # find a simplex to start from
    hull_vertices = basesimplex3d(vertices, precision)
    # handle degenerate cases
    if len(hull_vertices) == 3:
        # coplanar: fall back to the 2d hull in the plane of the simplex
        hull_vertices = qhull2d(vertices, vecNormal(*hull_vertices), precision)
        return hull_vertices, [ (0, i+1, i+2)
                                for i in range(len(hull_vertices) - 2) ]
    elif len(hull_vertices) <= 2:
        # colinear or singular
        # no triangles for these cases
        return hull_vertices, []
    # construct list of triangles of this simplex
    # NOTE: each triangle is a tuple of three vertex *coordinates*; they are
    # only remapped to indices into hull_vertices at the very end
    hull_triangles = set([ operator.itemgetter(i,j,k)(hull_vertices)
                           for i, j, k in ((1,0,2), (0,1,3), (0,3,2), (3,1,2)) ])
    if verbose:
        print("starting set", hull_vertices)
    # construct list of outer vertices for each triangle
    # (vertices strictly outside that face, paired with their distance)
    outer_vertices = {}
    for triangle in hull_triangles:
        outer = \
            [ (dist, vert)
              for dist, vert
              in zip( ( vecDistanceTriangle(triangle, vert)
                        for vert in vertices ),
                      vertices )
              if dist > precision ]
        if outer:
            outer_vertices[triangle] = outer
    # as long as there are triangles with outer vertices
    while outer_vertices:
        # grab a triangle and its outer vertices
        tmp_iter = iter(outer_vertices.items())
        triangle, outer = next(tmp_iter) # tmp_iter trick to make 2to3 work
        # calculate pivot point: the outer vertex furthest from the triangle
        pivot = max(outer)[1]
        if verbose:
            print("pivot", pivot)
        # add it to the list of extreme vertices
        hull_vertices.append(pivot)
        # and update the list of triangles:
        # 1. calculate visibility of triangles to pivot point
        visibility = [ vecDistanceTriangle(othertriangle, pivot) > precision
                       for othertriangle in outer_vertices.keys() ]
        # 2. get list of visible triangles
        visible_triangles = [ othertriangle
                              for othertriangle, visible
                              in zip(iter(outer_vertices.keys()), visibility)
                              if visible ]
        # 3. find all edges of visible triangles
        visible_edges = []
        for visible_triangle in visible_triangles:
            visible_edges += [operator.itemgetter(i,j)(visible_triangle)
                              for i, j in ((0,1),(1,2),(2,0))]
        if verbose:
            print("visible edges", visible_edges)
        # 4. construct horizon: edges that are not shared with another triangle
        horizon_edges = [ edge for edge in visible_edges
                          if not tuple(reversed(edge)) in visible_edges ]
        # 5. remove visible triangles from list
        # this puts a hole inside the triangle list
        # NOTE: visible_outer gathers candidate vertices from *all* triangles
        # with outer sets (a superset of those outside the visible faces);
        # the distance test in step 6 filters out the irrelevant ones
        visible_outer = set()
        for outer_verts in outer_vertices.values():
            visible_outer |= set(map(operator.itemgetter(1), outer_verts))
        for triangle in visible_triangles:
            if verbose:
                print("removing", triangle)
            hull_triangles.remove(triangle)
            del outer_vertices[triangle]
        # 6. close triangle list by adding cone from horizon to pivot
        # also update the outer triangle list as we go
        for edge in horizon_edges:
            newtriangle = edge + ( pivot, )
            newouter = \
                [ (dist, vert)
                  for dist, vert in zip( ( vecDistanceTriangle(newtriangle,
                                                               vert)
                                           for vert in visible_outer ),
                                         visible_outer )
                  if dist > precision ]
            hull_triangles.add(newtriangle)
            if newouter:
                outer_vertices[newtriangle] = newouter
            if verbose:
                print("adding", newtriangle, newouter)
    # no triangle has outer vertices anymore
    # so the convex hull is complete!
    # remap the triangles to indices that point into hull_vertices
    # (index() is an O(n) lookup per vertex; fine for typical hull sizes)
    return hull_vertices, [ tuple(hull_vertices.index(vert)
                                  for vert in triangle)
                            for triangle in hull_triangles ]
if __name__ == "__main__":
    # run the doctests embedded in the module and function docstrings
    import doctest
    doctest.testmod()
| {
"repo_name": "griest024/PokyrimTools",
"path": "pyffi-develop/pyffi/utils/quickhull.py",
"copies": "1",
"size": "15381",
"license": "mit",
"hash": -7011242747924011000,
"line_mean": 36.2421307506,
"line_max": 86,
"alpha_frac": 0.6079578701,
"autogenerated": false,
"ratio": 3.6858375269590224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47937953970590225,
"avg_score": null,
"num_lines": null
} |
'''A simple implementation of the specification pattern'''
class CompositeQuery(object):
    """Base class for query specifications.

    Provides factory methods for combining queries, plus the corresponding
    operator overloads (``&``, ``|``, ``~``)."""

    def conjoin(self, other):
        """Combine with another query into a ConjunctionQuery (AND)."""
        return ConjunctionQuery(self, other)

    def disjoin(self, other):
        """Combine with another query into a DisjunctionQuery (OR)."""
        return DisjunctionQuery(self, other)

    def negate(self):
        """Wrap this query in a NegationQuery (NOT)."""
        return NegationQuery(self)

    def __and__(self, other):
        # `a & b` delegates to the factory method
        return self.conjoin(other)

    def __or__(self, other):
        # `a | b` delegates to the factory method
        return self.disjoin(other)

    def __invert__(self):
        # `~a` delegates to the factory method
        return self.negate()
class ConjunctionQuery(CompositeQuery):
    """Represents a logical conjunction (AND) of two sub-queries."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def to_tuple(self):
        """Serialize as a nested ("and", left, right) tuple."""
        return "and", self.left.to_tuple(), self.right.to_tuple()
class DisjunctionQuery(CompositeQuery):
    """Represents a logical disjunction (OR) of two sub-queries."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def to_tuple(self):
        """Serialize as a nested ("or", left, right) tuple."""
        return "or", self.left.to_tuple(), self.right.to_tuple()
class NegationQuery(CompositeQuery):
    """Represents a logical negation (NOT) of one sub-query."""

    def __init__(self, query):
        self.query = query

    def to_tuple(self):
        """Serialize as a nested ("not", query) tuple."""
        return "not", self.query.to_tuple()
"repo_name": "myemma/EmmaPython",
"path": "emma/query/spec.py",
"copies": "1",
"size": "1390",
"license": "mit",
"hash": 3574405324476459500,
"line_mean": 26.82,
"line_max": 65,
"alpha_frac": 0.6215827338,
"autogenerated": false,
"ratio": 3.9942528735632186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0032573355540476633,
"num_lines": 50
} |
# a simple implementation to find the longest repeat in a string
import collections
import itertools
import sys
def extend(a, b, k, s, mm=0):
    """Extend the exact k-mer match at positions a and b of s outwards,
    allowing up to mm mismatches on each side.

    The original body did not parse (it returned the undefined `a[:i]`,
    and with mm=0 its break condition could never fire); this is a
    reconstruction of the documented intent.

    :param a, b: start positions of two occurrences of the same k-mer
    :param k: length of the exact seed match
    :param s: the full string
    :param mm: maximum number of mismatches tolerated on each side
    :return: the extended substring, anchored at occurrence a
    """
    n = len(s)
    # extend to the left of the k-mer
    left = 0
    misses = 0
    for offset in range(1, min(a, b) + 1):
        if s[a - offset] != s[b - offset]:
            misses += 1
            if misses > mm:
                break
        left = offset
    # extend to the right, past the k matching characters
    right = 0
    misses = 0
    for offset in range(min(n - a - k, n - b - k)):
        if s[a + k + offset] != s[b + k + offset]:
            misses += 1
            if misses > mm:
                break
        right = offset + 1
    return s[a - left:a + k + right]

# index all k-mers, then try to extend every position pair of each
# repeated k-mer into a longer approximate repeat
def lrs(s, k, mm):
    """Longest repeated substring of s, seeded from exact k-mer matches
    and extended with up to mm mismatches on each side.

    Fixes over the original: missing bracket in the index update, the
    defaultdict now holds lists (the old set was used with .append),
    the k-mer loop no longer skips the final k-mer, and the progress
    print no longer has a stray trailing colon.
    """
    kmers = collections.defaultdict(list)
    print("building kmers for %i..." % len(s))
    for i in range(len(s) - k + 1):
        kmers[s[i:i + k]].append(i)
        if i % 10000 == 0:
            print("%i ..." % i)
    best = ''
    print("%i k-mers" % len(kmers))
    for positions in kmers.values():
        # only k-mers occurring more than once can seed a repeat
        if len(positions) > 1:
            for a, b in itertools.combinations(positions, 2):
                cand = extend(a, b, k, s, mm)
                if len(cand) > len(best):
                    best = cand
                    print("new best: %i" % len(best))
    return best
# build kmers, compare
def lrs_mm(s, mm=0):
    """Unimplemented stub; lrs() above is the working entry point."""
    pass
# Command-line driver: read a FASTA-ish sequence from stdin.
# Fixes over the original: the argument check was `< 2` although
# sys.argv[2] is read (so a single argument crashed), and stdin was
# consumed before the usage check.
if len(sys.argv) < 3:
    print("Usage: %s k mm" % sys.argv[0])
    print("Note: set k to expected-len / mm")
    sys.exit(1)
k = int(sys.argv[1])
mm = int(sys.argv[2])
s = []
for line in sys.stdin:
    if line.startswith('>'):
        # skip FASTA header lines
        continue
    s.append(line.strip())
best = lrs(''.join(s), k, mm)
print(best)
print(len(best))
| {
"repo_name": "supernifty/mgsa",
"path": "mgsa/longest_repeat_mismatches.py",
"copies": "1",
"size": "1533",
"license": "mit",
"hash": -8423077541689592000,
"line_mean": 22.2272727273,
"line_max": 64,
"alpha_frac": 0.5609915199,
"autogenerated": false,
"ratio": 2.8232044198895028,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38841959397895026,
"avg_score": null,
"num_lines": null
} |
# a simple implementation to find the longest repeat in a string
import sys
def lcp(a, b):
    """Return the longest common prefix of sequences a and b.

    Fixes over the original: when one input was a prefix of the other the
    loop finished without a mismatch and `a[:i]` dropped the final
    character; with an empty input `i` was unbound and raised NameError.
    """
    n = min(len(a), len(b))
    for i in range(n):
        if a[i] != b[i]:
            return a[:i]
    # no mismatch: one sequence is a prefix of the other
    return a[:n]
# build suffixes, sort them
def lrs(s):
suffixes = []
total = len(s)
print "building suffixes for %i..." % total
for i in xrange(total):
#suffixes.append( s[i:] )
suffixes.append( buffer( s, i ) )
if i % 10000 == 0:
print i, "..."
print "sorting suffixes..."
suffixes.sort()
print "searching longest..."
best = ''
best_pos = None
for i in xrange(0, total - 1):
cand = lcp(suffixes[i], suffixes[i+1]);
if len(cand) > len(best):
best = cand
best_pos = (total - len(suffixes[i]), total - len(suffixes[i+1]))
print "new best: %i" % len(best)
return best, best_pos
s = []
name = ''
for line in sys.stdin:
if line.startswith('>'):
if len(s) > 0:
best = lrs( ''.join(s) )
print best
print name, len(best)
s = []
name = line[1:]
continue
s.append( line.strip() )
best, best_pos = lrs( ''.join(s) )
print best
print len(best), best_pos
| {
"repo_name": "supernifty/mgsa",
"path": "mgsa/longest_repeat.py",
"copies": "1",
"size": "1116",
"license": "mit",
"hash": 6615417280296163000,
"line_mean": 22.25,
"line_max": 71,
"alpha_frac": 0.5689964158,
"autogenerated": false,
"ratio": 3.032608695652174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41016051114521734,
"avg_score": null,
"num_lines": null
} |
# A simple in-memory stats library
#
# Inspired by statsd: http://statsd.readthedocs.io/en/v3.1/types.html#gauges
import collections
import json
import logging
import numbers
import numpy as np
import os
import six
import threading
import time
logger = logging.getLogger(__name__)
# Unit tags accepted by incr()/gauge() and understood by pretty().
BYTES = 'bytes'
SECONDS = 'seconds'
class Error(Exception):
    """Base exception type for this module."""
    pass
class ExponentialAverage(object):
    """Exponentially-decaying running average.

    Prior samples are discounted by ``decay ** elapsed_seconds``, so the
    average tracks recent data more closely than old data."""

    def __init__(self, decay=0.1):
        self.decay = decay
        self.last_update = None       # wall-clock time of the latest add()
        self.last_data_decay = None   # weight applied to the latest sample
        self._avg = None

    def add(self, data):
        assert isinstance(data, numbers.Number)
        now = time.time()
        if self.last_update is None:
            # first sample: the average is the sample itself
            self._avg = data
            self.last_update = now
            self.last_data_decay = 1
            return
        delta = now - self.last_update
        if delta < 0:
            # Time is allowed to go a little backwards (NTP update, etc)
            logger.warn("Backwards delta value: {}".format(delta))
            # Treat this entry as if it happened with 0 delta
            delta = 0
        if delta != 0:
            self.last_data_decay = (1 - self.decay**delta) * 1/delta
            self._avg = self.decay**delta * self._avg + self.last_data_decay * data
        else:
            # Don't divide by zero; just reuse the last delta. Should stack well
            self._avg += self.last_data_decay * data
        self.last_update = now

    def avg(self):
        """Current decayed average, or None before the first add()."""
        return self._avg
class RunningVariance(object):
    """ Implements Welford's algorithm for computing a running mean
    and standard deviation as described at:
    http://www.johndcook.com/standard_deviation.html

    add() takes a single numeric value (None is ignored).  Unlike the
    docstring of the class this was copied from, mean(), std() and
    meanfull() are *methods*, not properties, to match how the rest of
    this module calls them (e.g. ``stat['std'].mean()``).

    Usage:
    >>> foo = RunningVariance()
    >>> for x in range(100): foo.add(x)
    >>> foo.mean()
    49.5
    >>> round(foo.std(), 10)
    29.0114919759
    """
    def __init__(self):
        self.k = 0  # number of samples folded in
        self.M = 0  # running mean
        self.S = 0  # running sum of squared deviations
    def add(self, x):
        """Fold one sample into the running statistics; None is a no-op."""
        if x is None:
            return
        self.k += 1
        newM = self.M + (x - self.M)*1./self.k
        newS = self.S + (x - self.M)*(x - newM)
        self.M, self.S = newM, newS
    def mean(self):
        """Mean of the samples seen so far (0 if no samples yet)."""
        return self.M
    def meanfull(self):
        """Return (mean, standard error of the mean).

        Bug fix: the original divided the *bound method* self.std by
        sqrt(k), which raised TypeError."""
        return self.mean(), self.std() / np.sqrt(self.k)
    def std(self):
        """Sample standard deviation (0 with fewer than two samples)."""
        if self.k < 2:
            return 0
        return np.sqrt(self.S/(self.k-1))
    def __repr__(self):
        # Bug fix: call the methods; the original formatted the bound
        # method objects themselves.
        return "<Welford: {} +- {}>".format(self.mean(), self.std())
def pretty(d, unit):
    """Format value d according to unit (None, BYTES or SECONDS).

    With unit None the value is returned unchanged; an unknown unit
    raises Error."""
    if unit is None:
        return d
    if unit == BYTES:
        return pretty_bytes(d)
    if unit == SECONDS:
        return pretty_seconds(d)
    raise Error('No such unit: {}'.format(unit))
def pretty_bytes(b):
    """Human-readable byte count using decimal units; None passes through."""
    if b is None:
        return None
    assert isinstance(b, numbers.Number), "Surprising type for data: {} ({!r})".format(type(b), b)
    if b > 1000 * 1000:
        return '{:.0f}MB'.format(b/1000.0/1000.0)
    if b > 1000:
        return '{:.0f}kB'.format(b/1000.0)
    return '{:.0f}B'.format(b)
def pretty_seconds(t):
    """Human-readable duration: microseconds below 1ms, milliseconds below 1s,
    otherwise seconds.  The sign of t is preserved."""
    magnitude = abs(t)
    if magnitude < 0.001:
        return '{:.2f}us'.format(1000*1000*t)
    if magnitude < 1:
        return '{:.2f}ms'.format(1000*t)
    return '{:.2f}s'.format(t)
def thread_id():
    """Identifier of the calling thread."""
    current = threading.current_thread()
    return current.ident
class StackProfile(object):
    """Per-thread stack of named, timed events.

    push() records a start time for an event; pop() reports the elapsed
    time into the wrapped Profile as a timer.  Can also be used as a
    context manager (``with sp.push('name'): ...`` pops on exit)."""

    def __init__(self, profile):
        self.profile = profile
        self.stack_by_thread = {}
        self.lock = threading.Lock()

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.pop()

    def push(self, event):
        """Start timing an event on the calling thread's stack."""
        self._current_stack().append({
            'name': event,
            'start': time.time(),
        })
        return self

    def pop(self):
        """Finish the most recent event and record its duration."""
        entry = self._current_stack().pop()
        with self.profile as txn:
            txn.timing(entry['name'], time.time() - entry['start'])

    def _current_stack(self):
        # Each thread gets its own list, created lazily on first use.
        ident = thread_id()
        try:
            return self.stack_by_thread[ident]
        except KeyError:
            with self.lock:
                # Only current thread should be adding to this entry anyway
                assert ident not in self.stack_by_thread
                stack = self.stack_by_thread[ident] = []
            return stack
class Profile(object):
    """In-memory collector of timers, counters and gauges.

    All mutation happens under an RLock.  Used as a context manager it acts
    as a "transaction": the lock is held and automatic log exports are
    deferred until the with-block exits.  When print_frequency is set,
    an export is logged at most once per print_frequency seconds.
    """
    def __init__(self, print_frequency=None, print_filter=None):
        # print_filter selects which event names appear in the logged
        # export; by default everything is printed
        if print_filter is None:
            print_filter = lambda event: True
        self.lock = threading.RLock()
        self.print_frequency = print_frequency
        self.last_export = None
        self.print_filter = print_filter
        self._in_txn = False
        self.reset()
    def reset(self):
        # drop all accumulated statistics
        self.timers = {}
        self.counters = {}
        self.gauges = {}
    def __enter__(self):
        # begin a "transaction": hold the lock and defer automatic exports
        self.lock.acquire()
        self._in_txn = True
        return self
    def __exit__(self, type, value, tb):
        self._in_txn = False
        self._print_if_needed()
        self.lock.release()
    def timing(self, event, time):
        """Record one duration (in seconds) for the named timer event."""
        assert isinstance(event, six.string_types)
        # return
        with self.lock:
            if event not in self.timers:
                self.timers[event] = {
                    'total': 0,
                    'calls': 0,
                    'std': RunningVariance(),
                }
            self.timers[event]['total'] += time
            self.timers[event]['calls'] += 1
            self.timers[event]['std'].add(time)
            self._print_if_needed()
    def incr(self, event, amount=1, unit=None):
        """Increment the named counter by amount, with an optional unit tag."""
        assert isinstance(event, six.string_types)
        # return
        with self.lock:
            if event not in self.counters:
                self.counters[event] = {
                    'total': 0,
                    'calls': 0,
                    'rate': ExponentialAverage(),
                    'unit': unit,
                    'std': RunningVariance(),
                }
            self.counters[event]['total'] += amount
            self.counters[event]['calls'] += 1
            self.counters[event]['rate'].add(amount)
            self.counters[event]['std'].add(amount)
            self._print_if_needed()
    def gauge(self, event, value, delta=False, unit=None):
        """Set (or, with delta=True, adjust) the named gauge's value."""
        assert isinstance(event, six.string_types)
        with self.lock:
            if event not in self.gauges:
                self.gauges[event] = {
                    'value': 0,
                    'calls': 0,
                    'unit': unit,
                    'std': RunningVariance(),
                }
            if delta:
                self.gauges[event]['value'] += value
            else:
                self.gauges[event]['value'] = value
            self.gauges[event]['calls'] += 1
            self.gauges[event]['std'].add(value)
            self._print_if_needed()
    def _print_if_needed(self):
        """Assumes you hold the lock"""
        # skip while inside a transaction or when periodic printing is off
        if self._in_txn or self.print_frequency is None:
            return
        elif self.last_export is not None and \
             self.last_export + self.print_frequency > time.time():
            return
        self.export()
    def export(self, log=True, reset=True):
        """Snapshot all statistics as a plain dict; optionally log and reset."""
        with self.lock:
            if self.last_export is None:
                self.last_export = time.time()
            # length of the period this snapshot covers
            delta = time.time() - self.last_export
            self.last_export = time.time()
            timers = {}
            for event, stat in self.timers.items():
                timers[event] = {
                    'mean': stat['std'].mean(),
                    'std': stat['std'].std(),
                    'calls': stat['calls'],
                    'unit': 'seconds',
                }
            counters = {}
            for counter, stat in self.counters.items():
                counters[counter] = {
                    'calls': stat['calls'],
                    'std': stat['std'].std(),
                    'mean': stat['std'].mean(),
                    'unit': stat['unit'],
                }
            gauges = {}
            for gauge, stat in self.gauges.items():
                gauges[gauge] = {
                    'value': stat['value'],
                    'calls': stat['calls'],
                    'std': stat['std'].std(),
                    'mean': stat['std'].mean(),
                    'unit': stat['unit'],
                }
            export = {
                'timers': timers,
                'counters': counters,
                'gauges': gauges,
                'metadata': {
                    'period': delta,
                }
            }
            if log:
                self._print_export(export)
            if reset:
                self.reset()
            return export
    def _print_export(self, export):
        """Pretty-print an export dict through the module logger."""
        timers = {}
        for event, stat in sorted(export['timers'].items()):
            if not self.print_filter(event):
                continue
            timers[event] = {
                'mean': pretty_seconds(stat['mean']),
                'std': pretty_seconds(stat['std']),
                'calls': stat['calls'],
            }
        counters = collections.OrderedDict({})
        for counter, stat in sorted(export['counters'].items()):
            if not self.print_filter(counter):
                continue
            unit = stat['unit']
            counters[counter] = {
                'calls': stat['calls'],
                'std': pretty(stat['std'], unit),
                'mean': pretty(stat['mean'], unit),
            }
        gauges = collections.OrderedDict({})
        for gauge, stat in sorted(export['gauges'].items()):
            if not self.print_filter(gauge):
                continue
            unit = stat['unit']
            gauges[gauge] = {
                'value': pretty(stat['value'], unit),
                'calls': stat['calls'],
                'std': pretty(stat['std'], unit),
                'mean': pretty(stat['mean'], unit),
            }
        # A bit of a hack, but we want this time to be as inclusive as
        # possible.
        export['metadata']['export_time'] = time.time() - self.last_export
        # We do the explicit OrderedDict and json.dumps to order
        # keys. Maybe there's a better way?
        logger.info('[pyprofile] period=%s timers=%s counters=%s gauges=%s (export_time=%s)',
                    pretty_seconds(export['metadata']['period']),
                    json.dumps(timers), json.dumps(counters), json.dumps(gauges),
                    pretty_seconds(export['metadata']['export_time']),
        )
# Module-level configuration via environment variables:
#   PYPROFILE_FREQUENCY: seconds between automatic log exports (integer).
#   PYPROFILE_PREFIX: if set, only events starting with it are printed.
print_frequency = os.environ.get('PYPROFILE_FREQUENCY')
if print_frequency is not None:
    print_frequency = int(print_frequency)
print_prefix = os.environ.get('PYPROFILE_PREFIX')
if print_prefix is not None:
    print_filter = lambda event: event.startswith(print_prefix)
else:
    print_filter = None
# Shared singleton profile plus convenience aliases forming the module API.
profile = Profile(print_frequency=print_frequency, print_filter=print_filter)
stack_profile = StackProfile(profile)
push = stack_profile.push
pop = stack_profile.pop
incr = profile.incr
timing = profile.timing
gauge = profile.gauge
export = profile.export
| {
"repo_name": "rht/universe",
"path": "universe/pyprofile/__init__.py",
"copies": "1",
"size": "11881",
"license": "mit",
"hash": 7341790642809417000,
"line_mean": 29.2315521628,
"line_max": 98,
"alpha_frac": 0.5071121959,
"autogenerated": false,
"ratio": 4.019282814614344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002124929672167112,
"num_lines": 393
} |
"""A simple inspector overlay for plots
This module provides the SimpleInspectorOverlay for displaying
information gathered from an inspector tool in a TextGrid. By default
it is configured to work with a SimpleInspectorTool.
The module also provides some helper factory functions for creating text
formatters for dictionary values.
"""
from numpy import array
from traits.api import Any, List, Callable, Enum, Bool
from text_grid_overlay import TextGridOverlay
def basic_formatter(key, decimals):
    """Create a basic '<key>: <value>' formatting function

    The returned callable looks up *key* among its keyword arguments and
    renders the (numeric) value with a fixed number of decimal places.
    Extra keyword arguments are ignored.

    Parameters
    ----------
    key
        The dictionary key to format.
    decimals
        The number of decimal places to show.

    Returns
    -------
    format
        A factory function that takes a dictionary and returns a string.
    """
    prefix = '%s: ' % key
    number_format = '%%.%df' % decimals
    def format(**kwargs):
        return prefix + number_format % kwargs[key]
    return format
def datetime_formatter(key, time_format='%Y/%m/%d %H:%M:%S'):
    """Create a datetime formatting function

    The returned callable looks up *key* among its keyword arguments,
    interprets the value as a POSIX timestamp and renders it as
    '<key>: <formatted local time>'.

    Parameters
    ----------
    key
        The dictionary key to format.  The corresponding value should be a
        timestamp.
    time_format
        A format string suitable for strftime().

    Returns
    -------
    format
        A factory function that takes a dictionary and returns a string.
    """
    from datetime import datetime
    def format(**kwargs):
        when = datetime.fromtimestamp(kwargs[key])
        return '%s: %s' % (key, when.strftime(time_format))
    return format
def time_formatter(key):
    """Create a time formatting function

    Convenience wrapper around datetime_formatter() that renders the
    timestamp stored under *key* in 'HH:MM:SS' form.

    Parameters
    ----------
    key
        The dictionary key to format.  The corresponding value should be a
        timestamp.

    Returns
    -------
    format
        A factory function that takes a dictionary and returns a string.
    """
    return datetime_formatter(key, '%H:%M:%S')
def date_formatter(key):
    """Create a date formatting function

    Convenience wrapper around datetime_formatter() that renders the
    timestamp stored under *key* in 'yyyy/mm/dd' form.

    Parameters
    ----------
    key
        The dictionary key to format.  The corresponding value should be a
        timestamp.

    Returns
    -------
    format
        A factory function that takes a dictionary and returns a string.
    """
    return datetime_formatter(key, '%Y/%m/%d')
class SimpleInspectorOverlay(TextGridOverlay):
    """ Simple inspector overlay for plots

    This is a simple overlay that listens for new_value events on a
    SimpleInspectorTool and displays formatted values in a grid.

    By default this displays the 'x' and 'y' values provided by the
    SimpleInspectorTool, but instances can provide a field_formatters
    trait which is a list of lists of callables which extract values
    from a dictionary and formats them.  Each callable corresponds to a
    cell in the underlying TextGrid component.

    Although by default this works with the SimpleInspectorTool, with
    appropriate field_formatters this class can be used with any inspector
    tool that follows the same API.
    """
    # XXX We should probably refactor this into a BaseInspectorOverlay
    # which handles the visibility and basic event handling, and smaller
    # version of this class which handles inserting values into a text grid

    # the inspector that I am listening to.  This should have a new_value
    # event and a visible trait for me to listen to.
    inspector = Any

    # fields to display: one formatter per cell of the text grid
    field_formatters = List(List(Callable))

    # Anchor the text to the mouse?  (If False, then the text is in one of the
    # corners.)  Use the **align** trait to determine which corner.
    tooltip_mode = Bool(False)

    # The default state of the overlay is visible.
    visible = True

    # Whether the overlay should auto-hide and auto-show based on the
    # tool's location, or whether it should be forced to be hidden or visible.
    visibility = Enum("auto", True, False)

    #########################################################################
    # Traits Handlers
    #########################################################################

    def _field_formatters_default(self):
        # By default show the inspected point's x and y values to two
        # decimal places.
        return [[basic_formatter('x', 2)], [basic_formatter('y', 2)]]

    def _new_value_updated(self, event):
        # ``event`` is the dict of values published by the inspector tool,
        # or None when there is nothing to inspect.
        if event is None:
            # BUG FIX: the original code called ``array()`` (numpy requires
            # at least one argument, so this raised TypeError), assigned
            # ``self.visibility = False`` instead of ``self.visible``
            # (permanently leaving "auto" mode), and then fell through to
            # format ``**None`` below.  Instead, hide the overlay while in
            # "auto" mode and skip the grid update entirely.
            if self.visibility == "auto":
                self.visible = False
            return
        if self.visibility == "auto":
            self.visible = True

        if self.tooltip_mode:
            # Follow the mouse instead of sitting in a fixed corner.
            self.alternate_position = self.inspector.last_mouse_position

        d = event
        self.text_grid.string_array = array([[formatter(**d) for formatter in row]
            for row in self.field_formatters])
        self.text_grid.request_redraw()

    def _visible_changed(self):
        if self.component:
            self.request_redraw()

    def _inspector_changed(self, old, new):
        # Rewire the listeners whenever the inspector tool is swapped out.
        if old:
            old.on_trait_event(self._new_value_updated, 'new_value', remove=True)
            old.on_trait_change(self._tool_visible_changed, "visible", remove=True)
        if new:
            new.on_trait_event(self._new_value_updated, 'new_value')
            new.on_trait_change(self._tool_visible_changed, "visible")
            self._tool_visible_changed()

    def _tool_visible_changed(self):
        # Mirror the tool's visible flag; "auto" is only ever set externally.
        self.visibility = self.inspector.visible
        if self.visibility != "auto":
            self.visible = self.visibility
| {
"repo_name": "ContinuumIO/chaco",
"path": "chaco/overlays/simple_inspector_overlay.py",
"copies": "3",
"size": "6043",
"license": "bsd-3-clause",
"hash": 1583001544072534800,
"line_mean": 30.4739583333,
"line_max": 83,
"alpha_frac": 0.647029621,
"autogenerated": false,
"ratio": 4.540195341848235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6687224962848235,
"avg_score": null,
"num_lines": null
} |
"""A simple interface for executing bytecodes over a Bluetooth serial port.
From the lms2012 source code documentation:
Beside running user programs the VM is able to execute direct commands from
the Communication Module. In fact direct commands are small programs that
consist of regular byte codes and they are executed in parallel with a running
user program. Special care MUST be taken when writing direct commands because
the decision until now is NOT to restrict the use of "dangerous" codes and
constructions (loops in a direct command are allowed).
If a new direct command from the same source is going to be executed an actual
running direct command is terminated.
Because of a small header objects are limited to one VMTHREAD only - SUBCALLs
and BLOCKs are, of course, not possible. This header contains information about
the number of global variables (for response), number of local variables, and
command size.
Direct commands that have data responses can place the data in the global
variable space. The global variable space is equal to the communication
response buffer. The composition of the direct command defines at which
offset the result is placed (global variable 0 is placed at offset 0 in
the buffer).
Offsets in the response buffer (global variables) must be aligned (i.e. 32bit
variable offsets are divisible by 4, 16bit variable offsets are divisible by 2).
All multi-byte words are little endian.
Direct Command bytes:
------------------------------
Byte 0 - 1: Command size
Byte 2 - 3: Message counter
Byte 4: CommandType
Byte 5 - 6: Number of global and local variables (compressed).
Byte 6 Byte 5
76543210 76543210
-------- --------
llllllgg gggggggg
gg gggggggg Global variables [0..MAX_COMMAND_GLOBALS]
llllll Local variables [0..MAX_COMMAND_LOCALS]
Byte 7 - n: Byte codes
Direct Command response Bytes:
------------------------------
Byte 0 - 1: Reply size
Byte 2 - 3: Message counter
Byte 4: ReplyType
Byte 5 - n: Response buffer (global variable values)
"""
import ev3
import message
# Size limits for commands and string buffers.
MAX_CMD_LEN = 1019  # The size of the brick's txBuf is 1024 bytes but
                    # the header requires 5 bytes.
MAX_STR_LEN = 255
MAX_VERSION_STR_LEN = 64  # Buffer size used when reading version strings.
MAX_LOCAL_VARIABLE_BYTES = 0xFFFFFFFF  # NOTE(review): the command header
                                       # packs the local count into 6 bits,
                                       # so this limit looks far too
                                       # generous -- confirm.
MAX_NAME_STR_LEN = 64  # Buffer size used when reading device/mode names.
# Valid argument ranges for motor commands.
MOTOR_MIN_POWER = -100
MOTOR_MAX_POWER = 100
MOTOR_MIN_SPEED = -100
MOTOR_MAX_SPEED = 100
# Daisy-chain layer selectors (master brick vs. first slave).
USB_CHAIN_LAYER_MASTER = 0
USB_CHAIN_LAYER_SLAVE = 1
MOTOR_MIN_RATIO = -200
MOTOR_MAX_RATIO = 200
# Speaker volume range (percent).
MIN_VOLUME = 0
MAX_VOLUME = 100
# Dimensions of the brick's LCD.
LCD_HEIGHT_PIXELS = 128
LCD_WIDTH_PIXELS = 178
class DirectCommandError(Exception):
"""Subclass for reporting errors."""
pass
class CommandType(object):
"""Every System Command must be one of these two types."""
DIRECT_COMMAND_REPLY = 0x00
DIRECT_COMMAND_NO_REPLY = 0x80
class ReplyType(object):
"""Every reply to a System Command must be one of these two types."""
DIRECT_REPLY = 0x02
DIRECT_REPLY_ERROR = 0x04
class OutputPort(object):
"""These can be OR'd together to operate on multiple ports at once."""
PORT_A = 0x01
PORT_B = 0x02
PORT_C = 0x04
PORT_D = 0x08
ALL = (PORT_A | PORT_B | PORT_C | PORT_D)
class InputPort(object):
"""These can be OR'd together to operate on multiple ports at once."""
PORT_1 = 0x00
PORT_2 = 0x01
PORT_3 = 0x02
PORT_4 = 0x03
PORT_A = 0x10
PORT_B = 0x11
PORT_C = 0x12
PORT_D = 0x13
class StopType(object):
"""When an OutputPort is stopped it can be told to brake or coast."""
COAST = 0
BRAKE = 1
class PolarityType(object):
""""""
BACKWARD = -1
TOGGLE = 0
FORWARD = 1
class TouchMode(object):
""""""
TOUCH = 0
BUMPS = 1
class NXTLightMode(object):
""""""
REFLECT = 0
AMBIENT = 1
class NXTSoundMode(object):
""""""
DECIBELS = 0
ADJUSTED_DECIBLES = 1
class NXTColorMode(object):
""""""
REFLECTIVE = 0
AMBIENT = 1
COLOR = 2
GREEN = 3
BLUE = 4
RAW = 5
class NXTUltrasonicMode(object):
""""""
CM = 0
INCHES = 1
class NXTTemperatureMode(object):
""""""
CELSIUS = 0
FAHRENHEIT = 1
class MotorMode(object):
""""""
DEGREES = 0
ROTATIONS = 1
PERCENT = 2
class UltrasonicMode(object):
""""""
CM = 0
INCH = 1
LISTEN = 2
class GyroMode(object):
""""""
ANGLE = 0
RATE = 1
FAS = 2
G_AND_A = 3
class IRMode(object):
""""""
PROXIMITY = 0
SEEK = 1
REMOTE = 2
REMOTE_A = 3
SALT = 4
CALIBRATION = 5
class ColorMode(object):
""""""
RELECTIVE = 0
AMBIENT = 1
COLOR = 2
class ColorSensorColor(object):
"""These are the results that the EV3 color sensor can return when operating
in ColorMode.COLOR.
"""
NONE = 0
BLACK = 1
BLUE = 2
GREEN = 3
YELLOW = 4
RED = 5
WHITE = 6
BROWN = 7
class LEDPattern(object):
"""The brick user interface has several status LEDs."""
OFF = 0
GREEN = 1
RED = 2
ORANGE = 3
FLASHING_GREEN = 4
FLASHING_RED = 5
FLASHING_ORANGE = 6
GREEN_HEARTBEAT = 7
RED_HEARTBEAT = 8
ORANGE_HEARTBEAT = 9
class DeviceType(object):
"""These are the known device types.
NOTE: These have only been partially confirmed.
"""
NXT_TOUCH = 0x01
NXT_LIGHT = 0x02
NXT_SOUND = 0x03
NXT_COLOR = 0x04
NXT_ULTRASONIC = 0x05
NXT_TEMPERATURE = 0x06
TACHO = 0x07 # TYPE_TACHO in lms2012.h
MINI_TACHO = 0x08 # TYPE_MINITACHO in lms2012.h
NEW_TACHO = 0x09 # TYPE_NEWTACHO in lms2012.h
EV3_TOUCH = 0x10
EV3_COLOR = 0x1D
EV3_ULTRASONIC = 0x1E
EV3_GYROSCOPE = 0x20
EV3_INFRARED = 0x21
SENSOR_INITIALIZING = 0x7D
PORT_EMPTY = 0x7E
ERROR_PORT = 0x7F
UNKNOWN = 0xFF
class LCDColor(object):
"""The brick's LCD only displays two colors."""
BACKGROUND = 0
FOREGROUND = 1
class ButtonType(object):
"""The brick's user interface contains 6 buttons."""
NO_BUTTON = 0
UP_BUTTON = 1
ENTER_BUTTON = 2
DOWN_BUTTON = 3
RIGHT_BUTTON = 4
LEFT_BUTTON = 5
BACK_BUTTON = 6
ANY_BUTTON = 7
class MathType(object):
""""""
EXP = 1 # e^x r = expf(x)
MOD = 2 # Modulo r = fmod(x,y)
FLOOR = 3 # Floor r = floor(x)
CEIL = 4 # Ceiling r = ceil(x)
ROUND = 5 # Round r = round(x)
ABS = 6 # Absolute r = fabs(x)
NEGATE = 7 # Negate r = 0.0 - x
SQRT = 8 # Squareroot r = sqrt(x)
LOG = 9 # Log r = log10(x)
LN = 10 # Ln r = log(x)
SIN = 11
COS = 12
TAN = 13
ASIN = 14
ACOS = 15
ATAN = 16
MOD8 = 17 # Modulo DATA8 r = x % y
MOD16 = 18 # Modulo DATA16 r = x % y
MOD32 = 19 # Modulo DATA32 r = x % y
POW = 20 # Exponent r = powf(x,y)
TRUNC = 21 # Truncate r = (float)((int)(x * pow(y))) / pow(y)
class BrowserType(object):
""""""
BROWSE_FOLDERS = 0 # Browser for folders
BROWSE_FOLDS_FILES = 1 # Browser for folders and files
BROWSE_CACHE = 2 # Browser for cached / recent files
BROWSE_FILES = 3 # Browser for files
class Icon(object):
"""The icons on the brick are enumerated by value."""
ICON_NONE = -1
ICON_RUN = 0
ICON_FOLDER = 1
ICON_FOLDER2 = 2
ICON_USB = 3
ICON_SD = 4
ICON_SOUND = 5
ICON_IMAGE = 6
ICON_SETTINGS = 7
ICON_ONOFF = 8
ICON_SEARCH = 9
ICON_WIFI = 10
ICON_CONNECTIONS = 11
ICON_ADD_HIDDEN = 12
ICON_TRASHBIN = 13
ICON_VISIBILITY = 14
ICON_KEY = 15
ICON_CONNECT = 16
ICON_DISCONNECT = 17
ICON_UP = 18
ICON_DOWN = 19
ICON_WAIT1 = 20
ICON_WAIT2 = 21
ICON_BLUETOOTH = 22
ICON_INFO = 23
ICON_TEXT = 24
ICON_QUESTIONMARK = 27
ICON_INFO_FILE = 28
ICON_DISC = 29
ICON_CONNECTED = 30
ICON_OBP = 31
ICON_OBD = 32
ICON_OPENFOLDER = 33
ICON_BRICK1 = 34
class FontType(object):
""""""
NORMAL_FONT = 0
SMALL_FONT = 1
LARGE_FONT = 2
TINY_FONT = 3
class DataFormat(object):
    """Data formats that are used by the VM.

    See DATA_FORMAT_LENS for the byte width of each format (None means
    variable length).
    """
    DATA8 = 0x00    # 8bit value (1 byte)
    DATA16 = 0x01   # 16bit value (2 bytes)
    DATA32 = 0x02   # 32bit value (4 bytes)
    DATA_F = 0x03   # 32bit floating point value (single precision)
    DATA_S = 0x04   # Zero terminated string
    DATA_A = 0x05   # Array handle
    DATA_V = 0x07   # Variable type
    DATA_PCT = 0x10 # Percent (used in INPUT_READEXT)
    DATA_RAW = 0x12 # Raw (used in INPUT_READEXT)
    DATA_SI = 0x13  # SI unit (used in INPUT_READEXT)
    # Values used by this Python module only:
    HND = 0xFF      # For compatibility with ParamTypes.
    BOOL = 0xFE     # For converting to Python values
class ParamType(object):
    """Parameter types that are used by the VM.

    Each value is the marker byte that prefixes a parameter in the
    bytecode stream; see PARAM_TYPE_LENS for the number of payload bytes
    that follow each marker.
    """
    PRIMPAR_LABEL = 0x20
    HND = 0x10  # 8bit handle index (i.e. pointer to a string)
    ADR = 0x08  # 3bit address
    LCS = 0x84  # Null terminated string
    LAB1 = 0xA0
    LC0 = 0x00  # 6bit immediate
    LC1 = 0x81  # 8bit immediate
    LC2 = 0x82  # 16bit immediate
    LC4 = 0x83  # 32bit immediate
    LCA = 0x81  # 8bit pointer to local array (same marker byte as LC1)
    LV1 = 0xC1  # 8bit pointer to local value
    LV2 = 0xC2  # 16bit pointer to local value
    LV4 = 0xC3  # 32bit pointer to local value
    LVA = 0xC1  # 8bit pointer to local array (same marker byte as LV1)
    GV0 = 0x60  # 5bit pointer to global value
    GV1 = 0xE1  # 8bit pointer to global value
    GV2 = 0xE2  # 16bit pointer to global value
    GV4 = 0xE3  # 32bit pointer to global value
    GVA = 0xE1  # 8bit pointer to global array (same marker byte as GV1)
    # Values used by this Python module only:
    FLOAT = 0xFF  # 32bit floating point value (single precision)
# Defines the number of payload bytes that follow each ParamType marker
# byte (None => variable length, e.g. null-terminated strings).
# NOTE(review): this table is keyed by ParamType, not DataFormat as the
# original comment claimed.
PARAM_TYPE_LENS = { ParamType.PRIMPAR_LABEL: None,
                    ParamType.HND: 1,
                    ParamType.ADR: 1,
                    ParamType.LCS: None,
                    ParamType.LAB1: 1,
                    ParamType.LC0: 0,
                    ParamType.LC1: 1,
                    ParamType.LC2: 2,
                    ParamType.LC4: 4,
                    ParamType.LCA: 1,
                    ParamType.LV1: 1,
                    ParamType.LV2: 2,
                    ParamType.LV4: 4,
                    ParamType.LVA: 1,
                    ParamType.GV0: 0,
                    ParamType.GV1: 1,
                    ParamType.GV2: 2,
                    ParamType.GV4: 4,
                    ParamType.GVA: 1,
                    ParamType.FLOAT: 4 }

# Number of bytes each DataFormat occupies in the reply buffer
# (None => variable length).
DATA_FORMAT_LENS = { DataFormat.DATA8: 1,
                     DataFormat.DATA16: 2,
                     DataFormat.DATA32: 4,
                     DataFormat.DATA_F: 4,
                     DataFormat.DATA_S: None,
                     DataFormat.DATA_A: None,
                     DataFormat.DATA_V: None,
                     DataFormat.DATA_PCT: 1,
                     DataFormat.DATA_RAW: 4,
                     DataFormat.DATA_SI: 4,
                     DataFormat.HND: 1,
                     DataFormat.BOOL: 1 }

# There are two ways to specify an output in the c_output module. The first is
# as a bit mask and the second is by index.
OUTPUT_CHANNEL_TO_INDEX = { OutputPort.PORT_A: 0,
                            OutputPort.PORT_B: 1,
                            OutputPort.PORT_C: 2,
                            OutputPort.PORT_D: 3 }
class UIReadSubcode(object):
""""""
GET_VBATT = 1
GET_IBATT = 2
GET_OS_VERS = 3
GET_EVENT = 4
GET_TBATT = 5
GET_IINT = 6
GET_IMOTOR = 7
GET_STRING = 8
GET_HW_VERS = 9
GET_FW_VERS = 10
GET_FW_BUILD = 11
GET_OS_BUILD = 12
GET_ADDRESS = 13
GET_CODE = 14
KEY = 15
GET_SHUTDOWN = 16
GET_WARNING = 17
GET_LBATT = 18
TEXTBOX_READ = 21
GET_VERSION = 26
GET_IP = 27
GET_POWER = 29
GET_SDCARD = 30
GET_USBSTICK = 31
class UIWriteSubcode(object):
""""""
WRITE_FLUSH = 1
FLOATVALUE = 2
STAMP = 3
PUT_STRING = 8
VALUE8 = 9
VALUE16 = 10
VALUE32 = 11
VALUEF = 12
ADDRESS = 13
CODE = 14
DOWNLOAD_END = 15
SCREEN_BLOCK = 16
TEXTBOX_APPEND = 21
SET_BUSY = 22
SET_TESTPIN = 24
INIT_RUN = 25
UPDATE_RUN = 26
LED = 27
POWER = 29
GRAPH_SAMPLE = 30
TERMINAL = 31
class UIButtonSubcode(object):
""""""
SHORTPRESS = 1
LONGPRESS = 2
WAIT_FOR_PRESS = 3
FLUSH = 4
PRESS = 5
RELEASE = 6
GET_HORZ = 7
GET_VERT = 8
PRESSED = 9
SET_BACK_BLOCK = 10
GET_BACK_BLOCK = 11
TESTSHORTPRESS = 12
TESTLONGPRESS = 13
GET_BUMBED = 14
GET_CLICK = 15
class COMGetSubcodes(object):
""""""
GET_ON_OFF = 1 # Set, Get
GET_VISIBLE = 2 # Set, Get
GET_RESULT = 4 # Get
GET_PIN = 5 # Set, Get
SEARCH_ITEMS = 8 # Get
SEARCH_ITEM = 9 # Get
FAVOUR_ITEMS = 10 # Get
FAVOUR_ITEM = 11 # Get
GET_ID = 12
GET_BRICKNAME = 13
GET_NETWORK = 14
GET_PRESENT = 15
GET_ENCRYPT = 16
CONNEC_ITEMS = 17
CONNEC_ITEM = 18
GET_INCOMING = 19
GET_MODE2 = 20
class COMSetSubcode(object):
""""""
SET_ON_OFF = 1 # Set, Get
SET_VISIBLE = 2 # Set, Get
SET_SEARCH = 3 # Set
SET_PIN = 5 # Set, Get
SET_PASSKEY = 6 # Set
SET_CONNECTION = 7 # Set
SET_BRICKNAME = 8
SET_MOVEUP = 9
SET_MOVEDOWN = 10
SET_ENCRYPT = 11
SET_SSID = 12
SET_MODE2 = 13
class InputDeviceSubcode(object):
""""""
GET_FORMAT = 2
CAL_MINMAX = 3
CAL_DEFAULT = 4
GET_TYPEMODE = 5
GET_SYMBOL = 6
CAL_MIN = 7
CAL_MAX = 8
SETUP = 9 # Probably only for internal use.
CLR_ALL = 10 # Resets counters, angle, etc.
GET_RAW = 11
GET_CONNECTION = 12
STOP_ALL = 13 # Stops any attached motors?
GET_NAME = 21
GET_MODENAME = 22
SET_RAW = 23
GET_FIGURES = 24
GET_CHANGES = 25
CLR_CHANGES = 26
READY_PCT = 27
READY_RAW = 28
READY_SI = 29
GET_MINMAX = 30
GET_BUMPS = 31
class ProgramInfoSubcode(object):
""""""
OBJ_STOP = 0
OBJ_START = 4
GET_STATUS = 22
GET_SPEED = 23
GET_PRGRESULT = 24
SET_INSTR = 25
class UIDrawSubcode(object):
""""""
UPDATE = 0
CLEAN = 1
PIXEL = 2
LINE = 3
CIRCLE = 4
TEXT = 5
ICON = 6
PICTURE = 7
VALUE = 8
FILLRECT = 9
RECT = 10
NOTIFICATION = 11
QUESTION = 12
KEYBOARD = 13
BROWSE = 14
VERTBAR = 15
INVERSERECT = 16
SELECT_FONT = 17
TOPLINE = 18
FILLWINDOW = 19
SCROLL = 20
DOTLINE = 21
VIEW_VALUE = 22
VIEW_UNIT = 23
FILLCIRCLE = 24
STORE = 25
RESTORE = 26
ICON_QUESTION = 27
BMPFILE = 28
POPUP = 29
GRAPH_SETUP = 30
GRAPH_DRAW = 31
TEXTBOX = 32
class FileSubcode(object):
""""""
OPEN_APPEND = 0
OPEN_READ = 1
OPEN_WRITE = 2
READ_VALUE = 3
WRITE_VALUE = 4
READ_TEXT = 5
WRITE_TEXT = 6
CLOSE = 7
LOAD_IMAGE = 8
GET_HANDLE = 9
MAKE_FOLDER = 10
GET_POOL = 11
SET_LOG_SYNC_TIME = 12
GET_FOLDERS = 13
GET_LOG_SYNC_TIME = 14
GET_SUBFOLDER_NAME = 15
WRITE_LOG = 16
CLOSE_LOG = 17
GET_IMAGE = 18
GET_ITEM = 19
GET_CACHE_FILES = 20
PUT_CACHE_FILE = 21
GET_CACHE_FILE = 22
DEL_CACHE_FILE = 23
DEL_SUBFOLDER = 24
GET_LOG_NAME = 25
OPEN_LOG = 27
READ_BYTES = 28
WRITE_BYTES = 29
REMOVE = 30
MOVE = 31
class ArraySubcode(object):
""""""
DELETE = 0
CREATE8 = 1
CREATE16 = 2
CREATE32 = 3
CREATEF = 4
RESIZE = 5
FILL = 6
COPY = 7
INIT8 = 8
INIT16 = 9
INIT32 = 10
INITF = 11
SIZE = 12
READ_CONTENT = 13
WRITE_CONTENT = 14
READ_SIZE = 15
class FilenameSubcode(object):
""""""
EXIST = 16 # MUST BE GREATER OR EQUAL TO "ARRAY_SUBCODES"
TOTALSIZE = 17
SPLIT = 18
MERGE = 19
CHECK = 20
PACK = 21
UNPACK = 22
GET_FOLDERNAME = 23
class InfoSubcode(object):
""""""
SET_ERROR = 1
GET_ERROR = 2
ERRORTEXT = 3
GET_VOLUME = 4
SET_VOLUME = 5
GET_MINUTES = 6
SET_MINUTES = 7
class SoundSubcode(object):
""""""
BREAK = 0
TONE = 1
PLAY = 2
REPEAT = 3
SERVICE = 4
class StringSubcode(object):
""""""
GET_SIZE = 1 # Get string size
ADD = 2 # Add two strings
COMPARE = 3 # Compare two strings
DUPLICATE = 5 # Duplicate one string to another
VALUE_TO_STRING = 6
STRING_TO_VALUE = 7
STRIP = 8
NUMBER_TO_STRING = 9
SUB = 10
VALUE_FORMATTED = 11
NUMBER_FORMATTED = 12
class TstSubcode(object):
""""""
TST_OPEN = 10 # Must >= "INFO_SUBCODES"
TST_CLOSE = 11
TST_READ_PINS = 12
TST_WRITE_PINS = 13
TST_READ_ADC = 14
TST_WRITE_UART = 15
TST_READ_UART = 16
TST_ENABLE_UART = 17
TST_DISABLE_UART = 18
TST_ACCU_SWITCH = 19
TST_BOOT_MODE2 = 20
TST_POLL_MODE2 = 21
TST_CLOSE_MODE2 = 22
TST_RAM_CHECK = 23
class Opcode(object):
"""All of the opcodes that are used by the VM."""
ERROR = 0x00
NOP = 0x01
PROGRAM_STOP = 0x02
PROGRAM_START = 0x03
OBJECT_STOP = 0x04
OBJECT_START = 0x05
OBJECT_TRIG = 0x06
OBJECT_WAIT = 0x07
RETURN = 0x08
CALL = 0x09
OBJECT_END = 0x0A
SLEEP = 0x0B
PROGRAM_INFO = 0x0C
LABEL = 0x0D
PROBE = 0x0E
DO = 0x0F
# MATH
ADD8 = 0x10
ADD16 = 0x11
ADD32 = 0x12
ADDF = 0x13
SUB8 = 0x14
SUB16 = 0x15
SUB32 = 0x16
SUBF = 0x17
MUL8 = 0x18
MUL16 = 0x19
MUL32 = 0x1A
MULF = 0x1B
DIV8 = 0x1C
DIV16 = 0x1D
DIV32 = 0x1E
DIVF = 0x1F
# LOGIC
OR8 = 0x20
OR16 = 0x21
OR32 = 0x22
AND8 = 0x24
AND16 = 0x25
AND32 = 0x26
XOR8 = 0x28
XOR16 = 0x29
XOR32 = 0x2A
RL8 = 0x2C
RL16 = 0x2D
RL32 = 0x2E
# MOVE
INIT_BYTES = 0x2F
MOVE8_8 = 0x30
MOVE8_16 = 0x31
MOVE8_32 = 0x32
MOVE8_F = 0x33
MOVE16_8 = 0x34
MOVE16_16 = 0x35
MOVE16_32 = 0x36
MOVE16_F = 0x37
MOVE32_8 = 0x38
MOVE32_16 = 0x39
MOVE32_32 = 0x3A
MOVE32_F = 0x3B
MOVEF_8 = 0x3C
MOVEF_16 = 0x3D
MOVEF_32 = 0x3E
MOVEF_F = 0x3F
# BRANCH
JR = 0x40
JR_FALSE = 0x41
JR_TRUE = 0x42
JR_NAN = 0x43
# COMPARE
CP_LT8 = 0x44
CP_LT16 = 0x45
CP_LT32 = 0x46
CP_LTF = 0x47
CP_GT8 = 0x48
CP_GT16 = 0x49
CP_GT32 = 0x4A
CP_GTF = 0x4B
CP_EQ8 = 0x4C
CP_EQ16 = 0x4D
CP_EQ32 = 0x4E
CP_EQF = 0x4F
CP_NEQ8 = 0x50
CP_NEQ16 = 0x51
CP_NEQ32 = 0x52
CP_NEQF = 0x53
CP_LTEQ8 = 0x54
CP_LTEQ16 = 0x55
CP_LTEQ32 = 0x56
CP_LTEQF = 0x57
CP_GTEQ8 = 0x58
CP_GTEQ16 = 0x59
CP_GTEQ32 = 0x5A
CP_GTEQF = 0x5B
# SELECT
SELECT8 = 0x5C
SELECT16 = 0x5D
SELECT32 = 0x5E
SELECTF = 0x5F
SYSTEM = 0x60
PORT_CNV_OUTPUT = 0x61
PORT_CNV_INPUT = 0x62
NOTE_TO_FREQ = 0x63
# BRANCH
JR_LT8 = 0x64
JR_LT16 = 0x65
JR_LT32 = 0x66
JR_LTF = 0x67
JR_GT8 = 0x68
JR_GT16 = 0x69
JR_GT32 = 0x6A
JR_GTF = 0x6B
JR_EQ8 = 0x6C
JR_EQ16 = 0x6D
JR_EQ32 = 0x6E
JR_EQF = 0x6F
JR_NEQ8 = 0x70
JR_NEQ16 = 0x71
JR_NEQ32 = 0x72
JR_NEQF = 0x73
JR_LTEQ8 = 0x74
JR_LTEQ16 = 0x75
JR_LTEQ32 = 0x76
JR_LTEQF = 0x77
JR_GTEQ8 = 0x78
JR_GTEQ16 = 0x79
JR_GTEQ32 = 0x7A
JR_GTEQF = 0x7B
# VM
INFO = 0x7C
STRINGS = 0x7D
MEMORY_WRITE = 0x7E
MEMORY_READ = 0x7F
# UI
UI_FLUSH = 0x80
UI_READ = 0x81
UI_WRITE = 0x82
UI_BUTTON = 0x83
UI_DRAW = 0x84
# TIMER
TIMER_WAIT = 0x85
TIMER_READY = 0x86
TIMER_READ = 0x87
# BREAKPOINT
BP0 = 0x88
BP1 = 0x89
BP2 = 0x8A
BP3 = 0x8B
BP_SET = 0x8C
MATH = 0x8D
RANDOM = 0x8E
# TIMER
TIMER_READ_US = 0x8F
# UI
KEEP_ALIVE = 0x90
# COM
COM_READ = 0x91
COM_WRITE = 0x92
# SOUND
SOUND = 0x94
SOUND_TEST = 0x95
SOUND_READY = 0x96
# INPUT
INPUT_SAMPLE = 0x97
INPUT_DEVICE_LIST = 0x98
INPUT_DEVICE = 0x99
INPUT_READ = 0x9A
INPUT_TEST = 0x9B
INPUT_READY = 0x9C
INPUT_READSI = 0x9D
INPUT_READEXT = 0x9E
INPUT_WRITE = 0x9F
# OUTPUT
OUTPUT_GET_TYPE = 0xA0
OUTPUT_SET_TYPE = 0xA1
OUTPUT_RESET = 0xA2
OUTPUT_STOP = 0xA3
OUTPUT_POWER = 0xA4
OUTPUT_SPEED = 0xA5
OUTPUT_START = 0xA6
OUTPUT_POLARITY = 0xA7
OUTPUT_READ = 0xA8
OUTPUT_TEST = 0xA9
OUTPUT_READY = 0xAA
OUTPUT_POSITION = 0xAB
OUTPUT_STEP_POWER = 0xAC
OUTPUT_TIME_POWER = 0xAD
OUTPUT_STEP_SPEED = 0xAE
OUTPUT_TIME_SPEED = 0xAF
OUTPUT_STEP_SYNC = 0xB0
OUTPUT_TIME_SYNC = 0xB1
OUTPUT_CLR_COUNT = 0xB2
OUTPUT_GET_COUNT = 0xB3
OUTPUT_PRG_STOP = 0xB4
# MEMORY
FILE = 0xC0
ARRAY = 0xC1
ARRAY_WRITE = 0xC2
ARRAY_READ = 0xC3
ARRAY_APPEND = 0xC4
MEMORY_USAGE = 0xC5
FILENAME = 0xC6
# READ
READ8 = 0xC8
READ16 = 0xC9
READ32 = 0xCA
READF = 0xCB
# WRITE
WRITE8 = 0xCC
WRITE16 = 0xCD
WRITE32 = 0xCE
WRITEF = 0xCF
# COM
COM_READY = 0xD0
COM_READDATA = 0xD1
COM_WRITEDATA = 0xD2
COM_GET = 0xD3
COM_SET = 0xD4
COM_TEST = 0xD5
COM_REMOVE = 0xD6
COM_WRITEFILE = 0xD7
MAILBOX_OPEN = 0xD8
MAILBOX_WRITE = 0xD9
MAILBOX_READ = 0xDA
MAILBOX_TEST = 0xDB
MAILBOX_READY = 0xDC
MAILBOX_CLOSE = 0xDD
# SPARE
TST = 0xFF
class DirectCommand(object):
"""Handles variable allocation and parameters for commands that can consist
of arbitrary bytecodes.
TODO: Better param verification?
"""
# These are inserted into the _global_params_types list so that commands
    # that return multiple values can have their values bundled together into
# tuples before they are returned.
_REPLY_TUPLE_OPEN_TOKEN = '_('
_REPLY_TUPLE_CLOSE_TOKEN = ')_'
def __init__(self):
"""Constructs a new, empty object."""
self._global_params_types = []
self._local_params_byte_count = 0
self._global_params_byte_count = 0
# Allocate space for the CommandType.
self._msg = [0x00]
# Allocate space for global and local param lengths.
self._msg.append(0x00)
self._msg.append(0x00)
def send(self, ev3_object):
"""Sends the message and parses the reply."""
if (2 == len(self._msg)):
raise DirectCommandError('Attempt to send an empty DirectCommand.')
self._msg[1] = (self._global_params_byte_count & 0xFF)
self._msg[2] = ((self._local_params_byte_count << 2) |
((self._global_params_byte_count >> 8) & 0x03))
if (self._global_params_byte_count):
self._msg[0] = CommandType.DIRECT_COMMAND_REPLY
reply = ev3_object.send_message_for_reply(self._msg)
return self._parse_reply(reply)
else:
self._msg[0] = CommandType.DIRECT_COMMAND_NO_REPLY
ev3_object.send_message(self._msg)
    def safe_add(fn):
        """A wrapper for adding commands in a safe manner.

        Decorator for the add_* methods below.  It snapshots the message
        and parameter bookkeeping before invoking the wrapped method; if
        the addition overflows the command or variable-space limits it
        rolls everything back and raises DirectCommandError, leaving the
        DirectCommand exactly as it was before the call.
        """
        def checked_add(*args):
            # Wrappers aren't bound methods so they can't reference 'self'
            # directly. However, 'self' will be provided as the first parameter
            # when the wrapped method is called.
            _self = args[0]
            # Snapshot the current state so a failed add can be undone.
            msg_len = len(_self._msg)
            global_params_types_len = len(_self._global_params_types)
            local_params_byte_count = _self._local_params_byte_count
            global_params_byte_count = _self._global_params_byte_count
            fn(*args)
            if ((MAX_CMD_LEN < len(_self._msg)) or
                    (MAX_CMD_LEN < _self._global_params_byte_count) or
                    (MAX_LOCAL_VARIABLE_BYTES < _self._local_params_byte_count)):
                # Roll back: truncate whatever the wrapped call appended
                # and restore the byte counters.
                del (_self._msg[msg_len:])
                del (_self._global_params_types[global_params_types_len:])
                _self._local_params_byte_count = local_params_byte_count
                _self._global_params_byte_count = global_params_byte_count
                raise DirectCommandError('Not enough space to add the ' +
                                         'given func.')
        return checked_add
    @safe_add
    def add_timer_wait(self, milliseconds):
        """Causes the thread to sleep for the specified number of milliseconds.

        Emits a TIMER_WAIT followed by a TIMER_READY, both referring to
        the same 32-bit local variable on the brick (presumably the timer
        state that TIMER_READY blocks on -- confirm against the lms2012
        bytecode reference).
        """
        # Allocate a DATA32 local on the brick shared by both opcodes.
        local_var_tuple = self._allocate_local_param(DataFormat.DATA32)
        self._msg.append(Opcode.TIMER_WAIT)
        self._append_local_constant(milliseconds)
        self._append_param(*local_var_tuple)
        self._msg.append(Opcode.TIMER_READY)
        self._append_param(*local_var_tuple)
@safe_add
def add_ui_draw_update(self):
"""Updates the screen (applies whatever drawing commands have been
issued since the last update).
"""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.UPDATE)
@safe_add
def add_ui_draw_clean(self):
"""Fills the screen with LCDColor.BACKGROUND."""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.CLEAN)
@safe_add
def add_ui_draw_fillwindow(self, lcd_color, start_y, count):
"""Fills the window with count rows of the given LCDColor starting at
row start_y.
NOTE: Starting at 0 with a size of 0 will clear the window. This seems
to be the way the CLEAN command is implemented.
"""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.FILLWINDOW)
self._append_param(lcd_color)
self._append_param(start_y, ParamType.LC2)
self._append_param(count, ParamType.LC2)
@safe_add
def add_ui_draw_pixel(self, lcd_color, xy):
"""Draws a pixel at the given (x, y)."""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.PIXEL)
self._append_param(lcd_color)
self._append_param(xy[0], ParamType.LC2)
self._append_param(xy[1], ParamType.LC2)
@safe_add
def add_ui_draw_line(self, lcd_color, start_xy, end_xy):
"""Draws a line from the start (x, y) to the end (x, y)."""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.LINE)
self._append_param(lcd_color)
self._append_param(start_xy[0], ParamType.LC2)
self._append_param(start_xy[1], ParamType.LC2)
self._append_param(end_xy[0], ParamType.LC2)
self._append_param(end_xy[1], ParamType.LC2)
@safe_add
def add_ui_draw_dotline(self, lcd_color,
start_xy,
end_xy,
on_pixels,
off_pixels):
"""Draws a line from the start (x, y) to the end (x, y). The line will
be composed of a repeating pattern consisting of on_pixels followed by
off_pixels.
"""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.DOTLINE)
self._append_param(lcd_color)
self._append_param(start_xy[0], ParamType.LC2)
self._append_param(start_xy[1], ParamType.LC2)
self._append_param(end_xy[0], ParamType.LC2)
self._append_param(end_xy[1], ParamType.LC2)
self._append_param(on_pixels, ParamType.LC2)
self._append_param(off_pixels, ParamType.LC2)
@safe_add
def add_ui_draw_rect(self, lcd_color, xy, width, height):
"""Draws a rectangle with (x, y) as the top-left corner and with width
and height dimensions.
"""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.RECT)
self._append_param(lcd_color)
self._append_param(xy[0], ParamType.LC2)
self._append_param(xy[1], ParamType.LC2)
self._append_param(width, ParamType.LC2)
self._append_param(height, ParamType.LC2)
@safe_add
def add_ui_draw_fillrect(self, lcd_color, xy, width, height):
"""Draws a filled rectangle with (x, y) as the top-left corner and
with width and height dimensions.
"""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.FILLRECT)
self._append_param(lcd_color)
self._append_param(xy[0], ParamType.LC2)
self._append_param(xy[1], ParamType.LC2)
self._append_param(width, ParamType.LC2)
self._append_param(height, ParamType.LC2)
@safe_add
def add_ui_draw_inverserect(self, xy, width, height):
"""Draws a rectangle with (x, y) as the top-left corner and with width
and height dimensions. Any pixel that this rectangle overlaps will have
its color flipped.
"""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.INVERSERECT)
self._append_param(xy[0], ParamType.LC2)
self._append_param(xy[1], ParamType.LC2)
self._append_param(width, ParamType.LC2)
self._append_param(height, ParamType.LC2)
@safe_add
def add_ui_draw_circle(self, lcd_color, xy, radius):
"""Draws a circle centered at (x, y) with the specified radius."""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.CIRCLE)
self._append_param(lcd_color)
self._append_param(xy[0], ParamType.LC2)
self._append_param(xy[1], ParamType.LC2)
self._append_param(radius, ParamType.LC2)
@safe_add
def add_ui_draw_fillcircle(self, lcd_color, xy, radius):
"""Draws a filled circle centered at (x, y) with the specified radius.
"""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.FILLCIRCLE)
self._append_param(lcd_color)
self._append_param(xy[0], ParamType.LC2)
self._append_param(xy[1], ParamType.LC2)
self._append_param(radius, ParamType.LC2)
@safe_add
def add_ui_draw_selectfont(self, font_type):
"""Selects the FontType that will be used by following calls to
add_ui_draw_text.
"""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.SELECT_FONT)
self._append_param(font_type)
@safe_add
def add_ui_draw_text(self, lcd_color, xy, text_str):
"""Draws the given text with (x, y) as the top-left corner of the
bounding box. Use add_ui_draw_selectfont to select the font.
"""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.TEXT)
self._append_param(lcd_color)
self._append_param(xy[0], ParamType.LC2)
self._append_param(xy[1], ParamType.LC2)
self._append_param(text_str, ParamType.LCS)
@safe_add
def add_ui_draw_topline(self, topline_enabled):
"""Enables or disables the display of the menu bar at the top of the
screen that normally displays status icons such as the battery
indicator.
"""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.TOPLINE)
self._append_param(int(topline_enabled))
@safe_add
def add_ui_draw_store(self, ui_level_no=0):
"""If ui_level_no is zero then this function saves the current screen
content so that it be restored later using add_ui_draw_restore.
"""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.STORE)
self._append_param(ui_level_no)
@safe_add
def add_ui_draw_restore(self, ui_level_no=0):
"""Restores the screen content that was previously saved using
add_ui_draw_store.
"""
self._msg.append(Opcode.UI_DRAW)
self._msg.append(UIDrawSubcode.RESTORE)
self._append_param(ui_level_no)
    @safe_add
    def add_ui_button_pressed(self, button_type):
        """Returns True if the specified ButtonType button is being pressed.

        Reserves one reply byte tagged DataFormat.BOOL so the reply is
        presumably converted to a Python bool by the reply parser.
        """
        self._msg.append(Opcode.UI_BUTTON)
        self._msg.append(UIButtonSubcode.PRESSED)
        self._append_param(button_type)
        self._append_reply_param(DataFormat.BOOL)
    @safe_add
    def add_keep_alive(self):
        """Resets the sleep timer and returns the sleep timer's new value in
        minutes.
        """
        self._msg.append(Opcode.KEEP_ALIVE)
        # One DATA8 reply byte: the sleep timeout in minutes.
        self._append_reply_param(DataFormat.DATA8)
    @safe_add
    def add_input_device_get_typemode(self, input_port,
                                      layer=USB_CHAIN_LAYER_MASTER):
        """Returns the DeviceType and mode for the given input_port. The mode
        value depends on the type of the device.
        """
        self._msg.append(Opcode.INPUT_DEVICE)
        self._msg.append(InputDeviceSubcode.GET_TYPEMODE)
        self._append_param(layer)
        self._append_param(input_port)
        # Bracket the two reply bytes with the tuple tokens so that the
        # reply parser bundles (type, mode) into a single tuple (see the
        # _REPLY_TUPLE_* comment at the top of the class).
        self._global_params_types.append(self._REPLY_TUPLE_OPEN_TOKEN)
        self._append_reply_param(DataFormat.DATA8)
        self._append_reply_param(DataFormat.DATA8)
        self._global_params_types.append(self._REPLY_TUPLE_CLOSE_TOKEN)
@safe_add
def add_input_device_get_name(self, input_port,
layer=USB_CHAIN_LAYER_MASTER):
"""Returns a string describing the device that is located at the
specified port i.e. 'NONE' or 'US-DIST-CM'.
"""
self._msg.append(Opcode.INPUT_DEVICE)
self._msg.append(InputDeviceSubcode.GET_NAME)
self._append_param(layer)
self._append_param(input_port)
self._append_param(MAX_NAME_STR_LEN, ParamType.LC2)
self._append_reply_param((DataFormat.DATA_S, MAX_NAME_STR_LEN))
@safe_add
def add_input_device_get_modename(self, input_port,
mode,
layer=USB_CHAIN_LAYER_MASTER):
"""Returns a string describing the specified mode of the device that is
located at the given port. For example, for an ultrasonic sensor mode
0 will return 'US-DIST-CM' and mode 1 will return 'US-DIST-IN'.
NOTE: Reading invalid modes can corrupt the reply buffer.
"""
self._msg.append(Opcode.INPUT_DEVICE)
self._msg.append(InputDeviceSubcode.GET_MODENAME)
self._append_param(layer)
self._append_param(input_port)
self._append_param(mode)
self._append_param(MAX_NAME_STR_LEN, ParamType.LC2)
self._append_reply_param((DataFormat.DATA_S, MAX_NAME_STR_LEN))
@safe_add
def add_input_device_get_minmax(self, input_port,
                                layer=USB_CHAIN_LAYER_MASTER):
    """Returns a (MIN, MAX) tuple of floats for the device on the given
    input_port — presumably the value range of the device's current
    mode; TODO confirm against the firmware's GET_MINMAX semantics.
    """
    self._msg.append(Opcode.INPUT_DEVICE)
    self._msg.append(InputDeviceSubcode.GET_MINMAX)
    self._append_param(layer)
    self._append_param(input_port)
    # The two DATA_F replies are grouped so that _parse_reply returns
    # them as a single (min, max) tuple instead of two separate values.
    self._global_params_types.append(self._REPLY_TUPLE_OPEN_TOKEN)
    self._append_reply_param(DataFormat.DATA_F)
    self._append_reply_param(DataFormat.DATA_F)
    self._global_params_types.append(self._REPLY_TUPLE_CLOSE_TOKEN)
@safe_add
def add_input_device_get_changes(self, input_port,
layer=USB_CHAIN_LAYER_MASTER):
"""Returns the number of positive changes since the last time
CLR_CHANGES was called (i.e. the number of times that a touch sensor
has been pressed).
"""
self._msg.append(Opcode.INPUT_DEVICE)
self._msg.append(InputDeviceSubcode.GET_CHANGES)
self._append_param(layer)
self._append_param(input_port)
self._append_reply_param(DataFormat.DATA_F)
@safe_add
def add_input_device_get_bumps(self, input_port,
layer=USB_CHAIN_LAYER_MASTER):
"""Returns the number of negative changes since the last time
CLR_CHANGES was called (i.e. the number of times that a touch sensor
has been released).
"""
self._msg.append(Opcode.INPUT_DEVICE)
self._msg.append(InputDeviceSubcode.GET_BUMPS)
self._append_param(layer)
self._append_param(input_port)
self._append_reply_param(DataFormat.DATA_F)
@safe_add
def add_input_device_clr_changes(self, input_port,
layer=USB_CHAIN_LAYER_MASTER):
"""Returns the number of negative changes since the last time
CLR_CHANGES was called (i.e. the number of times that a touch sensor
has been released).
NOTE: Does not clear the accumulated angle measurement for the EV3
gyro sensor.
"""
self._msg.append(Opcode.INPUT_DEVICE)
self._msg.append(InputDeviceSubcode.CLR_CHANGES)
self._append_param(layer)
self._append_param(input_port)
@safe_add
def add_input_device_clr_all(self, layer=USB_CHAIN_LAYER_MASTER):
"""Clears all of the input device values."""
self._msg.append(Opcode.INPUT_DEVICE)
self._msg.append(InputDeviceSubcode.CLR_ALL)
self._append_param(layer)
@safe_add
def add_input_device_ready_si(self, input_port,
mode=-1,
device_type=0,
layer=USB_CHAIN_LAYER_MASTER):
"""Waits until the device on the specified InputPort is ready and then
returns its value as a standard unit.
"""
self._msg.append(Opcode.INPUT_DEVICE)
self._msg.append(InputDeviceSubcode.READY_SI)
self._append_param(layer)
self._append_param(input_port)
self._append_param(device_type)
self._append_param(mode)
self._append_param(1) # Number of values
self._append_reply_param(DataFormat.DATA_F)
@safe_add
def add_input_device_ready_raw(self, input_port,
mode=-1,
device_type=0,
layer=USB_CHAIN_LAYER_MASTER):
"""Waits until the device on the specified InputPort is ready and then
returns its value as a raw value.
"""
self._msg.append(Opcode.INPUT_DEVICE)
self._msg.append(InputDeviceSubcode.READY_RAW)
self._append_param(layer)
self._append_param(input_port)
self._append_param(device_type)
self._append_param(mode)
self._append_param(1) # Number of values
self._append_reply_param(DataFormat.DATA32)
@safe_add
def add_input_device_ready_percent(self, input_port,
mode=-1,
device_type=0,
layer=USB_CHAIN_LAYER_MASTER):
"""Waits until the device on the specified InputPort is ready and then
returns its value as a percentage.
"""
self._msg.append(Opcode.INPUT_DEVICE)
self._msg.append(InputDeviceSubcode.READY_PCT)
self._append_param(layer)
self._append_param(input_port)
self._append_param(device_type)
self._append_param(mode)
self._append_param(1) # Number of values
self._append_reply_param(DataFormat.DATA8)
@safe_add
def add_sound_tone(self, volume,
frequency_hz,
duration_ms,
layer=USB_CHAIN_LAYER_MASTER):
"""Plays the tone at the given volume and frequency for the given
duration_ms milliseconds.
"""
self._msg.append(Opcode.SOUND)
self._msg.append(SoundSubcode.TONE)
self._append_param(volume)
self._append_param(frequency_hz, ParamType.LC2)
self._append_param(duration_ms, ParamType.LC2)
@safe_add
def add_sound_play(self, volume, filename):
"""Plays the sound file with the given name at the specified volume.
The default sound files are located in the '/home/root/lms2012/sys/ui/'
directory and include Startup.rsf, PowerDown.rsf, OverpowerAlert.rsf,
GeneralAlarm.rsf, DownloadSucces.rsf, and Click.rsf.
NOTE: Do not include the '.rsf' extension in the filename.
"""
self._msg.append(Opcode.SOUND)
self._msg.append(SoundSubcode.PLAY)
self._append_param(volume)
self._append_param(filename, ParamType.LCS)
@safe_add
def add_ui_read_get_fw_vers(self):
    """Returns the FW version as a string in the form 'VX.XXX'."""
    self._msg.append(Opcode.UI_READ)
    self._msg.append(UIReadSubcode.GET_FW_VERS)
    self._append_param(MAX_VERSION_STR_LEN, ParamType.LC2)
    # Bug fix: the brick is asked for at most MAX_VERSION_STR_LEN bytes,
    # so the reply must be parsed with the same length. The original used
    # MAX_NAME_STR_LEN, which desynchronizes reply parsing whenever the
    # two constants differ.
    self._append_reply_param((DataFormat.DATA_S, MAX_VERSION_STR_LEN))
@safe_add
def add_ui_read_get_hw_vers(self):
    """Returns the HW version as a string in the form 'VX.XXX'."""
    self._msg.append(Opcode.UI_READ)
    self._msg.append(UIReadSubcode.GET_HW_VERS)
    self._append_param(MAX_VERSION_STR_LEN, ParamType.LC2)
    # Bug fix: the reply length must match the MAX_VERSION_STR_LEN that
    # was requested above; the original parsed MAX_NAME_STR_LEN bytes.
    self._append_reply_param((DataFormat.DATA_S, MAX_VERSION_STR_LEN))
@safe_add
def add_ui_read_get_fw_build(self):
"""Returns the firmware build as a string in the form 'XXXXXXXXXX'."""
self._msg.append(Opcode.UI_READ)
self._msg.append(UIReadSubcode.GET_FW_BUILD)
self._append_param(MAX_VERSION_STR_LEN, ParamType.LC2)
self._append_reply_param((DataFormat.DATA_S, MAX_NAME_STR_LEN))
@safe_add
def add_ui_read_get_os_vers(self):
"""Returns the OS version as a string in the form 'Linux X.X.XX'."""
self._msg.append(Opcode.UI_READ)
self._msg.append(UIReadSubcode.GET_OS_VERS)
self._append_param(MAX_VERSION_STR_LEN, ParamType.LC2)
self._append_reply_param((DataFormat.DATA_S, MAX_NAME_STR_LEN))
@safe_add
def add_ui_read_get_os_build(self):
"""Returns the OS build as a string in the form 'XXXXXXXXXX'."""
self._msg.append(Opcode.UI_READ)
self._msg.append(UIReadSubcode.GET_OS_BUILD)
self._append_param(MAX_VERSION_STR_LEN, ParamType.LC2)
self._append_reply_param((DataFormat.DATA_S, MAX_NAME_STR_LEN))
@safe_add
def add_ui_read_get_version(self):
"""Returns the Lego Mindstorms version as a string in the form
'LMS2012 VX.XXX(<TIMESTAMP>)'.
"""
self._msg.append(Opcode.UI_READ)
self._msg.append(UIReadSubcode.GET_VERSION)
self._append_param(MAX_VERSION_STR_LEN, ParamType.LC2)
self._append_reply_param((DataFormat.DATA_S, MAX_NAME_STR_LEN))
@safe_add
def add_ui_read_get_ip(self):
    """Returns the IP address as a string."""
    self._msg.append(Opcode.UI_READ)
    self._msg.append(UIReadSubcode.GET_IP)
    self._append_param(MAX_VERSION_STR_LEN, ParamType.LC2)
    # Bug fix: the reply length must match the MAX_VERSION_STR_LEN that
    # was requested above; the original parsed MAX_NAME_STR_LEN bytes.
    self._append_reply_param((DataFormat.DATA_S, MAX_VERSION_STR_LEN))
@safe_add
def add_ui_read_get_vbatt(self):
"""Gets the current battery voltage. According to the constants that are
defined in 'lms2012.h', the rechargeable battery should be in the range
of [6.0, 7.1] and normal batteries should be in the range of [4.5, 6.2].
"""
self._msg.append(Opcode.UI_READ)
self._msg.append(UIReadSubcode.GET_VBATT)
self._append_reply_param(DataFormat.DATA_F)
@safe_add
def add_ui_read_get_lbatt(self):
"""Gets the current battery level as a percentage."""
self._msg.append(Opcode.UI_READ)
self._msg.append(UIReadSubcode.GET_LBATT)
self._append_reply_param(DataFormat.DATA_PCT)
@safe_add
def add_ui_read_get_ibatt(self):
"""Gets the current battery discharge amperage."""
self._msg.append(Opcode.UI_READ)
self._msg.append(UIReadSubcode.GET_IBATT)
self._append_reply_param(DataFormat.DATA_F)
@safe_add
def add_ui_read_get_tbatt(self):
"""Gets the current battery temperature rise."""
self._msg.append(Opcode.UI_READ)
self._msg.append(UIReadSubcode.GET_TBATT)
self._append_reply_param(DataFormat.DATA_F)
@safe_add
def add_ui_read_get_imotor(self):
"""Gets the amount of current that the motors are using."""
self._msg.append(Opcode.UI_READ)
self._msg.append(UIReadSubcode.GET_IMOTOR)
self._append_reply_param(DataFormat.DATA_F)
@safe_add
def add_ui_read_get_sdcard(self):
"""Returns the following information about the SD card:
(<SD_CARD_OK>, <TOTAL_KBYTES>, <FREE_KBYTES>). The SD_CARD_OK value is
a boolean.
"""
self._msg.append(Opcode.UI_READ)
self._msg.append(UIReadSubcode.GET_SDCARD)
self._global_params_types.append(self._REPLY_TUPLE_OPEN_TOKEN)
self._append_reply_param(DataFormat.BOOL)
self._append_reply_param(DataFormat.DATA32)
self._append_reply_param(DataFormat.DATA32)
self._global_params_types.append(self._REPLY_TUPLE_CLOSE_TOKEN)
@safe_add
def add_ui_read_get_usbstick(self):
    """Returns the following information about the USB stick:
    (<USB_STICK_OK>, <TOTAL_KBYTES>, <FREE_KBYTES>). The USB_STICK_OK
    value is a boolean.
    """
    self._msg.append(Opcode.UI_READ)
    # Bug fix: the original appended UIReadSubcode.GET_SDCARD (copy/paste
    # from add_ui_read_get_sdcard), so this method actually reported the
    # SD card's statistics instead of the USB stick's.
    self._msg.append(UIReadSubcode.GET_USBSTICK)
    self._global_params_types.append(self._REPLY_TUPLE_OPEN_TOKEN)
    self._append_reply_param(DataFormat.BOOL)
    self._append_reply_param(DataFormat.DATA32)
    self._append_reply_param(DataFormat.DATA32)
    self._global_params_types.append(self._REPLY_TUPLE_CLOSE_TOKEN)
@safe_add
def add_output_get_type(self, output_port, layer=USB_CHAIN_LAYER_MASTER):
"""Returns the DeviceType of the device that is connected to the
specified OutputPort.
"""
self._msg.append(Opcode.OUTPUT_GET_TYPE)
self._append_param(layer)
self._append_param(OUTPUT_CHANNEL_TO_INDEX[output_port])
self._append_reply_param(DataFormat.DATA8)
@safe_add
def add_output_set_type(self, output_port,
                        output_type,
                        layer=USB_CHAIN_LAYER_MASTER):
    """Sets the DeviceType of the device that is connected to the
    specified OutputPort.

    TODO: d_pwm.c says this only works with type TACHO or MINI_TACHO.
    """
    self._msg.append(Opcode.OUTPUT_SET_TYPE)
    self._append_param(layer)
    # Bug fix: the original never encoded output_port, leaving the opcode
    # one operand short. opOUTPUT_SET_TYPE takes (LAYER, NO, TYPE) where
    # NO is the port index, matching add_output_get_type above.
    self._append_param(OUTPUT_CHANNEL_TO_INDEX[output_port])
    self._append_param(output_type)
@safe_add
def add_output_reset(self, output_port_mask,
layer=USB_CHAIN_LAYER_MASTER):
"""Resets the tacho count and timer of the motor(s) described by the
output_port_mask parameter. Should be called when initializing a
motor?
"""
self._msg.append(Opcode.OUTPUT_RESET)
self._append_param(layer)
self._append_param(output_port_mask)
@safe_add
def add_output_stop(self, output_port_mask,
                    stop_type,
                    layer=USB_CHAIN_LAYER_MASTER):
    """Stops the motor(s) selected by output_port_mask. The stop_type
    parameter selects whether the motors BRAKE or COAST to a halt.
    """
    self._msg.append(Opcode.OUTPUT_STOP)
    # Operands are encoded in the order the firmware expects them.
    for operand in (layer, output_port_mask, stop_type):
        self._append_param(operand)
@safe_add
def add_output_power(self, output_port_mask,
power,
layer=USB_CHAIN_LAYER_MASTER):
"""Sets the power for the motor(s) described by the output_port_mask
parameter. Power values should be in the range [-100, 100]. Note that
add_output_start needs to be called before the motor will start moving.
"""
self._msg.append(Opcode.OUTPUT_POWER)
self._append_param(layer)
self._append_param(output_port_mask)
self._append_param(power)
@safe_add
def add_output_speed(self, output_port_mask,
                     speed,
                     layer=USB_CHAIN_LAYER_MASTER):
    """Sets the regulated speed, in the range [-100, 100], for the
    motor(s) selected by output_port_mask. The motors do not start
    moving until add_output_start is issued.
    """
    self._msg.append(Opcode.OUTPUT_SPEED)
    # Operands are encoded in the order the firmware expects them.
    for operand in (layer, output_port_mask, speed):
        self._append_param(operand)
@safe_add
def add_output_start(self, output_port_mask,
layer=USB_CHAIN_LAYER_MASTER):
"""Starts the motor(s) described by the output_port_mask
parameter.
"""
self._msg.append(Opcode.OUTPUT_START)
self._append_param(layer)
self._append_param(output_port_mask)
@safe_add
def add_output_polarity(self, output_port_mask,
polarity_type,
layer=USB_CHAIN_LAYER_MASTER):
"""Sets the polarity for the motor(s) described by the
output_port_mask parameter.
"""
self._msg.append(Opcode.OUTPUT_POLARITY)
self._append_param(layer)
self._append_param(output_port_mask)
self._append_param(polarity_type)
@safe_add
def add_output_read(self, output_port, layer=USB_CHAIN_LAYER_MASTER):
"""Reads the speed and tacho pulses for the given output_port and
returns them as a tuple in the form (SPEED, TACHO_PULSES).
"""
self._msg.append(Opcode.OUTPUT_READ)
self._append_param(layer)
self._append_param(OUTPUT_CHANNEL_TO_INDEX[output_port])
self._global_params_types.append(self._REPLY_TUPLE_OPEN_TOKEN)
self._append_reply_param(DataFormat.DATA8)
self._append_reply_param(DataFormat.DATA32)
self._global_params_types.append(self._REPLY_TUPLE_CLOSE_TOKEN)
@safe_add
def add_output_ready(self, output_port_mask,
layer=USB_CHAIN_LAYER_MASTER):
"""Waits for the outputs in the output_port_mask to report that
they are ready before executing the next opcode. For example, if two
consecutive motor commands are used with the same OutputPort putting
this opcode between them ensures that the first command finishes
before the second one is started.
"""
self._msg.append(Opcode.OUTPUT_READY)
self._append_param(layer)
self._append_param(output_port_mask)
@safe_add
def add_output_position(self, output_port_mask,
position,
layer=USB_CHAIN_LAYER_MASTER):
"""Sets the position of the specied OutputPort(s)."""
self._msg.append(Opcode.OUTPUT_POSITION)
self._append_param(layer)
self._append_param(output_port_mask)
self._append_param(position, ParamType.LC4)
@safe_add
def add_output_step_power(self, output_port_mask,
power,
ramp_up_steps,
steps,
ramp_down_steps,
stop_type,
layer=USB_CHAIN_LAYER_MASTER):
"""Ramps up the power for the motor(s) described by the
output_port_mask, holds for steps, and then ramps down. It is not
necessary to call add_output_start in addition to this opcode.
NOTE: The EV3 will NOT wait for this operation to complete before
executing the next opcode unless add_output_ready is used.
"""
self._msg.append(Opcode.OUTPUT_STEP_POWER);
self._append_param(layer)
self._append_param(output_port_mask)
self._append_param(power)
self._append_param(ramp_up_steps, ParamType.LC4)
self._append_param(steps, ParamType.LC4)
self._append_param(ramp_down_steps, ParamType.LC4)
self._append_param(stop_type)
@safe_add
def add_output_time_power(self, output_port_mask,
power,
ramp_up_ms,
time_ms,
ramp_down_ms,
stop_type,
layer=USB_CHAIN_LAYER_MASTER):
"""Ramps up the power for the motor(s) described by the
output_port_mask, holds for time_ms, and then ramps down. It is not
necessary to call add_output_start in addition to this opcode.
NOTE: The EV3 will NOT wait for this operation to complete before
executing the next opcode unless add_output_ready is used.
"""
self._msg.append(Opcode.OUTPUT_TIME_POWER);
self._append_param(layer)
self._append_param(output_port_mask)
self._append_param(power)
self._append_param(ramp_up_ms, ParamType.LC4)
self._append_param(time_ms, ParamType.LC4)
self._append_param(ramp_down_ms, ParamType.LC4)
self._append_param(stop_type)
@safe_add
def add_output_step_speed(self, output_port_mask,
speed,
ramp_up_steps,
steps,
ramp_down_steps,
stop_type,
layer=USB_CHAIN_LAYER_MASTER):
"""Ramps up the power for the motor(s) described by the
output_port_mask, holds for steps, and then ramps down. It is not
necessary to call add_output_start in addition to this opcode.
NOTE: The EV3 will NOT wait for this operation to complete before
executing the next opcode unless add_output_ready is used.
"""
self._msg.append(Opcode.OUTPUT_STEP_SPEED);
self._append_param(layer)
self._append_param(output_port_mask)
self._append_param(speed)
self._append_param(ramp_up_steps, ParamType.LC4)
self._append_param(steps, ParamType.LC4)
self._append_param(ramp_down_steps, ParamType.LC4)
self._append_param(stop_type)
@safe_add
def add_output_time_speed(self, output_port_mask,
speed,
ramp_up_ms,
time_ms,
ramp_down_ms,
stop_type,
layer=USB_CHAIN_LAYER_MASTER):
"""Ramps up the power for the motor(s) described by the
output_port_mask, holds for time_ms, and then ramps down. It is not
necessary to call add_output_start in addition to this opcode.
NOTE: The EV3 will NOT wait for this operation to complete before
executing the next opcode unless add_output_ready is used.
"""
self._msg.append(Opcode.OUTPUT_TIME_SPEED);
self._append_param(layer)
self._append_param(output_port_mask)
self._append_param(speed)
self._append_param(ramp_up_ms, ParamType.LC4)
self._append_param(time_ms, ParamType.LC4)
self._append_param(ramp_down_ms, ParamType.LC4)
self._append_param(stop_type)
@safe_add
def add_output_step_sync(self, output_port_mask,
speed,
turn_ratio,
step,
stop_type,
layer=USB_CHAIN_LAYER_MASTER):
"""Sets the speed for the two given motors in the following fashion:
[-200, -101]: Turn right with right motor running in reverse
[-100, -1]: Turn right with right motor slowed
0: Both motors in sync in the same direction
[1, 100]: Turn left with left motor slowed
[101, 200]: Turn left with left motor running in reverse
It is not necessary to call add_output_start in addition to this opcode.
NOTE: The EV3 will NOT wait for this operation to complete before
executing the next opcode unless add_output_ready is used.
"""
self._msg.append(Opcode.OUTPUT_STEP_SYNC);
self._append_param(layer)
self._append_param(output_port_mask)
self._append_param(speed)
self._append_param(turn_ratio, ParamType.LC2)
self._append_param(step, ParamType.LC4)
self._append_param(stop_type)
@safe_add
def add_output_time_sync(self, output_port_mask,
speed,
turn_ratio,
time,
stop_type,
layer=USB_CHAIN_LAYER_MASTER):
"""Sets the speed for the two given motors in the following fashion:
[-200, -101]: Turn right with right motor running in reverse
[-100, -1]: Turn right with right motor slowed
0: Both motors in sync in the same direction
[1, 100]: Turn left with left motor slowed
[101, 200]: Turn left with left motor running in reverse
It is not necessary to call add_output_start in addition to this opcode.
NOTE: The EV3 will NOT wait for this operation to complete before
executing the next opcode unless add_output_ready is used.
"""
self._msg.append(Opcode.OUTPUT_TIME_SYNC);
self._append_param(layer)
self._append_param(output_port_mask)
self._append_param(speed)
self._append_param(turn_ratio, ParamType.LC2)
self._append_param(time, ParamType.LC4)
self._append_param(stop_type)
@safe_add
def add_output_clr_count(self, output_port_mask,
layer=USB_CHAIN_LAYER_MASTER):
"""Clears the tacho count for the given OutputPort(s) when in sensor
mode.
"""
self._msg.append(Opcode.OUTPUT_CLR_COUNT);
self._append_param(layer)
self._append_param(output_port_mask)
@safe_add
def add_output_get_count(self, output_port,
layer=USB_CHAIN_LAYER_MASTER):
"""Returns the tacho count for the given OutputPort when in sensor
mode.
"""
self._msg.append(Opcode.OUTPUT_GET_COUNT);
self._append_param(layer)
self._append_param(OUTPUT_CHANNEL_TO_INDEX[output_port])
self._append_reply_param(DataFormat.DATA32)
@safe_add
def add_set_leds(self, led_pattern):
"""Sets the LEDs on the front of the brick to the specified item in
the LEDPattern enumeration.
"""
self._msg.append(Opcode.UI_WRITE)
self._msg.append(UIWriteSubcode.LED)
self._append_param(led_pattern)
def _parse_reply(self, buf):
    """Decodes the raw reply buffer into a tuple of Python values.

    buf[0] is the ReplyType; the remaining bytes hold the global
    parameter block in the order that _append_reply_param calls were
    made. Grouped replies (bracketed by the tuple tokens in
    _global_params_types) are returned as nested tuples.

    Raises:
        DirectCommandError: the brick reported an error, or the buffer
            size does not match the expected global-parameter size.
    """
    result = []
    index = 0  # Byte offset into the global block (excludes buf[0]).
    if (ReplyType.DIRECT_REPLY_ERROR == buf[0]):
        raise DirectCommandError('The DirectCommand failed.')
    if (self._global_params_byte_count != (len(buf) - 1)):
        raise DirectCommandError('The data returned by the ' +
                                 'command was smaller than expected.')
    # The items in the reply are grouped into tuples. Each tuple represents
    # the reply to a command that returns multiple values.
    sub_tuple = None
    for item in self._global_params_types:
        value = None
        length = 0
        if (self._REPLY_TUPLE_OPEN_TOKEN == item):
            sub_tuple = []
            continue
        elif (self._REPLY_TUPLE_CLOSE_TOKEN == item):
            result.append(tuple(sub_tuple))
            sub_tuple = None
            continue
        if (isinstance(item, tuple)):
            # A (DataFormat.DATA_S, length) entry carries an explicit size.
            value, length = self._parse_param(buf, (index + 1), *item)
        else:
            # Ensure that the alignment is correct: fixed-size values are
            # aligned to their own size, mirroring the padding added by
            # _append_reply_param when the offsets were allocated.
            data_len = DATA_FORMAT_LENS[item]
            pad = (index % data_len)
            if (0 != pad):
                pad = (data_len - pad)
            index += pad
            value, length = self._parse_param(buf, (index + 1), item)
        if (sub_tuple is not None):
            sub_tuple.append(value)
        else:
            result.append(value)
        index += length
    return tuple(result)
def _parse_param(self, buf, index, data_format, data_len=None):
    """Decodes a single value of the given DataFormat from buf.

    Args:
        buf: the raw reply buffer.
        index: byte offset of the value within buf.
        data_format: a DataFormat item describing how to decode.
        data_len: explicit byte length; required for DATA_S strings.

    Returns:
        A (value, length) tuple where length is the number of bytes
        consumed from buf.

    Raises:
        DirectCommandError: data_format maps to an unsupported length.
    """
    value = None
    length = 1
    if (DataFormat.DATA_S == data_format):
        value = message.parse_null_terminated_str(buf, index, data_len)
        length = data_len
    elif (DataFormat.HND == data_format):
        # Handles come back with the HND marker bits set; strip them.
        value = (buf[index] & ~ParamType.HND)
    elif (DataFormat.DATA_F == data_format):
        value = message.parse_float(buf, index)
        length = DATA_FORMAT_LENS[DataFormat.DATA_F]
    elif (DataFormat.BOOL == data_format):
        value = bool(buf[index])
        length = 1
    else:
        length = DATA_FORMAT_LENS[data_format]
        if (1 == length):
            value = buf[index]
        elif (2 == length):
            value = message.parse_u16(buf, index)
        elif (4 == length):
            value = message.parse_u32(buf, index)
        else:
            # Bug fix: the original referenced the undefined name
            # 'param_type' here, so this path raised NameError instead
            # of the intended DirectCommandError.
            raise DirectCommandError('Unexpected DataFormat: %d' %
                                     data_format)
    return (value, length)
def _append_reply_param(self, reply_format):
    """Allocates a global-parameter slot whose value the brick returns.

    Global parameters are stored in the tx buffer on the brick so
    their values are returned in the message reply.

    Args:
        reply_format: either a DataFormat item, or a
            (DataFormat.DATA_S, length) tuple for string replies.
    """
    data_len = None
    if (not isinstance(reply_format, tuple)):
        # Ensure that the alignment is correct: fixed-size values are
        # aligned to their own size within the global buffer.
        data_len = DATA_FORMAT_LENS[reply_format]
        pad = (self._global_params_byte_count % data_len)
        if (pad):
            pad = (data_len - pad)
        self._global_params_byte_count += pad
    else:
        # Strings carry an explicit length and are not aligned.
        data_len = reply_format[1]
    # Use as few bits as possible to save space in message buffer.
    param_type = ParamType.GV1
    if (0xFFFF < self._global_params_byte_count):
        param_type = ParamType.GV4
    elif (0xFF < self._global_params_byte_count):
        param_type = ParamType.GV2
    # The encoded parameter is the slot's OFFSET; the format is recorded
    # locally so _parse_reply can later decode the reply buffer.
    self._append_param(self._global_params_byte_count, param_type)
    self._global_params_types.append(reply_format)
    self._global_params_byte_count += data_len
def _allocate_local_param(self, data_format):
    """Reserves space for a stack-local variable of the given DataFormat.

    Local parameters are essentially stack variables, so they are NOT
    included in the reply from the brick. Returns an (index, param_type)
    pair that addresses the newly reserved slot.
    """
    # Align the allocation to the value's natural size.
    size = DATA_FORMAT_LENS[data_format]
    remainder = (self._local_params_byte_count % size)
    if remainder:
        self._local_params_byte_count += (size - remainder)
    offset = self._local_params_byte_count
    # Encode the offset with the smallest LV* form that can hold it, to
    # save space in the message buffer.
    if offset > 0xFFFF:
        param_type = ParamType.LV4
    elif offset > 0xFF:
        param_type = ParamType.LV2
    else:
        param_type = ParamType.LV1
    self._local_params_byte_count = offset + size
    return (offset, param_type)
def _append_local_constant(self, val):
    """Appends an immediate value as a local constant.

    Integers are encoded with the smallest LC* form whose SIGNED range
    can hold them; floats use the FLOAT form and strings use LCS.

    Raises:
        NotImplementedError: val is not an int, float, or str.
    """
    if (isinstance(val, int)):
        # Bug fix: the encodings are signed, so an N-byte form holds at
        # most (N * 8 - 1) magnitude bits. The original thresholds
        # (>16/>8/>6) overflowed boundary values -- e.g. 200 (8 bits)
        # was packed into a single signed byte, decoding as -56, and 32
        # (6 bits) was packed into the 6-bit LC0 field, decoding as -32.
        num_bits = int.bit_length(val)
        if (num_bits > 15):
            param_type = ParamType.LC4
        elif (num_bits > 7):
            param_type = ParamType.LC2
        elif (num_bits > 5):
            param_type = ParamType.LC1
        else:
            param_type = ParamType.LC0
    elif (isinstance(val, float)):
        param_type = ParamType.FLOAT
    elif (isinstance(val, str)):
        param_type = ParamType.LCS
    else:
        raise NotImplementedError('Unknown local constant type.')
    self._append_param(val, param_type)
def _append_param(self, val, param_type=ParamType.LC1):
    """Encodes val into the message, prefixed with its ParamType.

    All parameters are prefixed with their type so the VM knows how to
    interpret the bytes that follow.

    Raises:
        NotImplementedError: ParamType.PRIMPAR_LABEL is not supported.
        DirectCommandError: param_type maps to an unsupported length.
    """
    if (ParamType.PRIMPAR_LABEL == param_type):
        raise NotImplementedError('ParamType.PRIMPAR_LABEL')
    elif (ParamType.LCS == param_type):
        # Null-terminated string constant.
        self._msg.append(param_type)
        message.append_str(self._msg, val)
    elif (ParamType.LC0 == param_type):
        # Short constant: the 6-bit value is packed into the type byte.
        self._msg.append(ParamType.LC0 | (0x3F & val))
    elif (ParamType.HND == param_type):
        self._msg.append(ParamType.HND | val)
    elif (ParamType.ADR == param_type):
        self._msg.append(ParamType.ADR | val)
    elif (ParamType.GV0 == param_type):
        # Short global variable: 5-bit offset packed into the type byte.
        self._msg.append(ParamType.GV0 | (0x1F & val))
    elif (ParamType.FLOAT == param_type):
        # Floats travel as a 4-byte payload tagged LC4.
        self._msg.append(ParamType.LC4)
        message.append_float(self._msg, val)
    else:
        length = PARAM_TYPE_LENS[param_type]
        self._msg.append(param_type)
        if (1 == length):
            message.append_u8(self._msg, val)
        elif (2 == length):
            message.append_u16(self._msg, val)
        elif (4 == length):
            message.append_u32(self._msg, val)
        else:
            raise DirectCommandError('Unexpected ParamType:' +
                                     ' %d' % param_type)
| {
"repo_name": "inductivekickback/ev3",
"path": "ev3/direct_command.py",
"copies": "1",
"size": "72705",
"license": "mit",
"hash": -2377102040094439400,
"line_mean": 31.8535924085,
"line_max": 80,
"alpha_frac": 0.5311464136,
"autogenerated": false,
"ratio": 3.658481356614502,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4689627770214502,
"avg_score": null,
"num_lines": null
} |
"""A simple interface for SNMP sessions."""
import collections
# TODO(soltesz): replace netsnmp with easysnmp when available.
import netsnmp
class Error(Exception):
    """Root of the exception hierarchy for the simple_session module."""
class SNMPError(Error):
    """Raised when netsnmp reports an error or a query yields no results."""
# TODO(soltesz): replace SNMPVariable with easysnmp.SNMPVariable when available.
class SNMPVariable(object):
    """An SNMP variable representing the result of a get or walk query.

    Attributes:
        oid: str, the OID of the variable.
        value: str, the OID value.
    """

    def __init__(self, oid, value):
        self.oid = oid
        self.value = value

    def __repr__(self):
        # Added for debuggability: query results are routinely inspected
        # in logs and interactive sessions.
        return 'SNMPVariable(%r, %r)' % (self.oid, self.value)
class SimpleSession(object):
    """A simpler session interface for netsnmp.Session."""

    def __init__(self, session):
        """Creates a SimpleSession object.

        Args:
            session: netsnmp.Session, the SNMP session to use for queries.
        """
        self._session = session

    def get(self, oid):
        """Reads the value at the given OID.

        Returns:
            list of SNMPVariable, the results of the get request.

        Raises:
            SNMPError: netsnmp session reports any error, or no result
                from get.
        """
        return self._query(self._session.get, oid)

    def walk(self, oid):
        """Reads all values in the MIB tree starting at the given OID.

        Returns:
            list of SNMPVariable, the results of the walk request.

        Raises:
            SNMPError: netsnmp session reports any error, or no results
                from walk.
        """
        return self._query(self._session.walk, oid)

    def _query(self, method, oid):
        """Runs one netsnmp query method and converts its results.

        get() and walk() previously duplicated this body verbatim;
        factoring it out keeps the two code paths from drifting apart.
        """
        oids = netsnmp.VarList(netsnmp.Varbind(oid))
        method(oids)
        self._check_errorstr()
        return _convert_result(oid, oids)

    def _check_errorstr(self):
        """Raises SNMPError if the session ErrorStr attribute is set."""
        if self._session.ErrorStr:
            raise SNMPError('netsnmp error; %s: %s' % (self._session.ErrorStr,
                                                       self._session.ErrorInd))
class FakeSession(object):
    """Implements a fake SimpleSession interface for tests."""

    def __init__(self):
        self._mib = collections.defaultdict(list)

    def prepare(self, oid, tag, value):
        """Associates an OID with a tag,value for later calls to get or walk."""
        self._mib[oid].append(SNMPVariable(tag, value))

    def get(self, oid):
        return self._find_oid(oid)

    def walk(self, oid):
        return self._find_oid(oid)

    def _find_oid(self, oid):
        # An OID with no prepared entries behaves like a failed query.
        if not self._mib[oid]:
            raise SNMPError('Empty result set.')
        return self._mib[oid]
def _varlist_to_list(varlist):
    """Converts a netsnmp.VarList to a list of SNMPVariable.

    Each Varbind's .val attribute starts as None and is set after a
    successful request; entries still at None correspond to failed
    requests and are dropped from the output.
    """
    converted = []
    for varbind in varlist:
        if varbind.val is None:
            continue
        tag = varbind.tag if not varbind.iid else varbind.tag + '.' + varbind.iid
        converted.append(SNMPVariable(tag, varbind.val))
    return converted
def _convert_result(original_oid, varlist):
    """Converts a netsnmp.VarList to a list of SNMPVariable.

    Args:
        original_oid: str, the original OID requested.
        varlist: netsnmp.VarList, a container filled with netsnmp.Varbind
            objects.

    Returns:
        list of SNMPVariable, one per successful Varbind in varlist.

    Raises:
        SNMPError: after conversion, the result was an empty set.
    """
    converted = _varlist_to_list(varlist)
    if converted:
        return converted
    raise SNMPError('netsnmp error unknown; OID may be invalid: ' +
                    original_oid)
| {
"repo_name": "stephen-soltesz/collectd-mlab",
"path": "site-packages/mlab/disco/simple_session.py",
"copies": "2",
"size": "4161",
"license": "apache-2.0",
"hash": 6345156311470757000,
"line_mean": 28.9352517986,
"line_max": 80,
"alpha_frac": 0.6207642394,
"autogenerated": false,
"ratio": 4.1198019801980195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002664535038635758,
"num_lines": 139
} |
"""A simple invertible tokenizer.
Converts from a unicode string to a list of tokens
(represented as Unicode strings).
This tokenizer has the following desirable properties:
- It is invertible.
- Alphanumeric characters are broken away from non-alphanumeric characters.
- A single space between words does not produce an extra token.
- The full Unicode punctuation and separator set is recognized.
The tokenization algorithm is as follows:
1. Split the text into a list of tokens, splitting at every boundary of an
alphanumeric character and a non-alphanumeric character. This produces
a list which alternates between "alphanumeric tokens"
(strings of alphanumeric characters) and "non-alphanumeric tokens"
(strings of non-alphanumeric characters).
2. Remove every token consisting of a single space, unless it is
the very first or very last token in the list. These tokens are now
implied by the fact that there are two adjacent alphanumeric tokens.
e.g. u"Dude - that's so cool."
-> [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sys
import unicodedata
import six
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
# Conversion between Unicode and UTF-8, if required (on Python2)
_native_to_unicode = (lambda s: s.decode("utf-8")) if six.PY2 else (lambda s: s)
# This set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i) for i in range(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N")))
def encode(text):
    """Encode a unicode string as a list of tokens.

    Args:
        text: a unicode string

    Returns:
        a list of tokens as Unicode strings
    """
    if not text:
        return []
    # A character is "alphanumeric" when its Unicode category is a
    # Letter (L*) or Number (N*); token boundaries fall wherever this
    # classification flips.
    flags = [unicodedata.category(ch)[0] in "LN" for ch in text]
    tokens = []
    start = 0
    for pos in range(1, len(text)):
        if flags[pos] == flags[pos - 1]:
            continue
        piece = text[start:pos]
        # Single interior spaces are dropped; they are implied between
        # two adjacent alphanumeric tokens and restored by decode().
        if piece != u" " or start == 0:
            tokens.append(piece)
        start = pos
    tokens.append(text[start:])
    return tokens
def decode(tokens):
    """Decode a list of tokens to a unicode string.

    Args:
        tokens: a list of Unicode strings

    Returns:
        a unicode string
    """
    pieces = []
    prev_alnum = False
    for i, token in enumerate(tokens):
        # Reinsert the single space that encode() elided between two
        # adjacent alphanumeric tokens.
        cur_alnum = unicodedata.category(token[0])[0] in "LN"
        if i > 0 and prev_alnum and cur_alnum:
            pieces.append(u" ")
        pieces.append(token)
        prev_alnum = cur_alnum
    return "".join(pieces)
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True):
  """Reads files matching a wildcard pattern, yielding the contents.

  Args:
    filepattern: A wildcard pattern matching one or more files.
    max_lines: If set, stop reading after reading this many lines.
    split_on_newlines: A boolean. If true, then split files by lines and strip
      leading and trailing whitespace from each line. Otherwise, treat each
      file as a single string.

  Yields:
    The contents of the files as lines, if split_on_newlines is True, or
    the entire contents of each file if False.
  """
  lines_read = 0
  for fname in sorted(tf.gfile.Glob(filepattern)):
    with tf.gfile.Open(fname) as fobj:
      if split_on_newlines:
        for line in fobj:
          yield line.strip()
          lines_read += 1
          if max_lines and lines_read >= max_lines:
            return
      elif max_lines:
        # Whole-file mode with a line budget: if the budget runs out in
        # the middle of a file, yield the partial document and stop.
        doc = []
        for line in fobj:
          doc.append(line)
          lines_read += 1
          if lines_read >= max_lines:
            yield "".join(doc)
            return
        yield "".join(doc)
      else:
        # Whole-file mode with no budget: one yield per file.
        yield fobj.read()
def corpus_token_counts(
    text_filepattern, corpus_max_lines, split_on_newlines=True):
  """Read the corpus and compute a dictionary of token counts.

  Args:
    text_filepattern: A pattern matching one or more files.
    corpus_max_lines: An integer; maximum total lines to read.
    split_on_newlines: A boolean. If true, then split files by lines and strip
      leading and trailing whitespace from each line. Otherwise, treat each
      file as a single string.

  Returns:
    a dictionary mapping token to count.
  """
  token_counts = collections.Counter()
  docs = _read_filepattern(
      text_filepattern,
      max_lines=corpus_max_lines,
      split_on_newlines=split_on_newlines)
  for doc in docs:
    # Each document is tokenized with the same encoder used at run time.
    token_counts.update(encode(_native_to_unicode(doc)))
  return token_counts
def vocab_token_counts(text_filepattern, max_lines):
  """Read a vocab file and return a dictionary of token counts.

  Reads a two-column CSV file of tokens and their frequency in a dataset. The
  tokens are presumed to be generated by encode() or the equivalent.

  Args:
    text_filepattern: A pattern matching one or more files.
    max_lines: An integer; maximum total lines to read.

  Returns:
    a dictionary mapping token to count.
  """
  counts = {}
  lines = _read_filepattern(text_filepattern, max_lines=max_lines)
  for line_num, line in enumerate(lines):
    if "," not in line:
      # Skip (but report) lines that are not "token,count" pairs.
      tf.logging.warning("Malformed vocab line #%d '%s'", line_num, line)
      continue
    # rsplit so that tokens containing commas still parse correctly.
    token, count = line.rsplit(",", 1)
    counts[_native_to_unicode(token)] = int(count)
  return counts
| {
"repo_name": "mlperf/training_results_v0.6",
"path": "Google/benchmarks/transformer/implementations/tpu-v3-256-transformer/transformer/data_generators/tokenizer.py",
"copies": "6",
"size": "5545",
"license": "apache-2.0",
"hash": -1107554320739040400,
"line_mean": 29.9776536313,
"line_max": 80,
"alpha_frac": 0.6750225428,
"autogenerated": false,
"ratio": 3.779822767552829,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7454845310352829,
"avg_score": null,
"num_lines": null
} |
"""A simple IRC bot for Twitch.
First things first, you need the Username and OAuth token of the account you
want to login with. Typically this would be for your bot, but you can login with
any Twitch account.
You can get your OAuth token from: http://twitchapps.com/tmi/
A standard bot would look like this:
user = "ExampleBot"
oauth = "oauth:******************************"
bot = TwitchBot() # Create your bot.
bot.login(user, oauth) # Login to the server with your username and OAuth.
bot.join("#twitch") # Join a channel.
bot.listen() # Monitor the chat for commands. This starts a loop,
# so it should be run last.
If Twitch changes the host or port of their IRC server and this bot is not
updated, you can change them when instantiating:
bot = TwitchBot(host="bots.twitch.tv", port=6668)
You might want your bot to monitor multiple channels at the same time. You can
do this by running the `join` method for each channel:
channels = ["#twitch", "#example"]
for channel in channels:
bot.join(channel)
"""
import re
from time import sleep
from twitchbot.irc import IRC
from twitchbot.chat_message import ChatMessage
class TwitchBot(IRC):
    def __init__(self, name, host="irc.twitch.tv", port=6667, commands=None,
                 command_prefix="!"):
        """Initializes a new Twitch bot."""
        IRC.__init__(self, host, port)

        self.custom_commands = commands
        self.command_prefix = command_prefix

        # Frequently-used regular expressions for parsing chat messages.
        #
        # They live on the instance (rather than at module scope) because
        # they depend on the bot's name and command prefix.
        self.patterns = {
            # ":dtree!dtree@dtree.tmi.twitch.tv PRIVMSG #dtree :Hello!" is split
            # into ("dtree", "PRIVMSG", "#dtree", "Hello!")
            "privmsg": re.compile(r":(\w+).+ ([A-Z]+) (#\w+) :(.+)"),

            # Matches a greeting to the bot.
            "greet": re.compile(r"(Hey|Hi|Hello|Hola|Howdy)\s+%s" % name,
                                flags=re.I),

            # Matches a custom command. !twitter, !youtube, etc.
            "command": re.compile(r"^%s(\w+)" % command_prefix, flags=re.I)
        }

    def _greet(self, message):
        """Reply with a greeting when a user greets the bot by name."""
        if self.patterns["greet"].match(message.content):
            self.say(message.channel, "Hi, {}!".format(message.sender))

    def _is_custom_command(self, message):
        """Return True when the message invokes a configured custom command."""
        if not self.custom_commands:
            return False
        return bool(self.patterns["command"].match(message.content))

    def _run_custom_command(self, message):
        """Send the canned reply configured for the invoked command."""
        name = self.patterns["command"].match(message.content).group(1)
        if name in self.custom_commands:
            self.say(message.channel, self.custom_commands[name]["message"])

    def _parse_command(self, msg):
        """Dispatch a raw IRC line if it is a chat (PRIVMSG) message."""
        if "PRIVMSG" not in msg:
            return
        message = ChatMessage(msg)
        self._greet(message)
        if self._is_custom_command(message):
            self._run_custom_command(message)

    def _read_chat(self):
        """Process every pending line from the IRC connection."""
        for raw in self.file:
            raw = raw.strip()
            print(raw)
            # Answer server keepalives before handling any commands.
            self.pong(raw)
            self._parse_command(raw)

    def login(self, user, oauth):
        """Login to Twitch's IRC server.

        Twitch uses OAuth tokens instead of passwords to login. You can generate
        one for your account here: http://twitchapps.com/tmi/

        user  - Your Username on Twitch.
        oauth - The OAuth token for your account.
        """
        self.send("PASS {0}".format(oauth))
        self.send("NICK {0}".format(user))

    def listen(self):
        """Monitor chat until interrupted with Ctrl-C."""
        try:
            while True:
                self._read_chat()
                # A short delay keeps the bot from pegging a CPU core.
                sleep(.1)
        except KeyboardInterrupt:
            print("Connection closed. Bye!")
| {
"repo_name": "VoxelDavid/twitch-bot",
"path": "twitchbot/bot.py",
"copies": "1",
"size": "4141",
"license": "mit",
"hash": 4507125435728963600,
"line_mean": 34.093220339,
"line_max": 80,
"alpha_frac": 0.5933349433,
"autogenerated": false,
"ratio": 3.932573599240266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010959323739374392,
"num_lines": 118
} |
"""A simple library for serializing data to disk and allowing random
access.
Probably best not to use this -- prefer either straightforward pickling
or a "proper" scientific format like netCDF or HDF.
I don't think any subgrid runs use this, but I can't be sure.
"""
import cPickle
import os.path
# Roll over to a new data file once the current one exceeds this size,
# i.e. 0.5GB
MAXFILESIZE = 2**29

# On-disk format version written by newly created databases.
VERSION = 3
def new(name):
    """Factory function that creates a new RecordDB with filename name.

    New databases are always created in the current (V3) format.
    """
    db = RecordDB()
    db.new(name)
    return db
def open(name, readonly=False):
    """Factory function that opens an existing RecordDB with filename name.

    The on-disk version is detected and the matching legacy class is
    instantiated, so databases written by older versions stay readable.
    """
    version = _versionCheck(name)
    assert version <= VERSION
    # Map the detected version to the class that understands its layout.
    if version == 1:
        db = RecordDB_V1()
    elif version == 2:
        db = RecordDB_V2()
    else:
        db = RecordDB()
    db.open(name, readonly)
    return db
def _versionCheck(name):
name = os.path.abspath(name)
assert os.path.exists(name)
assert os.path.isdir(name)
v = 1
for f in os.listdir(name):
(base,ext) = os.path.splitext(f)
if base == 'version':
v = int(ext[1:])
break
continue
return v
class RecordDB_V3(object):
    """Append-only indexed store of pickled records (on-disk version 3).

    Records are pickled onto a sequence of data files ('000000.dat', ...)
    and located via a map file that is itself append-only -- one pickled
    (filename, offset, tag) entry per record.  Because nothing is ever
    rewritten, the format is suitable for use with auto-checkpointing.
    """
    def __init__(self):
        # On-disk format version written into the directory on creation.
        self.version = 3
        self.readonly = False
        return
    def new(self, name):
        """Initialises a new RDB directory at *name* (must not exist)."""
        name = os.path.abspath(name)
        assert not os.path.exists(name)
        self.name = name
        os.mkdir(name)
        # Empty marker file whose extension encodes the format version.
        file(os.path.join(name, 'version.%d' % self.version), 'w').close()
        self.map = []
        self.mapFN = os.path.join(name, 'map.dat')
        # Start with an empty map file; entries are appended per record.
        file(self.mapFN, 'w').close()
        return
    def open(self, name, readonly):
        """Opens an existing RDB directory and loads its map."""
        name = os.path.abspath(name)
        assert os.path.exists(name)
        assert os.path.isdir(name)
        self.name = name
        self.mapFN = os.path.join(self.name, 'map.dat')
        assert os.path.exists(self.mapFN)
        assert os.path.isfile(self.mapFN)
        self.loadMap()
        self.readonly = readonly
        return
    def __len__(self):
        # Number of stored records.
        return len(self.map)
    def __getitem__(self, key):
        """Fetch a record by integer index, or by tag (tags must be unique)."""
        if type(key) is int:
            (fn, posn, tag) = self.map[key]
        else:
            # Treat the key as a tag and scan the whole map for it.
            found = []
            for entry in self.map:
                (fn, posn, tag) = entry
                if tag == key:
                    found.append(entry)
                    pass
                continue
            if len(found) == 0:
                raise KeyError(key)
            if len(found) > 1:
                raise KeyError("Duplicate keys cannot be searched for!")
            (fn, posn, tag) = found[0]
        f = file(os.path.join(self.name,fn), 'rb')
        try:
            # Seek to the recorded offset and unpickle a single object.
            f.seek(posn)
            p = cPickle.Unpickler(f)
            return p.load()
        finally:
            f.close()
    def append(self, item, tag=None):
        """Appends item to the DB, optionally labelled with *tag*."""
        assert self.readonly == False
        fn = self.chooseFile(item)
        f = file(os.path.join(self.name,fn), 'ab')
        try:
            posn = f.tell()
            p = cPickle.Pickler(f, protocol=2)
            p.dump(item)
        finally:
            f.close()
        # update the map -- in memory, and by appending one pickled entry
        # to the map file (V2 rewrote the whole map on every append).
        self.map.append((fn, posn, tag))
        mapfile = file(self.mapFN, 'ab')
        cPickle.dump((fn, posn, tag), mapfile, protocol=2)
        mapfile.close()
        return
    def __setitem__(self, key, value):
        # Records are immutable once written; only append() is supported.
        raise NotImplementedError
    def loadMap(self):
        """Loads the map from the mapfile."""
        f = file(self.mapFN, 'rb')
        self.map = []
        # The map file holds one pickled entry per record; read until EOF.
        while True:
            try:
                self.map.append(cPickle.load(f))
            except EOFError:
                break
        f.close()
        return
    def chooseFile(self, item):
        """Selects the filename to which to write the next object."""
        # get the last file
        if len(self.map) == 0:
            fn = "000000.dat"
        else:
            (fn, posn, tag) = self.map[-1]
            # Roll over to a new data file once the current one is full.
            sizeB = os.path.getsize(os.path.join(self.name,fn))
            if sizeB > MAXFILESIZE:
                ind = int(fn[:6]) + 1
                assert ind <= 999999
                fn = "%.6d.dat" % ind
        return fn
class RecordDB_V2(object):
    """Indexed store of pickled records (legacy on-disk version 2).

    Like V3, but the whole map is re-pickled to map.dat after every
    append instead of being appended entry by entry.  Kept so that old
    databases remain readable; new databases are created as V3.
    """
    def __init__(self):
        self.readonly = False
        return
    def new(self, name):
        """Initialises a new RDB."""
        name = os.path.abspath(name)
        assert not os.path.exists(name)
        self.name = name
        os.mkdir(name)
        # Empty marker file whose extension encodes the format version.
        file(os.path.join(name, 'version.%d' % 2), 'w').close()
        self.version = 2
        self.map = []
        self.mapFN = os.path.join(name, 'map.dat')
        self.dumpMap()
        return
    def open(self, name, readonly):
        """Opens an existing RDB."""
        name = os.path.abspath(name)
        assert os.path.exists(name)
        assert os.path.isdir(name)
        self.name = name
        self.mapFN = os.path.join(self.name, 'map.dat')
        assert os.path.exists(self.mapFN)
        assert os.path.isfile(self.mapFN)
        self.loadMap()
        self.readonly = readonly
        return
    def __len__(self):
        # Number of stored records.
        return len(self.map)
    def __getitem__(self, key):
        """Fetch a record by integer index, or by tag (tags must be unique)."""
        if type(key) is int:
            (fn, posn, tag) = self.map[key]
        else:
            # Treat the key as a tag and scan the whole map for it.
            found = []
            for entry in self.map:
                (fn, posn, tag) = entry
                if tag == key:
                    found.append(entry)
                    pass
                continue
            if len(found) == 0:
                raise KeyError(key)
            if len(found) > 1:
                raise KeyError("Duplicate keys cannot be searched for!")
            (fn, posn, tag) = found[0]
        f = file(os.path.join(self.name,fn), 'rb')
        try:
            # Seek to the recorded offset and unpickle a single object.
            f.seek(posn)
            p = cPickle.Unpickler(f)
            return p.load()
        finally:
            f.close()
    def append(self, item, tag=None):
        """Appends item to the DB, optionally labelled with *tag*."""
        assert self.readonly == False
        fn = self.chooseFile(item)
        f = file(os.path.join(self.name,fn), 'ab')
        try:
            posn = f.tell()
            p = cPickle.Pickler(f, protocol=2)
            p.dump(item)
        finally:
            f.close()
        # update the map (rewritten in full on every append; the V3
        # format replaced this with an append-only map file)
        self.map.append((fn, posn, tag))
        self.dumpMap()
        return
    def __setitem__(self, key, value):
        # Records are immutable once written; only append() is supported.
        raise NotImplementedError
    def dumpMap(self):
        """Writes the map to the mapfile."""
        f = file(self.mapFN, 'wb')
        try:
            p = cPickle.Pickler(f, protocol=2)
            p.dump(self.map)
        finally:
            f.close()
        return
    def loadMap(self):
        """Loads the map from the mapfile."""
        f = file(self.mapFN, 'rb')
        try:
            p = cPickle.Unpickler(f)
            self.map = p.load()
        finally:
            f.close()
        return
    def chooseFile(self, item):
        """Selects the filename to which to write the next object."""
        # get the last file
        if len(self.map) == 0:
            fn = "000000.dat"
        else:
            (fn, posn, tag) = self.map[-1]
            # Roll over to a new data file once the current one is full.
            sizeB = os.path.getsize(os.path.join(self.name,fn))
            if sizeB > MAXFILESIZE:
                ind = int(fn[:6]) + 1
                assert ind <= 999999
                fn = "%.6d.dat" % ind
        return fn
class RecordDB_V1(object):
    """Indexed store of pickled records (legacy on-disk version 1).

    The oldest format: no version marker file, no tags -- map entries are
    bare (filename, offset) pairs and lookup is by integer index only.
    Kept so that old databases remain readable.
    """
    def __init__(self):
        self.readonly = False
        return
    def new(self, name):
        """Initialises a new RDB."""
        name = os.path.abspath(name)
        assert not os.path.exists(name)
        self.name = name
        os.mkdir(name)
        self.map = []
        self.mapFN = os.path.join(name, 'map.dat')
        self.dumpMap()
        return
    def open(self, name, readonly):
        """Opens an existing RDB."""
        name = os.path.abspath(name)
        assert os.path.exists(name)
        assert os.path.isdir(name)
        self.name = name
        self.mapFN = os.path.join(self.name, 'map.dat')
        assert os.path.exists(self.mapFN)
        assert os.path.isfile(self.mapFN)
        self.loadMap()
        self.readonly = readonly
        return
    def __len__(self):
        # Number of stored records.
        return len(self.map)
    def __getitem__(self, key):
        """Fetch a record by integer index."""
        (fn, posn) = self.map[key]
        f = file(os.path.join(self.name,fn), 'rb')
        try:
            # Seek to the recorded offset and unpickle a single object.
            f.seek(posn)
            p = cPickle.Unpickler(f)
            return p.load()
        finally:
            f.close()
    def append(self, item):
        """Appends item to the DB."""
        assert self.readonly == False
        fn = self.chooseFile(item)
        f = file(os.path.join(self.name,fn), 'ab')
        try:
            posn = f.tell()
            p = cPickle.Pickler(f, protocol=2)
            p.dump(item)
        finally:
            f.close()
        # update the map (rewritten in full on every append)
        self.map.append((fn, posn))
        self.dumpMap()
        return
    def __setitem__(self, key, value):
        # Records are immutable once written; only append() is supported.
        raise NotImplementedError
    def dumpMap(self):
        """Writes the map to the mapfile."""
        f = file(self.mapFN, 'wb')
        try:
            p = cPickle.Pickler(f, protocol=2)
            p.dump(self.map)
        finally:
            f.close()
        return
    def loadMap(self):
        """Loads the map from the mapfile."""
        f = file(self.mapFN, 'rb')
        try:
            p = cPickle.Unpickler(f)
            self.map = p.load()
        finally:
            f.close()
        return
    def chooseFile(self, item):
        """Selects the filename to which to write the next object."""
        # get the last file
        if len(self.map) == 0:
            fn = "000000.dat"
        else:
            (fn, posn) = self.map[-1]
            # Roll over to a new data file once the current one is full.
            sizeB = os.path.getsize(os.path.join(self.name,fn))
            if sizeB > MAXFILESIZE:
                ind = int(fn[:6]) + 1
                assert ind <= 999999
                fn = "%.6d.dat" % ind
        return fn
    pass
RecordDB = RecordDB_V3
| {
"repo_name": "rupertnash/subgrid",
"path": "python/dqTools/recordDB.py",
"copies": "2",
"size": "10739",
"license": "mit",
"hash": 5705073775978799000,
"line_mean": 25.7805486284,
"line_max": 78,
"alpha_frac": 0.4905484682,
"autogenerated": false,
"ratio": 3.8463467048710602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5336895173071061,
"avg_score": null,
"num_lines": null
} |
""" A simple lisp parser for SICP exercises
"""
import re
__all__ = ["parse"]
SPECIEAL_TOKENS = {"'": 'quote',
".": 'dot',}
def tokenize(expr):
    """Return a lazy stream of the lexical tokens in *expr*.

    Tokens are quote marks, parentheses, identifiers/numbers, double-quoted
    strings and dots; whitespace between tokens is skipped.
    """
    token_re = re.compile("""(
                          '| # quote
                          \(| # left paren
                          \)| # right paren
                          [\w.?!+-></=*+]+| # identifier
                          ".*?"| # string
                          \.
                          )
                          """, re.VERBOSE)
    matches = re.findall(token_re, expr)
    return (tok for tok in matches if tok != '')
def parse(expr):
    """Parse the lisp expression *expr* into a Python representation.

    Atoms become ints/floats/strings and lists become Python lists.  The
    parsed value travels back as the token-reader coroutine's return
    value, i.e. via StopIteration.value (PEP 380).
    """
    reader = token_reader()
    next(reader)  # prime the coroutine so it can accept .send()
    try:
        for tok in tokenize(expr):
            reader.send(tok)
    except StopIteration as stop:
        return stop.value
def token_reader():
    """Consume tokens to construct a lisp expression.

    A coroutine: tokens are pushed in via .send() and the parsed
    expression comes back as the coroutine's return value (that is,
    StopIteration.value, collected by `parse` or by a delegating
    `yield from`).  A bare ')' return value is the sentinel that ends
    the enclosing list.
    """
    token = yield
    if token == '(':
        parsed_expr = []
        while True:
            # Recursively read the next sub-expression of this list.
            exp1 = yield from token_reader()
            if exp1 == ')': return parsed_expr
            # dotted expression must be the last one in a list
            if is_dotted(exp1):
                if (yield from token_reader()) == ')':
                    return parsed_expr + [exp1]
                raise ValueError('Invalid dot expression')
            parsed_expr.append(exp1)
    # quote and dot may not be necessary for the register machine simulator.
    elif token in SPECIEAL_TOKENS:
        return [SPECIEAL_TOKENS[token], (yield from token_reader())]
    return atom(token)
def atom(token):
    """Convert *token* to an int or float when possible, else keep the string."""
    # int is tried first so that "3" stays integral; float catches "3.5".
    for cast in (int, float):
        try:
            return cast(token)
        except ValueError:
            pass
    return token
def is_dotted(exp):
    """Return True for a parsed two-element list of the form [<dot>, <expr>]."""
    if not isinstance(exp, list) or len(exp) != 2:
        return False
    return exp[0] == SPECIEAL_TOKENS['.']
| {
"repo_name": "nalssee/SICP",
"path": "lisp_parser/lp.py",
"copies": "1",
"size": "1723",
"license": "mit",
"hash": -952616051426504400,
"line_mean": 25.1060606061,
"line_max": 76,
"alpha_frac": 0.5153801509,
"autogenerated": false,
"ratio": 3.924829157175399,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4940209308075399,
"avg_score": null,
"num_lines": null
} |
""" A simple list box widget with a model-view architecture. """
from __future__ import absolute_import
# Major package imports.
import wx
# Enthought library imports.
from traits.api import Event, Instance, Int
# Local imports.
from .list_box_model import ListBoxModel
from .widget import Widget
class ListBox(Widget):
    """ A simple list box widget with a model-view architecture. """

    # The model that provides the data for the list box.
    model = Instance(ListBoxModel)

    # The objects currently selected in the list (-1 means no selection).
    selection = Int(-1)

    # Events.

    # An item has been activated (fired with the activated item's index).
    item_activated = Event

    # Default style.
    STYLE = wx.LB_SINGLE | wx.LB_HSCROLL | wx.LB_NEEDED_SB

    def __init__(self, parent, **traits):
        """ Creates a new list box. """

        # Base-class constructors.
        super(ListBox, self).__init__(**traits)

        # Create the widget!
        self._create_control(parent)

        # Listen for changes to the model.
        self.model.on_trait_change(self._on_model_changed, "list_changed")

        return

    def dispose(self):
        # Disconnect from the model and let it clean up after itself.
        self.model.on_trait_change(self._on_model_changed, "list_changed",
            remove = True)

        self.model.dispose()

        return

    ###########################################################################
    # 'ListBox' interface.
    ###########################################################################

    def refresh(self):
        """ Refreshes the list box. """

        # For now we just clear out the entire list.
        self.control.Clear()

        # Populate the list.
        self._populate()

        return

    ###########################################################################
    # wx event handlers.
    ###########################################################################

    def _on_item_selected(self, event):
        """ Called when an item in the list is selected. """

        listbox = event.GetEventObject()

        # Mirror the widget's selection into the `selection` trait.
        self.selection = listbox.GetSelection()

        return

    def _on_item_activated(self, event):
        """ Called when an item in the list is activated (double-clicked). """

        listbox = event.GetEventObject()

        index = listbox.GetSelection()

        # Trait event notification.
        self.item_activated = index

        return

    ###########################################################################
    # Trait handlers.
    ###########################################################################

    #### Static ###############################################################

    def _selection_changed(self, index):
        """ Called when the selected item is changed. """

        # Keep the widget in sync when `selection` is assigned from code.
        if index != -1:
            self.control.SetSelection(index)

        return

    #### Dynamic ##############################################################

    def _on_model_changed(self, event):
        """ Called when the model has changed. """

        # For now we just clear out the entire list.
        self.refresh()

        return

    ###########################################################################
    # Private interface.
    ###########################################################################

    def _create_control(self, parent):
        """ Creates the widget. """

        self.control = wx.ListBox(parent, -1, style = self.STYLE)

        # Wire it up!  (Classic-wx style event binding.)
        wx.EVT_LISTBOX(self.control, self.control.GetId(),
            self._on_item_selected)
        wx.EVT_LISTBOX_DCLICK(self.control, self.control.GetId(),
            self._on_item_activated)

        # Populate the list.
        self._populate()

        return

    def _populate(self):
        """ Populates the list box. """

        # Each entry carries its label plus the model item as client data.
        for index in range(self.model.get_item_count()):
            label, item = self.model.get_item_at(index)
            self.control.Append(label, item)

        return
#### EOF ######################################################################
| {
"repo_name": "brett-patterson/pyface",
"path": "pyface/list_box.py",
"copies": "3",
"size": "4046",
"license": "bsd-3-clause",
"hash": 1018576587597123600,
"line_mean": 26.9034482759,
"line_max": 79,
"alpha_frac": 0.4634206624,
"autogenerated": false,
"ratio": 5.01363073110285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009076727869831319,
"num_lines": 145
} |
""" A simple list box widget with a model-view architecture. """
# Major package imports.
import wx
# Enthought library imports.
from enthought.traits.api import Event, Instance, Int
# Local imports.
from list_box_model import ListBoxModel
from widget import Widget
class ListBox(Widget):
    """ A simple list box widget with a model-view architecture. """

    # The model that provides the data for the list box.
    model = Instance(ListBoxModel)

    # The objects currently selected in the list (-1 means no selection).
    selection = Int(-1)

    # Events.

    # An item has been activated (fired with the activated item's index).
    item_activated = Event

    # Default style.
    STYLE = wx.LB_SINGLE | wx.LB_HSCROLL | wx.LB_NEEDED_SB

    def __init__(self, parent, **traits):
        """ Creates a new list box. """

        # Base-class constructors.
        super(ListBox, self).__init__(**traits)

        # Create the widget!
        self._create_control(parent)

        # Listen for changes to the model.
        self.model.on_trait_change(self._on_model_changed, "list_changed")

        return

    def dispose(self):
        # Disconnect from the model and let it clean up after itself.
        self.model.on_trait_change(self._on_model_changed, "list_changed",
            remove = True)

        self.model.dispose()

        return

    ###########################################################################
    # 'ListBox' interface.
    ###########################################################################

    def refresh(self):
        """ Refreshes the list box. """

        # For now we just clear out the entire list.
        self.control.Clear()

        # Populate the list.
        self._populate()

        return

    ###########################################################################
    # wx event handlers.
    ###########################################################################

    def _on_item_selected(self, event):
        """ Called when an item in the list is selected. """

        listbox = event.GetEventObject()

        # Mirror the widget's selection into the `selection` trait.
        self.selection = listbox.GetSelection()

        return

    def _on_item_activated(self, event):
        """ Called when an item in the list is activated (double-clicked). """

        listbox = event.GetEventObject()

        index = listbox.GetSelection()

        # Trait event notification.
        self.item_activated = index

        return

    ###########################################################################
    # Trait handlers.
    ###########################################################################

    #### Static ###############################################################

    def _selection_changed(self, index):
        """ Called when the selected item is changed. """

        # Keep the widget in sync when `selection` is assigned from code.
        if index != -1:
            self.control.SetSelection(index)

        return

    #### Dynamic ##############################################################

    def _on_model_changed(self, event):
        """ Called when the model has changed. """

        # For now we just clear out the entire list.
        self.refresh()

        return

    ###########################################################################
    # Private interface.
    ###########################################################################

    def _create_control(self, parent):
        """ Creates the widget. """

        self.control = wx.ListBox(parent, -1, style = self.STYLE)

        # Wire it up!  (Classic-wx style event binding.)
        wx.EVT_LISTBOX(self.control, self.control.GetId(),
            self._on_item_selected)
        wx.EVT_LISTBOX_DCLICK(self.control, self.control.GetId(),
            self._on_item_activated)

        # Populate the list.
        self._populate()

        return

    def _populate(self):
        """ Populates the list box. """

        # Each entry carries its label plus the model item as client data.
        for index in range(self.model.get_item_count()):
            label, item = self.model.get_item_at(index)
            self.control.Append(label, item)

        return
#### EOF ######################################################################
| {
"repo_name": "enthought/traitsgui",
"path": "enthought/pyface/list_box.py",
"copies": "1",
"size": "4016",
"license": "bsd-3-clause",
"hash": -755547686109574100,
"line_mean": 26.6965517241,
"line_max": 79,
"alpha_frac": 0.4616533865,
"autogenerated": false,
"ratio": 5.02,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009076727869831319,
"num_lines": 145
} |
# A simple loadtest using locust.
# launch using: locust -f simpleloadtest.py --host=http://pencilcode.net
from locust import HttpLocust, TaskSet, task
import simplejson, random, os, resource, string, re
# Raise the process open-file limit: each simulated user keeps sockets
# open, so the default limit is exhausted quickly under load.
resource.setrlimit(resource.RLIMIT_NOFILE, (65536, 65536))

# Backend servers to spread requests over (comma-separated env var).
hosts = os.environ.get('SERVERS').split(',')

# Optional JSON file mapping usernames to passwords; falls back to a
# built-in test account when PASSFILE is not set.
passfile = os.environ.get('PASSFILE')
passwords = {
    'livetest': '123'
}
if passfile is not None:
    passwords = simplejson.load(file(passfile))

# Whether to exercise the (expensive) "list all users" endpoint.
listusers = True
class MyTaskSet(TaskSet):
    def qualify(self, url):
        """Turn a path into a full URL on a randomly chosen backend."""
        result = 'http://' + random.choice(hosts) + url
        return result
    def userdomain(self, user):
        """Virtual-host domain for *user*; the bare site when user is None."""
        if user is not None and len(user) > 0:
            return user + '.pencilcode.net'
        return 'pencilcode.net'
    def topget(self, url):
        """GET against the top-level site.

        Query-string values are masked ('=...') in the stats name so all
        variants of the same endpoint aggregate in locust's report.
        """
        name = re.sub('=[^&=]+', '=...', url)
        return self.client.get(self.qualify(url),
            headers={"User-Agent":"locust", "Host": self.userdomain(None)},
            name=name)
    def myget(self, user, url):
        """GET against a user's subdomain, with a collapsed stats name."""
        # Group stats by the first path component, e.g. 'user:/load/...'.
        slashpos = url.find('/', 1) + 1 or len(url)
        name = 'user:' + url[:slashpos]
        if slashpos < len(url):
            name += '...'
        return self.client.get(self.qualify(url),
            headers={"User-Agent":"locust", "Host": self.userdomain(user)},
            name=name)
    def mypost(self, user, url, data):
        """POST against a user's subdomain, with a collapsed stats name."""
        slashpos = url.find('/', 1) + 1 or len(url)
        name = 'user:' + url[:slashpos]
        if slashpos < len(url):
            name += '...'
        return self.client.post(self.qualify(url), data,
            headers={"User-Agent":"locust", "Host": self.userdomain(user)},
            name=name)
    @task(1)
    def index(self):
        """Simulate loading the landing page and its static assets."""
        urls = ['/', '/welcome.css', '/image/vpencil-20-64.png',
                '/image/art.png', '/image/music.png', '/image/adventure.png',
                '/lib/jquery.js', '/lib/jquery.autocomplete.min.js',
                '/lib/seedrandom.js', '/turtlebits.js']
        if listusers:
            urls.append('/load/?callback=loadusers')
        # Half the time browse a random letter prefix; otherwise a known user.
        if random.randrange(2) > 0:
            prefix = random.choice(string.ascii_letters).lower();
        else:
            prefix = random.choice(passwords.keys())
        urls.append('/load/?prefix=' + prefix)
        for url in urls:
            self.topget(url)
        for url in ['/home/promo1', '/home/goldwheel-code.png']:
            self.myget('promo', url)
    @task(1)
    def edit(self):
        """Open the editor for a random (directory-mode) user."""
        if listusers:
            try:
                topdir = self.topget("/load/").json()
            except:
                print 'error listing all users'
                return
            randuser = random.choice(topdir['list'])
            randname = randuser['name']
            # Skip entries that are not browsable directories.
            if 'd' not in randuser['mode']:
                return
        else:
            randname = random.choice(passwords.keys())
        for url in ['/edit/', '/load/']:
            self.myget(randname, url)
        for url in ['/editor.js', '/favicon.ico',
                    '/apple-touch-icon.png']:
            self.topget(url)
    @task(1)
    def browserandom(self):
        """List a known user's files and fetch one of them at random."""
        randname = random.choice(passwords.keys())
        try:
            mydir = self.myget(randname, '/load/').json()
        except Exception as e:
            print 'error listing ' + randname + ' ' + str(e)
            raise
        if len(mydir['list']) == 0:
            return
        randfile = random.choice(mydir['list'])['name']
        try:
            self.myget(randname, '/load/' + randfile).json()
        except Exception as e:
            print 'error reading ' + randname + ' ' + randfile + ' ' + str(e)
            raise
    @task(1)
    def saverandom(self):
        """Save a small test program while logged in as a known user."""
        randname = random.choice(passwords.keys())
        try:
            myok = self.mypost(randname, '/save/loadtest', {
                'data': 'pen red\nfor [1..4]\n fd 100\n rt 90',
                'key': passwords[randname]
            }).json()
        except Exception as e:
            print 'error saving ' + randname + ' ' + str(e)
            raise
class MyLocust(HttpLocust):
    # Locust entry point: each simulated user runs MyTaskSet, waiting a
    # random 5-20 seconds between tasks.
    task_set = MyTaskSet
    min_wait = 5000
    max_wait = 20000
| {
"repo_name": "sakagg/pencilcode",
"path": "admin/simpleloadtest.py",
"copies": "7",
"size": "4009",
"license": "mit",
"hash": 8890432482637129000,
"line_mean": 30.5669291339,
"line_max": 75,
"alpha_frac": 0.5535046146,
"autogenerated": false,
"ratio": 3.510507880910683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7564012495510684,
"avg_score": null,
"num_lines": null
} |
"""A simple logger that logs to file and console"""
from logging import (getLogger,
FileHandler,
StreamHandler,
DEBUG,
Formatter,
Filter)
from gobble.config import settings, GOBBLE_MODE
class MultilineFilter(Filter):
    """Split a multi-line message over several log records"""

    # http://stackoverflow.com/questions/22934616
    # Re-logging from inside a filter is not considered good practice,
    # but it preserves indentation and improves readability, and the
    # same information is logged twice anyway.
    # TODO: display the correct (calling) module in the log record

    def filter(self, record):
        text = record.getMessage()
        if '\n' not in text:
            # Single-line records pass through untouched.
            return True
        # Re-emit each line as its own DEBUG record and drop the original.
        for chunk in text.split('\n'):
            log.debug(chunk)
        return False
def _configure_logger(name):
    """Build and return the package logger.

    Attaches the multi-line splitting filter and, depending on the
    settings module, a file handler and/or a console handler -- each
    only when its level setting is truthy.
    """
    logger = getLogger(name)
    logger.addFilter(MultilineFilter())
    logger.setLevel(DEBUG)

    if settings.FILE_LOG_LEVEL:
        file_handler = FileHandler(settings.LOG_FILE)
        file_handler.setLevel(settings.FILE_LOG_LEVEL)
        file_handler.setFormatter(Formatter(settings.FILE_LOG_FORMAT))
        logger.addHandler(file_handler)

    if settings.CONSOLE_LOG_LEVEL:
        console = StreamHandler()
        console.setLevel(settings.CONSOLE_LOG_LEVEL)
        console.setFormatter(Formatter(settings.CONSOLE_LOG_FORMAT))
        logger.addHandler(console)

    return logger
log = _configure_logger('Gobble')

# Record the runtime mode and the effective settings at import time, so
# a debug log always begins with the configuration that produced it.
log.debug('Gobble is running in %s mode', GOBBLE_MODE)
for key, value in vars(settings).items():
    # Only upper-case names are settings; skip module internals.
    if key.isupper():
        log.debug('%s = %s', key, value)
| {
"repo_name": "openspending/gobble",
"path": "gobble/logger.py",
"copies": "1",
"size": "1691",
"license": "mit",
"hash": -7323440387916573000,
"line_mean": 25.8412698413,
"line_max": 67,
"alpha_frac": 0.6339444116,
"autogenerated": false,
"ratio": 4.206467661691542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 63
} |
"""A simple log mechanism styled after PEP 282."""
# The class here is styled after PEP 282 so that it could later be
# replaced with a standard Python logging implementation.
# Log levels, in increasing order of severity (mirrors PEP 282 / logging).
DEBUG = 1
INFO = 2
WARN = 3
ERROR = 4
FATAL = 5

import sys
class Log:

    def __init__(self, threshold=WARN):
        # Messages below this severity are discarded.
        self.threshold = threshold

    def _log(self, level, msg, args):
        """Emit msg (%-formatted with args) if level reaches the threshold.

        WARN and above go to stderr, the rest to stdout; on streams with
        strict error handling, unencodable characters are backslash-escaped.
        """
        if level not in (DEBUG, INFO, WARN, ERROR, FATAL):
            raise ValueError('%s wrong log level' % str(level))
        if level < self.threshold:
            return
        # Only format when args were given -- msg may contain a bare '%'.
        text = msg % args if args else msg
        stream = sys.stderr if level in (WARN, ERROR, FATAL) else sys.stdout
        if stream.errors == 'strict':
            # emulate backslashreplace error handler
            encoding = stream.encoding
            text = text.encode(encoding, "backslashreplace").decode(encoding)
        stream.write('%s\n' % text)
        stream.flush()

    def log(self, level, msg, *args):
        self._log(level, msg, args)

    def debug(self, msg, *args):
        self._log(DEBUG, msg, args)

    def info(self, msg, *args):
        self._log(INFO, msg, args)

    def warn(self, msg, *args):
        self._log(WARN, msg, args)

    def error(self, msg, *args):
        self._log(ERROR, msg, args)

    def fatal(self, msg, *args):
        self._log(FATAL, msg, args)
# Module-level singleton plus convenience aliases bound to its methods.
_global_log = Log()
log = _global_log.log
debug = _global_log.debug
info = _global_log.info
warn = _global_log.warn
error = _global_log.error
fatal = _global_log.fatal
def set_threshold(level):
    """Set the global log threshold, returning the previous value.

    The old threshold is returned so tests can restore it afterwards.
    """
    previous = _global_log.threshold
    _global_log.threshold = level
    return previous
def set_verbosity(v):
    """Translate a -v verbosity count into a logging threshold.

    0 or less -> WARN, exactly 1 -> INFO, 2 or more -> DEBUG.
    """
    if v <= 0:
        level = WARN
    elif v == 1:
        level = INFO
    elif v >= 2:
        level = DEBUG
    else:
        # Non-integer verbosity strictly between 1 and 2: leave unchanged
        # (matches the original's fall-through behaviour).
        return
    set_threshold(level)
| {
"repo_name": "RO-ny9/python-for-android",
"path": "python3-alpha/python3-src/Lib/distutils/log.py",
"copies": "163",
"size": "1908",
"license": "apache-2.0",
"hash": -6223391399144870000,
"line_mean": 24.44,
"line_max": 79,
"alpha_frac": 0.5775681342,
"autogenerated": false,
"ratio": 3.6551724137931036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00420979020979021,
"num_lines": 75
} |
"""A simple log mechanism styled after PEP 282."""
# This module should be kept compatible with Python 1.5.2.
# The class here is styled after PEP 282 so that it could later be
# replaced with a standard Python logging implementation.
# Log levels, in increasing order of severity (mirrors PEP 282).
DEBUG = 1
INFO = 2
WARN = 3
ERROR = 4
FATAL = 5

import sys
class Log:

    def __init__(self, threshold=WARN):
        # Messages below this severity are discarded.
        self.threshold = threshold

    def _log(self, level, msg, args):
        # Everything is printed to stdout.  NOTE(review): unlike later
        # revisions of this module, msg is %-formatted even when args is
        # empty, so a bare '%' in msg would raise here.
        if level >= self.threshold:
            print msg % args
            sys.stdout.flush()

    def log(self, level, msg, *args):
        self._log(level, msg, args)

    def debug(self, msg, *args):
        self._log(DEBUG, msg, args)

    def info(self, msg, *args):
        self._log(INFO, msg, args)

    def warn(self, msg, *args):
        self._log(WARN, msg, args)

    def error(self, msg, *args):
        self._log(ERROR, msg, args)

    def fatal(self, msg, *args):
        self._log(FATAL, msg, args)
# Module-level singleton logger plus bound-method aliases so callers can
# simply use log()/debug()/... without constructing a Log themselves.
_global_log = Log()
log = _global_log.log
debug = _global_log.debug
info = _global_log.info
warn = _global_log.warn
error = _global_log.error
fatal = _global_log.fatal
def set_threshold(level):
    """Set the global logging threshold.

    Also returns the previous threshold so callers (e.g. tests) can restore
    it afterwards -- matching the behaviour of the later versions of this
    module.  Backward compatible: callers that ignored the old None return
    are unaffected.
    """
    old = _global_log.threshold
    _global_log.threshold = level
    return old
def set_verbosity(v):
    """Translate a verbosity count into a logging threshold.

    2 or more selects DEBUG, exactly 1 selects INFO, 0 or less selects WARN.
    """
    if v >= 2:
        set_threshold(DEBUG)
    elif v == 1:
        set_threshold(INFO)
    elif v <= 0:
        set_threshold(WARN)
| {
"repo_name": "OS2World/APP-INTERNET-torpak_2",
"path": "Lib/distutils/log.py",
"copies": "3",
"size": "1345",
"license": "mit",
"hash": 9007253038514518000,
"line_mean": 21.0491803279,
"line_max": 66,
"alpha_frac": 0.6007434944,
"autogenerated": false,
"ratio": 3.345771144278607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018290725667774847,
"num_lines": 61
} |
"""A simple log mechanism styled after PEP 282."""
# This module should be kept compatible with Python 2.1.
# The class here is styled after PEP 282 so that it could later be
# replaced with a standard Python logging implementation.
# Severity levels in increasing order of importance (modelled on PEP 282).
DEBUG = 1
INFO = 2
WARN = 3
ERROR = 4
FATAL = 5

import sys
class Log:
    """Minimal PEP 282-style logger: prints messages whose severity is at
    or above a configurable threshold.  (Python 2 syntax by design -- the
    module header requires compatibility with Python 2.1.)
    """

    def __init__(self, threshold=WARN):
        self.threshold = threshold

    def _log(self, level, msg, args):
        # Only emit messages at or above the configured threshold.
        if level >= self.threshold:
            if not args:
                # msg may contain a '%'. If args is empty,
                # don't even try to string-format
                print msg
            else:
                print msg % args
            # Flush so log lines interleave correctly with other writers.
            sys.stdout.flush()

    def log(self, level, msg, *args):
        self._log(level, msg, args)

    def debug(self, msg, *args):
        self._log(DEBUG, msg, args)

    def info(self, msg, *args):
        self._log(INFO, msg, args)

    def warn(self, msg, *args):
        self._log(WARN, msg, args)

    def error(self, msg, *args):
        self._log(ERROR, msg, args)

    def fatal(self, msg, *args):
        self._log(FATAL, msg, args)
# Module-level singleton logger plus bound-method aliases so callers can
# simply use log()/debug()/... without constructing a Log themselves.
_global_log = Log()
log = _global_log.log
debug = _global_log.debug
info = _global_log.info
warn = _global_log.warn
error = _global_log.error
fatal = _global_log.fatal
def set_threshold(level):
    """Install *level* as the global logging threshold.

    The previous threshold is returned so tests can restore it afterwards.
    """
    previous = _global_log.threshold
    _global_log.threshold = level
    return previous
def set_verbosity(v):
    """Translate a verbosity count into a logging threshold.

    2 or more selects DEBUG, exactly 1 selects INFO, 0 or less selects WARN.
    """
    if v >= 2:
        set_threshold(DEBUG)
    elif v == 1:
        set_threshold(INFO)
    elif v <= 0:
        set_threshold(WARN)
| {
"repo_name": "abaditsegay/arangodb",
"path": "3rdParty/V8-4.3.61/third_party/python_26/Lib/distutils/log.py",
"copies": "146",
"size": "1606",
"license": "apache-2.0",
"hash": 4936593748975834000,
"line_mean": 22.2753623188,
"line_max": 66,
"alpha_frac": 0.5890410959,
"autogenerated": false,
"ratio": 3.5219298245614037,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004575858923685011,
"num_lines": 69
} |
"""A simple log mechanism styled after PEP 282."""
# This module should be kept compatible with Python 2.1.
# The class here is styled after PEP 282 so that it could later be
# replaced with a standard Python logging implementation.
# Severity levels in increasing order of importance (modelled on PEP 282).
DEBUG = 1
INFO = 2
WARN = 3
ERROR = 4
FATAL = 5

import sys
class Log:
    """Minimal PEP 282-style logger: prints messages whose severity is at
    or above a configurable threshold.  (Python 2 syntax by design -- the
    module header requires compatibility with Python 2.1.)
    """

    def __init__(self, threshold=WARN):
        self.threshold = threshold

    def _log(self, level, msg, args):
        # Only emit messages at or above the configured threshold.
        if level >= self.threshold:
            if not args:
                # msg may contain a '%'. If args is empty,
                # don't even try to string-format
                print msg
            else:
                print msg % args
            # Flush so log lines interleave correctly with other writers.
            sys.stdout.flush()

    def log(self, level, msg, *args):
        self._log(level, msg, args)

    def debug(self, msg, *args):
        self._log(DEBUG, msg, args)

    def info(self, msg, *args):
        self._log(INFO, msg, args)

    def warn(self, msg, *args):
        self._log(WARN, msg, args)

    def error(self, msg, *args):
        self._log(ERROR, msg, args)

    def fatal(self, msg, *args):
        self._log(FATAL, msg, args)
# Module-level singleton logger plus bound-method aliases so callers can
# simply use log()/debug()/... without constructing a Log themselves.
_global_log = Log()
log = _global_log.log
debug = _global_log.debug
info = _global_log.info
warn = _global_log.warn
error = _global_log.error
fatal = _global_log.fatal
def set_threshold(level):
    """Install *level* as the global logging threshold.

    The previous threshold is returned so tests can restore it afterwards.
    """
    previous = _global_log.threshold
    _global_log.threshold = level
    return previous
def set_verbosity(v):
    """Translate a verbosity count into a logging threshold.

    2 or more selects DEBUG, exactly 1 selects INFO, 0 or less selects WARN.
    """
    if v >= 2:
        set_threshold(DEBUG)
    elif v == 1:
        set_threshold(INFO)
    elif v <= 0:
        set_threshold(WARN)
| {
"repo_name": "babyliynfg/cross",
"path": "tools/project-creator/Python2.6.6/Lib/distutils/log.py",
"copies": "4",
"size": "1675",
"license": "mit",
"hash": -2784889648690216000,
"line_mean": 22.2753623188,
"line_max": 66,
"alpha_frac": 0.5647761194,
"autogenerated": false,
"ratio": 3.5867237687366167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6151499888136617,
"avg_score": null,
"num_lines": null
} |
"""A simple management script."""
import os
import random
import string
if os.path.exists('.env'):
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
from wonk import create_app
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
from flask_migrate import Migrate
from flask_migrate import MigrateCommand
from wonk.models import db
migrate = Migrate(app, db)
from flask_script import Manager
manager = Manager(app)
manager.add_command('db', MigrateCommand)
@manager.command
def create_db():
    """Creates the db tables."""
    # Creates every table known to the SQLAlchemy metadata; existing
    # tables are left untouched.
    db.create_all()
@manager.command
def drop_db():
    """Drops the db tables."""
    # Destructive: removes every table known to the SQLAlchemy metadata.
    db.drop_all()
@manager.command
def genkey():
    """Generate a new Flask SECRET_KEY and persist it to the .env file.

    Any existing SECRET_KEY line is replaced; all other lines are kept.
    """
    # random.choice uses a non-cryptographic PRNG, which must not be used
    # for a session-signing key; SystemRandom draws from the OS entropy
    # pool (same source as the `secrets` module) without a new import.
    rng = random.SystemRandom()
    secret = ''.join(rng.choice(string.ascii_letters + string.digits)
                     for _ in range(64))
    with open('.env') as env:
        # Keep every line that is not a SECRET_KEY assignment.
        keys = [line for line in env if 'SECRET_KEY' not in line]
    with open('.env', 'w') as env:
        env.writelines(keys)
        env.write('SECRET_KEY=%s' % secret)
if __name__ == '__main__':
    # Hand control to Flask-Script's CLI dispatcher.
    manager.run()
| {
"repo_name": "beylsp/wonk-server",
"path": "manage.py",
"copies": "1",
"size": "1225",
"license": "mit",
"hash": 9123895510896137000,
"line_mean": 20.875,
"line_max": 72,
"alpha_frac": 0.6114285714,
"autogenerated": false,
"ratio": 3.4801136363636362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45915422077636364,
"avg_score": null,
"num_lines": null
} |
""" A simple manager for a task queue.
The manager handles creating, submitting, and managing
running jobs, and can even resubmit jobs that have failed.
author: Brian Schrader
since: 2015-08-27
"""
from .reporting import BaseReportingMixin, HtmlReportingMixin, TextReportingMixin
from .job_template import JobTemplate
class BaseQueue(object):
    """ An abstract class for managing a queue of jobs. To use this class,
    subclass it and fill in the callbacks you need.
    """

    # Upper bound on how many jobs may be in the running list at once.
    MAX_CONCURRENT_JOBS = 10

    def __init__(self, name=''):
        self.name = name
        # Jobs flow between these lists: queue -> running -> complete/failed.
        self.queue = []
        self.running = []
        self.failed = []
        self.complete = []

    def __repr__(self):
        return '<Queue: jobs=%s>' % str(len(self.active_jobs))

    @property
    def is_empty(self):
        # True once nothing is queued or running (finished jobs don't count).
        return len(self.active_jobs) == 0

    @property
    def active_jobs(self):
        """ Returns a list of all jobs submitted to the queue,
        or in progress.
        """
        return list(set(self.queue + self.running))

    @property
    def all_jobs(self):
        """ Returns a list of all jobs submitted to the queue, complete,
        in-progress or failed.
        """
        return list(set(self.complete + self.failed + self.queue + self.running))

    @property
    def progress(self):
        """ Returns the percentage of jobs that are no longer active
        (0 when the queue has never held a job).
        """
        total = len(self.all_jobs)
        remaining = total - len(self.active_jobs) if total > 0 else 0
        percent = int(100 * (float(remaining) / total)) if total > 0 else 0
        return percent

    def ready(self, job):
        """ Determines if the job is ready to be submitted to the
        queue. It checks if the job depends on any currently
        running or queued operations.
        """
        no_deps = len(job.depends_on) == 0
        # Every still-active dependency has finished...
        all_complete = all(j.is_complete() for j in self.active_jobs
                if j.alias in job.depends_on)
        # ...and none of the dependencies ended up in the failed list.
        none_failed = not any(True for j in self.failed
                if j.alias in job.depends_on)
        queue_is_open = len(self.running) < self.MAX_CONCURRENT_JOBS
        return queue_is_open and (no_deps or (all_complete and none_failed))

    def locked(self):
        """ Determines if the queue is locked, i.e. some active job
        depends on a job that has already failed.
        """
        if len(self.failed) == 0:
            return False
        for fail in self.failed:
            for job in self.active_jobs:
                if fail.alias in job.depends_on:
                    return True
        # NOTE(review): falls through returning None (falsy) when there are
        # failures but no dependents -- callers only truth-test the result.

    def push(self, job):
        """ Push a job onto the queue. This does not submit the job. """
        self.queue.append(job)

    def tick(self):
        """ Submits all the given jobs in the queue and watches their
        progress as they proceed. This function yields at the end of
        each iteration of the queue.
        :raises RuntimeError: If queue is locked.
        """
        self.on_start()
        while not self.is_empty:
            # Phase 1: submit every queued job whose dependencies are met.
            cruft = []
            for job in self.queue:
                if not self.ready(job):
                    continue
                self.on_ready(job)
                try:
                    job.submit()
                except ValueError:
                    if job.should_retry:
                        # Submission failed but may be retried: leave the
                        # job in the queue and bump its attempt count.
                        self.on_error(job)
                        job.attempts += 1
                    else:
                        # Out of retries: move it to the failed list.
                        self.on_fail(job)
                        cruft.append(job)
                        self.failed.append(job)
                else:
                    self.running.append(job)
                    self.on_submit(job)
                    cruft.append(job)
            self.queue = [job for job in self.queue if job not in cruft]

            # Phase 2: poll running jobs and sort them into outcome lists.
            cruft = []
            for job in self.running:
                if job.is_running() or job.is_queued():
                    pass
                elif job.is_complete():
                    self.on_complete(job)
                    cruft.append(job)
                    self.complete.append(job)
                elif job.is_fail():
                    self.on_fail(job)
                    cruft.append(job)
                    self.failed.append(job)
                elif job.is_error():
                    # NOTE(review): an errored job is removed from running
                    # but neither re-queued nor marked failed here --
                    # confirm resubmission is handled elsewhere.
                    self.on_error(job)
                    cruft.append(job)
                else:
                    pass
            self.running = [job for job in self.running if job not in cruft]

            # A locked queue is fatal unless on_locked() returns falsy.
            if self.locked() and self.on_locked():
                raise RuntimeError
            self.on_tick()
            yield
        self.on_end()

    # Callbacks...
    def on_start(self):
        """ Called when the queue is starting up. """
        pass

    def on_end(self):
        """ Called when the queue is shutting down. """
        pass

    def on_locked(self):
        """ Called when the queue is locked and no jobs can proceed.
        If this callback returns True, then the queue will be restarted,
        else it will be terminated.
        """
        return True

    def on_tick(self):
        """ Called when a tick of the queue is complete. """
        pass

    def on_ready(self, job):
        """ Called when a job is ready to be submitted.
        :param job: The given job that is ready.
        """
        pass

    def on_submit(self, job):
        """ Called when a job has been submitted.
        :param job: The given job that has been submitted.
        """
        pass

    def on_complete(self, job):
        """ Called when a job has completed.
        :param job: The given job that has completed.
        """
        pass

    def on_error(self, job):
        """ Called when a job has errored. By default, the job
        is resubmitted until some max threshold is reached.
        :param job: The given job that has errored.
        """
        pass

    def on_fail(self, job):
        """ Called when a job has failed after multiple resubmissions. The
        given job will be removed from the queue.
        :param job: The given job that has errored.
        """
        pass
class ReportingJobQueue(BaseReportingMixin, BaseQueue):
    """An abstract subclass of the Queue which reports on progress.

    JobTemplate instances are bookkeeping entries, not real work, so the
    per-job callbacks stay silent for them.
    """

    @property
    def real_jobs(self):
        """All jobs that represent actual work (i.e. are not templates)."""
        work = []
        for candidate in self.all_jobs:
            if not isinstance(candidate, JobTemplate):
                work.append(candidate)
        return work

    def on_locked(self):
        self.render('The queue is locked. Please check the logs.',
            self.progress)
        # True tells the base queue to treat the lock as fatal.
        return True

    def on_submit(self, job):
        if isinstance(job, JobTemplate):
            return
        self.render('Submitted: %s' % job.alias, self.progress)

    def on_complete(self, job):
        if isinstance(job, JobTemplate):
            return
        self.render('Complete: %s' % job.alias, self.progress)

    def on_error(self, job):
        if isinstance(job, JobTemplate):
            return
        details = (job.alias, str(job.attempts), str(job.MAX_RETRY))
        self.render('Error: Job %s has failed, retrying (%s/%s)' % details,
            self.progress)

    def on_fail(self, job):
        if isinstance(job, JobTemplate):
            return
        self.render('Error: Job %s has failed. Retried %s times.'
            % (job.alias, str(job.attempts)), self.progress)

    def on_end(self):
        self.render('All jobs are complete.', self.progress)
class HtmlReportingJobQueue(HtmlReportingMixin, ReportingJobQueue):
    """ A queue that generates HTML reports. """
    # Behaviour comes entirely from the mixin + reporting queue bases.
    pass
class TextReportingJobQueue(TextReportingMixin, ReportingJobQueue):
    """ A queue that generates textual reports. """
    # Behaviour comes entirely from the mixin + reporting queue bases.
    pass
| {
"repo_name": "TorkamaniLab/metapipe",
"path": "metapipe/models/queue.py",
"copies": "2",
"size": "7561",
"license": "mit",
"hash": -4920973104499271000,
"line_mean": 31.4506437768,
"line_max": 84,
"alpha_frac": 0.5557465944,
"autogenerated": false,
"ratio": 4.235854341736695,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009747193658729864,
"num_lines": 233
} |
"""A simple mapping between keys and values, but with a limited capacity. When
the max. capacity is reached, the first inserted key/value pair is deleted
"""
# Copyright (c) 2009-2014, Aalborg University (chr@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import deque
import collections
__author__ = "Christian Thomsen"
__maintainer__ = "Christian Thomsen"
__version__ = '2.2'
__all__ = ['FIFODict']
class FIFODict:
    """A simple FIFO mapping between keys and values.

    When the max. capacity is reached, the key/value pair that has been in
    the dict the longest time is removed.
    """

    def __init__(self, size, finalizer=None):
        """Create a FIFODict with the given maximum size.

        Arguments:
        - size: Determines the maximum size of the dict.
        - finalizer: If finalizer is given, it must be a callable
          f(key, value). It is then called when an item is removed due to
          the size of the dict reaching the maximum (finalizer is NOT called
          when an item is explicitly deleted with del d[key] or when the
          dict is cleared).
        """
        # isinstance is the idiomatic check (type(size) == type(0) rejected
        # int subclasses); callable() replaces collections.Callable, which
        # was removed from the collections namespace in Python 3.10.
        if not isinstance(size, int):
            raise TypeError("size must be an int")
        if not size > 0:
            raise ValueError("size must be positive")
        if finalizer is not None and not callable(finalizer):
            raise TypeError("finalizer must be None or a callable")
        self.__size = size
        self.__data = {}
        self.__order = deque()
        self.__finalizer = finalizer

    def add(self, key, val):
        """Add a key/value pair to the dict.

        If a pair p with the same key already exists, p is replaced by the
        new pair n, but n gets p's position in the FIFO dict and is deleted
        when the old pair p would have been deleted. When the maximum
        capacity is reached, the pair with the oldest key is deleted
        from the dict.
        The argument key is the key and the argument val is the value."""
        if key in self.__data:
            self.__data[key] = val  # Replace old value; FIFO slot unchanged.
        elif len(self.__order) < self.__size:
            # The dict is not full yet. Just add the new pair.
            self.__order.append(key)
            self.__data[key] = val
        else:
            # The dict is full. We have to delete the oldest item first.
            delKey = self.__order.popleft()
            if self.__finalizer:
                self.__finalizer(delKey, self.__data[delKey])
            del self.__data[delKey]
            self.__order.append(key)
            self.__data[key] = val

    def get(self, key, default=None):
        """Find and return the element a given key maps to.

        Look for the given key in the dict and return the associated value
        if found. If not found, the value of default is returned."""
        return self.__data.get(key, default)

    def clear(self):
        """Delete all key/value pairs from the dict"""
        self.__data = {}
        # Must be reset to a deque, not a list: add() calls popleft() on
        # this attribute, which lists lack -- resetting it to [] broke
        # eviction after a clear().
        self.__order = deque()

    def __setitem__(self, key, item):
        self.add(key, item)

    def __getitem__(self, key):
        return self.__data[key]

    def __len__(self):
        return len(self.__data)

    def __str__(self):
        allitems = []
        for key in self.__order:
            val = self.__data[key]
            item = "%s: %s" % (str(key), str(val))
            allitems.append(item)
        return "{%s}" % ", ".join(allitems)

    def __contains__(self, item):
        return (item in self.__data)

    def __delitem__(self, item):
        if item not in self.__data:
            raise KeyError(item)
        del self.__data[item]
        self.__order.remove(item)

    def __iter__(self):
        # Iterates keys in insertion (FIFO) order.
        for k in self.__order:
            yield k
| {
"repo_name": "haleemur/pygrametl-python3",
"path": "pygrametl/FIFODict.py",
"copies": "1",
"size": "5160",
"license": "bsd-2-clause",
"hash": 7592236033727209000,
"line_mean": 37.5074626866,
"line_max": 85,
"alpha_frac": 0.6251937984,
"autogenerated": false,
"ratio": 4.3035863219349455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003949607927833635,
"num_lines": 134
} |
"""A simple mapping between keys and values, but with a limited capacity. When
the max. capacity is reached, the first inserted key/value pair is deleted
"""
# Copyright (c) 2009-2020, Aalborg University (pygrametl@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__all__ = ['FIFODict']
class FIFODictDeque:
    """A simple FIFO mapping between keys and values. Implemented using a dict
    and a Deque. When the max. capacity is reached, the key/value pair that
    has been in the dict the longest time is removed.
    """

    def __init__(self, size, finalizer=None):
        """Create a FIFODictDeque with the given maximum size.

        Arguments:
        - size: Determines the maximum size of the dict.
        - finalizer: If finalizer is given, it must be a callable
          f(key, value). It is then called when an item is removed due to
          the size of the dict reaching the maximum (finalizer is NOT called
          when an item is explicitly deleted with del d[key] or when the
          dict is cleared).
        """
        # isinstance(size, int) is the idiomatic spelling of the old
        # isinstance(size, type(0)) check (and also accepts int subclasses).
        if not isinstance(size, int):
            raise TypeError("size must be an int")
        if not size > 0:
            raise ValueError("size must be positive")
        if finalizer is not None and not callable(finalizer):
            raise TypeError("finalizer must be None or a callable")
        self.__size = size
        self.__data = {}
        self.__order = deque()
        self.__finalizer = finalizer

    def add(self, key, val):
        """Add a key/value pair to the dict.

        If a pair p with the same key already exists, p is replaced by the
        new pair n, but n gets p's position in the FIFO dict and is deleted
        when the old pair p would have been deleted. When the maximum
        capacity is reached, the pair with the oldest key is deleted
        from the dict.
        The argument key is the key and the argument val is the value.
        """
        if key in self.__data:
            self.__data[key] = val  # Replace old value; FIFO slot unchanged.
        elif len(self.__order) < self.__size:
            # The dict is not full yet. Just add the new pair.
            self.__order.append(key)
            self.__data[key] = val
        else:
            # The dict is full. We have to delete the oldest item first.
            delKey = self.__order.popleft()
            if self.__finalizer:
                self.__finalizer(delKey, self.__data[delKey])
            del self.__data[delKey]
            self.__order.append(key)
            self.__data[key] = val

    def get(self, key, default=None):
        """Find and return the element a given key maps to.

        Look for the given key in the dict and return the associated value
        if found. If not found, the value of default is returned.
        """
        return self.__data.get(key, default)

    def clear(self):
        """Delete all key/value pairs from the dict"""
        self.__data = {}
        # Must be reset to a deque, not a list: add() calls popleft() on
        # this attribute, which lists lack -- resetting it to [] broke
        # eviction after a clear().
        self.__order = deque()

    def __setitem__(self, key, item):
        self.add(key, item)

    def __getitem__(self, key):
        return self.__data[key]

    def __len__(self):
        return len(self.__data)

    def __str__(self):
        allitems = []
        for key in self.__order:
            val = self.__data[key]
            item = "%s: %s" % (str(key), str(val))
            allitems.append(item)
        return "{%s}" % ", ".join(allitems)

    def __contains__(self, item):
        return (item in self.__data)

    def __delitem__(self, item):
        if item not in self.__data:
            raise KeyError(item)
        del self.__data[item]
        self.__order.remove(item)

    def __iter__(self):
        # Iterates keys in insertion (FIFO) order.
        for k in self.__order:
            yield k
class FIFODictOrderedDict:
    """A simple FIFO mapping between keys and values. Implemented using an
    OrderedDict. When the max. capacity is reached, the key/value pair that
    has been in the dict the longest time is removed.
    """

    def __init__(self, size, finalizer=None):
        """Create a FIFODictOrderedDict with the given maximum size.

        Arguments:
        - size: Determines the maximum size of the dict.
        - finalizer: If finalizer is given, it must be a callable
          f(key, value). It is then called when an item is removed due to
          the size of the dict reaching the maximum (finalizer is NOT called
          when an item is explicitly deleted with del d[key] or when the
          dict is cleared).
        """
        # isinstance(size, int) is the idiomatic spelling of the old
        # isinstance(size, type(0)) check (and also accepts int subclasses).
        if not isinstance(size, int):
            raise TypeError("size must be an int")
        if not size > 0:
            raise ValueError("size must be positive")
        if finalizer is not None and not callable(finalizer):
            raise TypeError("finalizer must be None or a callable")
        self.__size = size
        self.__data = OrderedDict()
        self.__finalizer = finalizer

    def add(self, key, val):
        """Add a key/value pair to the dict.

        If a pair p with the same key already exists, p is replaced by the
        new pair n, but n gets p's position in the FIFO dict and is deleted
        when the old pair p would have been deleted. When the maximum
        capacity is reached, the pair with the oldest key is deleted
        from the dict.
        The argument key is the key and the argument val is the value.
        """
        if key in self.__data:
            # Plain assignment keeps the key's original (oldest) position.
            self.__data[key] = val  # Replace old value
        elif len(self.__data) < self.__size:
            # The dict is not full yet. Just add the new pair.
            self.__data[key] = val
        else:
            # The dict is full. We have to delete the oldest item first;
            # popitem(last=False) pops in FIFO order.
            if self.__finalizer:
                (delKey, delValue) = self.__data.popitem(last=False)
                self.__finalizer(delKey, delValue)
            else:
                self.__data.popitem(last=False)
            self.__data[key] = val

    def get(self, key, default=None):
        """Find and return the element a given key maps to.

        Look for the given key in the dict and return the associated value
        if found. If not found, the value of default is returned.
        """
        return self.__data.get(key, default)

    def clear(self):
        """Delete all key/value pairs from the dict"""
        self.__data = OrderedDict()

    def __setitem__(self, key, item):
        self.add(key, item)

    def __getitem__(self, key):
        return self.__data[key]

    def __len__(self):
        return len(self.__data)

    def __str__(self):
        allitems = []
        for key in self.__data:
            val = self.__data[key]
            item = "%s: %s" % (str(key), str(val))
            allitems.append(item)
        return "{%s}" % ", ".join(allitems)

    def __contains__(self, item):
        return (item in self.__data)

    def __delitem__(self, item):
        if item not in self.__data:
            raise KeyError(item)
        del self.__data[item]

    def __iter__(self):
        # Iterates keys in insertion (FIFO) order.
        for k in self.__data:
            yield k
# Exports the most appropriate version of FIFODict based on whether
# OrderedDict is available in the version of Python used.
try:
    from collections import OrderedDict
    FIFODict = FIFODictOrderedDict
except ImportError:
    # Very old Pythons lack OrderedDict; fall back to the deque version.
    from collections import deque
    FIFODict = FIFODictDeque
| {
"repo_name": "chrthomsen/pygrametl",
"path": "pygrametl/FIFODict.py",
"copies": "1",
"size": "8707",
"license": "bsd-2-clause",
"hash": 1129533932957132300,
"line_mean": 35.2791666667,
"line_max": 80,
"alpha_frac": 0.6084759389,
"autogenerated": false,
"ratio": 4.338315894369706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5446791833269706,
"avg_score": null,
"num_lines": null
} |
"""A simple markov chain text generator."""
import random
import requests
class MarkovGenerator:
    """A second-order Markov chain text generator.

    Pass the input string into the constructor.
    """

    def __init__(self, string: str):
        self.words = string.split()
        self.number_of_words = len(self.words)
        # Maps (word1, word2) -> list of words seen following that pair.
        self.word_cache = {}
        self.fill_cache()

    def threes_generator(self):
        """Generator that yields the words in the input string in threes."""
        if self.number_of_words < 3:
            return
        for index, word in enumerate(self.words[:-2]):
            yield (word, self.words[index + 1], self.words[index + 2])

    def fill_cache(self):
        """Method to fill cache dictionary with associated words."""
        for word1, word2, word3 in self.threes_generator():
            key = (word1, word2)
            if key in self.word_cache:
                self.word_cache[key].append(word3)
            else:
                self.word_cache[key] = [word3]

    def create_markov_chain_text(self, length=100):
        """Return up to *length* generated words as a single string.

        Returns '' when the input is too short to seed the chain (fewer
        than three words previously made randint raise ValueError).  When
        the chain reaches a word pair with no known successor (e.g. the
        final pair of the source text) the text generated so far is
        returned, instead of discarding it for an error message as the
        old KeyError handler did.
        """
        if self.number_of_words < 3:
            return ''
        random_start_index = random.randint(0, self.number_of_words - 3)
        word1, word2 = self.words[random_start_index], self.words[random_start_index + 1]
        output = []
        for _ in range(length):
            output.append(word1)
            successors = self.word_cache.get((word1, word2))
            if successors is None:
                # Dead end: no pair to continue from -- stop early.
                break
            word1, word2 = word2, random.choice(successors)
        return ' '.join(output)
if __name__ == '__main__':
    # Demo: fetch "The Adventures of Sherlock Holmes" from Project
    # Gutenberg (network I/O) and print 400 words of generated text.
    url = 'http://www.gutenberg.org/cache/epub/1661/pg1661.txt'
    response = requests.get(url)
    markgen = MarkovGenerator(response.text)
    false_sherlock = markgen.create_markov_chain_text(400)
    # Capitalize the first character for presentation.
    # NOTE(review): indexing [0] raises IndexError if the generated text
    # is ever empty -- confirm acceptable for this demo.
    print('{}{}'.format(false_sherlock[0].upper(), false_sherlock[1:]))
| {
"repo_name": "DualSaturn/server-observer",
"path": "markov_generator.py",
"copies": "1",
"size": "1842",
"license": "mit",
"hash": -9107465236463776000,
"line_mean": 38.1914893617,
"line_max": 88,
"alpha_frac": 0.605320304,
"autogenerated": false,
"ratio": 3.6403162055335967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4745636509533597,
"avg_score": null,
"num_lines": null
} |
# A simple math addition game
# tutorial at www.techwooten.com
# import random number generator
from random import randint
from player import Player
# use input function to get the players name
name = input('Hello, may I have your name? ')
# create a new Player Object (Player is the project-local score tracker)
player = Player(name)
# greet the player
print('Hello, ' + player.get_name() + '\n')
# thin wrapper around randint so the call sites read clearly
def get_number(start, end):
    """Return a random integer N with start <= N <= end (both inclusive)."""
    return randint(start, end)
# create a while loop to validate their input
while True:
    # ask them if they want to play
    play = input('Would you like to play a game? (Yes/No)')
    # if their answer is not either yes or no require them to answer the question again
    if play.lower() not in ('yes','no'):
        # let user know they need to put in a correct answer
        print('Please type either yes or no')
    else:
        # if they did answer yes or no this will run
        if play.lower() == 'yes':
            for _ in range(10):
                first_number = get_number(0, 9) # getting the numbers to be added together
                second_number = get_number(0, 9)
                # below we ask the player for their answer
                question = 'What is the answer to ' + str(first_number) + ' + ' + str(second_number) + '? '
                bad_count = 0
                while True:
                    try:
                        # after four invalid inputs, give up on this question
                        # with a sentinel that cannot be the correct answer
                        if bad_count > 3:
                            answer = -1
                            break
                        answer = int(input(question))
                        break
                    except ValueError:
                        print('Opps, your answer needs to be a number \n')
                        bad_count += 1
                # generate the correct answer
                correct_answer = first_number + second_number
                # here we validate that the answer provided was the correct answer and update the score
                # NOTE(review): update_score(amount, correct_flag) -- signature
                # inferred from the two call sites; confirm against Player.
                if int(answer) == correct_answer:
                    player.update_score(1, True)
                    print('Good Job ' + player.get_name() + ', your score is ' + str(player.get_score()) + '\n')
                else:
                    player.update_score(1, False)
                    print('Oh no! ' + player.get_name() + ', you got that answer wrong. Your score is ' +
                          str(player.get_score()) + '\n')
            # NOTE(review): get_correct_answers()/get_total_questions() are
            # concatenated with '+', so they presumably return strings --
            # a TypeError would occur here if they return ints; confirm.
            print("Good Job!, " + player.get_name() + ' answered ' +
                  player.get_correct_answers() +
                  ' correct out of ' + player.get_total_questions() + '.')
    # if they said no they don't want to play again then break out of the validation loop
    if play.lower() == 'no' :
        print('Ok, maybe next time')
        break
| {
"repo_name": "techwooten/PythonMathGame",
"path": "v4/main.py",
"copies": "1",
"size": "2850",
"license": "mit",
"hash": 7323794156399576000,
"line_mean": 38.0410958904,
"line_max": 112,
"alpha_frac": 0.5305263158,
"autogenerated": false,
"ratio": 4.404945904173107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5435472219973106,
"avg_score": null,
"num_lines": null
} |
# A simple math addition game
# tutorial at www.techwooten.com
# import random number generator
from random import randint
class Player:
    """Tracks a single player's name and running score."""

    # Class-level defaults, kept for compatibility with the original code.
    __score = 0
    __name = ''

    def __init__(self, name):
        self.__score = 0
        self.__name = name

    def set_name(self, name):
        """Replace the stored player name."""
        self.__name = name

    def update_score(self, score):
        """Add *score* (which may be negative) to the running total."""
        self.__score += score

    def get_name(self):
        """Return the player's name."""
        return self.__name

    def get_score(self):
        """Return the current score."""
        return self.__score
# use input function to get the players name
name = input('Hello, may I have your name? ')
# create a new Player Object
player = Player(name)
# greet the player
print('Hello, ' + player.get_name() + '\n')
# thin wrapper around randint so the call sites read clearly
def get_number(start, end):
    """Return a random integer N with start <= N <= end (both inclusive)."""
    return randint(start, end)
# create a while loop to validate their input
while True:
    # ask them if they want to play
    play = input('Would you like to play a game? (Yes/No)')
    # if their answer is not either yes or no require them to answer the question again
    if play.lower() not in ('yes','no'):
        # let user know they need to put in a correct answer
        print('Please type either yes or no')
    else:
        # if they did answer yes or no this will run
        if play.lower() == 'yes':
            for _ in range(10):
                first_number = get_number(0,9) # getting the numbers to be added together
                second_number = get_number(0,9)
                # below we ask the player for their answer
                question = 'What is the answer to ' + str(first_number) + ' + ' + str(second_number) + '? '
                bad_count = 0
                while True:
                    try:
                        if bad_count > 3:
                            # Bug fix: `answer` was previously left unbound
                            # on this path, so int(answer) below raised a
                            # NameError after four invalid inputs.  Use the
                            # same wrong-answer sentinel as the v4 script.
                            answer = -1
                            break
                        answer = int(input(question))
                        break
                    except ValueError:
                        print('Opps, your answer needs to be a number \n')
                        bad_count += 1
                # generate the correct answer
                correct_answer = first_number + second_number
                # here we validate that the answer provided was the correct answer and update the score
                if int(answer) == correct_answer:
                    player.update_score(+1)
                    print('Good Job ' + player.get_name() + ', your score is ' + str(player.get_score()) + '\n')
                else:
                    player.update_score(-1)
                    print('Oh no! ' + player.get_name() + ', you got that answer wrong. Your score is ' +
                          str(player.get_score()) + '\n')
    # if they said no they don't want to play again then break out of the validation loop
    if play.lower() == 'no' :
        print('Ok, maybe next time')
        break
| {
"repo_name": "techwooten/PythonMathGame",
"path": "v3/main.py",
"copies": "1",
"size": "2957",
"license": "mit",
"hash": 3544924494136757000,
"line_mean": 32.2247191011,
"line_max": 113,
"alpha_frac": 0.5326344268,
"autogenerated": false,
"ratio": 4.329428989751098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5362063416551097,
"avg_score": null,
"num_lines": null
} |
# A simple math addition game
# tutorial at www.techwooten.com
# import random number generator
from random import randint
# use input function to get the player's name
name = input('Hello, may I have your name? ')
# running score: +1 for each correct answer, -1 for each wrong one
score = 0
# greet the player
print('Hello, ' + name + '\n')
# this function uses the imported module to get a random number between 0 and 9
def get_number(start, end):
    """Pick a pseudo-random integer in the inclusive range [start, end]."""
    value = randint(start, end)
    return value
# a loop to provide the player with 10 math problems
for _ in range(10):
    first_number = get_number(0, 9)  # getting the numbers to be added together
    second_number = get_number(0, 9)
    # below we ask the player for their answer; re-prompt until it is a number
    # (the original crashed with ValueError on non-numeric input)
    prompt = 'What is the answer to ' + str(first_number) + ' + ' + str(second_number) + '? '
    while True:
        try:
            answer = int(input(prompt))
            break
        except ValueError:
            print('Oops, your answer needs to be a number\n')
    # generate the correct answer
    correct_answer = first_number + second_number
    # here we validate that the answer provided was the correct answer and update the score
    if answer == correct_answer:
        score += 1
        print('Good Job ' + name + ', your score is ' + str(score) + '\n')
    else:
        score -= 1
        print('Oh no! ' + name + ', you got that answer wrong. Your score is ' + str(score) + '\n')
| {
"repo_name": "techwooten/PythonMathGame",
"path": "v1/main.py",
"copies": "1",
"size": "1220",
"license": "mit",
"hash": -5676428521059782000,
"line_mean": 32.8888888889,
"line_max": 100,
"alpha_frac": 0.6573770492,
"autogenerated": false,
"ratio": 3.6526946107784433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.979703979329063,
"avg_score": 0.0026063733375625894,
"num_lines": 36
} |
"""A simple memcache-like server.
The basic data structure maintained is a single in-memory dictionary
mapping string keys to string values, with operations get, set and
delete. (Both keys and values may contain Unicode.)
This is a TCP server listening on port 54321. There is no
authentication.
Requests provide an operation and return a response. A connection may
be used for multiple requests. The connection is closed when a client
sends a bad request.
If a client is idle for over 5 seconds (i.e., it does not send another
request, or fails to read the whole response, within this time), it is
disconnected.
Framing of requests and responses within a connection uses a
line-based protocol. The first line of a request is the frame header
and contains three whitespace-delimited tokens followed by LF or CRLF:
- the keyword 'request'
- a decimal request ID; the first request is '1', the second '2', etc.
- a decimal byte count giving the size of the rest of the request
Note that the request IDs *must* be consecutive and start at '1' for
each connection.
Response frames look the same except the keyword is 'response'. The
response ID matches the request ID. There should be exactly one
response to each request and responses should be seen in the same
order as the requests.
After the frame, individual requests and responses are JSON encoded.
If the frame header or the JSON request body cannot be parsed, an
unframed error message (always starting with 'error') is written back
and the connection is closed.
JSON-encoded requests can be:
- {"type": "get", "key": <string>}
- {"type": "set", "key": <string>, "value": <string>}
- {"type": "delete", "key": <string>}
Responses are also JSON-encoded:
- {"status": "ok", "value": <string>} # Successful get request
- {"status": "ok"} # Successful set or delete request
- {"status": "notfound"} # Key not found for get or delete request
If the request is valid JSON but cannot be handled (e.g., the type or
key field is absent or invalid), an error response of the following
form is returned, but the connection is not closed:
- {"error": <string>}
"""
import argparse
import asyncio
import json
import logging
import os
import random
# Command-line options; every flag has a default, so the server can be
# started with no arguments at all.
ARGS = argparse.ArgumentParser(description='Cache server example.')
ARGS.add_argument(
    '--tls', action='store_true', dest='tls',
    default=False, help='Use TLS')
ARGS.add_argument(
    '--iocp', action='store_true', dest='iocp',
    default=False, help='Use IOCP event loop (Windows only)')
ARGS.add_argument(
    '--host', action='store', dest='host',
    default='localhost', help='Host name')
ARGS.add_argument(
    '--port', action='store', dest='port',
    default=54321, type=int, help='Port number')
ARGS.add_argument(
    '--timeout', action='store', dest='timeout',
    default=5, type=float, help='Timeout')
# The three 'random_*' options inject artificial failures and latency,
# useful for exercising client retry/timeout handling.
ARGS.add_argument(
    '--random_failure_percent', action='store', dest='fail_percent',
    default=0, type=float, help='Fail randomly N percent of the time')
ARGS.add_argument(
    '--random_failure_sleep', action='store', dest='fail_sleep',
    default=0, type=float, help='Sleep time when randomly failing')
ARGS.add_argument(
    '--random_response_sleep', action='store', dest='resp_sleep',
    default=0, type=float, help='Sleep time before responding')
# Parsed at import time; read as the module-global 'args' by Cache and main().
args = ARGS.parse_args()
class Cache:
    """In-memory key/value table served over the framed JSON protocol
    described in the module docstring.  One instance is shared by all
    client connections."""
    def __init__(self, loop):
        self.loop = loop  # event loop, used for the per-read timeouts
        self.table = {}   # the actual key -> value store
    @asyncio.coroutine
    def handle_client(self, reader, writer):
        # Wrapper to log stuff and close writer (i.e., transport).
        peer = writer.get_extra_info('socket').getpeername()
        logging.info('got a connection from %s', peer)
        try:
            yield from self.frame_parser(reader, writer)
        except Exception as exc:
            logging.error('error %r from %s', exc, peer)
        else:
            logging.info('end connection from %s', peer)
        finally:
            writer.close()
    @asyncio.coroutine
    def frame_parser(self, reader, writer):
        # This takes care of the framing.  Any protocol violation breaks
        # out of the loop, which closes the connection (see handle_client).
        last_request_id = 0
        while True:
            # Read the frame header, parse it, read the data.
            # NOTE: The readline() and readexactly() calls will hang
            # if the client doesn't send enough data but doesn't
            # disconnect either. We add a timeout to each. (But the
            # timeout should really be implemented by StreamReader.)
            framing_b = yield from asyncio.wait_for(
                reader.readline(),
                timeout=args.timeout, loop=self.loop)
            # Optional fault injection (--random_failure_percent) to let
            # clients exercise their error handling.
            if random.random()*100 < args.fail_percent:
                # NOTE(review): logging.warn is a deprecated alias of
                # logging.warning.
                logging.warn('Inserting random failure')
                yield from asyncio.sleep(args.fail_sleep*random.random(),
                                         loop=self.loop)
                writer.write(b'error random failure\r\n')
                break
            logging.debug('framing_b = %r', framing_b)
            if not framing_b:
                break # Clean close.
            try:
                frame_keyword, request_id_b, byte_count_b = framing_b.split()
            except ValueError:
                writer.write(b'error unparseable frame\r\n')
                break
            if frame_keyword != b'request':
                writer.write(b'error frame does not start with request\r\n')
                break
            try:
                request_id, byte_count = int(request_id_b), int(byte_count_b)
            except ValueError:
                writer.write(b'error unparsable frame parameters\r\n')
                break
            # IDs must be consecutive per connection; a JSON body is at
            # least 2 bytes ('{}'), so smaller byte counts are invalid.
            if request_id != last_request_id + 1 or byte_count < 2:
                writer.write(b'error invalid frame parameters\r\n')
                break
            last_request_id = request_id
            request_b = yield from asyncio.wait_for(
                reader.readexactly(byte_count),
                timeout=args.timeout, loop=self.loop)
            try:
                request = json.loads(request_b.decode('utf8'))
            except ValueError:
                writer.write(b'error unparsable json\r\n')
                break
            response = self.handle_request(request) # Not a coroutine.
            if response is None:
                writer.write(b'error unhandlable request\r\n')
                break
            response_b = json.dumps(response).encode('utf8') + b'\r\n'
            byte_count = len(response_b)
            framing_s = 'response {} {}\r\n'.format(request_id, byte_count)
            writer.write(framing_s.encode('ascii'))
            # Optional artificial latency (--random_response_sleep).
            yield from asyncio.sleep(args.resp_sleep*random.random(),
                                     loop=self.loop)
            writer.write(response_b)
    def handle_request(self, request):
        # This parses one request and farms it out to a specific handler.
        # Return None for all errors.
        if not isinstance(request, dict):
            return {'error': 'request is not a dict'}
        request_type = request.get('type')
        if request_type is None:
            return {'error': 'no type in request'}
        if request_type not in {'get', 'set', 'delete'}:
            return {'error': 'unknown request type'}
        key = request.get('key')
        if not isinstance(key, str):
            return {'error': 'key is not a string'}
        if request_type == 'get':
            return self.handle_get(key)
        if request_type == 'set':
            value = request.get('value')
            if not isinstance(value, str):
                return {'error': 'value is not a string'}
            return self.handle_set(key, value)
        if request_type == 'delete':
            return self.handle_delete(key)
        assert False, 'bad request type' # Should have been caught above.
    def handle_get(self, key):
        """Look up *key*; 'notfound' status if absent."""
        value = self.table.get(key)
        if value is None:
            return {'status': 'notfound'}
        else:
            return {'status': 'ok', 'value': value}
    def handle_set(self, key, value):
        """Store *value* under *key* (insert or overwrite)."""
        self.table[key] = value
        return {'status': 'ok'}
    def handle_delete(self, key):
        """Remove *key*; 'notfound' status if absent."""
        if key not in self.table:
            return {'status': 'notfound'}
        else:
            del self.table[key]
            return {'status': 'ok'}
def main():
    """Create an event loop, optionally enable TLS, and serve forever."""
    # Detach the default loop so all loop usage below stays explicit.
    asyncio.set_event_loop(None)
    if args.iocp:
        from asyncio.windows_events import ProactorEventLoop
        event_loop = ProactorEventLoop()
    else:
        event_loop = asyncio.new_event_loop()
    ssl_context = None
    if args.tls:
        import ssl
        # TODO: take cert/key from args as well.
        cert_dir = os.path.join(os.path.dirname(__file__), '..', 'tests')
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ssl_context.options |= ssl.OP_NO_SSLv2
        ssl_context.load_cert_chain(
            certfile=os.path.join(cert_dir, 'ssl_cert.pem'),
            keyfile=os.path.join(cert_dir, 'ssl_key.pem'))
    cache = Cache(event_loop)
    server_coro = asyncio.streams.start_server(
        cache.handle_client, args.host, args.port,
        ssl=ssl_context, loop=event_loop)
    server = event_loop.run_until_complete(server_coro)
    for sock in server.sockets:
        logging.info('socket %s', sock.getsockname())
    try:
        event_loop.run_forever()
    finally:
        event_loop.close()
if __name__ == '__main__':
    # Configure root logging before starting the server loop.
    logging.basicConfig(level=logging.INFO)
    main()
| {
"repo_name": "gvanrossum/asyncio",
"path": "examples/cachesvr.py",
"copies": "9",
"size": "9404",
"license": "apache-2.0",
"hash": 7401517286410854000,
"line_mean": 36.7670682731,
"line_max": 77,
"alpha_frac": 0.6063377286,
"autogenerated": false,
"ratio": 3.967932489451477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 249
} |
# A simple MicroBit game by Giles Booth
# Tilt and colour in all the squares
# If you win level 20, press reset button to play again
from microbit import display, accelerometer, sleep, Image
from music import play, POWER_UP, NYAN
def get_xy():
    """Read the accelerometer and return the scaled (y, x) tilt pair."""
    scale = accelerometer_sensitivity
    return accelerometer.get_y() * scale, accelerometer.get_x() * scale
def count_lit_pixels():
    """Return how many of the 25 display pixels have nonzero brightness."""
    return sum(1 for col in range(5) for row in range(5)
               if display.get_pixel(col, row) != 0)
# Game state: 'pause' is the per-frame delay in ms; each completed level
# shortens it by 5, and reaching 0 is the overall win condition.
pause = 100
level = 1
accelerometer_sensitivity=1/300
#set initial position
x, y = 2, 2
# apply the initial tilt reading once before entering the main loop
yaccel, xaccel = get_xy()
y = max(0, min(4, int(y + yaccel)))
x = max(0, min(4, int(x + xaccel)))
while pause > 0:
    yaccel, xaccel = get_xy()
    # move by the tilt amount, clamped to the 0..4 display coordinates
    newy = max(0, min(4, int(y + yaccel)))
    newx = max(0, min(4, int(x + xaccel)))
    if newy != y or newx != x:
        # leave a dim trail (1) behind and draw the cursor bright (9)
        display.set_pixel(x, y, 1)
        x, y = newx, newy
        display.set_pixel(x, y, 9)
    else:
        display.set_pixel(newx, newy, 9)
    pixels = count_lit_pixels()
    if pixels == 25:
        # board fully coloured in: level up, speed up, show the level number
        play(POWER_UP, wait=False)
        level += 1
        pause -= 5
        sleep(200)
        display.show(str(level))
        sleep(1000)
        display.clear()
    sleep(pause)
# pause reached 0: the player cleared every level
play(NYAN, wait=False)
display.show('WIN!')
sleep(200)
display.show(Image.HEART)
| {
"repo_name": "blogmywiki/tilty",
"path": "tilty.py",
"copies": "1",
"size": "1430",
"license": "mit",
"hash": 1622655304067344000,
"line_mean": 25,
"line_max": 62,
"alpha_frac": 0.6020979021,
"autogenerated": false,
"ratio": 2.900608519269777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4002706421369777,
"avg_score": null,
"num_lines": null
} |
# A simple MicroBit game by Giles Booth
# Tilt and colour in all the squares
# If you win level 20, press reset button to play again
#
# MIT License (c) 2016 Giles Booth
from microbit import display, accelerometer, sleep, Image
from music import play, POWER_UP, NYAN
def get_xy():
    """Sample the accelerometer; give back the scaled (y, x) components."""
    readings = (accelerometer.get_y(), accelerometer.get_x())
    return (readings[0] * accelerometer_sensitivity,
            readings[1] * accelerometer_sensitivity)
def count_lit_pixels():
    """Count lit (nonzero) pixels across the whole 5x5 LED matrix."""
    lit = 0
    for column in range(5):
        for row in range(5):
            if display.get_pixel(column, row):
                lit += 1
    return lit
# Tuning constants: 'pause' is the frame delay in ms; each completed level
# shortens it by 5, and the game is won once it counts down to 0.
pause = 100
level = 0
accelerometer_sensitivity=1/300
#set initial position
x, y = 2, 2
# one initial tilt step before entering the game loop
yaccel, xaccel = get_xy()
y = max(0, min(4, int(y + yaccel)))
x = max(0, min(4, int(x + xaccel)))
while pause > 0:
    yaccel, xaccel = get_xy()
    # move by the tilt amount, clamped to the 0..4 display coordinates
    newy = max(0, min(4, int(y + yaccel)))
    newx = max(0, min(4, int(x + xaccel)))
    if newy != y or newx != x:
        # dim (1) the square we leave, draw the cursor at full brightness (9)
        display.set_pixel(x, y, 1)
        x, y = newx, newy
        display.set_pixel(x, y, 9)
    else:
        display.set_pixel(newx, newy, 9)
    pixels = count_lit_pixels()
    if pixels == 25:
        # all 25 squares coloured: advance a level and speed the game up
        play(POWER_UP, wait=False)
        level += 1
        pause -= 5
        sleep(200)
        display.show(str(level))
        sleep(1000)
        display.clear()
    sleep(pause)
# the countdown reached 0 -- every level cleared
play(NYAN, wait=False)
display.show('WIN!')
sleep(200)
display.show(Image.HEART)
| {
"repo_name": "microbit-playground/microbit-playground.github.io",
"path": "programs/ninja/_posts/2016-02-17-tilty.py",
"copies": "1",
"size": "1458",
"license": "mit",
"hash": 4310813999682220500,
"line_mean": 24.5789473684,
"line_max": 62,
"alpha_frac": 0.6076817558,
"autogenerated": false,
"ratio": 2.8757396449704142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3983421400770414,
"avg_score": null,
"num_lines": null
} |
"""Train a fully-connected neural network with TensorFlow, logging to TensorBoard.

Features and targets are read from an HDF5 dataset according to settings.json,
standardized, and fed to a feed-forward network (QLKNet) trained with early
stopping, periodic checkpointing, and CSV/TensorBoard logging; the resulting
model is exported to nn.json.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import os
import io
from shutil import copyfile
import subprocess
if not (sys.version_info > (3, 0)):
range = xrange
from IPython import embed
import tensorflow as tf
from tensorflow.contrib import opt
from tensorflow.python.client import timeline
from itertools import product
import numpy as np
import pandas as pd
from IPython import embed
import json
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from datasets import Dataset, Datasets, convert_panda, split_panda, shuffle_panda
from nn_primitives import model_to_json, weight_variable, bias_variable, variable_summaries, nn_layer, normab, normsm, descale_panda, scale_panda
FLAGS = None
def timediff(start, event):
    """Print the seconds elapsed since *start*, labeled with *event*."""
    elapsed = time.time() - start
    print('{:35} {:5.0f}s'.format(event + ' after', elapsed))
def print_last_row(df, header=False):
    """Print only the final row of *df*: two-decimal floats, left-justified."""
    last_row = df.iloc[[-1]]
    formatted = last_row.to_string(header=header,
                                   float_format=lambda value: u'{:.2f}'.format(value),
                                   col_space=12,
                                   justify='left')
    print(formatted)
def prep_dataset(settings):
    """Load features and targets from the HDF store referenced by *settings*.

    Returns ``(input_df, target_df)``: the feature matrix and the target
    columns named in ``settings['train_dims']``, outlier-filtered, aligned
    on index, and cast to ``settings['dtype']``.

    Raises IOError if the dataset file cannot be opened.
    """
    train_dims = settings['train_dims']
    # Open HDF store. This is usually a soft link to our filtered dataset
    try:
        store = pd.HDFStore(settings['dataset_path'], 'r')
    except IOError:
        print('Could not find {!s} in {!s}'.format(settings['dataset_path'], os.path.abspath(os.curdir)))
        raise
    # Close the store even if one of the reads below raises; the original
    # leaked the open handle on failure.
    try:
        # Get the targets (train_dims) and features (input)
        target_df = store.get(train_dims[0]).to_frame()
        for target_name in train_dims[1:]:
            target_df = pd.concat([target_df, store.get(target_name)], axis=1)
        input_df = store.select('input')
    finally:
        store.close()
    try:
        del input_df['nions']  # Delete leftover artifact from dataset split
    except KeyError:
        pass
    # Nustar relates to the targets with a log
    try:
        input_df['logNustar'] = np.log10(input_df['Nustar'])
        del input_df['Nustar']
    except KeyError:
        print('No Nustar in dataset')
    # Optionally drop target samples outside the requested quantile band
    if settings['drop_outlier_above'] < 1:
        target_df = target_df[target_df < target_df.quantile(settings['drop_outlier_above'])]
    if settings['drop_outlier_below'] > 0:
        target_df = target_df[target_df > target_df.quantile(settings['drop_outlier_below'])]
    # Remove NaNs
    target_df = target_df.loc[(target_df.dropna()).index]
    # Use only samples in the feature set that are in the target set
    input_df = input_df.loc[target_df.index]
    # Convert to dtype in settings file. Usually float32 or float64
    input_df = input_df.astype(settings['dtype'])
    target_df = target_df.astype(settings['dtype'])
    return input_df, target_df
def standardize(input_df, target_df, settings, warm_start_nn):
    """Scale features and targets per settings['standardization'].

    Supported specs: 'minmax_<a>_<b>' (via nn_primitives.normab) and
    'normsm_<s>_<m>' (via nn_primitives.normsm).  When *warm_start_nn*
    is given, its prescale factors/biases are reused instead so the new
    network sees identically scaled data.

    Returns (scaled input_df, scaled target_df, scale_factor, scale_bias).
    """
    if warm_start_nn is None:
        if settings['standardization'].startswith('minmax'):
            # spec format 'minmax_<a>_<b>'; names shadow the builtins (local only)
            min = float(settings['standardization'].split('_')[-2])
            max = float(settings['standardization'].split('_')[-1])
            scale_factor, scale_bias = normab(pd.concat([input_df, target_df], axis=1), min, max)
        if settings['standardization'].startswith('normsm'):
            # spec format 'normsm_<s_t>_<m_t>'
            s_t = float(settings['standardization'].split('_')[-2])
            m_t = float(settings['standardization'].split('_')[-1])
            scale_factor, scale_bias = normsm(pd.concat([input_df, target_df], axis=1), s_t, m_t)
        # NOTE(review): scale_factor is left unbound if the spec matches
        # neither prefix -- confirm settings are validated upstream.
    else:
        # Reuse the warm-start network's scaling so inputs stay consistent.
        scale_factor = pd.concat([warm_start_nn._feature_prescale_factor,
                                  warm_start_nn._target_prescale_factor])
        scale_bias = pd.concat([warm_start_nn._feature_prescale_bias,
                                warm_start_nn._target_prescale_bias])
    input_df = scale_panda(input_df, scale_factor, scale_bias)
    target_df = scale_panda(target_df, scale_factor, scale_bias)
    return input_df, target_df, scale_factor, scale_bias
class QLKNet:
    """Feed-forward fully-connected network whose topology comes from
    settings['hidden_neurons'] / settings['hidden_activation'], optionally
    warm-started from an earlier network's weights."""
    def __init__(self, x, num_target_dims, settings, debug=False, warm_start_nn=None):
        # x: input placeholder; the TF graph is built immediately in create().
        self.x = x
        self.NUM_TARGET_DIMS = num_target_dims
        self.SETTINGS = settings
        self.DEBUG = debug
        self.WARM_START_NN = warm_start_nn
        self.create()
    def create(self):
        """Build the hidden layers (with optional dropout) and the output
        layer; the network output ends up in self.y."""
        x = self.x
        settings = self.SETTINGS
        debug = self.DEBUG
        warm_start_nn = self.WARM_START_NN
        num_target_dims = self.NUM_TARGET_DIMS
        layers = [x]
        if settings['drop_chance'] != 0:
            drop_prob = tf.constant(settings['drop_chance'], dtype=x.dtype)
            # NOTE(review): is_train is only created when drop_chance != 0,
            # but train() reads net.is_train unconditionally -- confirm
            # drop_chance is always nonzero in the settings files.
            self.is_train = tf.placeholder(tf.bool)
        for ii, (activation, neurons) in enumerate(zip(settings['hidden_activation'], settings['hidden_neurons']), start=1):
            if warm_start_nn is None:
                weight_init = bias_init = 'norm_1_0'
            else:
                # Only reuse old weights if shape and activation match.
                if (warm_start_nn.layers[ii - 1]._activation == activation and
                        warm_start_nn.layers[ii - 1]._weights.shape[1] == neurons):
                    weight_init = warm_start_nn.layers[ii - 1]._weights
                    bias_init = warm_start_nn.layers[ii - 1]._biases
                    activation = warm_start_nn.layers[ii - 1]._activation
                else:
                    raise Exception('Settings file layer shape does not match warm_start_nn')
            if activation == 'tanh':
                act = tf.tanh
            elif activation == 'relu':
                act = tf.nn.relu
            elif activation == 'none':
                act = None
            layer = nn_layer(layers[-1], neurons, 'layer' + str(ii), dtype=x.dtype, act=act, debug=debug, bias_init=bias_init, weight_init=weight_init)
            if settings['drop_chance'] != 0:
                dropout = tf.layers.dropout(layer, drop_prob, training=self.is_train)
                if debug:
                    tf.summary.histogram('post_dropout_layer_' + str(ii), dropout)
                layers.append(dropout)
            else:
                layers.append(layer)
        # Last layer (output layer) usually has no activation
        activation = settings['output_activation']
        if warm_start_nn is None:
            weight_init = bias_init = 'norm_1_0'
        else:
            weight_init = warm_start_nn.layers[-1]._weights
            bias_init = warm_start_nn.layers[-1]._biases
            activation = warm_start_nn.layers[-1]._activation
        if activation == 'tanh':
            act = tf.tanh
        elif activation == 'relu':
            act = tf.nn.relu
        elif activation == 'none':
            act = None
        self.y = nn_layer(layers[-1], num_target_dims, 'layer' + str(len(layers)), dtype=x.dtype, act=act, debug=debug, bias_init=bias_init, weight_init=weight_init)
def train(settings, warm_start_nn=None):
    """Train a QLKNet on the dataset described by *settings*.

    Builds the TF graph (network, losses, optimizer), runs the minibatch
    training loop with early stopping and checkpointing, logs progress
    to CSV files and TensorBoard, and exports the final model to
    'nn.json'.  Weights and scaling can be warm-started from
    *warm_start_nn*.
    """
    tf.reset_default_graph()
    start = time.time()
    input_df, target_df = prep_dataset(settings)
    input_df, target_df, scale_factor, scale_bias = standardize(input_df, target_df, settings, warm_start_nn=warm_start_nn)
    # Standardize input
    timediff(start, 'Scaling defined')
    train_dims = target_df.columns
    scan_dims = input_df.columns
    datasets = convert_panda(input_df, target_df, settings['validation_fraction'], settings['test_fraction'])
    # Start tensorflow session
    config = tf.ConfigProto()
    #config = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1, \
    #                        allow_soft_placement=True, device_count = {'CPU': 1})
    sess = tf.Session(config=config)
    # Input placeholders
    with tf.name_scope('input'):
        x = tf.placeholder(datasets.train._target.dtypes.iloc[0],
                           [None, len(scan_dims)], name='x-input')
        y_ds = tf.placeholder(x.dtype, [None, len(train_dims)], name='y-input')
    net = QLKNet(x, len(train_dims), settings, warm_start_nn=warm_start_nn)
    y = net.y
    # De-scaled (pre-standardization units) prediction and target
    y_descale = (net.y - scale_bias[train_dims].values) / scale_factor[train_dims].values
    y_ds_descale = (y_ds - scale_bias[train_dims].values) / scale_factor[train_dims].values
    is_train = net.is_train
    timediff(start, 'NN defined')
    # Define loss functions
    with tf.name_scope('Loss'):
        with tf.name_scope('mse'):
            mse = tf.losses.mean_squared_error(y_ds, y)
            mse_descale = tf.losses.mean_squared_error(y_ds_descale, y_descale)
            tf.summary.scalar('MSE', mse)
        with tf.name_scope('mabse'):
            mabse = tf.losses.absolute_difference(y_ds, y)
            tf.summary.scalar('MABSE', mabse)
        with tf.name_scope('l2'):
            l2_scale = tf.Variable(settings['cost_l2_scale'], dtype=x.dtype, trainable=False)
            #l2_norm = tf.reduce_sum(tf.square())
            #l2_norm = tf.to_double(tf.add_n([tf.nn.l2_loss(var)
            #                    for var in tf.trainable_variables()
            #                    if 'weights' in var.name]))
            l2_norm = (tf.add_n([tf.nn.l2_loss(var)
                                 for var in tf.trainable_variables()
                                 if 'weights' in var.name]))
            #mse = tf.losses.mean_squared_error(y_, y)
            # TODO: Check normalization
            l2_loss = l2_scale * l2_norm
            tf.summary.scalar('l2_norm', l2_norm)
            tf.summary.scalar('l2_scale', l2_scale)
            tf.summary.scalar('l2_loss', l2_loss)
        with tf.name_scope('l1'):
            l1_scale = tf.Variable(settings['cost_l1_scale'], dtype=x.dtype, trainable=False)
            #l1_norm = tf.to_double(tf.add_n([tf.reduce_sum(tf.abs(var))
            #                    for var in tf.trainable_variables()
            #                    if 'weights' in var.name]))
            l1_norm = (tf.add_n([tf.reduce_sum(tf.abs(var))
                                 for var in tf.trainable_variables()
                                 if 'weights' in var.name]))
            # TODO: Check normalization
            l1_loss = l1_scale * l1_norm
            tf.summary.scalar('l1_norm', l1_norm)
            tf.summary.scalar('l1_scale', l1_scale)
            tf.summary.scalar('l1_loss', l1_loss)
    # Total loss = goodness measure + any nonzero regularization terms
    if settings['goodness'] == 'mse':
        loss = mse
    elif settings['goodness'] == 'mabse':
        loss = mabse
    if settings['cost_l1_scale'] != 0:
        loss += l1_loss
    if settings['cost_l2_scale'] != 0:
        loss += l2_loss
    tf.summary.scalar('loss', loss)
    optimizer = None
    train_step = None
    # Define optimizer algorithm.
    with tf.name_scope('train'):
        lr = settings['learning_rate']
        if settings['optimizer'] == 'adam':
            beta1 = settings['adam_beta1']
            beta2 = settings['adam_beta2']
            train_step = tf.train.AdamOptimizer(lr,
                                                beta1,
                                                beta2,
                                                ).minimize(loss)
        elif settings['optimizer'] == 'adadelta':
            rho = settings['adadelta_rho']
            train_step = tf.train.AdadeltaOptimizer(lr,
                                                    rho,
                                                    ).minimize(loss)
        elif settings['optimizer'] == 'rmsprop':
            decay = settings['rmsprop_decay']
            momentum = settings['rmsprop_momentum']
            train_step = tf.train.RMSPropOptimizer(lr,
                                                   decay,
                                                   momentum).minimize(loss)
        elif settings['optimizer'] == 'grad':
            train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)
        elif settings['optimizer'] == 'lbfgs':
            # lbfgs uses a scipy-style external optimizer; train_step stays None
            optimizer = opt.ScipyOptimizerInterface(loss,
                                                    options={'maxiter': settings['lbfgs_maxiter'],
                                                             'maxfun': settings['lbfgs_maxfun'],
                                                             'maxls': settings['lbfgs_maxls']})
    #tf.logging.set_verbosity(tf.logging.INFO)
    # Merge all the summaries
    merged = tf.summary.merge_all()
    # Initialze writers, variables and logdir
    log_dir = 'tf_logs'
    if tf.gfile.Exists(log_dir):
        tf.gfile.DeleteRecursively(log_dir)
    tf.gfile.MakeDirs(log_dir)
    train_writer = tf.summary.FileWriter(log_dir + '/train', sess.graph)
    validation_writer = tf.summary.FileWriter(log_dir + '/validation', sess.graph)
    tf.global_variables_initializer().run(session=sess)
    timediff(start, 'Variables initialized')
    epoch = 0
    train_log = pd.DataFrame(columns=['epoch', 'walltime', 'loss', 'mse', 'mabse', 'l1_norm', 'l2_norm'])
    validation_log = pd.DataFrame(columns=['epoch', 'walltime', 'loss', 'mse', 'mabse', 'l1_norm', 'l2_norm'])
    # Split dataset in minibatches
    minibatches = settings['minibatches']
    batch_size = int(np.floor(datasets.train.num_examples/minibatches))
    timediff(start, 'Starting loss calculation')
    # Initial (epoch 0) loss evaluation on the validation set
    xs, ys = datasets.validation.next_batch(-1, shuffle=False)
    feed_dict = {x: xs, y_ds: ys, is_train: False}
    summary, lo, meanse, meanabse, l1norm, l2norm = sess.run([merged, loss, mse, mabse, l1_norm, l2_norm],
                                                             feed_dict=feed_dict)
    train_log.loc[0] = (epoch, 0, lo, meanse, meanabse, l1norm, l2norm)
    validation_log.loc[0] = (epoch, 0, lo, meanse, meanabse, l1norm, l2norm)
    # Save checkpoints of training to restore for early-stopping
    saver = tf.train.Saver(max_to_keep=settings['early_stop_after'] + 1)
    checkpoint_dir = 'checkpoints'
    tf.gfile.MkDir(checkpoint_dir)
    # Define variables for early stopping
    not_improved = 0
    best_early_measure = np.inf
    early_measure = np.inf
    max_epoch = settings.get('max_epoch') or sys.maxsize
    # Set debugging parameters
    setting = lambda x, default: default if x is None else x
    steps_per_report = setting(settings.get('steps_per_report'), np.inf)
    epochs_per_report = setting(settings.get('epochs_per_report'),np.inf)
    save_checkpoint_networks = setting(settings.get('save_checkpoint_networks'), False)
    save_best_networks = setting(settings.get('save_best_networks'), False)
    track_training_time = setting(settings.get('track_training_time'), False)
    # Set up log files
    train_log_file = open('train_log.csv', 'a', 1)
    train_log_file.truncate(0)
    train_log.to_csv(train_log_file)
    validation_log_file = open('validation_log.csv', 'a', 1)
    validation_log_file.truncate(0)
    validation_log.to_csv(validation_log_file)
    timediff(start, 'Training started')
    train_start = time.time()
    # NOTE(review): ii is used as the global step for summaries and
    # checkpoints below but is never incremented -- confirm intended.
    ii = 0
    try:
        for epoch in range(max_epoch):
            for step in range(minibatches):
                # Extra debugging every steps_per_report
                if not step % steps_per_report and steps_per_report != np.inf:
                    print('debug!', epoch, step)
                    run_options = tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()
                else:
                    run_options = None
                    run_metadata = None
                xs, ys = datasets.train.next_batch(batch_size, shuffle=True)
                feed_dict = {x: xs, y_ds: ys, is_train: True}
                # If we have a scipy-style optimizer
                if optimizer:
                    #optimizer.minimize(sess, feed_dict=feed_dict)
                    optimizer.minimize(sess,
                                       feed_dict=feed_dict,
#                                      options=run_options,
#                                      run_metadata=run_metadata)
                                       )
                    lo = loss.eval(feed_dict=feed_dict)
                    meanse = mse.eval(feed_dict=feed_dict)
                    meanabse = mabse.eval(feed_dict=feed_dict)
                    l1norm = l1_norm.eval(feed_dict=feed_dict)
                    l2norm = l2_norm.eval(feed_dict=feed_dict)
                    summary = merged.eval(feed_dict=feed_dict)
                else: # If we have a TensorFlow-style optimizer
                    summary, lo, meanse, meanabse, l1norm, l2norm, _ = sess.run([merged, loss, mse, mabse, l1_norm, l2_norm, train_step],
                                                                               feed_dict=feed_dict,
                                                                               options=run_options,
                                                                               run_metadata=run_metadata
                                                                               )
                train_writer.add_summary(summary, ii)
                # Extra debugging every steps_per_report
                if not step % steps_per_report and steps_per_report != np.inf:
                    tl = timeline.Timeline(run_metadata.step_stats)
                    ctf = tl.generate_chrome_trace_format()
                    with open('timeline_run.json', 'w') as f:
                        f.write(ctf)
                    train_writer.add_run_metadata(run_metadata, 'epoch%d step%d' % (epoch, step))
                # Add to CSV log buffer
                if track_training_time is True:
                    train_log.loc[epoch * minibatches + step] = (epoch, time.time() - train_start, lo, meanse, meanabse, l1norm, l2norm)
            ########
            # After-epoch stuff
            ########
            if track_training_time is True:
                step_start = time.time()
            epoch = datasets.train.epochs_completed
            xs, ys = datasets.validation.next_batch(-1, shuffle=False)
            feed_dict = {x: xs, y_ds: ys, is_train: False}
            # Run with full trace every epochs_per_report Gives full runtime information
            if not epoch % epochs_per_report and epochs_per_report != np.inf:
                print('epoch_debug!', epoch)
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
            else:
                run_options = None
                run_metadata = None
            # Calculate all variables with the validation set
            summary, lo, meanse, meanabse, l1norm, l2norm = sess.run([merged, loss, mse, mabse, l1_norm, l2_norm],
                                                                     feed_dict=feed_dict,
                                                                     options=run_options,
                                                                     run_metadata=run_metadata)
            validation_writer.add_summary(summary, ii)
            # More debugging every epochs_per_report
            if not epoch % epochs_per_report and epochs_per_report != np.inf:
                tl = timeline.Timeline(run_metadata.step_stats)
                ctf = tl.generate_chrome_trace_format()
                with open('timeline.json', 'w') as f:
                    f.write(ctf)
                validation_writer.add_run_metadata(run_metadata, 'epoch%d' % epoch)
            # Save checkpoint
            save_path = saver.save(sess, os.path.join(checkpoint_dir,
                                                      'model.ckpt'), global_step=ii, write_meta_graph=False)
            # Update CSV logs
            if track_training_time is True:
                validation_log.loc[epoch] = (epoch, time.time() - train_start, lo, meanse, meanabse, l1norm, l2norm)
                validation_log.loc[epoch:].to_csv(validation_log_file, header=False)
                validation_log = validation_log[0:0] #Flush validation log
                train_log.loc[epoch * minibatches:].to_csv(train_log_file, header=False)
                train_log = train_log[0:0] #Flush train_log
            # Determine early-stopping criterion
            if settings['early_stop_measure'] == 'mse':
                early_measure = meanse
            elif settings['early_stop_measure'] == 'loss':
                early_measure = lo
            elif settings['early_stop_measure'] == 'none':
                early_measure = np.nan
            # Early stopping, check if measure is better
            if early_measure < best_early_measure:
                best_early_measure = early_measure
                if save_best_networks:
                    nn_best_file = os.path.join(checkpoint_dir,
                                                'nn_checkpoint_' + str(epoch) + '.json')
                    trainable = {x.name: tf.to_double(x).eval(session=sess).tolist() for x in tf.trainable_variables()}
                    model_to_json(nn_best_file,
                                  trainable,
                                  scan_dims.values.tolist(),
                                  train_dims.values.tolist(),
                                  datasets.train, scale_factor.astype('float64'),
                                  scale_bias.astype('float64'),
                                  l2_scale,
                                  settings)
                not_improved = 0
            else: # If early measure is not better
                not_improved += 1
            # If not improved in 'early_stop' epoch, stop
            if settings['early_stop_measure'] != 'none' and not_improved >= settings['early_stop_after']:
                if save_checkpoint_networks:
                    nn_checkpoint_file = os.path.join(checkpoint_dir,
                                                      'nn_checkpoint_' + str(epoch) + '.json')
                    trainable = {x.name: tf.to_double(x).eval(session=sess).tolist() for x in tf.trainable_variables()}
                    model_to_json(nn_checkpoint_file,
                                  trainable,
                                  scan_dims.values.tolist(),
                                  train_dims.values.tolist(),
                                  datasets.train, scale_factor.astype('float64'),
                                  scale_bias.astype('float64'),
                                  l2_scale,
                                  settings)
                print('Not improved for %s epochs, stopping..'
                      % (not_improved))
                break
            # Stop if loss is nan or inf
            if np.isnan(lo) or np.isinf(lo):
                print('Loss is {}! Stopping..'.format(lo))
                break
    # Stop on Ctrl-C
    except KeyboardInterrupt:
        print('KeyboardInterrupt Stopping..')
    train_writer.close()
    validation_writer.close()
    # Restore checkpoint with best epoch
    try:
        best_epoch = epoch - not_improved
        # negative index counts back from the most recent checkpoint
        saver.restore(sess, saver.last_checkpoints[best_epoch - epoch])
    except IndexError:
        print("Can't restore old checkpoint, just saving current values")
        best_epoch = epoch
    # Flush the final log rows and close the CSV files
    validation_log.loc[epoch] = (epoch, time.time() - train_start, lo, meanse, meanabse, l1norm, l2norm)
    train_log.loc[epoch * minibatches + step] = (epoch, time.time() - train_start, lo, meanse, meanabse, l1norm, l2norm)
    validation_log.loc[epoch:].to_csv(validation_log_file, header=False)
    train_log.loc[epoch * minibatches:].to_csv(train_log_file, header=False)
    train_log_file.close()
    del train_log
    validation_log_file.close()
    del validation_log
    # Export the (restored) best network to nn.json
    trainable = {x.name: tf.to_double(x).eval(session=sess).tolist() for x in tf.trainable_variables()}
    model_to_json('nn.json',
                  trainable,
                  scan_dims.values.tolist(),
                  train_dims.values.tolist(),
                  datasets.train,
                  scale_factor,
                  scale_bias.astype('float64'),
                  l2_scale,
                  settings)
    print("Best epoch was {:d} with measure '{:s}' of {:f} ".format(best_epoch, settings['early_stop_measure'], best_early_measure))
    print("Training time was {:.0f} seconds".format(time.time() - train_start))
    # Finally, check against validation set
    xs, ys = datasets.validation.next_batch(-1, shuffle=False)
    feed_dict = {x: xs, y_ds: ys, is_train: False}
    rms_val = np.round(np.sqrt(mse.eval(feed_dict, session=sess)), 4)
    rms_val_descale = np.round(np.sqrt(mse_descale.eval(feed_dict, session=sess)), 4)
    loss_val = np.round(loss.eval(feed_dict, session=sess), 4)
    print('{:22} {:5.2f}'.format('Validation RMS error: ', rms_val))
    print('{:22} {:5.2f}'.format('Descaled validation RMS error: ', rms_val_descale))
    print('{:22} {:5.2f}'.format('Validation loss: ', loss_val))
    metadata = {'epoch': epoch,
                'best_epoch': best_epoch,
                'rms_validation': float(rms_val),
                'loss_validation': float(loss_val),
                'rms_validation_descaled': float(rms_val_descale),
                }
    # Add metadata dict to nn.json
    with open('nn.json') as nn_file:
        data = json.load(nn_file)
    data['_metadata'] = metadata
    with open('nn.json', 'w') as nn_file:
        json.dump(data, nn_file, sort_keys=True, indent=4, separators=(',', ': '))
    sess.close()
def train_NDNN_from_folder(warm_start_nn=None):
    """Train an NDNN using the settings file in the current working directory.

    Reads ``./settings.json`` and hands the resulting settings dict to
    ``train``.

    Args:
        warm_start_nn: optional previously trained network used to initialise
            the new network's weights; forwarded unchanged to ``train``.
    """
    with open('./settings.json') as settings_file:
        train_settings = json.load(settings_file)
    train(train_settings, warm_start_nn=warm_start_nn)
def main(_):
    """Entry point invoked by ``tf.app.run``; trains from ``./settings.json``.

    The unused positional argument absorbs the argv list that ``tf.app.run``
    passes to its main function.
    """
    # A warm-start network could be loaded here (e.g. QuaLiKizNDNN.from_json
    # on an existing nn.json) and passed below; by default we cold-start.
    warm_start_nn = None
    train_NDNN_from_folder(warm_start_nn=warm_start_nn)
if __name__ == '__main__':
    # All run configuration (learning rate, data directories, step counts,
    # ...) comes from ./settings.json rather than command-line flags.
    # Strip every CLI argument except the program name before handing control
    # to TensorFlow so stray flags cannot leak into the run.
    tf.app.run(main=main, argv=[sys.argv[0]])
| {
"repo_name": "Karel-van-de-Plassche/QLKNN-develop",
"path": "tests/gen2_test_files/train_NDNN.py",
"copies": "7",
"size": "27120",
"license": "mit",
"hash": -9111721987032919000,
"line_mean": 44.2754590985,
"line_max": 165,
"alpha_frac": 0.5543879056,
"autogenerated": false,
"ratio": 3.868758915834522,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002956681285016388,
"num_lines": 599
} |
"""A simple model and MCMC execution routine for exploring the properties of the
Gelman-Rubin convergence criterion."""
from pysb import *
from pysb.integrate import odesolve
from matplotlib import pyplot as plt
import numpy as np
import bayessb
import pysb.util
import pickle
# Define a simple exponential decay model with two parameters, the initial
# value A_0 and the rate parameter k.
# NOTE(review): PySB's Model() appears to export a module-level `model`
# object that is referenced below -- confirm against the pysb version in use.
Model()
Monomer('A')
Initial(A(), Parameter('A_0', 2))
Rule('Decay_A', A() >> None, Parameter('k', 1))
Observable('A_', A())
# Define the time span, number of steps, number of chains, and other globals
tmax = 10  # end of the simulated time interval
tspan = np.linspace(0, tmax, 100)  # 100 evenly spaced time points in [0, tmax]
random_seed = 1  # seed handed to MCMCOpts in do_fit
sigma = 0.1  # noise scale in the likelihood denominator (see do_fit)
nsteps = 8000  # MCMC steps per chain; first half is discarded in __main__
num_chains = 5  # number of independent chains run in __main__
# Synthetic observations generated from the model; fitted against in do_fit.
synthetic_data = pysb.util.synthetic_data(model, tspan)
def do_fit(iteration):
    """Run one MCMC chain on the globally defined model and pickle the result.

    Parameters
    ----------
    iteration : int
        Index of this chain; only used to build a unique output filename.

    Returns
    -------
    bayessb.MCMC
        The finished sampler, with ``position`` set to the best-fit position
        found during the run.
    """
    def likelihood(mcmc, position):
        # Sum-of-squares misfit of the simulated trajectory against the
        # synthetic data, scaled by the global noise level sigma.
        yout = mcmc.simulate(position, observables=True)
        err = np.sum((synthetic_data['A_'] - yout['A_'])**2 / (2*sigma**2))
        return err
    # Initialize the MCMC arguments
    opts = bayessb.MCMCOpts()
    opts.model = model
    opts.tspan = tspan
    # Because there is a degradation reaction, there is a __source_0
    # parameter in the model that we need to ignore
    opts.estimate_params = [p for p in model.parameters
                            if p.name != '__source_0']
    # Choose MCMC start values randomly from [0, 10)
    opts.initial_values = np.random.uniform(0, 10, 2)
    opts.nsteps = nsteps
    opts.likelihood_fn = likelihood
    opts.step_fn = step
    opts.use_hessian = True
    # Integer division so the period stays an int on Python 3 as well.
    opts.hessian_period = opts.nsteps // 10
    opts.seed = random_seed
    mcmc = bayessb.MCMC(opts)
    mcmc.run()
    # Pickle it! The likelihood closure is stripped first because function
    # objects defined in a local scope cannot be pickled.
    basename = 'convergence_test'
    output_basename = '%s_%d_steps_seed_%d_iter_%d' % \
                      (basename, opts.nsteps, random_seed, iteration)
    mcmc.options.likelihood_fn = None
    # Binary mode ('wb') is required for pickle on Python 3, and the context
    # manager guarantees the file is closed even if pickling fails.
    with open('%s.pck' % output_basename, 'wb') as output_file:
        pickle.dump(mcmc, output_file)
    # Show best fit params
    mcmc.position = mcmc.positions[np.argmin(mcmc.likelihoods)]
    best_fit_params = mcmc.cur_params(position=mcmc.position)
    print('\n'.join('%s: %g' % (param.name, value)
                    for param, value in zip(model.parameters,
                                            best_fit_params)))
    return mcmc
def step(mcmc):
    """Progress callback invoked by the sampler at every MCMC iteration.

    Every 200 iterations, prints one status line with the step number,
    proposal sigma, temperature, running acceptance rate, and the currently
    accepted likelihood/prior/posterior values.
    """
    if mcmc.iter % 200 == 0:
        # Parenthesized single-argument print behaves identically on Python 2
        # and Python 3, matching the print() call style used in do_fit
        # (the previous py2-only `print ...` statement is a syntax error on
        # Python 3).
        print('iter=%-5d sigma=%-.3f T=%-.3f acc=%-.3f, lkl=%g prior=%g post=%g' %
              (mcmc.iter, mcmc.sig_value, mcmc.T, mcmc.acceptance/(mcmc.iter+1),
               mcmc.accept_likelihood, mcmc.accept_prior, mcmc.accept_posterior))
def plot_model_data():
    """Overlay a deterministic simulation of the model on the synthetic data."""
    plt.ion()  # interactive mode so show() does not block the caller
    trajectory = odesolve(model, tspan)
    # Plot the simulated observable first, then the synthetic data on top,
    # in the same order as before.
    for curve in (trajectory['A_'], synthetic_data['A_']):
        plt.plot(tspan, curve)
    plt.show()
if __name__ == '__main__':
    # Run num_chains independent chains so their post-burn-in samples can be
    # compared (e.g. with the Gelman-Rubin criterion described in the module
    # docstring).
    chain_set = []
    for i in range(0, num_chains):
        mcmc = do_fit(i)
        # Discard the first half of the chain as burn-in. Integer division is
        # required so the slice index stays an int on Python 3 (plain `/`
        # would produce a float and break the slice).
        mixed_start = nsteps // 2
        mixed_positions = mcmc.positions[mixed_start:, :]
        # Positions are sampled in log10 space; convert to linear scale.
        mixed_positions_linear = 10**mixed_positions
        chain_set.append(mixed_positions_linear)
| {
"repo_name": "jmuhlich/bayessb",
"path": "examples/convergence.py",
"copies": "1",
"size": "3660",
"license": "bsd-2-clause",
"hash": 8513122733367172000,
"line_mean": 30.8260869565,
"line_max": 89,
"alpha_frac": 0.6333333333,
"autogenerated": false,
"ratio": 3.2189973614775726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43523306947775725,
"avg_score": null,
"num_lines": null
} |
""" A simple model of a standard-compliant Microblog Feed.
This module contains wrappers for all 3 Microblog feed files
(feed, follows, blocks) and adheres to the Open Microblog
Standard. For more information see:
http://openmicroblog.com
author: Brian Schrader
since: 2015-06-01
standard-version: 0.5
"""
from __future__ import print_function
import sys
from xml.dom import minidom
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
class MalformedFeedError(Exception):
    """Raised when a feed lacks an element required by the standard."""
    pass
# Feed Model
# Tag prefix for Open Microblog extension elements, e.g. <microblog:username>.
# NOTE(review): minidom is used here without namespace processing, so this is
# matched as a literal tag-name prefix rather than a true XML namespace.
MICROBLOG_NAMESPACE = 'microblog'
class Feed(object):
    """Base wrapper around an XML feed document.

    Parses the raw XML text and keeps a handle on the document's first
    ``<channel>`` element, which subclasses read their data from.
    """

    def __init__(self, raw_text=''):
        """Parse *raw_text* and store the feed's ``<channel>`` element."""
        document = minidom.parseString(raw_text)
        self._tree = document.getElementsByTagName('channel')[0]
class MainFeed(Feed):
    """ Models a user's feed.xml feed.
    This feed contains a list of status messages which
    the given user has posted.
    """
    REQUIRED_RSS_ELEMENTS = set(['link', 'lastBuildDate', 'language'])
    OPTIONAL_RSS_ELEMENTS = set(['docs', 'description'])
    REQUIRED_MICROBLOG_ELEMENTS = set(['username', 'user_id', 'profile'])
    OPTIONAL_MICROBLOG_ELEMENTS = set(['blocks', 'follows', 'message',
                                       'user_full_name', 'next_node'])

    def _element_text(self, tag):
        """Return the text of the first <tag> element in the channel.

        Returns '' when the element is present but empty, and None when the
        element is absent altogether.
        """
        elements = self._tree.getElementsByTagName(tag)
        if not elements:
            return None
        child = elements[0].firstChild
        return child.nodeValue if child is not None else ''

    def __init__(self, raw_text=''):
        """Parse the channel and expose its elements as instance attributes.

        Missing required elements raise MalformedFeedError; missing optional
        elements are skipped. A present-but-empty element yields '' rather
        than crashing (previously only the required RSS loop guarded against
        an empty element -- the other loops raised AttributeError on
        ``firstChild`` being None). Each <item> child is wrapped in a
        MainFeedItem and collected in ``self.items``.
        """
        super(MainFeed, self).__init__(raw_text)
        for attr in self.REQUIRED_RSS_ELEMENTS:
            value = self._element_text(attr)
            if value is None:
                raise MalformedFeedError(
                        'Feed must contain all required elements: '
                        '{0} is missing.'.format(attr))
            setattr(self, attr, value)
        for attr in self.OPTIONAL_RSS_ELEMENTS:
            value = self._element_text(attr)
            if value is not None:
                setattr(self, attr, value)
        for attr in self.REQUIRED_MICROBLOG_ELEMENTS:
            value = self._element_text(
                    '{ns}:{attr}'.format(ns=MICROBLOG_NAMESPACE, attr=attr))
            if value is None:
                raise MalformedFeedError(
                        'Feed must contain all required elements: '
                        '{0} is missing.'.format(attr))
            setattr(self, attr, value)
        for attr in self.OPTIONAL_MICROBLOG_ELEMENTS:
            value = self._element_text(
                    '{ns}:{attr}'.format(ns=MICROBLOG_NAMESPACE, attr=attr))
            if value is not None:
                setattr(self, attr, value)
        # Hand each <item> to MainFeedItem as serialized XML.
        self.items = [MainFeedItem(element.toxml('utf-8'))
                      for element in self._tree.getElementsByTagName('item')]
class UserFeed(Feed):
    """ Models a user's follows.xml or blocks.xml feed.
    This feed contains a list of users which the given
    user either follows or blocks.
    """

    def __init__(self, raw_text=''):
        """Parse the channel and wrap each <item> in a UserFeedItem."""
        super(UserFeed, self).__init__(raw_text)
        # Pass each <item> element to UserFeedItem as serialized XML, the
        # same way MainFeed does. The previous code passed
        # element.firstChild.nodeValue -- i.e. the item's first text node,
        # which is None whenever the item's first child is an element --
        # and Item.__init__ then failed trying to parse it as XML.
        self.items = [UserFeedItem(element.toxml('utf-8'))
                      for element in self._tree.getElementsByTagName('item')]
# Item Model
class Item(object):
    """Base wrapper for a single ``<item>`` entry of a feed.

    Parses the item's XML text into a DOM tree that subclasses query for
    their specific elements.
    """

    def __init__(self, raw_text=''):
        """Parse *raw_text* and store the resulting DOM document."""
        self._tree = minidom.parseString(raw_text)
class MainFeedItem(Item):
    """ Models an item found in the main feed representing
    a status message.
    """
    REQUIRED_RSS_ELEMENTS = set(['guid', 'pubDate', 'description'])
    OPTIONAL_RSS_ELEMENTS = set()
    REQUIRED_MICROBLOG_ELEMENTS = set()
    OPTIONAL_MICROBLOG_ELEMENTS = set([
        'reply', 'in_reply_to_user_id', 'in_reply_to_user_link',
        'in_reply_to_status_id', 'reposted_status_user_id',
        'reposted_user_link', 'reposted_status_id', 'reposted_status_pubdate',
        'language'])

    def __init__(self, raw_text=''):
        """Parse a status <item> and expose its elements as attributes.

        Missing required elements raise MalformedFeedError; missing optional
        elements are skipped. A present-but-empty element now yields ''
        instead of raising AttributeError on ``firstChild`` being None,
        matching MainFeed's handling. Also drops the spurious, unused ``ns``
        keyword previously passed to ``'{attr}'.format``.
        """
        super(MainFeedItem, self).__init__(raw_text)
        self.items = []  # kept for interface compatibility; never populated

        def text_of(tag):
            # '' if present-but-empty, None if the element is absent.
            found = self._tree.getElementsByTagName(tag)
            if not found:
                return None
            child = found[0].firstChild
            return child.nodeValue if child is not None else ''

        for attr in self.REQUIRED_RSS_ELEMENTS:
            value = text_of(attr)
            if value is None:
                raise MalformedFeedError(
                        'Feed must contain all required elements: '
                        '{0} is missing.'.format(attr))
            setattr(self, attr, value)
        for attr in self.OPTIONAL_RSS_ELEMENTS:
            value = text_of(attr)
            if value is not None:
                setattr(self, attr, value)
        for attr in self.REQUIRED_MICROBLOG_ELEMENTS:
            value = text_of('{ns}:{attr}'.format(ns=MICROBLOG_NAMESPACE,
                                                 attr=attr))
            if value is None:
                raise MalformedFeedError(
                        'Feed must contain all required elements: '
                        '{0} is missing.'.format(attr))
            setattr(self, attr, value)
        for attr in self.OPTIONAL_MICROBLOG_ELEMENTS:
            value = text_of('{ns}:{attr}'.format(ns=MICROBLOG_NAMESPACE,
                                                 attr=attr))
            if value is not None:
                setattr(self, attr, value)
class UserFeedItem(Item):
    """ Models an item found in the blocks or follows feed
    representing a user.
    """
    # All element sets except OPTIONAL_MICROBLOG_ELEMENTS are empty for this
    # item type; they are kept for interface compatibility with the other
    # Item subclasses.
    REQUIRED_RSS_ELEMENTS = set()
    OPTIONAL_RSS_ELEMENTS = set()
    REQUIRED_MICROBLOG_ELEMENTS = set()
    OPTIONAL_MICROBLOG_ELEMENTS = set(['user_id', 'username', 'user_link'])

    def __init__(self, raw_text=''):
        """Parse a user <item> and expose its optional microblog elements.

        The generic loops over the (empty) required/optional RSS and required
        microblog sets were no-ops and have been removed. Absent elements
        are skipped; a present-but-empty element yields '' instead of
        raising AttributeError on ``firstChild`` being None.
        """
        super(UserFeedItem, self).__init__(raw_text)
        self.items = []  # kept for interface compatibility; never populated
        for attr in self.OPTIONAL_MICROBLOG_ELEMENTS:
            tag = '{ns}:{attr}'.format(ns=MICROBLOG_NAMESPACE, attr=attr)
            found = self._tree.getElementsByTagName(tag)
            if not found:
                continue
            child = found[0].firstChild
            setattr(self, attr, child.nodeValue if child is not None else '')
if __name__ == '__main__':
    # Command-line validator: fetch a feed URL and print its latest status.
    try:
        url = sys.argv[1]
    except IndexError:
        print('You must include a url to validate.')
        sys.exit(1)
    # urlopen().read() returns bytes on Python 3; minidom.parseString
    # accepts both bytes and str, so no explicit decode is needed here.
    feed = MainFeed(raw_text=urlopen(url).read())
    # Items appear in document order; the last one is treated as the most
    # recent status.
    if len(feed.items) > 0:
        description = feed.items[-1].description
        print('\n{0} says, "{1}"'.format(feed.username, description))
    else:
        print('\n{0} hasn\'t said anything yet.'.format(feed.username))
| {
"repo_name": "Sonictherocketman/Open-Microblog",
"path": "validator.py",
"copies": "1",
"size": "8835",
"license": "mit",
"hash": -6561351702428765000,
"line_mean": 33.1992031873,
"line_max": 79,
"alpha_frac": 0.5471420487,
"autogenerated": false,
"ratio": 4.523809523809524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5570951572509524,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.