text stringlengths 0 1.05M | meta dict |
|---|---|
"""
Custom authentication backend
"""
import logging, traceback, crypt
from django.contrib.auth import backends
from django.conf import settings
from django.db import connection
from rest_framework import permissions
from . import models, exchange
log = logging.getLogger(__name__)
# Maps DRF view actions to antioch permission names, keyed by entity type.
# Any action not listed here is treated as requiring 'write' access.
ACTION_MAPS = {
    'object': {
        'retrieve': 'read',
        'destroy': 'write',
        'update': 'write',
        'partial_update': 'write'
    }
}
class AntiochPermission(permissions.BasePermission):
    """
    DRF permission class backed by the in-game antioch ACL.
    """
    def has_permission(self, request, view):
        # Only authenticated users may touch the API at all.
        return request.user.is_authenticated

    def has_object_permission(self, request, view, obj):
        """
        Check the in-game ACL from the requesting avatar's point of view.
        """
        ex = exchange.ObjectExchange(connection, ctx=request.user.avatar.pk)
        user = ex.get_object(request.user.avatar.pk)
        target = ex.load(view.basename, obj.pk)
        # Use the module-level action map (previously defined but unused
        # here); anything other than a plain retrieval needs write access.
        action = ACTION_MAPS['object'].get(view.action, 'write')
        return ex.is_allowed(user, action, target)
class AntiochObjectBackend(backends.ModelBackend):
    """
    Authenticate against the antioch object database.
    """
    def authenticate(self, request, username=None, password=None):
        """
        Attempt to authenticate the provided request with the given credentials.

        Returns the matching enabled Player on success, otherwise None.
        """
        try:
            players = models.Player.objects.filter(
                avatar__name__iexact=username,
                enabled=True,
            )[:1]
            if not players:
                log.error("Django auth failed.")
                return None
            player = players[0]
            # BUG FIX: previously only the first two characters of the stored
            # hash were passed as the salt, which breaks modular-format
            # ($1$/$5$/$6$) hashes. Passing the full stored hash lets crypt()
            # extract the salt itself, and is backward compatible with the
            # legacy two-character DES salts.
            if player.crypt != crypt.crypt(password, player.crypt):
                return None
            log.info('%s logged in', player.avatar)
            return player
        except models.Player.DoesNotExist:
            log.error("Player auth failed.")
            return None
        except Exception:
            # Log and fall through to an implicit None so an internal error
            # never authenticates anyone.
            log.error("Error in authenticate(): %s", traceback.format_exc())

    def get_user(self, user_id):
        """
        Return the Player represented by user_id, or None if it doesn't exist.
        """
        try:
            return models.Player.objects.get(pk=user_id)
        except models.Player.DoesNotExist:
            return None
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/core/auth.py",
"copies": "1",
"size": "2332",
"license": "mit",
"hash": 6705590292026686000,
"line_mean": 26.7619047619,
"line_max": 80,
"alpha_frac": 0.5797598628,
"autogenerated": false,
"ratio": 4.24,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01925269242039326,
"num_lines": 84
} |
"""
Default database bootstrap.
"""
from antioch.core import interface, bootstrap
from antioch.util import sql
for name in interface.default_permissions:
exchange.connection.runOperation(sql.build_insert('permission', name=name))
exchange.load_permissions()
system = exchange.instantiate('object', name='System Object')
set_default_permissions_verb = interface.Verb(system)
set_default_permissions_verb._method = True
set_default_permissions_verb._code = bootstrap.get_source('system_set_default_permissions.py')
exchange.save(set_default_permissions_verb)
set_default_permissions_verb.add_name('set_default_permissions')
set_default_permissions_verb(set_default_permissions_verb)
set_default_permissions_verb(system)
wizard = exchange.instantiate('object', name='Wizard', unique_name=True)
wizard.set_owner(wizard)
system.set_owner(wizard)
set_default_permissions_verb.set_owner(wizard)
bag_of_holding = exchange.instantiate('object', name='bag of holding')
bag_of_holding.set_owner(wizard)
bag_of_holding.set_location(wizard)
author_hammer = exchange.instantiate('object', name='author hammer', unique_name=True)
author_hammer.set_owner(wizard)
author_hammer.set_location(bag_of_holding)
wizard_hammer = exchange.instantiate('object', name='wizard hammer', unique_name=True)
wizard_hammer.set_owner(wizard)
wizard_hammer.set_location(bag_of_holding)
player_class = exchange.instantiate('object', name='player class')
player_class.set_location(bag_of_holding)
player_class.set_owner(wizard)
guest_class = exchange.instantiate('object', name='guest class')
guest_class.set_owner(wizard)
guest_class.add_parent(player_class)
guest_class.set_location(bag_of_holding)
author_class = exchange.instantiate('object', name='author class')
author_class.set_owner(wizard)
author_class.add_parent(player_class)
author_class.set_location(bag_of_holding)
programmer_class = exchange.instantiate('object', name='programmer class')
programmer_class.set_owner(wizard)
programmer_class.add_parent(author_class)
programmer_class.set_location(bag_of_holding)
wizard_class = exchange.instantiate('object', name='wizard class')
wizard_class.set_owner(wizard)
wizard_class.add_parent(programmer_class)
wizard_class.set_location(bag_of_holding)
wizard.add_parent(wizard_class)
room_class = exchange.instantiate('object', name='room class')
room_class.set_owner(wizard)
laboratory = exchange.instantiate('object', name='The Laboratory', unique_name=True)
laboratory.set_owner(wizard)
laboratory.add_parent(room_class)
laboratory.add_property('description', **dict(
owner_id = wizard.get_id(),
value = """A cavernous laboratory filled with gadgetry of every kind,
this seems like a dumping ground for every piece of dusty forgotten
equipment a mad scientist might require.
""",
))
lobby = exchange.instantiate('object', name='The Lobby', unique_name=True)
lobby.set_owner(wizard)
lobby.add_parent(room_class)
lobby.add_property('description', **dict(
owner_id = wizard.get_id(),
value = """A dusty old waiting area, every minute spent in this room
feels like an eternity.
""",
))
wizard.set_location(laboratory)
wizard.set_player(True, is_wizard=True, passwd='wizard')
system.add_verb('authenticate', **dict(
method = True,
filename = 'system_authenticate.py',
repo = 'default',
ref = 'master'
))
system.add_verb('connect', **dict(
method = True,
filename = 'system_connect.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
system.add_verb('login', **dict(
method = True,
filename = 'system_login.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
system.add_verb('logout', **dict(
method = True,
filename = 'system_logout.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
wizard_class.add_verb('edit', **dict(
ability = True,
filename = 'wizard_class_edit.py',
repo = 'default',
ref = 'master'
))
wizard_class.add_verb('exec', **dict(
ability = True,
filename = 'wizard_class_exec.py',
repo = 'default',
ref = 'master'
)).allow('wizards', 'execute')
wizard_class.add_verb('eval', **dict(
ability = True,
filename = 'wizard_class_eval.py',
repo = 'default',
ref = 'master'
)).allow('wizards', 'execute')
# wizard_class.add_verb('adduser', **dict(
# ability = True,
# filename = 'wizard_class_adduser.py',
# repo = 'default',
# ref = 'master'
# )).allow('wizards', 'execute')
wizard_class.add_verb('passwd', **dict(
ability = True,
method = True,
filename = 'wizard_class_passwd.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
author_class.add_verb('alias', **dict(
ability = True,
filename = 'author_class_alias.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
author_class.add_verb('make', **dict(
ability = True,
filename = 'author_class_make.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
author_class.add_verb('inspect', **dict(
ability = True,
filename = 'author_class_inspect.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
author_class.add_verb('dig', **dict(
ability = True,
filename = 'author_class_dig.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
author_class.add_verb('tunnel', **dict(
ability = True,
filename = 'author_class_tunnel.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
author_class.add_verb('describe', **dict(
ability = True,
filename = 'author_class_describe.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
guest_class.add_verb('passwd', **dict(
ability = True,
code = 'write(caller, "Guests cannot change their passwords.")',
)).allow('everyone', 'execute')
player_class.add_verb('set', **dict(
ability = True,
filename = 'player_class_set.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
player_class.add_verb('look', **dict(
ability = True,
method = True,
filename = 'player_class_look.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
player_class.add_verb('go', **dict(
ability = True,
filename = 'player_class_go.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
player_class.add_verb('say', **dict(
ability = True,
filename = 'player_class_say.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
player_class.add_verb('hear', **dict(
method = True,
filename = 'player_class_hear.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
player_class.add_verb('passwd', **dict(
ability = True,
filename = 'player_class_passwd.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
room_class.add_verb('hear', **dict(
method = True,
filename = 'room_class_hear.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
author = exchange.instantiate('object', name='Author', unique_name=True)
author.set_owner(author)
author.set_location(laboratory)
author.set_player(True, passwd='author')
author.add_parent(author_class)
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/core/bootstrap/default.py",
"copies": "1",
"size": "7958",
"license": "mit",
"hash": 2705103815562514400,
"line_mean": 28.9172932331,
"line_max": 94,
"alpha_frac": 0.6305604423,
"autogenerated": false,
"ratio": 3.2938741721854305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9255225359234789,
"avg_score": 0.033841851050128144,
"num_lines": 266
} |
"""
Encode/decode antioch JSON
antioch-flavored JSON contains object references, where objects are
saved in the format::
{'o#1':'ObjectName'}
{'v#1':'VerbName'}
{'p#1':'PropertyName'}
These will be converted transparently by the dumps/loads functions
in this module.
"""
import json
def loads(j, exchange=None):
    """
    Load some antioch-flavored JSON.

    If exchange is supplied, this will convert references to the real thing;
    otherwise the payload is parsed as plain JSON. Non-JSON input is returned
    with surrounding quotes stripped.
    """
    if not j:
        return j

    def to_entity(d):
        # Only a single-key dict can be an entity reference like {'o#1': ...}.
        if len(d) != 1:
            return d
        key = list(d.keys())[0]
        # BUG FIX: guard the length before probing key[1] -- a one-character
        # key used to raise IndexError, which escaped the object_hook and made
        # the outer handler return the raw string instead of the parsed dict.
        if len(key) > 1 and key[1] == '#':
            try:
                if key[0] == 'o':
                    return exchange.get_object(key[1:])
                elif key[0] == 'v':
                    return exchange.instantiate('verb', id=int(key[2:]))
                elif key[0] == 'p':
                    return exchange.instantiate('property', id=int(key[2:]))
            except Exception:
                # Reference couldn't be resolved; degrade to a marker string.
                return 'missing:%s' % key
        return d

    try:
        if exchange:
            return json.loads(j, object_hook=to_entity)
        else:
            return json.loads(j)
    except Exception:
        # Not valid JSON; fall back to treating it as a bare quoted string.
        return j.strip('"').strip("'")
def dumps(obj):
    """
    Create some antioch-flavored JSON (containing antioch object references).
    """
    from antioch.core import interface

    def from_entity(item):
        # Non-entities pass through untouched for json's default handling.
        if not isinstance(item, interface.Entity):
            return item
        if isinstance(item, interface.Object):
            return {'o#%d' % item.get_id(): item.get_name(real=True)}
        if isinstance(item, interface.Verb):
            return {'v#%d' % item.get_id(): item.name}
        if isinstance(item, interface.Property):
            return {'p#%d' % item.get_id(): item.name}
        # Any other Entity subtype falls through to None (serialized as null).

    return json.dumps(obj, default=from_entity)
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/util/ason.py",
"copies": "1",
"size": "1913",
"license": "mit",
"hash": -2307692342454263300,
"line_mean": 25.5694444444,
"line_max": 77,
"alpha_frac": 0.542603241,
"autogenerated": false,
"ratio": 3.650763358778626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46933665997786256,
"avg_score": null,
"num_lines": null
} |
"""
Graphical object editor support.
"""
from zope.interface import provider
from antioch import IPlugin
from . import forms
from django.conf import settings
def edit(p, item):
    """Push an 'edit' message for *item* to the calling user's client."""
    message = dict(command='edit', details=item.get_details())
    p.exchange.send_message(p.caller.get_id(), message)
def access(p, item):
    """Push an 'access' message describing *item*'s ACL to the caller."""
    acl = p.exchange.get_access(item.get_id(), item.get_type())
    rule_details = []
    for rule in acl:
        # Named accessors are resolved to objects; otherwise show the group.
        if rule['accessor_id']:
            accessor = str(p.exchange.get_object(rule['accessor_id']))
        else:
            accessor = rule['group']
        rule_details.append(dict(
            access_id=rule['id'],
            rule=rule['rule'],
            access=rule['type'],
            accessor=accessor,
            permission=rule['permission_name'],
        ))
    details = dict(
        id=str(item),
        type=item.get_type(),
        origin=str(getattr(item, 'origin', '')),
        access=rule_details,
    )
    p.exchange.send_message(p.caller.get_id(), dict(
        command='access',
        details=details,
    ))
@provider(IPlugin)
class EditorPlugin(object):
    """Plugin that exposes the graphical object editor to verb environments."""
    script_url = 'js/editor-plugin.js'

    def get_media(self):
        # Media definitions come straight from the editor's Django form.
        return forms.ObjectForm().media

    def get_environment(self):
        # Expose the module-level editor helpers to verb code.
        return dict(edit=edit, access=access)
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/plugins/editors/plugin.py",
"copies": "1",
"size": "1438",
"license": "mit",
"hash": 712589949140565400,
"line_mean": 23.7931034483,
"line_max": 116,
"alpha_frac": 0.5312934631,
"autogenerated": false,
"ratio": 3.7253886010362693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9507719965531016,
"avg_score": 0.049792419721050564,
"num_lines": 58
} |
"""
Log customization support.
"""
import sys, time
from django.utils import termcolors
# Terminal color style per log level, used by DjangoColorFormatter.format();
# levels absent from this map are emitted uncolored.
styles = dict(
    ERROR = termcolors.make_style(fg='red', opts=['bold']),
    WARNING = termcolors.make_style(fg='yellow', opts=['bold']),
    INFO = termcolors.make_style(fg='cyan'),
    DEBUG = termcolors.make_style(fg='blue'),
)
class DjangoColorFormatter(object):
    """
    Colorize log output when outputting to a terminal.
    """
    def __init__(self, logformat=None, datefmt=None):
        """
        Create a formatter with the provided formats.
        """
        self.logformat = logformat if logformat else '[%(asctime)s] %(levelname)s: %(msg)s'
        self.datefmt = datefmt if datefmt else '%d/%b/%Y %H:%M:%S'

    def format(self, log):
        """
        Format a log record, colorizing by level when stdout is a TTY.
        """
        unsupported_platform = sys.platform in ('win32', 'Pocket PC')
        is_a_tty = hasattr(sys.__stdout__, 'isatty') and sys.__stdout__.isatty()
        # BUG FIX: mirror logging.LogRecord.getMessage() and only interpolate
        # when args were actually supplied; previously a message containing a
        # literal '%' with no args would blow up and be rendered as
        # "<msg> ()". Also removed the unused `supports_color` local.
        if log.args:
            try:
                msg = log.msg % log.args
            except (TypeError, ValueError):
                # Mismatched placeholders: degrade to appending the args.
                msg = '%s %s' % (log.msg, log.args)
        else:
            msg = log.msg
        result = self.logformat % dict(
            name = log.name,
            asctime = time.strftime(self.datefmt, time.gmtime(log.created)),
            levelname = log.levelname,
            pathname = log.pathname,
            funcName = log.funcName,
            lineno = log.lineno,
            msg = msg,
            thread = log.thread,
            threadName = log.threadName,
            process = log.process,
            processName = log.processName,
        )
        if log.levelname not in styles or unsupported_platform or not is_a_tty:
            return result
        return styles[log.levelname](result)
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/util/logs.py",
"copies": "1",
"size": "1859",
"license": "mit",
"hash": -731590708571011000,
"line_mean": 28.5079365079,
"line_max": 91,
"alpha_frac": 0.5621301775,
"autogenerated": false,
"ratio": 3.8810020876826723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49431322651826726,
"avg_score": null,
"num_lines": null
} |
import logging
from celery import shared_task
from celery.utils.log import get_task_logger
from django.conf import settings
from django.db import connection
from antioch.core import code, exchange, errors, parser
from antioch.util import sql, ason
log = get_task_logger(__name__)
def get_exchange(ctx=None):
    """
    Get an ObjectExchange instance for the provided context.

    With a context, the exchange queues messages on that context's behalf;
    without one, a plain (superuser) exchange is returned.
    """
    if not ctx:
        return exchange.ObjectExchange(connection)
    return exchange.ObjectExchange(connection, queue=True, ctx=ctx)
@shared_task
def authenticate(username, password, ip_address):
    """
    Return the user id for the username/password combo, if valid.

    Raises errors.PermissionError (with a numbered diagnostic message) on any
    failure; the numbers identify which check rejected the login.
    """
    with get_exchange() as x:
        # If the in-game #1.connect verb exists, let it observe/gate the
        # connection first.
        connect = x.get_verb(1, 'connect')
        if(connect):
            connect(ip_address)
        # Prefer a custom in-game #1.authenticate verb when one is defined;
        # it short-circuits the built-in checks below.
        authentication = x.get_verb(1, 'authenticate')
        if(authentication):
            u = authentication(username, password, ip_address)
            if(u):
                return {'user_id': u.get_id()}
        # Built-in fallback: resolve the username to an object. All lookup
        # failures are converted to PermissionError so callers can't probe
        # which usernames exist.
        try:
            u = x.get_object(username)
            if not(u):
                raise errors.PermissionError("Invalid login credentials. (2)")
        except errors.NoSuchObjectError as e:
            raise errors.PermissionError("Invalid login credentials. (3)")
        except errors.AmbiguousObjectError as e:
            raise errors.PermissionError("Invalid login credentials. (4)")
        # Concurrent logins are only allowed for accounts listed in the
        # #1.multilogin_accounts property.
        multilogin_accounts = x.get_property(1, 'multilogin_accounts')
        if(u.is_connected_player()):
            if(not multilogin_accounts or u not in multilogin_accounts.value):
                raise errors.PermissionError('User is already logged in.')
        if not(u.validate_password(password)):
            raise errors.PermissionError("Invalid login credentials. (6)")
        return {'user_id': u.get_id()}
@shared_task
def deploy(user_id, source):
    """
    Deploy an annotated source definition (parsed by code.parse_deployment)
    on behalf of user_id, creating the entity and applying its access rules.
    """
    def _apply_rule(obj, rule, resolve):
        # Access rules are colon-delimited: grant:accessor:permission.
        grant, accessor, permission = rule.split(":")
        if grant == 'allow':
            obj.allow(resolve(accessor), permission)
        elif grant == 'deny':
            obj.deny(resolve(accessor), permission)

    with get_exchange(user_id) as x:
        d = x and code.parse_deployment(source) or code.parse_deployment(source)
        d = code.parse_deployment(source)
        origin = x.get_object(d.origin)
        obj = x.instantiate(d.type, origin_id=origin.id, name=d.name)
        if d.type == 'verb':
            obj.set_code(source)
        obj.set_owner(x.get_object(d.owner))
        obj.set_method(d.method)
        obj.set_ability(d.ability)
        # Group rules name the accessor directly; object rules resolve it
        # through the exchange. The parsing logic is shared via _apply_rule.
        for rule in d.access_group:
            _apply_rule(obj, rule, lambda accessor: accessor)
        for rule in d.access_object:
            _apply_rule(obj, rule, x.get_object)
@shared_task
def login(user_id, session_id, ip_address):
    """
    Register a login for the provided user_id.
    """
    with get_exchange(user_id) as x:
        x.login_player(user_id, session_id)
        system = x.get_object(1)
        # Give the in-game #1.login verb a chance to run, if defined.
        if system.has_verb("login"):
            system.login()
        log.info('user #%s logged in from %s' % (user_id, ip_address))
    return {'response': True}
@shared_task
def logout(user_id):
    """
    Register a logout for the provided user_id.
    """
    # we want to make sure to logout the user even
    # if the logout verb fails
    # (two separate exchanges: the first commits the player-record logout
    # before the in-game #1.logout verb runs in the second)
    with get_exchange(user_id) as x:
        x.logout_player(user_id)
    with get_exchange(user_id) as x:
        system = x.get_object(1)
        if(system.has_verb("logout")):
            system.logout()
        log.info('user #%s logged out' % user_id)
    return {'response': True}
@shared_task
def parse(user_id, sentence):
    """
    Parse a command sentence for the provided user_id.
    """
    with get_exchange(user_id) as x:
        actor = x.get_object(user_id)
        log.info('%s: %s' % (actor, sentence))
        parser.parse(actor, sentence)
    return {'response': True}
@shared_task
def registertask(user_id, delay, origin_id, verb_name, args, kwargs):
    """
    Register a delayed task for the provided user_id.
    """
    with get_exchange(user_id) as x:
        try:
            task_id = x.register_task(user_id, delay, origin_id, verb_name, args, kwargs)
        except Exception:
            # Log through the task logger instead of print(), and re-raise
            # with the original traceback intact (bare `raise`, not
            # `raise e`).
            log.exception('failed to register task for user #%s', user_id)
            raise
    return {'task_id': task_id}
@shared_task
def runtask(user_id, task_id):
    """
    Run a task for a particular user.
    """
    with get_exchange(user_id) as x:
        task = x.get_task(task_id)
        if not task or task['killed']:
            return {'response': False}
        origin = x.get_object(task['origin_id'])
        # BUG FIX: this module never imports `json`, so the previous
        # json.loads() calls raised NameError at runtime. Decode the stored
        # argument payloads with the antioch-flavored wrapper instead, which
        # also resolves any embedded object references through the exchange.
        args = ason.loads(task['args'], exchange=x)
        kwargs = ason.loads(task['kwargs'], exchange=x)
        v = origin.get_verb(task['verb_name'])
        v(*args, **kwargs)
    return {'response': True}
@shared_task
def iteratetasks():
    """
    Run one waiting task, if possible.
    """
    # note this is a 'superuser exchange'
    # should be fine, since all iterate_task does
    # is create another subprocess for the proper user
    with get_exchange() as x:
        # BUG FIX: `self` was passed here, but this is a plain function, so
        # every invocation raised NameError. iterate_task is now called
        # without it -- TODO confirm iterate_task()'s expected signature.
        task = x.iterate_task()
    return {'response': task}
"repo_name": "philchristensen/antioch",
"path": "antioch/core/tasks.py",
"copies": "1",
"size": "5308",
"license": "mit",
"hash": 4492224547416739000,
"line_mean": 28.6592178771,
"line_max": 89,
"alpha_frac": 0.5981537302,
"autogenerated": false,
"ratio": 3.7196916608269097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.481784539102691,
"avg_score": null,
"num_lines": null
} |
import logging
from celery import shared_task
from antioch.util import ason
from antioch.core import parser, tasks
log = logging.getLogger(__name__)
@shared_task
def openeditor(user_id, object_id, type, name):
    """
    Open the graphical editor on an object, or on one of its verbs/properties
    (creating the verb/property if it doesn't exist yet).
    """
    with tasks.get_exchange(user_id) as x:
        if(type == 'object'):
            item = x.get_object(object_id)
        else:
            # e.g. x.get_verb(object_id, name) / x.get_property(object_id, name)
            item = getattr(x, 'get_' + type)(object_id, name)
            if(item is None):
                # Editing a not-yet-existing verb/property creates it.
                item = x.instantiate(type, owner_id=user_id, origin_id=object_id, name=name)
        caller = x.get_object(user_id)
        # A throwaway parser gives the editor plugin its expected context.
        p = parser.TransactionParser(parser.Lexer(''), caller, x)
        # Imported here to avoid a circular import at module load time.
        from antioch.plugins import editors
        editors.edit(p, item)
    return {'response': True}
@shared_task
def openaccess(user_id, object_id, type, name):
    """
    Open the access (ACL) editor on an object, or on one of its
    verbs/properties.
    """
    with tasks.get_exchange(user_id) as x:
        origin = x.get_object(object_id)
        caller = x.get_object(user_id)
        # For verbs/properties, resolve through the origin's accessor method.
        item = origin if type == 'object' else getattr(origin, 'get_' + type)(name)
        p = parser.TransactionParser(parser.Lexer(''), caller, x)
        from antioch.plugins import editors
        editors.access(p, item)
    return {'response': True}
@shared_task
def modifyobject(user_id, object, name, location, parents, owner):
    """
    Update an object's name, location, owner and parent list, adding and
    removing parents to match the requested set.
    """
    with tasks.get_exchange(user_id) as x:
        o = x.get_object(object)
        o.set_name(name, real=True)
        o.set_location(x.get_object(location) if location else None)
        o.set_owner(x.get_object(owner))
        old_parents = o.get_parents()
        new_parents = [x.get_object(p) for p in parents]
        # Plain loops instead of throwaway list comprehensions: these calls
        # are executed purely for their side effects.
        for p in old_parents:
            if p not in new_parents:
                o.remove_parent(p)
        for p in new_parents:
            if p not in old_parents:
                o.add_parent(p)
    return {'response': True}
@shared_task
def modifyverb(user_id, object_id, verb_id, names, code, ability, method, owner):
    """
    Update a verb's names, owner, code and ability/method flags.
    """
    with tasks.get_exchange(user_id) as x:
        # The client sends names as one comma-separated string.
        name_list = [n.strip() for n in names.split(',')]
        v = x.load('verb', verb_id)
        v.set_names(name_list)
        v.set_owner(x.get_object(owner))
        v.set_code(code)
        v.set_ability(ability)
        v.set_method(method)
    return {'response': True}
@shared_task
def removeverb(user_id, object_id, verb_name):
    """
    Remove the named verb from an object.
    """
    with tasks.get_exchange(user_id) as x:
        x.get_object(object_id).remove_verb(verb_name)
    return {'response': True}
@shared_task
def removeproperty(user_id, object_id, property_name):
    """
    Remove the named property from an object.
    """
    with tasks.get_exchange(user_id) as x:
        x.get_object(object_id).remove_property(property_name)
    return {'response': True}
@shared_task
def modifyproperty(user_id, object_id, property_id, name, value, type, owner):
    """
    Update a property's name, owner and (antioch-JSON encoded) value.
    """
    with tasks.get_exchange(user_id) as x:
        p = x.load('property', property_id)
        p.set_name(name)
        p.set_owner(x.get_object(owner))
        # BUG FIX: this was json.loads(value, exchange=x), but the stdlib
        # json.loads() has no `exchange` keyword (and `json` isn't even
        # imported in this module). The antioch-flavored decoder in
        # antioch.util.ason is the one that accepts an exchange and resolves
        # embedded object references.
        p.set_value(ason.loads(value, exchange=x), type=type)
    return {'response': True}
@shared_task
def modifyaccess(user_id, object_id, type, access):
    """
    Apply a batch of access-rule changes to an object.

    Each rule dict is passed straight through to exchange.update_access();
    rules targeting a specific accessor have their 'accessor' id resolved to
    a real object first (mutating the incoming rule dict in place).
    """
    with tasks.get_exchange(user_id) as x:
        subject = x.get_object(object_id)
        for rule in access:
            if(rule['access'] == 'accessor'):
                rule['accessor'] = x.get_object(rule['accessor'])
            x.update_access(subject=subject, **rule)
    return {'response': True}
@shared_task
def getobjectdetails(user_id, object_id):
    """
    Return the details dict for the given object, as seen by user_id.
    """
    with tasks.get_exchange(user_id) as x:
        return x.get_object(object_id).get_details()
"repo_name": "philchristensen/antioch",
"path": "antioch/plugins/editors/tasks.py",
"copies": "1",
"size": "3711",
"license": "mit",
"hash": 3263516196533420000,
"line_mean": 28.935483871,
"line_max": 92,
"alpha_frac": 0.6087308003,
"autogenerated": false,
"ratio": 3.325268817204301,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4433999617504301,
"avg_score": null,
"num_lines": null
} |
"""
Online signup for new players.
"""
import logging
import pkg_resources as pkg
from zope.interface import provider
from antioch import IPlugin
from antioch.util import ason
from antioch.core import parser, code
log = logging.getLogger(__name__)
@provider(IPlugin)
class SignupPlugin(object):
    """
    Plugin providing online signup verbs for new players.
    """
    # No client-side script for this plugin.
    script_url = None

    def initialize(self, exchange):
        """
        Install the add_player/enable_player verbs on the system object (#1).
        """
        p = 'antioch.plugins.signup.verbs'
        system = exchange.get_object(1)
        system.add_verb('add_player', **dict(
            method = True,
            filename = pkg.resource_filename(p, 'system_add_player.py')
        ))
        system.add_verb('enable_player', **dict(
            method = True,
            filename = pkg.resource_filename(p, 'system_enable_player.py')
        ))

    def get_environment(self):
        """
        Return the helper functions exposed to verb environments.
        """
        def add_player(p, name=None, passwd=None, enabled=True):
            # Only admins may create players; a None caller (system context)
            # skips the check.
            system = p.exchange.get_object(1)
            p.caller and p.caller.is_allowed('administer', system, fatal=True)
            # NOTE(review): `klass` is fetched but never used -- possibly it
            # was meant to become the new user's parent; confirm intent.
            klass = p.exchange.get_object('player class')
            user = p.exchange.instantiate('object', name=name, unique_name=True)
            user.set_owner(user)
            # New accounts start disabled; enable_player flips them on.
            user.set_player(is_player=False, passwd=passwd)
            return user
        def enable_player(p, user):
            user.set_player(is_player=True)
        return dict(
            add_player = add_player,
            enable_player = enable_player,
        )
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/plugins/signup/plugin.py",
"copies": "1",
"size": "1580",
"license": "mit",
"hash": -2498613053845139000,
"line_mean": 26.7192982456,
"line_max": 80,
"alpha_frac": 0.5917721519,
"autogenerated": false,
"ratio": 3.7264150943396226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48181872462396225,
"avg_score": null,
"num_lines": null
} |
"""
Parse command strings sent by the client.
This parser can understand a variety of phrases, but they are all represented
by the (BNF?) form:
<verb>[[[<dobj spec> ]<direct-object> ]+[<prep> [<pobj spec> ]<object-of-the-preposition>]*]
There are a long list of prepositions supported, some of which are interchangeable.
"""
import sys, time, re, string, types, logging
from antioch.core import exchange, interface, errors
from antioch.core.errors import *
log = logging.getLogger(__name__)
# Generic URL pattern, broken out by component via named groups.
URL_REGEXP = (
    r'(?P<scheme>[+a-z0-9]+)\:(\/\/)?'
    r'((?P<user>\w+?)(\:(?P<passwd>\w+?))?\@)?'
    r'(?P<host>[\._\-a-z0-9]+)(\:(?P<port>\d+)?)?'
    r'(?P<path>/[^\s;?#]*)(;(?P<params>[^\s?#]*))?'
    r'(\?(?P<query>[^\s#]*))?(\#(?P<fragment>[^\s]*))?'
)
URL_RE = re.compile(URL_REGEXP, re.IGNORECASE)

class URL(dict):
    """A dict of URL components (scheme, host, path, ...) parsed from a string."""
    def __init__(self, source):
        parts = URL_RE.match(source)
        self.update(parts.groupdict())
        self.source = str(source)

    def __str__(self):
        return self.source
#Here are all our supported prepositions; synonyms share a group
preps = [['with', 'using'],
    ['at', 'to'],
    ['in front of'],
    ['in', 'inside', 'into', 'within'],
    ['on top of', 'on', 'onto', 'upon', 'above'],
    ['out of', 'from inside', 'from'],
    ['over'],
    ['through'],
    ['under', 'underneath', 'beneath', 'below'],
    ['around', 'round'],
    ['between', 'among'],
    ['behind', 'past'],
    ['beside', 'by', 'near', 'next to', 'along'],
    ['for', 'about'],
    #['is'],
    ['as'],
    ['off', 'off of']]

# Flatten every synonym group into one big alternation for the regex below.
prepstring = "|".join("|".join(group) for group in preps)

PREP_SRC = r'(?:\b)(?P<prep>' + prepstring + r')(?:\b)'
SPEC = r"(?P<spec_str>my|the|a|an|\S+(?:\'s|s\'))"
PHRASE_SRC = r'(?:' + SPEC + r'\s)?(?P<obj_str>.+)'
PREP = re.compile(PREP_SRC)
PHRASE = re.compile(PHRASE_SRC)
POBJ_TEST = re.compile(PREP_SRC + "\s" + PHRASE_SRC)
MULTI_WORD = re.compile(r'((\"|\').+?(?!\\).\2)|(\S+)')
def parse(caller, sentence, debug=False):
    """
    For a given user, execute a command.
    """
    timings = dict(time=time.time())

    def _profile(stage):
        # When debugging, report how long each parse stage took.
        if debug:
            log.debug("%s took %4f seconds" % (
                stage, time.time() - timings['time']
            ))
        timings['time'] = time.time()

    lex = Lexer(sentence)
    _profile('lexer')
    trans = TransactionParser(lex, caller, caller.get_exchange())
    _profile('parser')
    verb = trans.get_verb()
    _profile('verb search')
    verb.execute(trans)
    _profile('execution')
def get_default_parser(v):
    """
    A default parser is used by Verbs to support __call__ usage
    """
    xchg = v.get_exchange()
    default = TransactionParser(Lexer(v.name), xchg.get_context(), xchg)
    default.verb = v
    default.this = v.get_source()
    return default
class Lexer(object):
    """
    An instance of this class will identify the various parts of a imperitive
    sentence. This may be of use to verb code, as well.
    """
    def __init__(self, command):
        """
        Tokenize `command` into words, a direct-object phrase, and a map of
        prepositional phrases.
        """
        self.command = command
        self.dobj_str = None
        self.dobj_spec_str = None
        # First, find all words or double-quoted-strings in the text
        iterator = re.finditer(MULTI_WORD, command)
        self.words = []
        qotd_matches = []
        for item in iterator:
            if(item.group(1)):
                qotd_matches.append(item)
            # Strip surrounding quotes and unescape embedded quotes.
            word = item.group().strip('\'"').replace("\\'", "'").replace("\\\"", "\"")
            self.words.append(word)
        # Now, find all prepositions
        iterator = re.finditer(PREP, command)
        prep_matches = []
        for item in iterator:
            prep_matches.append(item)
        #this method will be used to filter out prepositions inside quotes
        def nonoverlap(item):
            (start, end) = item.span()
            for word in qotd_matches:
                (word_start, word_end) = word.span()
                if(start > word_start and start < word_end):
                    return False
                elif(end > word_start and end < word_end):
                    return False
            return True
        #nonoverlap() will leave only true non-quoted prepositions
        prep_matches = list(filter(nonoverlap, prep_matches))
        #determine if there is anything after the verb
        if(len(self.words) > 1):
            #if there are prepositions, we only look for direct objects
            #until the first preposition
            if(prep_matches):
                end = prep_matches[0].start()-1
            else:
                end = len(command)
            #this is the phrase, which could be [[specifier ]object]
            dobj_phrase = command[len(self.words[0]) + 1:end]
            match = re.match(PHRASE, dobj_phrase)
            if(match):
                result = match.groupdict()
                self.dobj_str = result['obj_str'].strip('\'"').replace("\\'", "'").replace("\\\"", "\"")
                if(result['spec_str']):
                    self.dobj_spec_str = result['spec_str'].strip('\'"').replace("\\'", "'").replace("\\\"", "\"")
                else:
                    self.dobj_spec_str = ''
        # prepositions maps prep -> [spec, obj_str, resolved_obj], or a list
        # of such triples when the same preposition appears more than once.
        self.prepositions = {}
        #iterate through all the prepositional phrase matches
        for index in range(len(prep_matches)):
            start = prep_matches[index].start()
            #if this is the last preposition, then look from here until the end
            if(index == len(prep_matches) - 1):
                end = len(command)
            #otherwise, search until the next preposition starts
            else:
                end = prep_matches[index + 1].start() - 1
            prep_phrase = command[start:end]
            phrase_match = re.match(POBJ_TEST, prep_phrase)
            if not(phrase_match):
                continue
            result = phrase_match.groupdict()
            #if we get a quoted string here, strip the quotes
            result['obj_str'] = result['obj_str'].strip('\'"').replace("\\'", "'").replace("\\\"", "\"")
            if(result['spec_str'] is None):
                result['spec_str'] = ''
            #if there is already a entry for this preposition, we turn it into
            #a list, and if it already is one, we append to it
            if(result['prep'] in self.prepositions):
                item = self.prepositions[result['prep']]
                if not(isinstance(item[0], list)):
                    self.prepositions[result['prep']] = [[result['spec_str'], result['obj_str'], None], item]
                else:
                    self.prepositions[result['prep']].append([result['spec_str'], result['obj_str'], None])
            #if it's a new preposition, we just save it here.
            else:
                self.prepositions[result['prep']] = [result['spec_str'], result['obj_str'], None]

    def get_details(self):
        """
        Return the lexed components as a plain dict.
        """
        return dict(
            command = self.command,
            dobj_str = self.dobj_str,
            dobj_spec_str = self.dobj_spec_str,
            words = self.words,
            prepositions = self.prepositions,
        )
class TransactionParser(object):
"""
The parser instance is created by the avatar. A new instance is created
for each remote call to perspective_parse.
"""
def __init__(self, lexer, caller, exchange):
    """
    Create a new parser object for the given command, as issued by
    the given caller, using the registry.

    Copies the lexer's details onto this instance and resolves every
    prepositional object and the direct object to real objects where possible.
    """
    self.lexer = lexer
    self.caller = caller
    self.exchange = exchange
    self.this = None
    self.verb = None
    if(self.lexer):
        # Copy command/words/dobj_str/prepositions etc. onto the parser.
        for key, value in list(self.lexer.get_details().items()):
            self.__dict__[key] = value
        for prep in self.prepositions:
            prep_record_list = self.prepositions[prep]
            # Normalize the single-triple case to a list of triples.
            if not(isinstance(prep_record_list[0], list)):
                prep_record_list = [prep_record_list]
            for record in prep_record_list:
                #look for an object with this name/specifier
                obj = self.find_object(record[0], record[1])
                #try again (maybe it just looked like a specifier)
                if(not obj and record[0]):
                    record[1] = record[0] + ' ' + record[1]
                    record[0] = ''
                    obj = self.find_object(record[0], record[1])
                #one last shot for pronouns
                if not(obj):
                    obj = self.get_pronoun_object(record[1])
                # Slot 2 of each triple holds the resolved object (or None).
                record[2] = obj
    if(hasattr(self, 'dobj_str') and self.dobj_str):
        #look for an object with this name/specifier
        self.dobj = self.find_object(self.dobj_spec_str, self.dobj_str)
        #try again (maybe it just looked like a specifier)
        if(not self.dobj and self.dobj_spec_str):
            self.dobj_str = self.dobj_spec_str + ' ' + self.dobj_str
            self.dobj_spec_str = ''
            self.dobj = self.find_object(None, self.dobj_str)
        #if there's nothing with this name, then we look for
        #pronouns before giving up
        if not(self.dobj):
            self.dobj = self.get_pronoun_object(self.dobj_str)
    else:
        #didn't find anything, probably because nothing was there.
        self.dobj = None
        self.dobj_str = None
def get_environment(self):
"""
Return a dictionary of environment variables supplied by the parser results.
"""
return dict(
parser = self,
command = self.command,
caller = self.caller,
dobj = self.dobj,
dobj_str = self.dobj_str,
dobj_spec_str = self.dobj_spec_str,
words = self.words,
prepositions = self.prepositions,
this = self.this,
self = self.verb,
system = self.exchange.get_object(1),
here = self.caller.get_location() if self.caller else None,
get_dobj = self.get_dobj,
get_dobj_str = self.get_dobj_str,
has_dobj = self.has_dobj,
has_dobj_str = self.has_dobj_str,
get_pobj = self.get_pobj,
get_pobj_str = self.get_pobj_str,
has_pobj = self.has_pobj,
has_pobj_str = self.has_pobj_str,
)
def find_object(self, specifier, name, return_list=False):
"""
Look for an object, with the optional specifier, in the area
around the person who entered this command. If the posessive
form is used (i.e., "Bill's spoon") and that person is not
here, a NoSuchObjectError is thrown for that person.
"""
result = None
search = None
if(specifier == 'my'):
search = self.caller
elif(specifier and specifier.find("'") != -1):
person = specifier[0:specifier.index("'")]
location = self.caller.get_location()
if(location):
search = location.find(person)
else:
search = self.caller.get_location()
if(name and search):
result = search.find(name)
if(isinstance(result, interface.Object)):
return result
elif(return_list):
return result
elif(not result):
return None
else:
raise errors.AmbiguousObjectError(name, result)
def get_verb(self):
"""
Determine the most likely verb for this sentence. There is a search
order for verbs, as follows::
Caller->Caller's Contents->Location->Items in Location->
Direct Object->Objects of the Preposition
"""
if not(self.words):
raise NoSuchVerbError('parser: ' + self.command)
if(getattr(self, 'verb', None) is not None):
return self.verb
verb_str = self.words[0]
matches = []
ctx = self.caller
checks = [self.caller]
checks.extend(self.caller.get_contents())
location = self.caller.get_location()
if(location):
checks.append(location)
checks.extend(location.get_contents())
checks.append(self.dobj)
for key in self.prepositions:
# if there were multiple uses of a preposition
if(isinstance(self.prepositions[key][0], list)):
# then check each one for a verb
checks.extend([pobj[2] for pobj in self.prepositions[key] if pobj[2]])
else:
checks.append(self.prepositions[key][2])
matches = [x for x in checks if x and x.has_verb(verb_str)]
self.this = self.filter_matches(matches)
if(isinstance(self.this, list)):
if(len(self.this) > 1):
raise AmbiguousVerbError(verb_str, self.this)
elif(len(self.this) == 0):
self.this = None
else:
self.this = self.this[0]
if not(self.this):
raise NoSuchVerbError('parser: ' + verb_str)
#print "Verb found on: " + str(self.this)
self.verb = self.this.get_verb(self.words[0], recurse=True)
return self.verb
def filter_matches(self, possible):
result = []
# print "possble is " + str(possible)
if not(isinstance(possible, list)):
possible = [possible]
verb_str = self.words[0]
for item in possible:
if(item is None):
continue
if(item in result):
continue
verb = item.get_verb(verb_str)
if(not verb.performable_by(self.caller)):
continue
if(verb.is_ability() and item.get_id() != self.caller.get_id()):
continue
result.append(item)
# print "result is " + str(result)
return result
def get_pronoun_object(self, pronoun):
"""
Return the correct object for various pronouns.
Also, a object number (starting with a #) will
return the object for that id.
"""
ctx = self.caller
if(pronoun == "me"):
return self.caller
elif(pronoun == "here"):
return self.caller.get_location()
# elif(pronoun == "this"):
# return self.caller.get_observing(ctx)
elif(pronoun[0] == "#"):
return self.exchange.get_object(pronoun)
else:
return None
def get_dobj(self):
"""
Get the direct object for this parser. If there was no
direct object found, raise a NoSuchObjectError
"""
if not(self.dobj):
raise NoSuchObjectError(self.dobj_str)
return self.dobj
def get_pobj(self, prep):
"""
Get the object for the given preposition. If there was no
object found, raise a NoSuchObjectError; if the preposition
was not found, raise a NoSuchPrepositionError.
"""
if not(prep in self.prepositions):
raise NoSuchPrepositionError(prep)
if(isinstance(self.prepositions[prep][0], list)):
matches = []
for item in self.prepositions[prep]:
if(item[2]):
matches.append(item[2])
if(len(matches) > 1):
raise AmbiguousObjectError(matches[0][1], matches)
elif not(matches):
raise NoSuchObjectError(self.prepositions[prep][0][1])
if not(self.prepositions[prep][2]):
raise NoSuchObjectError(self.prepositions[prep][1])
return self.prepositions[prep][2]
def get_dobj_str(self):
"""
Get the direct object **string** for this parser. If there was no
direct object **string** found, raise a NoSuchObjectError
"""
if not(self.dobj_str):
raise NoSuchObjectError('direct object')
return self.dobj_str
def get_pobj_str(self, prep, return_list=False):
"""
Get the object **string** for the given preposition. If there was no
object **string** found, raise a NoSuchObjectError; if the preposition
was not found, raise a NoSuchPrepositionError.
"""
if not(prep in self.prepositions):
raise NoSuchPrepositionError(prep)
if(isinstance(self.prepositions[prep][0], list)):
matches = []
for item in self.prepositions[prep]:
if(item[1]):
matches.append(item[1])
if(len(matches) > 1):
if(return_list):
return matches
else:
raise matches[0]
elif not(matches):
raise NoSuchObjectError(self.prepositions[prep][0][1])
return self.prepositions[prep][1]
def get_pobj_spec_str(self, prep, return_list=False):
"""
Get the object **specifier** for the given preposition. If there was no
object **specifier** found, return the empty string; if the preposition
was not found, raise a NoSuchPrepositionError.
"""
if not(prep in self.prepositions):
raise NoSuchPrepositionError(prep)
if(isinstance(self.prepositions[prep][0], list)):
matches = []
for item in self.prepositions[prep]:
matches.append(item[0])
if(len(matches) > 1):
if(return_list):
return matches
else:
return matches[0]
return self.prepositions[prep][0]
def has_dobj(self):
"""
Was a direct object found?
"""
return self.dobj is not None
def has_pobj(self, prep):
"""
Was an object for this preposition found?
"""
if(prep not in self.prepositions):
return False
found_prep = False
if(isinstance(self.prepositions[prep][0], list)):
for item in self.prepositions[prep]:
if(item[2]):
found_prep = True
break
else:
found_prep = bool(self.prepositions[prep][2])
return found_prep
def has_dobj_str(self):
"""
Was a direct object string found?
"""
return self.dobj_str != None
def has_pobj_str(self, prep):
"""
Was a object string for this preposition found?
"""
if(prep not in self.prepositions):
return False
found_prep = False
if(isinstance(self.prepositions[prep][0], list)):
for item in self.prepositions[prep]:
if(item[1]):
found_prep = True
break
else:
found_prep = bool(self.prepositions[prep][1])
return found_prep
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/core/parser.py",
"copies": "1",
"size": "19767",
"license": "mit",
"hash": 5805749098907629000,
"line_mean": 34.8097826087,
"line_max": 114,
"alpha_frac": 0.5184398239,
"autogenerated": false,
"ratio": 4.01278928136419,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.503122910526419,
"avg_score": null,
"num_lines": null
} |
"""
Plugins add additional client or server functionality
"""
import os, sys
from django.conf import settings
from django.conf.urls import include, url
from importlib import import_module
from django.utils.module_loading import module_has_submodule
# Module-level cache mapping plugin module -> IPlugin instance,
# populated lazily by instantiate().
plugin_cache = {}
def iterate():
    """
    Iterate through installed Django apps that are antioch plugins.

    Yields one instantiated plugin per app that provides a `plugin`
    submodule inside the antioch package.
    """
    for app_name in settings.INSTALLED_APPS:
        mod = get_app_submodule(app_name, submodule='plugin')
        if(mod is None):
            continue
        if(mod.__name__.startswith('antioch')):
            yield instantiate(mod)
def urlconfs():
    """
    Return all urlconfs provided by antioch plugins.

    An app contributes a urlconf when it provides both a ``plugin``
    submodule and a ``urls`` submodule.
    """
    result = []
    for app in settings.INSTALLED_APPS:
        p = get_app_submodule(app, submodule='plugin')
        if(p and get_app_submodule(app, submodule='urls')):
            # mount the app's URL patterns at the site root
            result.append(url(r'', include('%s.urls' % app)))
    return result
def get_app_submodule(app_name, submodule):
    """
    Import and return ``app_name.submodule``, or None if the app
    doesn't define that submodule.

    Errors raised while importing a submodule that actually exists are
    re-raised, so genuinely broken plugin modules aren't silently hidden.
    """
    app = import_module(app_name)
    # Attempt to import the app's plugin module.
    try:
        return import_module('%s.%s' % (app_name, submodule))
    except ImportError:
        # Decide whether to bubble up this error. If the app just
        # doesn't have the submodule, we can ignore the error
        # attempting to import it, otherwise we want it to bubble up.
        if module_has_submodule(app, submodule):
            raise
        return None
def instantiate(plugin_mod):
    """
    Return the IPlugin instance provided by the given plugin module,
    creating and caching it on first use.
    """
    from antioch import IPlugin
    global plugin_cache
    if(plugin_mod in plugin_cache):
        return plugin_cache[plugin_mod]
    for attr_name in dir(plugin_mod):
        if(attr_name.startswith('_')):
            continue
        candidate = getattr(plugin_mod, attr_name)
        if(IPlugin.providedBy(candidate)):
            # last provider found in the module wins
            plugin_cache[plugin_mod] = candidate()
    if(plugin_mod not in plugin_cache):
        raise RuntimeError("Could not instantiate an antioch plugin from %r" % plugin_mod)
    return plugin_cache[plugin_mod]
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/plugins/__init__.py",
"copies": "1",
"size": "2076",
"license": "mit",
"hash": 5481634730638895000,
"line_mean": 29.9850746269,
"line_max": 90,
"alpha_frac": 0.6411368015,
"autogenerated": false,
"ratio": 3.8949343339587243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5036071135458724,
"avg_score": null,
"num_lines": null
} |
"""
Represent objects in-universe
"""
from antioch.core import errors, code
from antioch.util import ason
# These are the default list of permissions, auto-created
# first thing during universe bootstrapping.
default_permissions = (
    'anything',
    'read',
    'write',
    'entrust',
    'grant',
    'execute',
    'move',
    'transmute',
    'derive',
    'develop',
    'administer',
)
# Keyword whitelists for Object.add_verb()/add_property().
# True if usable, None if restricted (rejected when a user context is
# active -- see the validation loops in add_verb/add_property), False if unusable
add_verb_kwargs = dict(
    code = True,
    ability = True,
    method = True,
    filename = None,
    ref = None,
    repo = None,
    owner_id = None,
    origin_id = False,
)
# Same semantics as add_verb_kwargs, for Object.add_property().
add_property_kwargs = dict(
    value = True,
    type = True,
    owner_id = None,
    origin_id = False,
)
class PropertyStub(object):
    """
    A convenience class to allow `obj.get('property', 'default').value`
    """
    def __init__(self, value):
        # remember the fallback so it can be read back via .value
        self.value = value
class Entity(object):
    """
    Entities are the base class for all Objects, Verbs, and Properties.

    Subclasses share ID management, ownership, ACL checking
    (check/allow/deny) and persistence via the ObjectExchange instance
    (self._ex) that created them.
    """
    def __getattribute__(self, name):
        """
        Private attribute protection using code.is_frame_access_allowed().
        """
        return code.get_protected_attribute(self, name, object.__getattribute__)
    def __setattr__(self, name, value):
        """
        Private attribute protection using code.is_frame_access_allowed().
        """
        return code.set_protected_attribute(self, name, value, object.__setattr__)
    def __repr__(self):
        """
        Just wrap the string representation in angle brackets.
        """
        return '<%s>' % (self)
    def set_id(self, id):
        """
        Set the ID of this entity one time.

        Raises RuntimeError if the ID was already assigned (non-zero).
        """
        if(self._id != 0):
            raise RuntimeError("Can't redefine a %s's ID." % self.get_type())
        self._id = id
    def get_id(self):
        """
        Get the ID of this entity.
        """
        return self._id
    id = property(get_id)
    def get_origin(self):
        """
        Get the object where this attribute was located (may not be where it was defined).
        """
        if(self.get_type() == 'object'):
            return self
        # self.check('read', self)
        return self._ex.instantiate('object', id=self._origin_id)
    origin = property(get_origin)
    def get_source(self):
        """
        Get the object this attribute was defined on.

        Returns None when no source has been recorded (i.e. the entity
        was not fetched through an Object accessor that sets _source_id).
        """
        if(self.get_type() == 'object'):
            return self
        if not hasattr(self, '_source_id'):
            return None
        # self.check('read', self)
        return self._ex.instantiate('object', id=self._source_id)
    source = property(get_source)
    def get_exchange(self):
        """
        Return the ObjectExchange instance used to load this entity.
        """
        return self._ex
    def get_context(self):
        """
        Get the user responsible for the current runtime, if applicable.
        """
        return self._ex.get_context()
    def save(self):
        """
        Save this entity back to the database.
        """
        self._ex.save(self)
    def destroy(self):
        """
        Remove this entity from the database.
        [ACL] allowed to destroy this
        """
        self.check('destroy', self)
        self._ex.destroy(self)
    def set_owner(self, owner):
        """
        Change the owner of this entity.
        [ACL] allowed to entrust this
        """
        self.check('entrust', self)
        self._owner_id = owner.get_id()
        self.save()
    def get_owner(self):
        """
        Get the owner of this entity, or None if unowned.
        """
        #self.check('read', self)
        if not(self._owner_id):
            return None
        return self.get_exchange().instantiate('object', id=self._owner_id)
    owner = property(get_owner, set_owner)
    def check(self, permission, subject):
        """
        Check if the current context has permission for something.

        Owners implicitly pass 'grant' checks on their own entities; any
        other failed check raises via ctx.is_allowed(..., fatal=True).
        No-op when there is no active context (bootstrap/system code).
        """
        ctx = self.get_context()
        if ctx:
            if(permission != 'grant' or not ctx.owns(subject)):
                ctx.is_allowed(permission, subject, fatal=True)
    def allow(self, accessor, permission, create=False):
        """
        Allow a certain object or group to do something on this object.
        [ACL] allowed to grant on this (or owner of this)
        """
        self.check('grant', self)
        if(isinstance(accessor, Object)):
            self._ex.allow(self, accessor.get_id(), permission, create)
        else:
            # accessor is a group name rather than an object
            self._ex.allow(self, accessor, permission, create)
    def deny(self, accessor, permission, create=False):
        """
        Deny a certain object or group from doing something on this object.
        [ACL] allowed to grant on this (or owner of this)
        """
        self.check('grant', self)
        if(isinstance(accessor, Object)):
            self._ex.deny(self, accessor.get_id(), permission, create)
        else:
            # accessor is a group name rather than an object
            self._ex.deny(self, accessor, permission, create)
    def get_type(self):
        """
        Return whether this entity is an object, verb, or property.
        """
        return type(self).__name__.lower()
class Object(Entity):
    """
    Objects represent the 'things' of the antioch universe.
    These models are really just views of a slightly more abstract entity.
    Their behavior changes based on the user of the ObjectExchange they
    were created from.
    """
    # NOTE(review): Entity defines no __slots__, so instances still carry a
    # __dict__ despite this declaration -- confirm whether that is intended.
    __slots__ = ['_id', '_ex', '_name', '_unique_name', '_owner_id', '_location_id']
    def __init__(self, exchange):
        # New objects start unsaved (id 0), unnamed, unowned and unplaced.
        self._id = 0
        self._ex = exchange
        self._name = ''
        self._unique_name = False
        self._owner_id = None
        self._location_id = None
    def __str__(self):
        """
        Return a string representation of this class. These take the form
        of "#0 (System Object)"; the object ID is prefixed by a pound sign,
        and the ID is followed by the real name of the object in parentheses.
        """
        return "#%s (%s)" % (self._id, self._name)
    def __getattr__(self, name):
        """
        Attribute access is loads verbs and executes them in a method context.
        """
        # used for verbs
        v = self.get_verb(name)
        if(v is None):
            raise errors.NoSuchVerbError("No such verb `%s` on %s" % (name, self))
        return v
    def get_details(self):
        """
        Get the essential details about this object.
        """
        return dict(
            id = self._id,
            kind = self.get_type(),
            __str__ = str(self),
            name = self._name,
            parents = ', '.join([str(x) for x in self.get_parents()]),
            owner = str(self.get_owner()),
            location = str(self.get_location()),
            verbs = self._ex.get_verb_list(self.get_id()),
            properties = self._ex.get_property_list(self.get_id()),
        )
    def owns(self, subject):
        """
        Do I own the provided subject?
        """
        owner = subject.get_owner()
        return owner == self
    def get_verb(self, name, recurse=True):
        """
        Get a verb defined on this or an ancestor of this element.
        """
        # self.check('read', self)
        v = self._ex.get_verb(self._id, name, recurse=recurse)
        if(v):
            # remember where the lookup started, for v.get_source()
            v._source_id = self.get_id()
        return v
    def add_verb(self, name, **kwargs):
        """
        Create a new verb object and add it to this object.
        [ACL] allowed to write on this

        Raises ValueError for keywords that are invalid, or restricted
        while a user context is active (see add_verb_kwargs).
        """
        self.check('write', self)
        ctx = self._ex.get_context()
        # new verbs default to being owned by the acting user
        owner_id = ctx.get_id() if ctx else self._owner_id
        for key, value in list(kwargs.items()):
            access = add_verb_kwargs.get(key, False)
            if access is None and ctx:
                raise ValueError("Restricted keyword %r" % key)
            elif access is False:
                raise ValueError("Invalid keyword %r" % key)
        kw = dict(origin_id=self._id, owner_id=owner_id, name=name)
        kwargs.update(kw)
        v = self._ex.instantiate('verb', **kwargs)
        v._source_id = self.get_id()
        return v
    def remove_verb(self, name):
        """
        Remove a verb from this object.
        [ACL] allowed to write on this
        """
        self.check('write', self)
        self._ex.remove_verb(origin_id=self._id, name=name)
    def has_verb(self, name):
        """
        Return True if this or an ancestor of this object has a verb by this name.
        """
        return self._ex.has(self._id, 'verb', name)
    def has_callable_verb(self, name):
        """
        Return True if this object or an ancestor has a verb executable to the current context.
        """
        return self._ex.has(self._id, 'verb', name, unrestricted=False)
    def get(self, name, default=None):
        """
        Convenience method for get_property() to allow for providing a default.
        Also made to match with the dictionary syntax used for properties.

        The default is wrapped in a PropertyStub so `.value` always works.
        """
        try:
            return self[name]
        except errors.NoSuchPropertyError as e:
            return PropertyStub(default)
    def get_ancestors(self):
        """
        Get all ancestors of this object.
        """
        return self._ex.get_ancestors(self._id)
    def get_ancestor_with(self, type, name):
        """
        Get the ancestor of this object that defines some attribute.
        """
        return self._ex.get_ancestor_with(self._id, type, name)
    def __getitem__(self, name):
        """
        Item access loads properties.

        Integer keys raise IndexError so this object is not mistaken
        for a sequence (e.g. by iteration protocols).
        """
        if(isinstance(name, int)):
            raise IndexError(name)
        # used for properties
        p = self.get_property(name)
        if(p is None):
            raise errors.NoSuchPropertyError(name, self)
        return p
    def __contains__(self, name):
        """
        Containment checks for readable properties.
        """
        return self.has_readable_property(name)
    def get_property(self, name, recurse=True):
        """
        Return a property defined by this or an ancestor of this object.
        """
        # self.check('read', self)
        p = self._ex.get_property(self._id, name, recurse=recurse)
        if(p):
            # remember where the lookup started, for p.get_source()
            p._source_id = self.get_id()
        # if(p is not None and p.origin != self):
        #     return self.check('inherit', p)
        return p
    def add_property(self, name, **kwargs):
        """
        Create and return a new property defined on this object.
        [ACL] allowed to write on this

        Raises ValueError for keywords that are invalid, or restricted
        while a user context is active (see add_property_kwargs).
        """
        self.check('write', self)
        ctx = self._ex.get_context()
        # new properties default to being owned by the acting user
        owner_id = ctx.get_id() if ctx else self._owner_id
        for key, value in list(kwargs.items()):
            access = add_property_kwargs.get(key, False)
            if access is None and ctx:
                raise ValueError("Restricted keyword %r" % key)
            elif access is False:
                raise ValueError("Invalid keyword %r" % key)
        kw = dict(origin_id=self._id, owner_id=owner_id)
        # the value is assigned after creation so it goes through p.value's ACL
        value = kwargs.pop('value', None)
        kwargs.update(kw)
        p = self._ex.instantiate('property', name=name, **kwargs)
        p._source_id = self.get_id()
        p.value = value
        return p
    def remove_property(self, name):
        """
        Remove a property directly defined on this object.
        [ACL] allowed to write on this
        """
        self.check('write', self)
        self._ex.remove_property(origin_id=self._id, name=name)
    def has_property(self, name):
        """
        Return True if this or an ancestor of this object defines the given property.
        """
        return self._ex.has(self._id, 'property', name)
    def has_readable_property(self, name):
        """
        Return True if this object defines a property readable by the current user.
        """
        return self._ex.has(self._id, 'property', name, unrestricted=False)
    def is_player(self):
        """
        Return True if this is a player's avatar object.
        """
        return self._ex.is_player(self.get_id())
    def is_wizard(self):
        """
        Return True if this is an administrator's player object.
        """
        return self._ex.is_wizard(self.get_id())
    def set_player(self, is_player=None, is_wizard=None, passwd=None, **attribs):
        """
        Set player-specific attributes of this object.
        """
        return self._ex.set_player(self.get_id(), player=is_player, wizard=is_wizard, passwd=passwd, **attribs)
    def validate_password(self, passwd):
        """
        Validate this avatar's password.
        """
        return self._ex.validate_password(self.get_id(), passwd)
    def is_connected_player(self):
        """
        Return True if this is a currently logged-in player.
        """
        return self._ex.is_connected_player(self.get_id())
    def set_name(self, name, real=False):
        """
        Set the name of this object.
        [ACL] allowed to write on this

        With real=True the database-level name is changed (subject to
        unique/ambiguous name restrictions); otherwise the change is
        stored in the 'name' property.
        """
        self.check('write', self)
        if(real):
            if(self._name != name and self._ex.is_unique_name(name)):
                raise ValueError("Sorry, '%s' is a reserved name." % name)
            elif(self._unique_name and self._ex.refs(name) > 1):
                raise ValueError("Sorry, '%s' is an ambiguous name." % name)
            self._name = name
            self.save()
        else:
            if('name' in self):
                self['name'].value = name
            else:
                p = self.add_property('name')
                p.value = name
    def add_alias(self, alias):
        """
        Add an alias used by find() and other code to locate objects by name.
        [ACL] allowed to write on this
        """
        self.check('write', self)
        self._ex.add_alias(self.get_id(), alias)
    def remove_alias(self, alias):
        """
        Remove an alias used by find() and other code to locate objects by name.
        [ACL] allowed to write on this
        """
        self.check('write', self)
        self._ex.remove_alias(self.get_id(), alias)
    def get_aliases(self):
        """
        Get a list of aliases used by find() and other code to locate objects by name.
        [ACL] allowed to develop on this
        """
        self.check('develop', self)
        return self._ex.get_aliases(self.get_id())
    def add_observer(self, observer):
        """
        Add an object to be notified when this object changes its appearance.
        [ACL] allowed to read on this
        [ACL] allowed to write on observer
        """
        self.check('read', self)
        self.check('write', observer)
        self._ex.add_observer(self.get_id(), observer.get_id())
    def remove_observer(self, observer):
        """
        Remove an observer from this object's list.
        [ACL] allowed to write on observer
        """
        self.check('write', observer)
        self._ex.remove_observer(self.get_id(), observer.get_id())
    def get_observers(self):
        """
        Return a list of observers for this object.
        [ACL] allowed to read on this
        """
        self.check('read', self)
        return self._ex.get_observers(self.get_id())
    def get_observing(self):
        """
        Get the object this object is currently observing.
        [ACL] allowed to read on this
        """
        self.check('read', self)
        return self._ex.get_observing(self.get_id())
    def notify_observers(self):
        """
        Tell all observers to refresh their view of this object.
        It's uncertain whether constraints should be placed on calling
        this method. First of all, the previous restriction was unworkable,
        because non-privileged objects can still be allowed to enter and
        leave another object freely, and will need to call the corresponding
        locations.
        It might be better to just make the notification happen inside the
        enter and leave verbs, but they'd still need to be changed so that
        they are run with different permissions than the active user, something
        that's probably wise to avoid.
        # [ACL] allowed to write on this
        """
        # self.check('write', self)
        for observer in self.get_observers():
            if(observer.has_callable_verb('look')):
                observer.look(self)
    def clear_observers(self):
        """
        Clear the list of observers.
        [ACL] allowed to write on this
        """
        self.check('write', self)
        self._ex.clear_observers(self.get_id())
    def get_name(self, real=False):
        """
        Get the name of this object.
        If it exists, return the property called 'name' first.
        If real is True, return the actual name only.
        [ACL] allowed to read on this
        """
        self.check('read', self)
        if(real or 'name' not in self):
            return self._name
        else:
            return self['name'].value
    def find(self, name):
        """
        Find a object contained directly inside by name.
        """
        return self._ex.find(self.get_id(), name)
    def contains(self, subject, recurse=True):
        """
        Is the provided object inside this one.
        Optionally supply recurse=True to recurse through nested objects.
        """
        return self._ex.contains(self.get_id(), subject.get_id(), recurse)
    def get_contents(self):
        """
        Get a list of objects immediately inside this one.
        """
        return self._ex.get_contents(self.get_id())
    def set_location(self, location):
        """
        Set this object's location to the provided object.
        [ACL] allowed to move this

        The move consults the verbs on both locations: `accept`/`provide`
        may veto the move, `enter`/`exit` are notifications, and observer
        lists on both locations are refreshed afterwards.
        """
        self.check('move', self)
        # refuse circular containment
        if(location and self.contains(location)):
            raise errors.RecursiveError("Sorry, '%s' already contains '%s'" % (self, location))
        # the destination may veto the move
        if(location and location.has_verb('accept')):
            if not(location.accept(self)):
                raise errors.PermissionError("%s won't let %s inside." % (location, self))
        old_location = self.get_location()
        # the old location may also veto the move
        if(old_location and old_location.has_verb('provide')):
            if not(old_location.provide(self)):
                raise errors.PermissionError("%s won't let %s out." % (old_location, self))
        if(location and location.has_verb('enter')):
            location.enter(self)
        self._location_id = location.get_id() if location else None
        self.save()
        if(location is not old_location):
            self.clear_observers()
        if(old_location and old_location.has_verb('exit')):
            old_location.exit(self)
        self.save()
        # let everyone watching either room see the change
        if(old_location):
            old_location.notify_observers()
        if(location):
            location.notify_observers()
        # players observe the room they are in
        if(self.is_player() and old_location is not location):
            if(old_location):
                old_location.remove_observer(self)
            if(location):
                location.add_observer(self)
    def get_location(self):
        """
        Get this object's location, or None if it has none.
        [ACL] allowed to read on this
        """
        self.check('read', self)
        if not(self._location_id):
            return None
        return self._ex.instantiate('object', id=self._location_id)
    def has_parent(self, parent):
        """
        Return true if the provided object is a parent of this object.
        """
        return self._ex.has_parent(self.get_id(), parent.get_id())
    def get_parents(self, recurse=False):
        """
        Get a list of immediate parents to this object.
        Optionally, supply recurse=True to get *all* parents.
        [ACL] allowed to read on this
        """
        self.check('read', self)
        return self._ex.get_parents(self._id, recurse)
    def remove_parent(self, parent):
        """
        Remove a parent from this object.
        [ACL] allowed to transmute this
        """
        self.check('transmute', self)
        self._ex.remove_parent(parent.get_id(), self.get_id())
    def add_parent(self, parent):
        """
        Add a parent to this object.
        [ACL] allowed to transmute this
        [ACL] allowed to derive from parent

        Raises RecursiveError if the parentage would become circular.
        """
        self.check('transmute', self)
        self.check('derive', parent)
        if(parent.has_parent(self)):
            raise errors.RecursiveError("Sorry, '%s' is already parent to '%s'" % (self, parent))
        self._ex.add_parent(self.get_id(), parent.get_id())
    def is_allowed(self, permission, subject, fatal=False):
        """
        Is this object allowed to do `permission` on `subject`?

        With fatal=True a denial raises AccessError instead of returning False.
        """
        access = self._ex.is_allowed(self, permission, subject)
        if(not access and fatal):
            raise errors.AccessError(self, permission, subject)
        return access
    name = property(get_name, set_name)
    location = property(get_location, set_location)
    contents = property(get_contents)
    parents = property(get_parents)
    observers = property(get_observers)
class Verb(Entity):
    """
    Verbs encapsulate in-game Python code with ownership and access rules.
    """
    # NOTE(review): __init__ below also sets _filename, _repo_id and _ref,
    # which are missing from __slots__; this still works because the Entity
    # base class has a __dict__ -- confirm whether that is intended.
    __slots__ = ['_id', '_origin_id', '_source_id', '_ex', '_code', '_owner_id', '_ability', '_method']
    def __init__(self, origin):
        """
        Create a verb record and attach it to object.
        """
        self._id = 0
        self._origin_id = origin.get_id()
        self._ex = origin.get_exchange()
        self._code = ''
        self._filename = None
        self._repo_id = None
        self._ref = None
        self._owner_id = None
        self._ability = False
        self._method = False
    def __call__(self, *args, **kwargs):
        """
        Call this verb as a method.
        [ACL] allowed to execute this

        Raises RuntimeError if the verb is not flagged as a method.
        """
        if not(self._method):
            raise RuntimeError("%s is not a method." % self)
        self.check('execute', self)
        # imported here to avoid a circular import at module load time
        from antioch.core import parser
        default_parser = parser.get_default_parser(self)
        env = default_parser.get_environment()
        env['args'] = args
        env['kwargs'] = kwargs
        return code.r_exec(default_parser.caller, self._get_code(), env, filename=repr(self), runtype="method")
    def __str__(self):
        """
        Return a string representation of this class.

        Abilities are prefixed with '@'; methods are suffixed with '()'.
        """
        return "Verb %s%s%s {#%s on %s}" % (
            ['', '@'][self._ability], self.name, ['', '()'][self._method], self._id, self.origin
        )
    def get_details(self):
        """
        Get the essential details about this verb.
        """
        return dict(
            id = self.get_id(),
            kind = self.get_type(),
            __str__ = str(self),
            exec_type = 'verb' if not self._method else 'method' if not self._ability else 'ability',
            owner = str(self.get_owner()),
            names = self.get_names(),
            code = str(self.get_code()),
            origin = str(self.get_origin()),
        )
    def execute(self, parser):
        """
        Execute this verb, called by the provided parser instance.
        [ACL] allowed to execute this
        """
        self.check('execute', self)
        code.r_exec(parser.caller, self._get_code(), parser.get_environment(), filename=repr(self), runtype="verb")
    def add_name(self, name):
        """
        Add a name or alias for this verb.
        [ACL] allowed to write to this
        """
        self.check('write', self)
        return self._ex.add_verb_name(self.get_id(), name)
    def remove_name(self, name):
        """
        Remove a name or alias for this verb.
        [ACL] allowed to write to this
        """
        self.check('write', self)
        return self._ex.remove_verb_name(self.get_id(), name)
    def get_names(self):
        """
        Get a list of names for this verb.
        """
        # self.check('read', self)
        return self._ex.get_verb_names(self.get_id())
    def set_names(self, given_names):
        """
        Update the list of names for this verb.

        Removes names not in given_names, then adds the missing ones.
        """
        old_names = self.get_names()
        [self.remove_name(n) for n in old_names if n not in given_names]
        [self.add_name(n) for n in given_names if n not in old_names]
    def set_code(self, code):
        """
        Set the Python code for this verb.
        [ACL] allowed to develop this
        """
        self.check('develop', self)
        self._code = code
        self.save()
    def get_code(self):
        """
        Get the Python code for this verb
        [ACL] allowed to read this
        """
        self.check('read', self)
        return self._get_code()
    def _get_code(self):
        # unchecked accessor used internally by execute()/__call__()
        return self._code
    def set_ability(self, ability):
        """
        Mark this verb as an ability (only parseable by origin).
        [ACL] allowed to develop this
        """
        self.check('develop', self)
        self._ability = bool(ability)
        self.save()
    def is_ability(self):
        """
        Is this verb an ability?
        """
        # self.check('read', self)
        return self._ability
    def set_method(self, method):
        """
        Allow this verb to be called by method syntax.
        [ACL] allowed to develop this
        """
        self.check('develop', self)
        self._method = bool(method)
        self.save()
    def is_method(self):
        """
        Is this verb callable as a method?
        """
        # self.check('read', self)
        return self._method
    def is_executable(self):
        """
        Is this verb executable?
        [ACL] allowed to execute this
        """
        try:
            self.check('execute', self)
        except errors.PermissionError as e:
            return False
        return True
    def performable_by(self, caller):
        """
        Is this verb executable by a particular caller?

        NOTE(review): as written, a non-ability verb always falls through
        to the final `return False` -- confirm whether plain verbs are
        meant to be performable here.
        """
        # if(self.is_method()):
        #     return False
        if not(caller.is_allowed('execute', self)):
            return False
        elif(self.is_ability()):
            if(self._ex.has_parent(caller.get_id(), self._origin_id)):
                return True
            return caller.get_id() == self._origin_id
        return False
    name = property(lambda x: x.get_names().pop(0))
    names = property(get_names)
    code = property(get_code, set_code)
    ability = property(is_ability, set_ability)
    method = property(is_method, set_method)
    executable = property(is_executable)
class Property(Entity):
    """
    Properties encapsulate in-game Python values with ownership and access rules.
    Properties can store any value than can be encoded to JSON, including object,
    verb, and property references, which are handled by a customized parser.
    """
    __slots__ = ['_id', '_origin_id', '_source_id', '_ex', '_name', '_value', '_type', '_owner_id']
    def __init__(self, origin):
        """
        Create a property record and attach it to object.
        """
        self._id = 0
        self._origin_id = origin.get_id()
        self._ex = origin.get_exchange()
        self._name = ''
        self._value = None
        self._type = 'string'
        self._owner_id = None
    def __str__(self):
        """
        Return a string representation of this class.
        """
        return 'Property %r {#%s on %s}' % (self._name, self._id, self.origin)
    def get_details(self):
        """
        Get the essential details about this property.
        """
        # serialize non-string values with the ASON encoder; strings pass through
        # (removed a dead `value = self._value` assignment that was
        # immediately overwritten here)
        value = ason.dumps(self._value) if not isinstance(self._value, str) else self._value
        return dict(
            id = self.get_id(),
            kind = self.get_type(),
            __str__ = str(self),
            owner = str(self.get_owner()),
            name = self.get_name(),
            value = value.encode('utf8'),
            type = str(self._type),
            origin = str(self.get_origin()),
        )
    def is_readable(self):
        """
        Is this property readable?
        """
        try:
            self.check('read', self)
        except errors.PermissionError as e:
            return False
        return True
    def set_name(self, name):
        """
        Set this property's name.
        [ACL] allowed to write to this
        """
        self.check('write', self)
        self._name = name
        self.save()
    def get_name(self):
        """
        Get this property's name.
        [ACL] allowed to read from this
        """
        self.check('read', self)
        return self._name
    def set_value(self, value, type='string'):
        """
        Set this property's value.
        [ACL] allowed to write to this
        """
        self.check('write', self)
        self._value = value
        self._type = type
        self.save()
    def get_value(self):
        """
        Get this property's value.
        [ACL] allowed to read from this
        """
        self.check('read', self)
        return self._value
    value = property(get_value, set_value)
    name = property(get_name, set_name)
    readable = property(is_readable)
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/core/interface.py",
"copies": "1",
"size": "30511",
"license": "mit",
"hash": -1406239294605088500,
"line_mean": 29.664321608,
"line_max": 115,
"alpha_frac": 0.534168005,
"autogenerated": false,
"ratio": 4.218304991013411,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5252472996013411,
"avg_score": null,
"num_lines": null
} |
"""
SQL building facilities.
"""
import sys, types, time, datetime, array, decimal
def strftime(dt, fmt):
    """
    Format the provided datetime object using the given format string.

    This function will properly format dates before 1900 by exploiting the
    28-year Gregorian calendar repeat cycle.

    @param dt: the date/datetime to format, or None
    @param fmt: a time.strftime()-style format string
    @returns: the formatted date, or '' when dt is None
    @rtype: str
    """
    # I hope I did this math right. Every 28 years the
    # calendar repeats, except through century leap years
    # excepting the 400 year leap years. But only if
    # you're using the Gregorian calendar.
    # Created by Andrew Dalke
    # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/306860
    if dt is None:
        return ''
    # WARNING: known bug with "%s", which is the number
    # of seconds since the epoch. This is too harsh
    # of a check. It should allow "%%s".
    fmt = fmt.replace("%s", "s")
    if dt.year > 1900:
        # Modern dates can be handled by time.strftime() directly.
        return time.strftime(fmt, dt.timetuple())

    year = dt.year
    # For every non-leap year century, advance by
    # 6 years to get into the 28-year repeat cycle
    delta = 2000 - year
    off = 6 * (delta // 100 + delta // 400)
    year = year + off

    def _findall(text, substr):
        """
        Return every (possibly overlapping) index of substr within text.
        """
        sites = []
        i = 0
        while 1:
            j = text.find(substr, i)
            if j == -1:
                break
            sites.append(j)
            i = j + 1
        return sites

    # Move to around the year 2000
    year = year + ((2000 - year) // 28) * 28
    timetuple = dt.timetuple()
    s1 = time.strftime(fmt, (year,) + timetuple[1:])
    sites1 = _findall(s1, str(year))
    s2 = time.strftime(fmt, (year + 28,) + timetuple[1:])
    sites2 = _findall(s2, str(year + 28))
    # Only trust positions where both candidate years appear: those are
    # genuine year fields rather than coincidental digit runs.
    sites = [site for site in sites1 if site in sites2]
    s = s1
    syear = "%4d" % (dt.year,)
    for site in sites:
        s = s[:site] + syear + s[site + 4:]
    return s
def build_insert(table, data=None, **kwargs):
    """
    Given a table name and a dictionary, construct an INSERT query. Keys are
    sorted alphabetically before output, so the result of passing a semantically
    identical dictionary should be the same every time.

    Use modu.sql.RAW to embed SQL directly in the VALUES clause.

    @param table: the desired table name
    @type table: str

    @param data: a column name to value map, or a list of such maps for a
        multi-row insert
    @type data: dict or list

    @returns: an SQL query
    @rtype: str

    @raises ValueError: if no data was provided
    """
    if data is None and kwargs:
        data = kwargs
    if not data:
        raise ValueError("data argument to build_insert() is empty.")
    if not isinstance(data, (list, tuple)):
        data = [data]

    # Column order is taken from the first row; every row must supply at
    # least these keys.
    keys = sorted(data[0].keys())
    # The placeholder count follows the column list (len(keys)), not the
    # row's own size, so rows carrying extra keys cannot skew the
    # placeholder/value pairing.
    row_group = '(' + ', '.join(['%s'] * len(keys)) + ')'
    values = []
    for row in data:
        values.extend(row[key] for key in keys)
    query = 'INSERT INTO %s (%s) VALUES %s' % (
        table, ', '.join(keys), ', '.join([row_group] * len(data)))
    return interp(query, *values)
def build_set(data=None, **kwargs):
    """
    Construct a SET clause from the given column-to-value mapping. Keys are
    sorted alphabetically before output, so semantically identical input
    always yields identical SQL.

    @param data: a column name to value map
    @type data: dict

    @returns: an SQL fragment
    @rtype: str
    """
    if data is None:
        data = {}
    data.update(kwargs)
    columns = sorted(data.keys())
    params = [data[column] for column in columns]
    assignments = ', '.join('%s = %%s' % column for column in columns)
    return interp('SET ' + assignments, *params)
def build_update(table, data, constraints):
    """
    Given a table name, a dictionary, and a set of constraints, construct an
    UPDATE query. Keys are sorted alphabetically before output, so the result
    of passing a semantically identical dictionary should be the same every
    time.

    @param table: the desired table name
    @type table: str

    @param data: a column name to value map
    @type data: dict

    @param constraints: a column name to value map
    @type constraints: dict

    @returns: an SQL query
    @rtype: str

    @seealso: L{build_where()}
    """
    return 'UPDATE %s %s %s' % (table, build_set(data), build_where(constraints))
def build_select(table, data=None, **kwargs):
    """
    Given a table name and a dictionary, construct a SELECT query. Keys are
    sorted alphabetically before output, so the result of passing a
    semantically identical dictionary should be the same every time.

    These SELECTs always select * from a single table.

    Special keys can be inserted in the provided dictionary, such that:
        - B{__select_keyword}: is inserted between 'SELECT' and '*'
        - B{__select_fields}: is used after 'SELECT' instead of '*'

    @param table: the desired table name
    @type table: str

    @param data: a column name to value map
    @type data: dict

    @returns: an SQL query
    @rtype: str

    @seealso: L{build_where()}
    """
    if data is None:
        data = {}
    data.update(kwargs)

    # Resolve the column list: default '*', join sequences, stringify the rest.
    fields = data.get('__select_fields', None)
    if fields is None:
        fields = '*'
    elif isinstance(fields, (list, tuple)):
        fields = ','.join(fields)
    else:
        fields = '%s' % fields

    if '__select_keyword' in data:
        query = "SELECT %s %s FROM %s " % (data['__select_keyword'], fields, table)
    else:
        query = "SELECT %s FROM %s " % (fields, table)
    return query + build_where(data)
def build_delete(table, constraints=None, **kwargs):
    """
    Given a table name and a set of constraints, construct a DELETE query.
    Keys are sorted alphabetically before output, so the result of passing
    a semantically identical dictionary should be the same every time.

    @param table: the desired table name
    @type table: str

    @param constraints: a column name to value map
    @type constraints: dict

    @returns: an SQL query
    @rtype: str

    @seealso: L{build_where()}
    """
    if constraints is None:
        constraints = {}
    constraints.update(kwargs)
    return 'DELETE FROM %s ' % table + build_where(constraints)
def build_where(data=None, use_where=True, **kwargs):
    """
    Given a dictionary, construct a WHERE clause. Keys are sorted alphabetically
    before output, so the result of passing a semantically identical dictionary
    should be the same every time.

    Special keys can be inserted in the provided dictionary, such that:
        - B{__order_by}: inserts an ORDER BY clause. ASC/DESC must be
          part of the string if you wish to use them
        - B{__group_by}: inserts a GROUP BY clause
        - B{__limit}: add a LIMIT clause to this query

    Additionally, certain types of values have alternate output:
        - B{list/tuple types}: result in an IN statement
        - B{None} results in an ISNULL statement
        - B{sql.RAW objects}: result in directly embedded SQL, such that
          C{'col1':RAW("%s = ENCRYPT('whatever')")} equals
          C{col1 = ENCRYPT('whatever')}
        - B{persist.NOT objects}: result in a NOT statement

    @param data: a column name to value map
    @type data: dict

    @param use_where: should the result start with "WHERE"? (Default: True)
    @type use_where: bool

    @returns: an SQL fragment
    @rtype: str
    """
    if(data is None):
        data = {}
    # NOTE(review): mutates the caller's dict in place when kwargs are given.
    data.update(kwargs)

    query = ''
    criteria = []
    values = []
    # Sort keys so identical inputs always produce identical SQL.
    keys = list(data.keys())
    keys.sort()
    for key in keys:
        # Keys beginning with '_' are directives (__order_by, ...), not columns.
        if(key.startswith('_')):
            continue
        value = data[key]
        if(isinstance(value, list) or isinstance(value, tuple)):
            # Sequences become IN (...) with one placeholder per element.
            criteria.append('%s IN (%s)' % (key, ', '.join(['%s'] * len(value))))
            values.extend(value)
        elif(isinstance(value, NOT)):
            # NOT wraps either a sequence (NOT IN) or a scalar (<>).
            if(isinstance(value.value, list) or isinstance(value.value, tuple)):
                criteria.append('%s NOT IN (%s)' % (key, ', '.join(['%s'] * len(value.value))))
                values.extend(value.value)
            else:
                criteria.append('%s <> %%s' % key)
                values.append(value.value)
        elif(isinstance(value, GT)):
            criteria.append('%s > %%s' % key)
            values.append(value.value)
        elif(isinstance(value, LT)):
            criteria.append('%s < %%s' % key)
            values.append(value.value)
        # This goes last, since the NOT, GT, and LT are RAW subclasses,
        # and I don't like the more specific syntax
        elif(isinstance(value, RAW)):
            # A '%s' inside the raw SQL is replaced with the column name;
            # otherwise the raw text is appended directly after the column.
            if(value.value.find('%s') != -1):
                criteria.append(value.value % key)
            else:
                criteria.append('%s%s' % (key, value.value))
        elif(value is None):
            criteria.append('%s IS NULL' % key)
        else:
            # Plain scalar equality.
            criteria.append('%s = %%s' % key)
            values.append(value)

    if(criteria):
        if(use_where):
            query += 'WHERE '
        query += ' AND '.join(criteria)
    if('__order_by' in data):
        query += ' ORDER BY %s' % data['__order_by']
    if('__group_by' in data):
        query += ' GROUP BY %s' % data['__group_by']
    if('__limit' in data):
        query += ' LIMIT %s' % data['__limit']

    return interp(query, *values)
def make_list(items):
    """
    Convert a list of things to a string suitable for use with IN.
    Uses interp to escape values.
    """
    placeholders = ','.join('%s' for _ in items)
    return interp(placeholders, *items)
def escape_sequence(seq, conv):
    """
    Escape each element of *seq* with escape_item(), returning a new list.
    """
    return [escape_item(element, conv) for element in seq]
def escape_item(item, conv):
    """
    Escape a single value using the type-keyed handlers in *conv*, falling
    back to the str handler for unregistered types.
    """
    handler = conv.get(type(item), conv[str])
    return handler(item, conv)
def quoted_string_literal(s, d):
    """
    Quote *s* as a standard SQL string literal: wrap it in single quotes and
    double any embedded single quotes.

    Raises NotImplementedError for objects that cannot be quoted this way.
    """
    # okay, so, according to the SQL standard, this should be all you need to do to escape
    # any kind of string.
    try:
        escaped = s.replace("'", "''")
    except TypeError as e:
        raise NotImplementedError("Cannot quote %r objects: %r: %s" % (type(s), s, e))
    return "'%s'" % escaped
def mysql_string_literal(s, d):
    """
    Escape a string using MySQLdb's own string_literal converter.

    NOTE(review): requires the third-party MySQLdb package at call time.
    """
    from MySQLdb import converters
    return converters.string_literal(s, d)
def interp(query, *args):
    """
    Interpolate the provided arguments into the provided query, using
    the DB-API's default conversions, with the additional 'RAW' support
    from modu.sql.RAW2Literal

    @param query: A query string with placeholders
    @type query: str

    @param args: A list of query values
    @type args: sequence

    @returns: an interpolated SQL query
    @rtype: str
    """
    escaped = escape_sequence(args, conversions)
    return query % tuple(escaped)
class RAW(object):
    """
    Allows RAW SQL to be embedded in constructed queries.

    @ivar value: "Raw" (i.e., properly escaped) SQL
    """
    def __init__(self, value):
        """
        Wrap an already-escaped SQL fragment.
        """
        self.value = value

    def __repr__(self):
        """
        Printable version, e.g. RAW("NOW()").
        """
        return '%s(%r)' % (type(self).__name__, self.value)
class NOT(RAW):
    """
    Allows NOTs to be embedded in constructed queries.

    When sql.NOT(value) is included in the constraint dict passed to a
    query-building function, it generates the SQL fragment
    'column <> value' (or 'column NOT IN (...)' when wrapping a sequence).

    @ivar value: The value to NOT be
    """
class GT(RAW):
    """
    Allow for use of a greater-than comparison.

    When sql.GT(value) is included in the constraint dict passed to a
    query-building function, it generates the SQL fragment 'column > value'.

    @ivar value: The value to be greater-than
    """
class LT(RAW):
    """
    Allow for use of a less-than comparison.

    When sql.LT(value) is included in the constraint dict passed to a
    query-building function, it generates the SQL fragment 'column < value'.

    @ivar value: The value to be less-than
    """
# Default escaper for string data: standards-compliant quote doubling.
string_literal = quoted_string_literal

# Map of Python type -> callable(value, conversions) returning SQL text.
# Used by escape_item()/interp() to serialize query parameters.
conversions = {
    int: lambda s,d: str(s),
    float: lambda o,d: '%.15g' % o,
    type(None): lambda s,d: 'NULL',
    list: lambda s,d: '(%s)' % ','.join([escape_item(x, conversions) for x in s]),
    tuple: lambda s,d: '(%s)' % ','.join([escape_item(x, conversions) for x in s]),
    bool: lambda s,d: string_literal(('0', '1')[s], d),
    datetime.date: lambda d,c: string_literal(strftime(d, "%Y-%m-%d"), c),
    datetime.datetime: lambda d,c: string_literal(strftime(d, "%Y-%m-%d %H:%M:%S"), c),
    datetime.timedelta: lambda v,c: string_literal('%d %d:%d:%d' % (v.days, int(v.seconds / 3600) % 24, int(v.seconds / 60) % 60, int(v.seconds) % 60)),
    RAW: lambda o,d: o.value,
    decimal.Decimal: lambda s,d: str(s),
}

if sys.version_info >= (3,0):
    conversions[str] = lambda s,d: string_literal(s, d)
    # bytes must be *decoded* to text before quoting; calling s.encode() on a
    # bytes object (as the original did) raises AttributeError.
    conversions[bytes] = lambda s,d: string_literal(s.decode(), d)
else:
    conversions[long] = lambda s,d: str(s)
    conversions[str] = lambda s,d: string_literal(s.encode(), d)
    conversions[unicode] = lambda s,d: string_literal(s, d)
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/util/sql.py",
"copies": "1",
"size": "13461",
"license": "mit",
"hash": -108087687997182980,
"line_mean": 30.0877598152,
"line_max": 152,
"alpha_frac": 0.5847262462,
"autogenerated": false,
"ratio": 3.850400457665904,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9772891843678642,
"avg_score": 0.03244697203745242,
"num_lines": 433
} |
#!antioch
# Inspect verb: renders an HTML summary of an object, verb, or property.
# Runs inside the antioch verb sandbox; has_pobj_str, get_dobj, write, etc.
# are injected by the runtime -- TODO confirm against antioch's verb API.

# Resolve the inspection subject: either a bare object ("inspect X"), or a
# verb/property on an origin object ("inspect [verb|prop] name on X").
if not has_pobj_str('on'):
    stype = 'object'
    if(has_dobj()):
        subject = get_dobj()
    else:
        subject = get_object(get_dobj_str())
else:
    if(has_pobj('on')):
        origin = get_pobj('on')
    else:
        origin = get_object(get_pobj_str('on'))
    # The direct object may be "<kind> <name>" or just "<name>".
    subjects = get_dobj_str().split(' ', 1)
    if(len(subjects) == 2):
        stype, name = subjects
    else:
        stype = None
        name = subjects[0]
    if(stype == 'verb'):
        subject = origin.get_verb(name)
        if subject is None:
            raise NoSuchVerbError(name)
    elif(stype in ('property', 'prop', 'value', 'val')):
        subject = origin.get_property(name)
        if subject is None:
            raise NoSuchPropertyError(name, origin)
    else:
        # Unspecified kind: prefer a verb, fall back to a property.
        subject = origin.get_verb(name) or origin.get_property(name)
        if subject is None:
            raise NoSuchPropertyError(name, origin)

def format_verb(v):
    # Returns (badge-class, label) for one verb details dict; abilities are
    # styled 'info', methods get a trailing '()'.
    klass = 'info' if v['ability'] else 'primary'
    return (klass, "%(name)s%(method)s" % dict(
        name = v['names'],
        method = '()' if v['method'] else ''
    ))

details = subject.get_details()
# Note: stype is re-derived here from the subject itself, overriding any
# value parsed from the command line above.
stype = subject.get_type()
if stype == 'verb':
    sname = ', '.join(details['names'])
else:
    sname = details['name']

# Build the HTML inspection panel.
output = '<div class="inspection">'
output += '<h3 class="name">%s</h3>' % sname
output += '<ul>'
output += '<li><b>Owner:</b> %s</li>' % details['owner']
if(stype == 'object'):
    ancestors = subject.get_ancestors()
    output += '<li><b>Ancestors:</b> %s</li>' % ', '.join(str(x) for x in ancestors)
    output += '<li><b>Location:</b> %s</li>' % details['location']
    output += '<li><b>Contents:</b> %s</li>' % ', '.join(str(x) for x in subject.contents)
    # Verbs defined directly on the object, then inherited ones per ancestor.
    output += '<li><b>Verbs:</b><ul>'
    for verb in details['verbs']:
        output += '<li><a href="#" class="badge badge-pill badge-%s">%s</a></li>' % format_verb(verb)
    for obj in ancestors:
        d = obj.get_details()
        if(d['verbs']):
            output += "<li><small>%s</small><ul class=\"inherited\">" % d['name']
            for verb in d['verbs']:
                output += '<li><a href="#" class="badge badge-pill badge-%s">%s</a></li>' % format_verb(verb)
            output += "</ul></li>"
    output += '</ul></li>'
    # Properties, own then inherited, mirroring the verb listing above.
    output += '<li><b>Properties:</b><ul>'
    for prop in details['properties']:
        output += '<li><a href="#" class="badge badge-pill badge-primary">%s</a></li>' % prop['name']
    for obj in ancestors:
        d = obj.get_details()
        if(d['properties']):
            output += "<li><small>%s</small><ul class=\"inherited\">" % d['name']
            for prop in d['properties']:
                output += '<li><a href="#" class="badge badge-pill badge-primary">%s</a></li>' % prop['name']
            output += "</ul></li>"
    output += '</ul></li>'
elif(stype == 'verb'):
    output += '<li><b>Type:</b> %s</li>' % details['exec_type']
elif(stype in ('property', 'prop', 'value', 'val')):
    output += '<li><b>Type:</b> %s</li>' % details['type']
output += '</ul>'
output += '</div>'
write(caller, output, escape_html=False) | {
"repo_name": "philchristensen/antioch",
"path": "antioch/core/bootstrap/default_verbs/author_class_inspect.py",
"copies": "1",
"size": "3130",
"license": "mit",
"hash": 8955892736202530000,
"line_mean": 33.4065934066,
"line_max": 109,
"alpha_frac": 0.5325878594,
"autogenerated": false,
"ratio": 3.2843651626442814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9277958398933766,
"avg_score": 0.0077989246221032375,
"num_lines": 91
} |
#!antioch
# Look verb: gathers observations about an object (or the caller's location)
# and either pushes them to the observer UI or writes a plain summary.
# Runs inside the antioch verb sandbox (runtype, args, caller, this, write,
# observe are injected by the runtime) -- TODO confirm against the verb API.

# Determine what to look at and who receives the result.
if(runtype == 'method'):
    obj = args[0] if args else caller.location
    target = this
else:
    if(has_dobj_str()):
        obj = get_dobj()
        target = caller
    else:
        obj = caller.get_location()
        target = caller

# Keep observer registration in sync with the newly observed object.
current = target.get_observing()
if(current and current is not obj):
    current.remove_observer(target)
if(obj and obj is not current):
    obj.add_observer(target)

import hashlib
def gravatar_url(email):
    # Standard gravatar URL from the md5 of the normalized email address.
    # NOTE(review): under Python 3, md5.update() requires bytes; passing the
    # str returned by .lower() would raise TypeError -- verify runtime.
    m = hashlib.md5()
    m.update(email.strip().lower())
    return 'http://www.gravatar.com/avatar/%s.jpg?d=mm' % m.hexdigest()

if(obj):
    observations = dict(
        id = obj.get_id(),
        name = obj.get_name(),
        location_id = str(obj.get_location()) or 0,
        description = obj.get('description', 'Nothing much to see here.').value,
        contents = [
            dict(
                type = item.is_player(),
                name = item.get_name(),
                image = gravatar_url(item['gravatar_id'].value) if 'gravatar_id' in item else None,
                # NOTE(review): obj.get('mood', None).value assumes get()
                # always returns an object with .value, even for the None
                # default -- confirm against the antioch object API.
                mood = item.get('mood', None).value,
            ) for item in obj.get_contents() if item.get('visible', True).value
        ],
    )
    if(obj.is_connected_player() and caller != target):
        write(obj, "%s looks at you" % target.get_name())
else:
    # No object at all: present a placeholder "void" scene.
    observations = dict(
        id = None,
        name = 'The Void',
        location_id = None,
        description = 'A featureless expanse of gray nothingness.',
        contents = [],
    )

# NOTE(review): when obj is falsy this accesses obj.id anyway, which would
# raise AttributeError -- confirm intended behavior.
if(obj.id == caller.location.id):
    observe(target, observations)
else:
    output = '<h3 class="name">%s</h3>' % observations['name']
    output = output + '<p class="lead description">%s</p>' % observations['description']
write(target, output, escape_html=False) | {
"repo_name": "philchristensen/antioch",
"path": "antioch/core/bootstrap/default_verbs/player_class_look.py",
"copies": "1",
"size": "1877",
"license": "mit",
"hash": -5958852698472515000,
"line_mean": 31.9473684211,
"line_max": 102,
"alpha_frac": 0.5482152371,
"autogenerated": false,
"ratio": 3.658869395711501,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9599953440095194,
"avg_score": 0.021426238543261443,
"num_lines": 57
} |
# antioch
#
#
import os
import setuptools
# disables creation of .DS_Store files inside tarballs on Mac OS X
os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'
os.environ['COPYFILE_DISABLE'] = 'true'
dist = setuptools.setup(
name = "antioch",
version = "0.9",
packages = setuptools.find_packages(),
author = "Phil Christensen",
author_email = "phil@bubblehouse.org",
description = "a next-generation MUD/MOO-like virtual world engine",
license = "MIT",
keywords = "antioch moo lambdamoo mud game",
url = "https://github.com/philchristensen/antioch",
long_description = """antioch is a web application for building scalable, interactive virtual worlds.
Begun as a MOO-like system for building virtual worlds, antioch aims to
take the LambdaMOO approach to creating online worlds, and update it in hopes
of attracting new players to an old idea.
""".replace('\t', '').replace('\n', ''),
)
| {
"repo_name": "philchristensen/antioch",
"path": "setup.py",
"copies": "1",
"size": "1146",
"license": "mit",
"hash": 7153396612457010000,
"line_mean": 39.9285714286,
"line_max": 107,
"alpha_frac": 0.5776614311,
"autogenerated": false,
"ratio": 3.9246575342465753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5002318965346575,
"avg_score": null,
"num_lines": null
} |
#!antioch
# Guest authentication verb: hands out a free guest avatar from a fixed pool.
# Runs inside the antioch verb sandbox; args, system, get_object,
# create_object, count_named are injected by the runtime -- TODO confirm.

username = args[0]
password = args[1]

# Only the shared 'guest' account is handled here.
if(username != 'guest'):
    return None

room = get_object("The Lobby")
guest_class = get_object('guest class')

# get the list of available guest names, which
# also sets the total number of possible guests
if('guest_names' in system):
    guest_names = system['guest_names'].value
else:
    guest_names = system.add_property('guest_names').value = [
        'Red Guest', 'Blue Guest', 'Yellow Guest',
        'Green Guest', 'Orange Guest', 'Purple Guest',
    ]

# get the registry where we keep the guest objects
if('guestbook' in system):
    guestbook = system['guestbook'].value
else:
    guestbook = system.add_property('guestbook').value = dict()

# the first time, we will create guest objects
# for all guest names
guest_object = None
for name in guest_names:
    # Reuse an existing guest object that isn't currently connected.
    if(name in guestbook and not guestbook[name].is_connected_player()):
        guest_object = guestbook[name]
        break
    # Otherwise lazily create the guest object the first time its name is used.
    elif(count_named(name) == 0):
        guest_object = guestbook[name] = create_object(name, unique_name=True)
        guest_object.location = room
        guest_object.set_player(True)
        guest_object.add_parent(guest_class)
        guest_object.set_owner(guest_object)
        break
else:
    # Loop completed without a break: every guest slot is taken.
    print("[guests] rejected, too many guests")
    raise PermissionError("Sorry, there are too many guests already.")

system['guestbook'].value = guestbook
return guest_object
| {
"repo_name": "philchristensen/antioch",
"path": "antioch/core/bootstrap/default_verbs/system_authenticate.py",
"copies": "1",
"size": "1424",
"license": "mit",
"hash": 977828115343129000,
"line_mean": 28.6666666667,
"line_max": 78,
"alpha_frac": 0.6762640449,
"autogenerated": false,
"ratio": 3.5160493827160493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46923134276160494,
"avg_score": null,
"num_lines": null
} |
# antispam.py: Basic services-side spamfilters for IRC
from pylinkirc import conf, utils
from pylinkirc.log import log
# Human-readable description shown alongside the service bot.
mydesc = ("Provides anti-spam functionality.")
# Register the AntiSpam service bot with the PyLink service framework.
sbot = utils.register_service("antispam", default_nick="AntiSpam", desc=mydesc)
def die(irc=None):
    """Plugin unload hook: deregister the antispam service."""
    utils.unregister_service("antispam")
_UNICODE_CHARMAP = {
'A': 'AΑАᎪᗅᴀ𝐀𝐴𝑨𝒜𝓐𝔄𝔸𝕬𝖠𝗔𝘈𝘼𝙰𝚨𝛢𝜜𝝖𝞐',
'B': 'BʙΒВвᏴᗷᛒℬ𐌁𝐁𝐵𝑩𝓑𝔅𝔹𝕭𝖡𝗕𝘉𝘽𝙱𝚩𝛣𝜝𝝗𝞑',
'C': 'CϹСᏟℂℭⅭⲤ𐌂𝐂𝐶𝑪𝒞𝓒𝕮𝖢𝗖𝘊𝘾𝙲',
'D': 'DᎠᗞᗪᴅⅅⅮ𝐃𝐷𝑫𝒟𝓓𝔇𝔻𝕯𝖣𝗗𝘋𝘿𝙳',
'E': 'EΕЕᎬᴇℰ⋿ⴹ𝐄𝐸𝑬𝓔𝔈𝔼𝕰𝖤𝗘𝘌𝙀𝙴𝚬𝛦𝜠𝝚𝞔',
'F': 'FϜᖴℱ𝐅𝐹𝑭𝓕𝔉𝔽𝕱𝖥𝗙𝘍𝙁𝙵𝟊',
'G': 'GɢԌԍᏀᏳ𝐆𝐺𝑮𝒢𝓖𝔊𝔾𝕲𝖦𝗚𝘎𝙂𝙶',
'H': 'HʜΗНнᎻᕼℋℌℍⲎ𝐇𝐻𝑯𝓗𝕳𝖧𝗛𝘏𝙃𝙷𝚮𝛨𝜢𝝜𝞖',
'J': 'JЈᎫᒍᴊ𝐉𝐽𝑱𝒥𝓙𝔍𝕁𝕵𝖩𝗝𝘑𝙅𝙹',
'K': 'KΚКᏦᛕKⲔ𝐊𝐾𝑲𝒦𝓚𝔎𝕂𝕶𝖪𝗞𝘒𝙆𝙺𝚱𝛫𝜥𝝟𝞙',
'L': 'LʟᏞᒪℒⅬ𝐋𝐿𝑳𝓛𝔏𝕃𝕷𝖫𝗟𝘓𝙇𝙻',
'M': 'MΜϺМᎷᗰᛖℳⅯⲘ𐌑𝐌𝑀𝑴𝓜𝔐𝕄𝕸𝖬𝗠𝘔𝙈𝙼𝚳𝛭𝜧𝝡𝞛',
'N': 'NɴΝℕⲚ𝐍𝑁𝑵𝒩𝓝𝔑𝕹𝖭𝗡𝘕𝙉𝙽𝚴𝛮𝜨𝝢𝞜',
'P': 'PΡРᏢᑭᴘᴩℙⲢ𝐏𝑃𝑷𝒫𝓟𝔓𝕻𝖯𝗣𝘗𝙋𝙿𝚸𝛲𝜬𝝦𝞠',
'Q': 'Qℚⵕ𝐐𝑄𝑸𝒬𝓠𝔔𝕼𝖰𝗤𝘘𝙌𝚀',
'R': 'RƦʀᎡᏒᖇᚱℛℜℝ𝐑𝑅𝑹𝓡𝕽𝖱𝗥𝘙𝙍𝚁',
'S': 'SЅՏᏕᏚ𝐒𝑆𝑺𝒮𝓢𝔖𝕊𝕾𝖲𝗦𝘚𝙎𝚂',
'T': 'TΤτТтᎢᴛ⊤⟙Ⲧ𐌕𝐓𝑇𝑻𝒯𝓣𝔗𝕋𝕿𝖳𝗧𝘛𝙏𝚃𝚻𝛕𝛵𝜏𝜯𝝉𝝩𝞃𝞣𝞽',
'U': 'UՍሀᑌ∪⋃𝐔𝑈𝑼𝒰𝓤𝔘𝕌𝖀𝖴𝗨𝘜𝙐𝚄',
'V': 'VѴ٧۷ᏙᐯⅤⴸ𝐕𝑉𝑽𝒱𝓥𝔙𝕍𝖁𝖵𝗩𝘝𝙑𝚅',
'W': 'WԜᎳᏔ𝐖𝑊𝑾𝒲𝓦𝔚𝕎𝖂𝖶𝗪𝘞𝙒𝚆',
'X': 'XΧХ᙭ᚷⅩ╳Ⲭⵝ𐌗𐌢𝐗𝑋𝑿𝒳𝓧𝔛𝕏𝖃𝖷𝗫𝘟𝙓𝚇𝚾𝛸𝜲𝝬𝞦',
'Y': 'YΥϒУҮᎩᎽⲨ𝐘𝑌𝒀𝒴𝓨𝔜𝕐𝖄𝖸𝗬𝘠𝙔𝚈𝚼𝛶𝜰𝝪𝞤',
'Z': 'ZΖᏃℤℨ𝐙𝑍𝒁𝒵𝓩𝖅𝖹𝗭𝘡𝙕𝚉𝚭𝛧𝜡𝝛𝞕',
'a': 'aɑαа⍺𝐚𝑎𝒂𝒶𝓪𝔞𝕒𝖆𝖺𝗮𝘢𝙖𝚊𝛂𝛼𝜶𝝰𝞪',
'b': 'bƄЬᏏᖯ𝐛𝑏𝒃𝒷𝓫𝔟𝕓𝖇𝖻𝗯𝘣𝙗𝚋',
'c': 'cϲсᴄⅽⲥ𝐜𝑐𝒄𝒸𝓬𝔠𝕔𝖈𝖼𝗰𝘤𝙘𝚌',
'd': 'ⅾdԁᏧᑯⅆⅾ𝐝𝑑𝒅𝒹𝓭𝔡𝕕𝖉𝖽𝗱𝘥𝙙𝚍',
'e': 'eеҽ℮ℯⅇ𝐞𝑒𝒆𝓮𝔢𝕖𝖊𝖾𝗲𝘦𝙚𝚎ᥱ',
'f': 'fſϝքẝ𝐟𝑓𝒇𝒻𝓯𝔣𝕗𝖋𝖿𝗳𝘧𝙛𝚏𝟋',
'g': 'gƍɡցᶃℊ𝐠𝑔𝒈𝓰𝔤𝕘𝖌𝗀𝗴𝘨𝙜𝚐',
'h': 'hһհᏂℎ𝐡𝒉𝒽𝓱𝔥𝕙𝖍𝗁𝗵𝘩𝙝𝚑',
'i': 'iıɩɪιіӏᎥℹⅈⅰ⍳ꙇ𝐢𝑖𝒊𝒾𝓲𝔦𝕚𝖎𝗂𝗶𝘪𝙞𝚒𝚤𝛊𝜄𝜾𝝸𝞲',
'j': 'jϳјⅉ𝐣𝑗𝒋𝒿𝓳𝔧𝕛𝖏𝗃𝗷𝘫𝙟𝚓',
'k': 'k𝐤𝑘𝒌𝓀𝓴𝔨𝕜𝖐𝗄𝗸𝘬𝙠𝚔',
'l': 'ⅼ',
'm': 'ⅿm',
'n': 'nոռ𝐧𝑛𝒏𝓃𝓷𝔫𝕟𝖓𝗇𝗻𝘯𝙣𝚗ᥒ',
'o': 'ⲟഠοо',
'p': 'pρϱр⍴ⲣ𝐩𝑝𝒑𝓅𝓹𝔭𝕡𝖕𝗉𝗽𝘱𝙥𝚙𝛒𝛠𝜌𝜚𝝆𝝔𝞀𝞎𝞺𝟈',
'q': 'qԛգզ𝐪𝑞𝒒𝓆𝓺𝔮𝕢𝖖𝗊𝗾𝘲𝙦𝚚',
'r': 'rгᴦⲅ𝐫𝑟𝒓𝓇𝓻𝔯𝕣𝖗𝗋𝗿𝘳𝙧𝚛',
's': 'sƽѕꜱ𝐬𝑠𝒔𝓈𝓼𝔰𝕤𝖘𝗌𝘀𝘴𝙨𝚜',
't': 't𝐭𝑡𝒕𝓉𝓽𝔱𝕥𝖙𝗍𝘁𝘵𝙩𝚝',
'u': 'uʋυսᴜ𝐮𝑢𝒖𝓊𝓾𝔲𝕦𝖚𝗎𝘂𝘶𝙪𝚞𝛖𝜐𝝊𝞄𝞾ᥙ',
'v': 'vνѵטᴠⅴ∨⋁𝐯𝑣𝒗𝓋𝓿𝔳𝕧𝖛𝗏𝘃𝘷𝙫𝚟𝛎𝜈𝝂𝝼𝞶',
'w': 'wɯѡԝաᴡ𝐰𝑤𝒘𝓌𝔀𝔴𝕨𝖜𝗐𝘄𝘸𝙬𝚠',
'x': 'x×хᕁᕽ᙮ⅹ⤫⤬⨯𝐱𝑥𝒙𝓍𝔁𝔵𝕩𝖝𝗑𝘅𝘹𝙭𝚡',
'y': 'yɣʏγуүყᶌỿℽ𝐲𝑦𝒚𝓎𝔂𝔶𝕪𝖞𝗒𝘆𝘺𝙮𝚢𝛄𝛾𝜸𝝲𝞬',
'z': 'zᴢ𝐳𝑧𝒛𝓏𝔃𝔷𝕫𝖟𝗓𝘇𝘻𝙯𝚣',
'/': '᜵⁄∕⧸/',
'\\': '⧵﹨⧹\',
' ': '\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\xa0\u202f\u205f',
'.': '․.',
'-': '˗╴﹣-−⎼',
'!': '﹗!ǃⵑ︕',
':': ':˸։፡᛬⁚∶⠆︓﹕',
'#': '#﹟'
}
def _prep_maketrans(data):
from_s = ''
to_s = ''
for target, chars in data.items():
from_s += chars
to_s += target * len(chars)
return str.maketrans(from_s, to_s)
# Compiled translate() table collapsing Unicode homoglyphs to ASCII.
UNICODE_CHARMAP = _prep_maketrans(_UNICODE_CHARMAP)

# Recognized punishment actions (combinable with '+') and channel-status
# exemption levels.
PUNISH_OPTIONS = ['kill', 'ban', 'quiet', 'kick', 'block']
EXEMPT_OPTIONS = ['voice', 'halfop', 'op']
DEFAULT_EXEMPT_OPTION = 'halfop'
def _punish(irc, target, channel, punishment, reason):
    """Punishes the target user. This function returns True if the user was successfully punished."""
    # Never punish non-users or opers.
    if target not in irc.users:
        log.warning("(%s) antispam: got target %r that isn't a user?", irc.name, target)
        return False
    elif irc.is_oper(target):
        log.debug("(%s) antispam: refusing to punish oper %s/%s", irc.name, target, irc.get_friendly_name(target))
        return False

    target_nick = irc.get_friendly_name(target)

    if channel:
        c = irc.channels[channel]
        # Users at or above the configured channel status are exempt.
        exempt_level = irc.get_service_option('antispam', 'exempt_level', DEFAULT_EXEMPT_OPTION).lower()

        if exempt_level not in EXEMPT_OPTIONS:
            log.error('(%s) Antispam exempt %r is not a valid setting, '
                      'falling back to defaults; accepted settings include: %s',
                      irc.name, exempt_level, ', '.join(EXEMPT_OPTIONS))
            exempt_level = DEFAULT_EXEMPT_OPTION

        if exempt_level == 'voice' and c.is_voice_plus(target):
            log.debug("(%s) antispam: refusing to punish voiced and above %s/%s", irc.name, target, target_nick)
            return False
        elif exempt_level == 'halfop' and c.is_halfop_plus(target):
            log.debug("(%s) antispam: refusing to punish halfop and above %s/%s", irc.name, target, target_nick)
            return False
        elif exempt_level == 'op' and c.is_op_plus(target):
            log.debug("(%s) antispam: refusing to punish op and above %s/%s", irc.name, target, target_nick)
            return False

    my_uid = sbot.uids.get(irc.name)

    # XXX workaround for single-bot protocols like Clientbot
    if irc.pseudoclient and not irc.has_cap('can-spawn-clients'):
        my_uid = irc.pseudoclient.uid

    # Collected ban/quiet modes; applied in one MODE call at the end.
    bans = set()
    log.debug('(%s) antispam: got %r as punishment for %s/%s', irc.name, punishment,
              target, irc.get_friendly_name(target))

    # Small helpers, one per punishment primitive.
    def _ban():
        bans.add(irc.make_channel_ban(target))
    def _quiet():
        bans.add(irc.make_channel_ban(target, ban_type='quiet'))
    def _kick():
        irc.kick(my_uid, channel, target, reason)
        irc.call_hooks([my_uid, 'ANTISPAM_KICK', {'channel': channel, 'text': reason, 'target': target,
                                                  'parse_as': 'KICK'}])
    def _kill():
        if target not in irc.users:
            log.debug('(%s) antispam: not killing %s/%s; they already left', irc.name, target,
                      irc.get_friendly_name(target))
            return
        userdata = irc.users[target]
        irc.kill(my_uid, target, reason)
        irc.call_hooks([my_uid, 'ANTISPAM_KILL', {'target': target, 'text': reason,
                                                  'userdata': userdata, 'parse_as': 'KILL'}])

    kill = False
    successful_punishments = 0
    # Punishment may be several actions joined with '+'; apply each once.
    for action in set(punishment.split('+')):
        if action not in PUNISH_OPTIONS:
            log.error('(%s) Antispam punishment %r is not a valid setting; '
                      'accepted settings include: %s OR any combination of '
                      'these joined together with a "+".',
                      irc.name, punishment, ', '.join(PUNISH_OPTIONS))
            return
        elif action == 'block':
            # We only need to increment this for this function to return True
            successful_punishments += 1
        elif action == 'kill':
            kill = True  # Delay kills so that the user data doesn't disappear.
        # XXX factorize these blocks
        elif action == 'kick' and channel:
            try:
                _kick()
            except NotImplementedError:
                log.warning("(%s) antispam: Kicks are not supported on this network, skipping; "
                            "target was %s/%s", irc.name, target_nick, channel)
            else:
                successful_punishments += 1
        elif action == 'ban' and channel:
            try:
                _ban()
            except (ValueError, NotImplementedError):
                log.warning("(%s) antispam: Bans are not supported on this network, skipping; "
                            "target was %s/%s", irc.name, target_nick, channel)
            else:
                successful_punishments += 1
        elif action == 'quiet' and channel:
            try:
                _quiet()
            except (ValueError, NotImplementedError):
                log.warning("(%s) antispam: Quiet is not supported on this network, skipping; "
                            "target was %s/%s", irc.name, target_nick, channel)
            else:
                successful_punishments += 1

    if bans:  # Set all bans at once to prevent spam
        irc.mode(my_uid, channel, bans)
        irc.call_hooks([my_uid, 'ANTISPAM_BAN',
                        {'target': channel, 'modes': bans, 'parse_as': 'MODE'}])
    # Kill last, after bans/kicks, so userdata was still available above.
    if kill:
        try:
            _kill()
        except NotImplementedError:
            log.warning("(%s) antispam: Kills are not supported on this network, skipping; "
                        "target was %s/%s", irc.name, target_nick, channel)
        else:
            successful_punishments += 1

    if not successful_punishments:
        log.warning('(%s) antispam: Failed to punish %s with %r, target was %s', irc.name,
                    target_nick, punishment, channel or 'a PM')
    return bool(successful_punishments)
# Default settings for the mass-highlight protection; overridden by the
# 'antispam::masshighlight' service option.
MASSHIGHLIGHT_DEFAULTS = {
    'min_length': 50,
    'min_nicks': 5,
    'reason': "Mass highlight spam is prohibited",
    'punishment': 'kick+ban',
    'enabled': False
}
def handle_masshighlight(irc, source, command, args):
    """Handles mass highlight attacks."""
    channel = args['target']
    text = args['text']

    mhl_settings = irc.get_service_option('antispam', 'masshighlight',
                                          MASSHIGHLIGHT_DEFAULTS)

    if not mhl_settings.get('enabled', False):
        return

    my_uid = sbot.uids.get(irc.name)

    # XXX workaround for single-bot protocols like Clientbot
    if irc.pseudoclient and not irc.has_cap('can-spawn-clients'):
        my_uid = irc.pseudoclient.uid

    # Guard clauses: only process real user messages, in channels we monitor,
    # that are long enough to plausibly contain a highlight list.
    if (not irc.connected.is_set()) or (not my_uid):
        # Break if the network isn't ready.
        log.debug("(%s) antispam.masshighlight: skipping processing; network isn't ready", irc.name)
        return
    elif not irc.is_channel(channel):
        # Not a channel - mass highlight blocking only makes sense within channels
        log.debug("(%s) antispam.masshighlight: skipping processing; %r is not a channel", irc.name, channel)
        return
    elif irc.is_internal_client(source):
        # Ignore messages from our own clients.
        log.debug("(%s) antispam.masshighlight: skipping processing message from internal client %s", irc.name, source)
        return
    elif source not in irc.users:
        log.debug("(%s) antispam.masshighlight: ignoring message from non-user %s", irc.name, source)
        return
    elif channel not in irc.channels or my_uid not in irc.channels[channel].users:
        # We're not monitoring this channel.
        log.debug("(%s) antispam.masshighlight: skipping processing message from channel %r we're not in", irc.name, channel)
        return
    elif len(text) < mhl_settings.get('min_length', MASSHIGHLIGHT_DEFAULTS['min_length']):
        log.debug("(%s) antispam.masshighlight: skipping processing message %r; it's too short", irc.name, text)
        return

    if irc.get_service_option('antispam', 'strip_formatting', True):
        text = utils.strip_irc_formatting(text)

    # Strip :, from potential nicks
    words = [word.rstrip(':,') for word in text.split()]

    userlist = [irc.users[uid].nick for uid in irc.channels[channel].users.copy()]
    min_nicks = mhl_settings.get('min_nicks', MASSHIGHLIGHT_DEFAULTS['min_nicks'])

    # Don't allow repeating the same nick to trigger punishment
    nicks_caught = set()

    punished = False
    for word in words:
        if word in userlist:
            nicks_caught.add(word)
        if len(nicks_caught) >= min_nicks:
            # Get the punishment and reason.
            punishment = mhl_settings.get('punishment', MASSHIGHLIGHT_DEFAULTS['punishment']).lower()
            reason = mhl_settings.get('reason', MASSHIGHLIGHT_DEFAULTS['reason'])

            log.info("(%s) antispam: punishing %s => %s for mass highlight spam",
                     irc.name,
                     irc.get_friendly_name(source),
                     channel)
            punished = _punish(irc, source, channel, punishment, reason)
            break

    log.debug('(%s) antispam.masshighlight: got %s/%s nicks on message to %r', irc.name,
              len(nicks_caught), min_nicks, channel)
    return not punished  # Filter this message from relay, etc. if it triggered protection
# High priority (1000) so this filter runs before other message handlers.
utils.add_hook(handle_masshighlight, 'PRIVMSG', priority=1000)
utils.add_hook(handle_masshighlight, 'NOTICE', priority=1000)

# Default settings for the text filter; overridden by the
# 'antispam::textfilter' service option.
TEXTFILTER_DEFAULTS = {
    'reason': "Spam is prohibited",
    'punishment': 'kick+ban+block',
    'watch_pms': False,
    'enabled': False,
    'munge_unicode': True,
}
def handle_textfilter(irc, source, command, args):
    """Antispam text filter handler."""
    target = args['target']
    text = args['text']

    txf_settings = irc.get_service_option('antispam', 'textfilter',
                                          TEXTFILTER_DEFAULTS)

    if not txf_settings.get('enabled', False):
        return

    my_uid = sbot.uids.get(irc.name)

    # XXX workaround for single-bot protocols like Clientbot
    if irc.pseudoclient and not irc.has_cap('can-spawn-clients'):
        my_uid = irc.pseudoclient.uid

    if (not irc.connected.is_set()) or (not my_uid):
        # Break if the network isn't ready.
        log.debug("(%s) antispam.textfilters: skipping processing; network isn't ready", irc.name)
        return
    elif irc.is_internal_client(source):
        # Ignore messages from our own clients.
        log.debug("(%s) antispam.textfilters: skipping processing message from internal client %s", irc.name, source)
        return
    elif source not in irc.users:
        log.debug("(%s) antispam.textfilters: ignoring message from non-user %s", irc.name, source)
        return

    # Decide whether this message is in scope: monitored channel, or a PM
    # matching the 'watch_pms' policy ('services' / 'all' / off).
    if irc.is_channel(target):
        channel_or_none = target
        if target not in irc.channels or my_uid not in irc.channels[target].users:
            # We're not monitoring this channel.
            log.debug("(%s) antispam.textfilters: skipping processing message from channel %r we're not in", irc.name, target)
            return
    else:
        channel_or_none = None
        watch_pms = txf_settings.get('watch_pms', TEXTFILTER_DEFAULTS['watch_pms'])

        if watch_pms == 'services':
            if not irc.get_service_bot(target):
                log.debug("(%s) antispam.textfilters: skipping processing; %r is not a service bot (watch_pms='services')", irc.name, target)
                return
        elif watch_pms == 'all':
            log.debug("(%s) antispam.textfilters: checking all PMs (watch_pms='all')", irc.name)
            pass
        else:
            # Not a channel.
            log.debug("(%s) antispam.textfilters: skipping processing; %r is not a channel and watch_pms is disabled", irc.name, target)
            return

    # Merge together global and local textfilter lists.
    txf_globs = set(conf.conf.get('antispam', {}).get('textfilter_globs', [])) | \
                set(irc.serverdata.get('antispam_textfilter_globs', []))

    punishment = txf_settings.get('punishment', TEXTFILTER_DEFAULTS['punishment']).lower()
    reason = txf_settings.get('reason', TEXTFILTER_DEFAULTS['reason'])

    if irc.get_service_option('antispam', 'strip_formatting', True):
        text = utils.strip_irc_formatting(text)

    # Collapse Unicode homoglyphs so disguised spam still matches the globs.
    if txf_settings.get('munge_unicode', TEXTFILTER_DEFAULTS['munge_unicode']):
        text = str.translate(text, UNICODE_CHARMAP)

    punished = False
    for filterglob in txf_globs:
        if utils.match_text(filterglob, text):
            log.info("(%s) antispam: punishing %s => %s for text filter %r",
                     irc.name,
                     irc.get_friendly_name(source),
                     irc.get_friendly_name(target),
                     filterglob)
            punished = _punish(irc, source, channel_or_none, punishment, reason)
            break

    return not punished  # Filter this message from relay, etc. if it triggered protection
# Priority 999: runs just after the mass-highlight filter (1000).
utils.add_hook(handle_textfilter, 'PRIVMSG', priority=999)
utils.add_hook(handle_textfilter, 'NOTICE', priority=999)

# Default settings for part/quit message filtering; overridden by the
# 'antispam::partquit' service option.
PARTQUIT_DEFAULTS = {
    'watch_quits': True,
    'watch_parts': True,
    'part_filter_message': "Reason filtered",
    'quit_filter_message': "Reason filtered",
}
def handle_partquit(irc, source, command, args):
    """Antispam part/quit message filter.

    Replaces a PART or QUIT reason with a configured placeholder when it
    matches any of the configured antispam globs. Mutates ``args['text']``
    in place so later handlers see the filtered reason.
    """
    reason = args.get('text')
    settings = irc.get_service_option('antispam', 'partquit',
                                      PARTQUIT_DEFAULTS)

    # Guard clauses: nothing to match, or this event type is not watched.
    if not reason:
        return
    if command == 'QUIT' and not settings.get('watch_quits', True):
        return
    if command == 'PART' and not settings.get('watch_parts', True):
        return

    # Merge together global and local partquit filter lists.
    globs = set(conf.conf.get('antispam', {}).get('partquit_globs', []))
    globs |= set(irc.serverdata.get('antispam_partquit_globs', []))
    if not globs:
        return

    for filterglob in globs:
        if not utils.match_text(filterglob, reason):
            continue

        if command == 'PART':
            # For parts, also log the affected channels.
            replacement = settings.get('part_filter_message',
                                       PARTQUIT_DEFAULTS['part_filter_message'])
            log.info('(%s) antispam: filtered part message from %s on %s due to part/quit filter glob %s',
                     irc.name, irc.get_hostmask(source), ','.join(args['channels']), filterglob)
        else:
            replacement = settings.get('quit_filter_message',
                                       PARTQUIT_DEFAULTS['quit_filter_message'])
            log.info('(%s) antispam: filtered quit message from %s due to part/quit filter glob %s',
                     irc.name, args['userdata'].nick, filterglob)

        args['text'] = replacement
        break
# Attach the part/quit reason filter with the same late priority (999)
# used for the PRIVMSG/NOTICE text filter above.
utils.add_hook(handle_partquit, 'PART', priority=999)
utils.add_hook(handle_partquit, 'QUIT', priority=999)
| {
"repo_name": "GLolol/PyLink",
"path": "plugins/antispam.py",
"copies": "1",
"size": "19675",
"license": "mpl-2.0",
"hash": -5323735254086303000,
"line_mean": 40.826405868,
"line_max": 141,
"alpha_frac": 0.6027357222,
"autogenerated": false,
"ratio": 2.1227199404392603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.32254556626392605,
"avg_score": null,
"num_lines": null
} |
"""ANTLR3 exception hierarchy"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2006 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from antlr3.constants import INVALID_TOKEN_TYPE
class BacktrackingFailed(Exception):
    """@brief Signals that a backtracking attempt did not succeed."""
class RecognitionException(Exception):
    """@brief The root of the ANTLR exception hierarchy.

    To avoid English-only error messages and to generally make things
    as flexible as possible, these exceptions are not created with strings,
    but rather the information necessary to generate an error. Then
    the various reporting methods in Parser and Lexer can be overridden
    to generate a localized error message. For example, MismatchedToken
    exceptions are built with the expected token type.
    So, don't expect getMessage() to return anything.

    Note that as of Java 1.4, you can access the stack trace, which means
    that you can compute the complete trace of rules from the start symbol.
    This gives you considerable context information with which to generate
    useful error messages.

    ANTLR generates code that throws exceptions upon recognition error and
    also generates code to catch these exceptions in each rule. If you
    want to quit upon first error, you can turn off the automatic error
    handling mechanism using rulecatch action, but you still need to
    override methods mismatch and recoverFromMismatchSet.

    In general, the recognition exceptions can track where in a grammar a
    problem occurred and/or what was the expected input. While the parser
    knows its state (such as current input symbol and line info) that
    state can change before the exception is reported so current token index
    is computed and stored at exception time. From this info, you can
    perhaps print an entire line of input not just a single token, for example.
    Better to just say the recognizer had a problem and then let the parser
    figure out a fancy report.
    """

    def __init__(self, input=None):
        # Capture position information from the stream, if one is given.
        Exception.__init__(self)

        # What input stream did the error occur in?
        self.input = None

        # What is index of token/char were we looking at when the error
        # occurred?
        self.index = None

        # The current Token when an error occurred. Since not all streams
        # can retrieve the ith Token, we have to track the Token object.
        # For parsers. Even when it's a tree parser, token might be set.
        self.token = None

        # If this is a tree parser exception, node is set to the node with
        # the problem.
        self.node = None

        # The current char when an error occurred. For lexers.
        self.c = None

        # Track the line at which the error occurred in case this is
        # generated from a lexer. We need to track this since the
        # unexpected char doesn't carry the line info.
        self.line = None
        self.charPositionInLine = None

        # If you are parsing a tree node stream, you will encounter som
        # imaginary nodes w/o line/col info. We now search backwards looking
        # for most recent token with line/col info, but notify getErrorHeader()
        # that info is approximate.
        self.approximateLineInfo = False

        if input is not None:
            self.input = input
            self.index = input.index()

            # late import to avoid cyclic dependencies
            from antlr3.streams import TokenStream, CharStream
            from antlr3.tree import TreeNodeStream

            if isinstance(self.input, TokenStream):
                self.token = self.input.LT(1)
                self.line = self.token.line
                self.charPositionInLine = self.token.charPositionInLine

            # NOTE(review): the 'else' below pairs with this TreeNodeStream
            # check only, so a plain TokenStream also falls into the else
            # branch and additionally records LA(1) in self.c — preserved
            # as-is.
            if isinstance(self.input, TreeNodeStream):
                self.extractInformationFromTreeNodeStream(self.input)
            else:
                if isinstance(self.input, CharStream):
                    self.c = self.input.LT(1)
                    self.line = self.input.line
                    self.charPositionInLine = self.input.charPositionInLine
                else:
                    # Fallback for any other stream type: record only the
                    # lookahead symbol.
                    self.c = self.input.LA(1)

    def extractInformationFromTreeNodeStream(self, nodes):
        """Fill in token/line/column info from a tree node stream.

        Imaginary nodes carry no line/col info, so scan backwards for the
        most recent node with real position data and flag the result as
        approximate.
        """
        from antlr3.tree import Tree, CommonTree
        from antlr3.tokens import CommonToken

        self.node = nodes.LT(1)
        adaptor = nodes.adaptor
        payload = adaptor.getToken(self.node)
        if payload is not None:
            self.token = payload
            if payload.line <= 0:
                # imaginary node; no line/pos info; scan backwards
                i = -1
                priorNode = nodes.LT(i)
                while priorNode is not None:
                    priorPayload = adaptor.getToken(priorNode)
                    if priorPayload is not None and priorPayload.line > 0:
                        # we found the most recent real line / pos info
                        self.line = priorPayload.line
                        self.charPositionInLine = priorPayload.charPositionInLine
                        self.approximateLineInfo = True
                        break
                    i -= 1
                    priorNode = nodes.LT(i)
            else: # node created from real token
                self.line = payload.line
                self.charPositionInLine = payload.charPositionInLine
        elif isinstance(self.node, Tree):
            self.line = self.node.line
            self.charPositionInLine = self.node.charPositionInLine
            if isinstance(self.node, CommonTree):
                self.token = self.node.token
        else:
            # No payload and not a Tree: synthesize a token from the
            # adaptor's view of the node.
            type = adaptor.getType(self.node)
            text = adaptor.getText(self.node)
            self.token = CommonToken(type=type, text=text)

    def getUnexpectedType(self):
        """Return the token type or char of the unexpected input element"""
        from antlr3.streams import TokenStream
        from antlr3.tree import TreeNodeStream

        if isinstance(self.input, TokenStream):
            return self.token.type
        elif isinstance(self.input, TreeNodeStream):
            adaptor = self.input.treeAdaptor
            return adaptor.getType(self.node)
        else:
            return self.c

    unexpectedType = property(getUnexpectedType)
class MismatchedTokenException(RecognitionException):
    """@brief A mismatched char or Token or tree node."""

    def __init__(self, expecting, input):
        RecognitionException.__init__(self, input)
        self.expecting = expecting

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedTokenException(%r!=%r)" % (found, self.expecting)

    __repr__ = __str__
class UnwantedTokenException(MismatchedTokenException):
    """@brief An extra token was found while parsing a TokenStream."""

    def getUnexpectedToken(self):
        return self.token

    def __str__(self):
        # Suppress the "expected" suffix when no concrete type was expected.
        if self.expecting == INVALID_TOKEN_TYPE:
            exp = ""
        else:
            exp = ", expected %s" % self.expecting

        if self.token is None:
            found = None
        else:
            found = self.token.text
        return "UnwantedTokenException(found=%s%s)" % (found, exp)

    __repr__ = __str__
class MissingTokenException(MismatchedTokenException):
    """@brief A token was expected but not found.

    The current token is actually what we wanted next.
    """

    def __init__(self, expecting, input, inserted):
        MismatchedTokenException.__init__(self, expecting, input)
        self.inserted = inserted

    def getMissingType(self):
        return self.expecting

    def __str__(self):
        if self.token is not None:
            if self.inserted is not None:
                return "MissingTokenException(inserted %r at %r)" % (
                    self.inserted, self.token.text)
            return "MissingTokenException(at %r)" % self.token.text
        return "MissingTokenException"

    __repr__ = __str__
class MismatchedRangeException(RecognitionException):
    """@brief The next token does not match a range of expected types."""

    def __init__(self, a, b, input):
        RecognitionException.__init__(self, input)
        # Inclusive bounds of the expected token-type range.
        self.a = a
        self.b = b

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedRangeException(%r not in [%r..%r])" % (
            found, self.a, self.b)

    __repr__ = __str__
class MismatchedSetException(RecognitionException):
    """@brief The next token does not match a set of expected types."""

    def __init__(self, expecting, input):
        RecognitionException.__init__(self, input)
        self.expecting = expecting

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedSetException(%r not in %r)" % (found, self.expecting)

    __repr__ = __str__
class MismatchedNotSetException(MismatchedSetException):
    """@brief Used for remote debugger deserialization."""

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedNotSetException(%r!=%r)" % (found, self.expecting)

    __repr__ = __str__
class NoViableAltException(RecognitionException):
    """@brief The recognizer could not decide which alternative to choose."""

    def __init__(self, grammarDecisionDescription, decisionNumber,
                 stateNumber, input):
        RecognitionException.__init__(self, input)
        self.grammarDecisionDescription = grammarDecisionDescription
        self.decisionNumber = decisionNumber
        self.stateNumber = stateNumber

    def __str__(self):
        return "NoViableAltException(%r!=[%r])" % (
            self.getUnexpectedType(), self.grammarDecisionDescription)

    __repr__ = __str__
class EarlyExitException(RecognitionException):
    """@brief The recognizer did not match anything for a (..)+ loop."""

    def __init__(self, decisionNumber, input):
        RecognitionException.__init__(self, input)
        # Which (..)+ decision failed to match at least once.
        self.decisionNumber = decisionNumber
class FailedPredicateException(RecognitionException):
    """@brief A semantic predicate failed during validation.

    Validation of predicates occurs when normally parsing the alternative
    just like matching a token. Disambiguating predicate evaluation occurs
    when we hoist a predicate into a prediction decision.
    """

    def __init__(self, input, ruleName, predicateText):
        RecognitionException.__init__(self, input)
        self.ruleName = ruleName
        self.predicateText = predicateText

    def __str__(self):
        return ("FailedPredicateException("
                + self.ruleName
                + ",{" + self.predicateText + "}?)")

    __repr__ = __str__
class MismatchedTreeNodeException(RecognitionException):
    """@brief The next tree mode does not match the expected type."""

    def __init__(self, expecting, input):
        RecognitionException.__init__(self, input)
        self.expecting = expecting

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedTreeNodeException(%r!=%r)" % (found, self.expecting)

    __repr__ = __str__
| {
"repo_name": "ncbray/pystream",
"path": "lib/antlr3/exceptions.py",
"copies": "1",
"size": "12739",
"license": "apache-2.0",
"hash": 4642764974015753000,
"line_mean": 33.9972527473,
"line_max": 86,
"alpha_frac": 0.6462830677,
"autogenerated": false,
"ratio": 4.472963483146067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010197105924784967,
"num_lines": 364
} |
"""ANTLR3 exception hierarchy"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from antlr3.constants import INVALID_TOKEN_TYPE
class BacktrackingFailed(Exception):
    """@brief Signals that a backtracking attempt did not succeed."""
class RecognitionException(Exception):
    """@brief The root of the ANTLR exception hierarchy.

    To avoid English-only error messages and to generally make things
    as flexible as possible, these exceptions are not created with strings,
    but rather the information necessary to generate an error. Then
    the various reporting methods in Parser and Lexer can be overridden
    to generate a localized error message. For example, MismatchedToken
    exceptions are built with the expected token type.
    So, don't expect getMessage() to return anything.

    Note that as of Java 1.4, you can access the stack trace, which means
    that you can compute the complete trace of rules from the start symbol.
    This gives you considerable context information with which to generate
    useful error messages.

    ANTLR generates code that throws exceptions upon recognition error and
    also generates code to catch these exceptions in each rule. If you
    want to quit upon first error, you can turn off the automatic error
    handling mechanism using rulecatch action, but you still need to
    override methods mismatch and recoverFromMismatchSet.

    In general, the recognition exceptions can track where in a grammar a
    problem occurred and/or what was the expected input. While the parser
    knows its state (such as current input symbol and line info) that
    state can change before the exception is reported so current token index
    is computed and stored at exception time. From this info, you can
    perhaps print an entire line of input not just a single token, for example.
    Better to just say the recognizer had a problem and then let the parser
    figure out a fancy report.
    """

    def __init__(self, input=None):
        # Capture position information from the stream, if one is given.
        Exception.__init__(self)

        # What input stream did the error occur in?
        self.input = None

        # What is index of token/char were we looking at when the error
        # occurred?
        self.index = None

        # The current Token when an error occurred. Since not all streams
        # can retrieve the ith Token, we have to track the Token object.
        # For parsers. Even when it's a tree parser, token might be set.
        self.token = None

        # If this is a tree parser exception, node is set to the node with
        # the problem.
        self.node = None

        # The current char when an error occurred. For lexers.
        self.c = None

        # Track the line at which the error occurred in case this is
        # generated from a lexer. We need to track this since the
        # unexpected char doesn't carry the line info.
        self.line = None
        self.charPositionInLine = None

        # If you are parsing a tree node stream, you will encounter som
        # imaginary nodes w/o line/col info. We now search backwards looking
        # for most recent token with line/col info, but notify getErrorHeader()
        # that info is approximate.
        self.approximateLineInfo = False

        if input is not None:
            self.input = input
            self.index = input.index()

            # late import to avoid cyclic dependencies
            from antlr3.streams import TokenStream, CharStream
            from antlr3.tree import TreeNodeStream

            if isinstance(self.input, TokenStream):
                self.token = self.input.LT(1)
                self.line = self.token.line
                self.charPositionInLine = self.token.charPositionInLine

            # NOTE(review): the 'else' below pairs with this TreeNodeStream
            # check only, so a plain TokenStream also falls into the else
            # branch and additionally records LA(1) in self.c — preserved
            # as-is.
            if isinstance(self.input, TreeNodeStream):
                self.extractInformationFromTreeNodeStream(self.input)
            else:
                if isinstance(self.input, CharStream):
                    self.c = self.input.LT(1)
                    self.line = self.input.line
                    self.charPositionInLine = self.input.charPositionInLine
                else:
                    # Fallback for any other stream type: record only the
                    # lookahead symbol.
                    self.c = self.input.LA(1)

    def extractInformationFromTreeNodeStream(self, nodes):
        """Fill in token/line/column info from a tree node stream.

        Imaginary nodes carry no line/col info, so scan backwards for the
        most recent node with real position data and flag the result as
        approximate.
        """
        from antlr3.tree import Tree, CommonTree
        from antlr3.tokens import CommonToken

        self.node = nodes.LT(1)
        adaptor = nodes.adaptor
        payload = adaptor.getToken(self.node)
        if payload is not None:
            self.token = payload
            if payload.line <= 0:
                # imaginary node; no line/pos info; scan backwards
                i = -1
                priorNode = nodes.LT(i)
                while priorNode is not None:
                    priorPayload = adaptor.getToken(priorNode)
                    if priorPayload is not None and priorPayload.line > 0:
                        # we found the most recent real line / pos info
                        self.line = priorPayload.line
                        self.charPositionInLine = priorPayload.charPositionInLine
                        self.approximateLineInfo = True
                        break
                    i -= 1
                    priorNode = nodes.LT(i)
            else: # node created from real token
                self.line = payload.line
                self.charPositionInLine = payload.charPositionInLine
        elif isinstance(self.node, Tree):
            self.line = self.node.line
            self.charPositionInLine = self.node.charPositionInLine
            if isinstance(self.node, CommonTree):
                self.token = self.node.token
        else:
            # No payload and not a Tree: synthesize a token from the
            # adaptor's view of the node.
            type = adaptor.getType(self.node)
            text = adaptor.getText(self.node)
            self.token = CommonToken(type=type, text=text)

    def getUnexpectedType(self):
        """Return the token type or char of the unexpected input element"""
        from antlr3.streams import TokenStream
        from antlr3.tree import TreeNodeStream

        if isinstance(self.input, TokenStream):
            return self.token.type
        elif isinstance(self.input, TreeNodeStream):
            adaptor = self.input.treeAdaptor
            return adaptor.getType(self.node)
        else:
            return self.c

    unexpectedType = property(getUnexpectedType)
class MismatchedTokenException(RecognitionException):
    """@brief A mismatched char or Token or tree node."""

    def __init__(self, expecting, input):
        RecognitionException.__init__(self, input)
        self.expecting = expecting

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedTokenException(%r!=%r)" % (found, self.expecting)

    __repr__ = __str__
class UnwantedTokenException(MismatchedTokenException):
    """@brief An extra token was found while parsing a TokenStream."""

    def getUnexpectedToken(self):
        return self.token

    def __str__(self):
        # Suppress the "expected" suffix when no concrete type was expected.
        if self.expecting == INVALID_TOKEN_TYPE:
            exp = ""
        else:
            exp = ", expected %s" % self.expecting

        if self.token is None:
            found = None
        else:
            found = self.token.text
        return "UnwantedTokenException(found=%s%s)" % (found, exp)

    __repr__ = __str__
class MissingTokenException(MismatchedTokenException):
    """@brief A token was expected but not found.

    The current token is actually what we wanted next.
    """

    def __init__(self, expecting, input, inserted):
        MismatchedTokenException.__init__(self, expecting, input)
        self.inserted = inserted

    def getMissingType(self):
        return self.expecting

    def __str__(self):
        if self.token is not None:
            if self.inserted is not None:
                return "MissingTokenException(inserted %r at %r)" % (
                    self.inserted, self.token.text)
            return "MissingTokenException(at %r)" % self.token.text
        return "MissingTokenException"

    __repr__ = __str__
class MismatchedRangeException(RecognitionException):
    """@brief The next token does not match a range of expected types."""

    def __init__(self, a, b, input):
        RecognitionException.__init__(self, input)
        # Inclusive bounds of the expected token-type range.
        self.a = a
        self.b = b

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedRangeException(%r not in [%r..%r])" % (
            found, self.a, self.b)

    __repr__ = __str__
class MismatchedSetException(RecognitionException):
    """@brief The next token does not match a set of expected types."""

    def __init__(self, expecting, input):
        RecognitionException.__init__(self, input)
        self.expecting = expecting

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedSetException(%r not in %r)" % (found, self.expecting)

    __repr__ = __str__
class MismatchedNotSetException(MismatchedSetException):
    """@brief Used for remote debugger deserialization."""

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedNotSetException(%r!=%r)" % (found, self.expecting)

    __repr__ = __str__
class NoViableAltException(RecognitionException):
    """@brief The recognizer could not decide which alternative to choose."""

    def __init__(self, grammarDecisionDescription, decisionNumber,
                 stateNumber, input):
        RecognitionException.__init__(self, input)
        self.grammarDecisionDescription = grammarDecisionDescription
        self.decisionNumber = decisionNumber
        self.stateNumber = stateNumber

    def __str__(self):
        return "NoViableAltException(%r!=[%r])" % (
            self.getUnexpectedType(), self.grammarDecisionDescription)

    __repr__ = __str__
class EarlyExitException(RecognitionException):
    """@brief The recognizer did not match anything for a (..)+ loop."""

    def __init__(self, decisionNumber, input):
        RecognitionException.__init__(self, input)
        # Which (..)+ decision failed to match at least once.
        self.decisionNumber = decisionNumber
class FailedPredicateException(RecognitionException):
    """@brief A semantic predicate failed during validation.

    Validation of predicates occurs when normally parsing the alternative
    just like matching a token. Disambiguating predicate evaluation occurs
    when we hoist a predicate into a prediction decision.
    """

    def __init__(self, input, ruleName, predicateText):
        RecognitionException.__init__(self, input)
        self.ruleName = ruleName
        self.predicateText = predicateText

    def __str__(self):
        return ("FailedPredicateException("
                + self.ruleName
                + ",{" + self.predicateText + "}?)")

    __repr__ = __str__
class MismatchedTreeNodeException(RecognitionException):
    """@brief The next tree mode does not match the expected type."""

    def __init__(self, expecting, input):
        RecognitionException.__init__(self, input)
        self.expecting = expecting

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedTreeNodeException(%r!=%r)" % (found, self.expecting)

    __repr__ = __str__
| {
"repo_name": "legatoproject/legato-af",
"path": "framework/tools/ifgen/antlr3/exceptions.py",
"copies": "1",
"size": "12590",
"license": "mpl-2.0",
"hash": 4312229561777484000,
"line_mean": 33.5879120879,
"line_max": 86,
"alpha_frac": 0.6539316918,
"autogenerated": false,
"ratio": 4.429978888106967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027836277129874277,
"num_lines": 364
} |
"""ANTLR3 exception hierarchy"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2012 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from .constants import INVALID_TOKEN_TYPE
class BacktrackingFailed(Exception):
    """@brief Signals that a backtracking attempt did not succeed."""
class RecognitionException(Exception):
    """@brief The root of the ANTLR exception hierarchy.

    To avoid English-only error messages and to generally make things
    as flexible as possible, these exceptions are not created with strings,
    but rather the information necessary to generate an error. Then
    the various reporting methods in Parser and Lexer can be overridden
    to generate a localized error message. For example, MismatchedToken
    exceptions are built with the expected token type.
    So, don't expect getMessage() to return anything.

    Note that as of Java 1.4, you can access the stack trace, which means
    that you can compute the complete trace of rules from the start symbol.
    This gives you considerable context information with which to generate
    useful error messages.

    ANTLR generates code that throws exceptions upon recognition error and
    also generates code to catch these exceptions in each rule. If you
    want to quit upon first error, you can turn off the automatic error
    handling mechanism using rulecatch action, but you still need to
    override methods mismatch and recoverFromMismatchSet.

    In general, the recognition exceptions can track where in a grammar a
    problem occurred and/or what was the expected input. While the parser
    knows its state (such as current input symbol and line info) that
    state can change before the exception is reported so current token index
    is computed and stored at exception time. From this info, you can
    perhaps print an entire line of input not just a single token, for example.
    Better to just say the recognizer had a problem and then let the parser
    figure out a fancy report.
    """

    def __init__(self, input=None):
        # Capture position information from the stream, if one is given.
        super().__init__()

        # What input stream did the error occur in?
        self.input = None

        # What is index of token/char were we looking at when the error
        # occurred?
        self.index = None

        # The current Token when an error occurred. Since not all streams
        # can retrieve the ith Token, we have to track the Token object.
        # For parsers. Even when it's a tree parser, token might be set.
        self.token = None

        # If this is a tree parser exception, node is set to the node with
        # the problem.
        self.node = None

        # The current char when an error occurred. For lexers.
        self.c = None

        # Track the line at which the error occurred in case this is
        # generated from a lexer. We need to track this since the
        # unexpected char doesn't carry the line info.
        self.line = None
        self.charPositionInLine = None

        # If you are parsing a tree node stream, you will encounter som
        # imaginary nodes w/o line/col info. We now search backwards looking
        # for most recent token with line/col info, but notify getErrorHeader()
        # that info is approximate.
        self.approximateLineInfo = False

        if input:
            self.input = input
            self.index = input.index()

            # late import to avoid cyclic dependencies
            from .streams import TokenStream, CharStream
            from .tree import TreeNodeStream

            if isinstance(self.input, TokenStream):
                self.token = self.input.LT(1)
                self.line = self.token.line
                self.charPositionInLine = self.token.charPositionInLine

            # NOTE(review): the 'else' below pairs with this TreeNodeStream
            # check only, so a plain TokenStream also falls into the else
            # branch and additionally records LA(1) in self.c — preserved
            # as-is.
            if isinstance(self.input, TreeNodeStream):
                self.extractInformationFromTreeNodeStream(self.input)
            else:
                if isinstance(self.input, CharStream):
                    self.c = self.input.LT(1)
                    self.line = self.input.line
                    self.charPositionInLine = self.input.charPositionInLine
                else:
                    # Fallback for any other stream type: record only the
                    # lookahead symbol.
                    self.c = self.input.LA(1)

    def extractInformationFromTreeNodeStream(self, nodes):
        """Fill in token/line/column info from a tree node stream.

        Imaginary nodes carry no line/col info, so scan backwards for the
        most recent node with real position data and flag the result as
        approximate.
        """
        from .tree import Tree, CommonTree
        from .tokens import CommonToken

        self.node = nodes.LT(1)
        adaptor = nodes.adaptor
        payload = adaptor.getToken(self.node)
        if payload:
            self.token = payload
            if payload.line <= 0:
                # imaginary node; no line/pos info; scan backwards
                i = -1
                priorNode = nodes.LT(i)
                while priorNode:
                    priorPayload = adaptor.getToken(priorNode)
                    if priorPayload and priorPayload.line > 0:
                        # we found the most recent real line / pos info
                        self.line = priorPayload.line
                        self.charPositionInLine = priorPayload.charPositionInLine
                        self.approximateLineInfo = True
                        break
                    i -= 1
                    priorNode = nodes.LT(i)
            else: # node created from real token
                self.line = payload.line
                self.charPositionInLine = payload.charPositionInLine
        elif isinstance(self.node, Tree):
            self.line = self.node.line
            self.charPositionInLine = self.node.charPositionInLine
            if isinstance(self.node, CommonTree):
                self.token = self.node.token
        else:
            # No payload and not a Tree: synthesize a token from the
            # adaptor's view of the node.
            type = adaptor.getType(self.node)
            text = adaptor.getText(self.node)
            self.token = CommonToken(type=type, text=text)

    def getUnexpectedType(self):
        """Return the token type or char of the unexpected input element"""
        from .streams import TokenStream
        from .tree import TreeNodeStream

        if isinstance(self.input, TokenStream):
            return self.token.type
        elif isinstance(self.input, TreeNodeStream):
            adaptor = self.input.treeAdaptor
            return adaptor.getType(self.node)
        else:
            return self.c

    unexpectedType = property(getUnexpectedType)
class MismatchedTokenException(RecognitionException):
    """@brief A mismatched char or Token or tree node."""

    def __init__(self, expecting, input):
        super().__init__(input)
        self.expecting = expecting

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedTokenException({!r}!={!r})".format(
            found, self.expecting)

    __repr__ = __str__
class UnwantedTokenException(MismatchedTokenException):
    """@brief An extra token was found while parsing a TokenStream."""

    def getUnexpectedToken(self):
        return self.token

    def __str__(self):
        # Suppress the "expected" suffix when no concrete type was expected.
        if self.expecting == INVALID_TOKEN_TYPE:
            exp = ""
        else:
            exp = ", expected {}".format(self.expecting)

        found = self.token.text if self.token else None
        return "UnwantedTokenException(found={}{})".format(found, exp)

    __repr__ = __str__
class MissingTokenException(MismatchedTokenException):
    """@brief A token was expected but not found.

    The current token is actually what we wanted next.
    """

    def __init__(self, expecting, input, inserted):
        super().__init__(expecting, input)
        self.inserted = inserted

    def getMissingType(self):
        return self.expecting

    def __str__(self):
        if self.token and self.inserted:
            return "MissingTokenException(inserted {!r} at {!r})".format(
                self.inserted, self.token.text)
        if self.token:
            return "MissingTokenException(at {!r})".format(self.token.text)
        return "MissingTokenException"

    __repr__ = __str__
class MismatchedRangeException(RecognitionException):
    """@brief The next token does not match a range of expected types."""

    def __init__(self, a, b, input):
        super().__init__(input)
        # Inclusive bounds of the expected token-type range.
        self.a = a
        self.b = b

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedRangeException({!r} not in [{!r}..{!r}])".format(
            found, self.a, self.b)

    __repr__ = __str__
class MismatchedSetException(RecognitionException):
    """@brief The next token does not match a set of expected types."""

    def __init__(self, expecting, input):
        super().__init__(input)
        # Collection of acceptable token types.
        self.expecting = expecting

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedSetException({!r} not in {!r})".format(
            found, self.expecting)

    __repr__ = __str__
class MismatchedNotSetException(MismatchedSetException):
    """@brief Used for remote debugger deserialization"""

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedNotSetException({!r}!={!r})".format(
            found, self.expecting)

    __repr__ = __str__
class NoViableAltException(RecognitionException):
    """@brief Unable to decide which alternative to choose."""

    def __init__(self, grammarDecisionDescription, decisionNumber,
                 stateNumber, input):
        super().__init__(input)
        # Human-readable description of the failing grammar decision,
        # plus the DFA decision/state numbers for debugging.
        self.grammarDecisionDescription = grammarDecisionDescription
        self.decisionNumber = decisionNumber
        self.stateNumber = stateNumber

    def __str__(self):
        return "NoViableAltException({!r}!=[{!r}])".format(
            self.unexpectedType, self.grammarDecisionDescription)

    __repr__ = __str__
class EarlyExitException(RecognitionException):
    """@brief The recognizer did not match anything for a (..)+ loop."""

    def __init__(self, decisionNumber, input):
        super().__init__(input)
        # DFA decision number of the (..)+ loop that matched nothing.
        self.decisionNumber = decisionNumber
class FailedPredicateException(RecognitionException):
    """@brief A semantic predicate failed during validation.

    Validation of predicates occurs when normally parsing the alternative
    just like matching a token. Disambiguating predicate evaluation occurs
    when we hoist a predicate into a prediction decision.
    """

    def __init__(self, input, ruleName, predicateText):
        super().__init__(input)
        # Rule in which the predicate lives, and the predicate's source text.
        self.ruleName = ruleName
        self.predicateText = predicateText

    def __str__(self):
        return "FailedPredicateException({},{{{}}}?)".format(
            self.ruleName, self.predicateText)

    __repr__ = __str__
class MismatchedTreeNodeException(RecognitionException):
    """@brief The next tree mode does not match the expected type."""

    def __init__(self, expecting, input):
        super().__init__(input)
        # Tree-node type that was expected at this position.
        self.expecting = expecting

    def __str__(self):
        found = self.getUnexpectedType()
        return "MismatchedTreeNodeException({!r}!={!r})".format(
            found, self.expecting)

    __repr__ = __str__
| {
"repo_name": "pballand/congress",
"path": "thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/exceptions.py",
"copies": "6",
"size": "12546",
"license": "apache-2.0",
"hash": 963743598186781600,
"line_mean": 33.467032967,
"line_max": 81,
"alpha_frac": 0.635501355,
"autogenerated": false,
"ratio": 4.548948513415518,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8184449868415519,
"avg_score": null,
"num_lines": null
} |
"""ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2006 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from antlr3.constants import EOF, DEFAULT_CHANNEL, INVALID_TOKEN_TYPE
############################################################################
#
# basic token interface
#
############################################################################
class Token(object):
    """@brief Abstract token baseclass.

    Concrete subclasses (CommonToken, ClassicToken) must implement every
    accessor below.  The getter/setter pairs exist for backwards
    compatibility; new code should prefer the plain attributes
    (o.text, o.type, o.line, ...).
    """

    def getText(self):
        """Get the text of the token (deprecated; use o.text)."""
        raise NotImplementedError

    def setText(self, text):
        """Set the text of the token (deprecated; use o.text)."""
        raise NotImplementedError

    def getType(self):
        """Get the token type (deprecated; use o.type)."""
        raise NotImplementedError

    def setType(self, ttype):
        """Set the token type (deprecated; use o.type)."""
        raise NotImplementedError

    def getLine(self):
        """Get the 1-based line this token was matched on (deprecated; use o.line)."""
        raise NotImplementedError

    def setLine(self, line):
        """Set the line number (deprecated; use o.line)."""
        raise NotImplementedError

    def getCharPositionInLine(self):
        """Get the 0-based column of the token's first character (deprecated)."""
        raise NotImplementedError

    def setCharPositionInLine(self, pos):
        """Set the column of the token's first character (deprecated)."""
        raise NotImplementedError

    def getChannel(self):
        """Get the channel of the token (deprecated; use o.channel)."""
        raise NotImplementedError

    def setChannel(self, channel):
        """Set the channel of the token (deprecated; use o.channel)."""
        raise NotImplementedError

    def getTokenIndex(self):
        """Get the 0..n-1 index of this token in the input stream (deprecated).

        Must be valid in order to use the ANTLRWorks debugger.
        """
        raise NotImplementedError

    def setTokenIndex(self, index):
        """Set the token's index in the input stream (deprecated)."""
        raise NotImplementedError

    def getInputStream(self):
        """Return the character stream this token was created from, if known."""
        raise NotImplementedError

    def setInputStream(self, input):
        """Record the character stream this token was created from."""
        raise NotImplementedError
############################################################################
#
# token implementations
#
# Token
# +- CommonToken
# \- ClassicToken
#
############################################################################
class CommonToken(Token):
    """@brief Basic token implementation.

    This implementation does not copy the text from the input stream upon
    creation, but keeps start/stop pointers into the stream to avoid
    unnecessary copy operations.
    """

    def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None,
                 input=None, start=None, stop=None, oldToken=None):
        # Either copy-construct from oldToken, or build a fresh token from
        # the remaining keyword arguments.
        Token.__init__(self)
        if oldToken is not None:
            # Copy-constructor path: clone the salient fields of oldToken.
            self.type = oldToken.type
            self.line = oldToken.line
            self.charPositionInLine = oldToken.charPositionInLine
            self.channel = oldToken.channel
            self.index = oldToken.index
            self._text = oldToken._text
            if isinstance(oldToken, CommonToken):
                # Only another CommonToken carries stream pointers to copy.
                self.input = oldToken.input
                self.start = oldToken.start
                self.stop = oldToken.stop
        else:
            self.type = type
            self.input = input
            self.charPositionInLine = -1 # set to invalid position
            self.line = 0
            self.channel = channel
            #What token number is this from 0..n-1 tokens; < 0 implies invalid index
            self.index = -1
            # We need to be able to change the text once in a while. If
            # this is non-null, then getText should return this. Note that
            # start/stop are not affected by changing this.
            self._text = text
            # The char position into the input buffer where this token starts
            self.start = start
            # The char position into the input buffer where this token stops
            # This is the index of the last char, *not* the index after it!
            self.stop = stop

    def getText(self):
        # An explicitly overridden text wins; otherwise lazily slice the
        # input stream between the start/stop pointers.
        if self._text is not None:
            return self._text
        if self.input is None:
            return None
        return self.input.substring(self.start, self.stop)

    def setText(self, text):
        """
        Override the text for this token. getText() will return this text
        rather than pulling from the buffer. Note that this does not mean
        that start/stop indexes are not valid. It means that that input
        was converted to a new string in the token object.
        """
        self._text = text

    # Attribute-style access to the token text.
    text = property(getText, setText)

    def getType(self):
        return self.type

    def setType(self, ttype):
        self.type = ttype

    def getLine(self):
        return self.line

    def setLine(self, line):
        self.line = line

    def getCharPositionInLine(self):
        return self.charPositionInLine

    def setCharPositionInLine(self, pos):
        self.charPositionInLine = pos

    def getChannel(self):
        return self.channel

    def setChannel(self, channel):
        self.channel = channel

    def getTokenIndex(self):
        return self.index

    def setTokenIndex(self, index):
        self.index = index

    def getInputStream(self):
        return self.input

    def setInputStream(self, input):
        self.input = input

    def __str__(self):
        # Debug dump in the classic ANTLR [@index,start:stop=...] format.
        if self.type == EOF:
            return "<EOF>"
        channelStr = ""
        if self.channel > 0:
            channelStr = ",channel=" + str(self.channel)
        txt = self.text
        if txt is not None:
            # Escape whitespace so the dump stays on one line.
            txt = txt.replace("\n","\\\\n")
            txt = txt.replace("\r","\\\\r")
            txt = txt.replace("\t","\\\\t")
        else:
            txt = "<no text>"
        return "[@%d,%d:%d=%r,<%d>%s,%d:%d]" % (
            self.index,
            self.start, self.stop,
            txt,
            self.type, channelStr,
            self.line, self.charPositionInLine
            )
class ClassicToken(Token):
    """@brief Alternative token implementation.

    A Token object like we'd use in ANTLR 2.x; has an actual string created
    and associated with this object. These objects are needed for imaginary
    tree nodes that have payload objects. We need to create a Token object
    that has a string; the tree node will point at this token. CommonToken
    has indexes into a char stream and hence cannot be used to introduce
    new strings.
    """

    def __init__(self, type=None, text=None, channel=DEFAULT_CHANNEL,
                 oldToken=None):
        Token.__init__(self)
        if oldToken is not None:
            # Copy-constructor path.  BUG FIX: previously the values copied
            # from oldToken were unconditionally overwritten by the default
            # parameters below, so ClassicToken(oldToken=t) produced an
            # empty token.  Mirror the if/else structure CommonToken uses.
            self.text = oldToken.text
            self.type = oldToken.type
            self.line = oldToken.line
            self.charPositionInLine = oldToken.charPositionInLine
            self.channel = oldToken.channel
        else:
            self.text = text
            self.type = type
            self.line = None
            self.charPositionInLine = None
            self.channel = channel
        # Classic tokens have no stream position; index is always unset.
        self.index = None

    def getText(self):
        return self.text

    def setText(self, text):
        self.text = text

    def getType(self):
        return self.type

    def setType(self, ttype):
        self.type = ttype

    def getLine(self):
        return self.line

    def setLine(self, line):
        self.line = line

    def getCharPositionInLine(self):
        return self.charPositionInLine

    def setCharPositionInLine(self, pos):
        self.charPositionInLine = pos

    def getChannel(self):
        return self.channel

    def setChannel(self, channel):
        self.channel = channel

    def getTokenIndex(self):
        return self.index

    def setTokenIndex(self, index):
        self.index = index

    def getInputStream(self):
        # Classic tokens own their text; there is no backing stream.
        return None

    def setInputStream(self, input):
        pass

    def toString(self):
        channelStr = ""
        if self.channel > 0:
            channelStr = ",channel=" + str(self.channel)
        txt = self.text
        if txt is None:
            txt = "<no text>"
        return "[@%r,%r,<%r>%s,%r:%r]" % (self.index,
                                          txt,
                                          self.type,
                                          channelStr,
                                          self.line,
                                          self.charPositionInLine
                                          )

    __str__ = toString
    __repr__ = toString
# Shared singleton signalling end-of-file to the parser.
EOF_TOKEN = CommonToken(type=EOF)

# Shared singleton representing an invalid token.
INVALID_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)

# In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
# will avoid creating a token for this symbol and try to fetch another.
SKIP_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
| {
"repo_name": "ncbray/pystream",
"path": "lib/antlr3/tokens.py",
"copies": "1",
"size": "12016",
"license": "apache-2.0",
"hash": -2647893228699563000,
"line_mean": 27.8846153846,
"line_max": 87,
"alpha_frac": 0.5937916112,
"autogenerated": false,
"ratio": 4.671850699844479,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02631169443421328,
"num_lines": 416
} |
"""ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# Sentinel returned when the input (char or token) stream is exhausted.
EOF = -1

## All tokens go to the parser (unless skip() is called in that rule)
# on a particular "channel". The parser tunes to a particular channel
# so that whitespace etc... can go to the parser on a "hidden" channel.
DEFAULT_CHANNEL = 0

## Anything on different channel than DEFAULT_CHANNEL is not parsed
# by parser.
HIDDEN_CHANNEL = 99

# Predefined token types
EOR_TOKEN_TYPE = 1

##
# imaginary tree navigation type; traverse "get child" link
DOWN = 2

##
#imaginary tree navigation type; finish with a child list
UP = 3

# First token type available to user grammars; everything below is reserved.
MIN_TOKEN_TYPE = UP+1

# Type used for tokens that carry no valid type information.
INVALID_TOKEN_TYPE = 0
| {
"repo_name": "legatoproject/legato-af",
"path": "framework/tools/ifgen/antlr3/constants.py",
"copies": "1",
"size": "2109",
"license": "mpl-2.0",
"hash": -6999353295242839000,
"line_mean": 36.6607142857,
"line_max": 75,
"alpha_frac": 0.7591275486,
"autogenerated": false,
"ratio": 4.111111111111111,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000825588856232148,
"num_lines": 56
} |
"""ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2012 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from .constants import DEFAULT_CHANNEL, EOF, INVALID_TOKEN_TYPE
############################################################################
#
# basic token interface
#
############################################################################
class Token(object):
    """@brief Abstract token baseclass."""

    # Optional class-wide map from token type to human-readable name;
    # populated via registerTokenNamesMap().
    TOKEN_NAMES_MAP = None

    @classmethod
    def registerTokenNamesMap(cls, tokenNamesMap):
        """@brief Store a mapping from token type to token name.

        This enables token.typeName to give something more meaningful
        than, e.g., '6'.
        """
        cls.TOKEN_NAMES_MAP = tokenNamesMap
        cls.TOKEN_NAMES_MAP[EOF] = "EOF"

    def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None,
                 index=-1, line=0, charPositionInLine=-1, input=None):
        # We use -1 for index and charPositionInLine as an invalid index
        self._type = type
        self._channel = channel
        self._text = text
        self._index = index
        # BUG FIX: this was hard-coded to 0, silently discarding the `line`
        # argument.  CommonToken and ClassicToken both forward
        # oldToken.line here, so every token copy lost its line number.
        self._line = line
        self._charPositionInLine = charPositionInLine
        self.input = input

    # To override a property, you'll need to override both the getter and setter.
    @property
    def text(self):
        return self._text

    @text.setter
    def text(self, value):
        self._text = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    # For compatibility
    def getType(self):
        return self._type

    @property
    def typeName(self):
        # Fall back to the numeric type when no name map is registered.
        if self.TOKEN_NAMES_MAP:
            return self.TOKEN_NAMES_MAP.get(self._type, "INVALID_TOKEN_TYPE")
        else:
            return str(self._type)

    @property
    def line(self):
        """Lines are numbered 1..n."""
        return self._line

    @line.setter
    def line(self, value):
        self._line = value

    @property
    def charPositionInLine(self):
        """Columns are numbered 0..n-1."""
        return self._charPositionInLine

    @charPositionInLine.setter
    def charPositionInLine(self, pos):
        self._charPositionInLine = pos

    @property
    def channel(self):
        return self._channel

    @channel.setter
    def channel(self, value):
        self._channel = value

    @property
    def index(self):
        """
        An index from 0..n-1 of the token object in the input stream.
        This must be valid in order to use the ANTLRWorks debugger.
        """
        return self._index

    @index.setter
    def index(self, value):
        self._index = value

    def getInputStream(self):
        """@brief From what character stream was this token created.

        You don't have to implement but it's nice to know where a Token
        comes from if you have include files etc... on the input."""
        raise NotImplementedError

    def setInputStream(self, input):
        """@brief From what character stream was this token created.

        You don't have to implement but it's nice to know where a Token
        comes from if you have include files etc... on the input."""
        raise NotImplementedError
############################################################################
#
# token implementations
#
# Token
# +- CommonToken
# \- ClassicToken
#
############################################################################
class CommonToken(Token):
    """@brief Basic token implementation.

    This implementation does not copy the text from the input stream upon
    creation, but keeps start/stop pointers into the stream to avoid
    unnecessary copy operations.
    """

    def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None,
                 input=None, start=None, stop=None, oldToken=None):
        # Either copy-construct from oldToken or build from keyword args.
        if oldToken:
            super().__init__(oldToken.type, oldToken.channel, oldToken.text,
                             oldToken.index, oldToken.line,
                             oldToken.charPositionInLine, oldToken.input)
            if isinstance(oldToken, CommonToken):
                # Only another CommonToken carries stream pointers to copy.
                self.start = oldToken.start
                self.stop = oldToken.stop
            else:
                self.start = start
                self.stop = stop
        else:
            super().__init__(type=type, channel=channel, input=input)
            # We need to be able to change the text once in a while. If
            # this is non-null, then getText should return this. Note that
            # start/stop are not affected by changing this.
            self._text = text
            # The char position into the input buffer where this token starts
            self.start = start
            # The char position into the input buffer where this token stops
            # This is the index of the last char, *not* the index after it!
            self.stop = stop

    @property
    def text(self):
        # Could be the empty string, and we want to return that.
        if self._text is not None:
            return self._text
        if not self.input:
            return None
        if self.start < self.input.size() and self.stop < self.input.size():
            return self.input.substring(self.start, self.stop)
        # start/stop beyond the stream length means this token is at EOF.
        return '<EOF>'

    @text.setter
    def text(self, value):
        """
        Override the text for this token. getText() will return this text
        rather than pulling from the buffer. Note that this does not mean
        that start/stop indexes are not valid. It means that that input
        was converted to a new string in the token object.
        """
        self._text = value

    def getInputStream(self):
        return self.input

    def setInputStream(self, input):
        self.input = input

    def __str__(self):
        # Debug dump in the classic ANTLR [@index,start:stop=...] format.
        if self.type == EOF:
            return "<EOF>"
        channelStr = ""
        if self.channel > 0:
            channelStr = ",channel=" + str(self.channel)
        txt = self.text
        if txt:
            # Put 2 backslashes in front of each character
            txt = txt.replace("\n", r"\\n")
            txt = txt.replace("\r", r"\\r")
            txt = txt.replace("\t", r"\\t")
        else:
            txt = "<no text>"
        return ("[@{0.index},{0.start}:{0.stop}={txt!r},"
                "<{0.typeName}>{channelStr},"
                "{0.line}:{0.charPositionInLine}]"
                .format(self, txt=txt, channelStr=channelStr))
class ClassicToken(Token):
    """@brief Alternative token implementation.

    A Token object like we'd use in ANTLR 2.x; has an actual string created
    and associated with this object. These objects are needed for imaginary
    tree nodes that have payload objects. We need to create a Token object
    that has a string; the tree node will point at this token. CommonToken
    has indexes into a char stream and hence cannot be used to introduce
    new strings.
    """

    def __init__(self, type=None, text=None, channel=DEFAULT_CHANNEL,
                 oldToken=None):
        if oldToken:
            # Clone the relevant fields of the template token; the stream
            # index is left at the base-class default.
            kwargs = dict(type=oldToken.type, channel=oldToken.channel,
                          text=oldToken.text, line=oldToken.line,
                          charPositionInLine=oldToken.charPositionInLine)
        else:
            kwargs = dict(type=type, channel=channel, text=text,
                          index=None, line=None, charPositionInLine=None)
        super().__init__(**kwargs)

    def getInputStream(self):
        # Classic tokens own their text; there is no backing stream.
        return None

    def setInputStream(self, input):
        pass

    def toString(self):
        channelStr = ",channel=" + str(self.channel) if self.channel > 0 else ""
        txt = self.text or "<no text>"
        return ("[@{0.index!r},{txt!r},<{0.type!r}>{channelStr},"
                "{0.line!r}:{0.charPositionInLine!r}]"
                .format(self, txt=txt, channelStr=channelStr))

    __str__ = toString
    __repr__ = toString
# Shared singleton representing an invalid token.
INVALID_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)

# In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
# will avoid creating a token for this symbol and try to fetch another.
SKIP_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
| {
"repo_name": "ramineni/my_congress",
"path": "thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/tokens.py",
"copies": "6",
"size": "9741",
"license": "apache-2.0",
"hash": 186915511319662900,
"line_mean": 30.4225806452,
"line_max": 81,
"alpha_frac": 0.6006570167,
"autogenerated": false,
"ratio": 4.358389261744967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0029458485979369908,
"num_lines": 310
} |
import sys
import antlr4
from gen.TeX2SymLexer import TeX2SymLexer
from gen.TeX2SymParser import TeX2SymParser
from gen.TeX2SymVisitor import TeX2SymVisitor
from sympy import *
# variable : a,b,...,z, A,B,..,Z, \\alpha, \\beta,..,\pi,... , \\omega (except E, I, N, O, S, zeta, omicron)
# math constant : pi; --> \\ppi, i --> \\ii, e --> \\ee
# LaTeX Code Style: \\sin{x}, \\log\_{2}{8}, \\sum\_{k=1}\^{n}{k(k+1)\^2},...
class LaTeX2SymPyVisitor(TeX2SymVisitor):
    """ANTLR visitor that translates a parsed LaTeX math expression into an
    equivalent SymPy source string.

    Every visit* method returns a string fragment; parent nodes compose the
    fragments of their children.  Division is rendered as ``a*b**(-1)`` so
    the result stays a product/power expression that SymPy can simplify.
    """

    def visitPrintExpr(self, ctx):
        value = self.visit(ctx.expr())
        return value

    # --- atoms -----------------------------------------------------------

    def visitInt(self, ctx):
        return ctx.INT().getText()

    def visitFloat(self, ctx):
        # nsimplify turns the decimal literal into an exact rational.
        float_str=ctx.FLOAT().getText()
        return 'nsimplify({:s})'.format(float_str)

    def visitAlphabet(self, ctx):
        return ctx.ALPHABET().getText()

    def visitGreek(self, ctx):
        return ctx.GREEK().getText()

    # --- arithmetic ------------------------------------------------------

    def visitMult(self, ctx):
        left = self.visit(ctx.expr(0))
        right = self.visit(ctx.expr(1))
        return '{:s}*{:s}'.format(left,right)

    def visitDiv(self, ctx):
        left = self.visit(ctx.expr(0))
        right = self.visit(ctx.expr(1))
        # NOTE(review): unlike visitFrac the operands are not parenthesized
        # here; presumably the grammar only yields primaries in this
        # position -- confirm against the TeX2Sym grammar.
        return '{:s}*{:s}**(-1)'.format(left,right)

    def visitMull(self, ctx):
        # Implicit multiplication (juxtaposition), e.g. "2a".
        left = self.visit(ctx.expr(0))
        right = self.visit(ctx.expr(1))
        return '{:s}*{:s}'.format(left,right)

    def visitAddSub(self, ctx):
        left = self.visit(ctx.expr(0))
        right = self.visit(ctx.expr(1))
        if ctx.op.type == TeX2SymParser.PLUS:
            return '{:s}+{:s}'.format(left,right)
        else:
            return '{:s}-{:s}'.format(left,right)

    # --- grouping: all bracket variants become plain parentheses ---------

    def visitCs_parens(self, ctx):
        expr = self.visit(ctx.expr())
        return '({:s})'.format(expr)

    def visitParens(self, ctx):
        expr = self.visit(ctx.expr())
        return '({:s})'.format(expr)

    def visitCs_bs_braces(self, ctx):
        expr = self.visit(ctx.expr())
        return '({:s})'.format(expr)

    def visitBs_braces(self, ctx):
        expr = self.visit(ctx.expr())
        return '({:s})'.format(expr)

    def visitBraces(self, ctx):
        expr = self.visit(ctx.expr())
        return '({:s})'.format(expr)

    # --- powers, factorial, elementary functions -------------------------

    def visitPower(self, ctx):
        left = self.visit(ctx.expr(0))
        right = self.visit(ctx.expr(1))
        return '{:s}**{:s}'.format(left,right)

    def visitFactrial(self, ctx):
        expr = self.visit(ctx.expr())
        return 'factorial({:s})'.format(expr)

    def visitFunc(self, ctx):
        # Single-argument functions: \sqrt, \sin, \cos, \tan, \log.
        expr = self.visit(ctx.expr())
        if ctx.func.type == TeX2SymParser.SQRT:
            result='sqrt({:s})'.format(expr)
        elif ctx.func.type == TeX2SymParser.SIN:
            result='sin({:s})'.format(expr)
        elif ctx.func.type == TeX2SymParser.COS:
            result='cos({:s})'.format(expr)
        elif ctx.func.type == TeX2SymParser.TAN:
            result='tan({:s})'.format(expr)
        elif ctx.func.type == TeX2SymParser.LOG:
            result='log({:s})'.format(expr)
        return result

    def visitSqrtn(self, ctx):
        # \sqrt[n]{x} -> x**(n**(-1))
        expr1 = self.visit(ctx.expr(0))
        expr2 = self.visit(ctx.expr(1))
        return '(({:s})**(({:s})**(-1)))'.format(expr2,expr1)

    def visitLogub(self, ctx):
        # \log_{b}{x} via change of base: log(x)/log(b).
        expr0 = self.visit(ctx.expr(0))
        expr1 = self.visit(ctx.expr(1))
        return 'log({})*(log({})**(-1))'.format(expr1,expr0)

    def visitAbs(self, ctx):
        expr = self.visit(ctx.expr())
        return 'Abs({})'.format(expr)

    def visitTrign(self, ctx):
        # \sin^{n}{x} style powered trig functions.
        expr1 = self.visit(ctx.expr(0))
        expr2 = self.visit(ctx.expr(1))
        if ctx.func.type == TeX2SymParser.SIN:
            result='(sin({:s}))**({:s})'.format(expr2,expr1)
        elif ctx.func.type == TeX2SymParser.COS:
            result='(cos({:s}))**({:s})'.format(expr2,expr1)
        elif ctx.func.type == TeX2SymParser.TAN:
            result='(tan({:s}))**({:s})'.format(expr2,expr1)
        return result

    def visitFrac(self, ctx):
        left = self.visit(ctx.expr(0))
        right = self.visit(ctx.expr(1))
        return '({:s})*({:s})**(-1)'.format(left, right)

    # --- calculus: sums, derivatives, integrals, limits ------------------

    def visitSum(self, ctx):
        # \sum_{k=a}^{b}{f} -> summation(f, (k, a, b))
        expr0= self.visit(ctx.expr(0))
        expr1 = self.visit(ctx.expr(1))
        expr2 = self.visit(ctx.expr(2))
        expr3 = self.visit(ctx.expr(3))
        return 'summation({:s},({:s},{:s},{:s}))'.format(expr3,expr0,expr1,expr2)

    def visitDiff(self, ctx):
        # \frac{d}{dx}{f} -> diff(f, x); the variable name is sliced out
        # of the "dx" / "d\greek" token text.
        expr = self.visit(ctx.expr())
        if ctx.dxg.type == TeX2SymParser.DX:
            symb = ctx.DX().getText()[1]
        elif ctx.dxg.type == TeX2SymParser.DGREEK:
            symb = ctx.DGREEK().getText()[1:]
        return 'diff({:s},{:s})'.format(expr,symb)

    def visitDiffn1(self, ctx):
        # n-th derivative, exponent given on the numerator d only.
        expr0 = self.visit(ctx.expr(0))
        expr1 = self.visit(ctx.expr(1))
        if ctx.dxg.type == TeX2SymParser.DX:
            symb = ctx.DX().getText()[1]
        elif ctx.dxg.type == TeX2SymParser.DGREEK:
            symb = ctx.DGREEK().getText()[1:]
        return 'diff({:s},{:s},{:s})'.format(expr1,symb,expr0)

    def visitDiffn2(self, ctx):
        # n-th derivative written \frac{d^n}{dx^n}; the two exponents must
        # agree, otherwise the expression is rejected (returns None).
        expr0 = self.visit(ctx.expr(0))
        expr1 = self.visit(ctx.expr(1))
        expr2 = self.visit(ctx.expr(2))
        if expr0 != expr1:
            return None
        if ctx.dxg.type == TeX2SymParser.DX:
            symb = ctx.DX().getText()[1]
        elif ctx.dxg.type == TeX2SymParser.DGREEK:
            symb = ctx.DGREEK().getText()[1:]
        return 'diff({:s},{:s},{:s})'.format(expr2,symb,expr0)

    def visitIntegrate(self, ctx):
        # Indefinite integral \int{f dx} -> integrate(f, x).
        expr = self.visit(ctx.expr())
        if ctx.dxg.type == TeX2SymParser.DX:
            symb = ctx.DX().getText()[1]
        elif ctx.dxg.type == TeX2SymParser.DGREEK:
            symb = ctx.DGREEK().getText()[1:]
        return 'integrate({:s},{:s})'.format(expr,symb)

    def visitDintegrate(self, ctx):
        # Definite integral \int_{a}^{b}{f dx} -> integrate(f, (x, a, b)).
        expr0 = self.visit(ctx.expr(0))
        expr1 = self.visit(ctx.expr(1))
        expr2 = self.visit(ctx.expr(2))
        if ctx.dxg.type == TeX2SymParser.DX:
            symb = ctx.DX().getText()[1]
        elif ctx.dxg.type == TeX2SymParser.DGREEK:
            symb = ctx.DGREEK().getText()[1:]
        return 'integrate({:s},({:s},{:s},{:s}))'.format(expr2,symb,expr0,expr1)

    def visitLim(self, ctx):
        # \lim_{x \to a}{f} -> limit(f, x, a)
        expr0 = self.visit(ctx.expr(0))
        expr1 = self.visit(ctx.expr(1))
        expr2 = self.visit(ctx.expr(2))
        return 'limit({:s}, {:s}, {:s})'.format(expr2,expr0,expr1)

    # --- combinatorics and named functions -------------------------------

    def visitCombi_permu(self, ctx):
        # nCr -> binomial(n, r); nPr -> falling factorial ff(n, r).
        left = self.visit(ctx.expr(0))
        right = self.visit(ctx.expr(1))
        if ctx.cp.type == TeX2SymParser.COMBI:
            result='binomial({:s},{:s})'.format(left,right)
        elif ctx.cp.type == TeX2SymParser.PERMU:
            result='ff({:s},{:s})'.format(left,right)
        return result

    def visitSeqterm(self, ctx):
        # Sequence term a_n rendered as the undefined function F(n).
        expr = self.visit(ctx.expr())
        return 'F({})'.format(expr)

    def visitFunction(self, ctx):
        # Generic function application f(x).
        expr = self.visit(ctx.expr())
        return 'f({})'.format(expr)

    def visitGammaf_zetaf(self, ctx):
        expr = self.visit(ctx.expr())
        if ctx.gz.type == TeX2SymParser.GAMMAF:
            result='gamma({})'.format(expr)
        elif ctx.gz.type == TeX2SymParser.ZETAF:
            result='zeta({})'.format(expr)
        return result

    # --- unary sign, constants, relations --------------------------------

    def visitPlusExpr(self, ctx):
        expr = self.visit(ctx.expr())
        return expr

    def visitMinusExpr(self, ctx):
        expr = self.visit(ctx.expr())
        return '(-1)*' + expr

    def visitMathconst(self, ctx):
        # \ppi, \ii, \ee, \infty map onto SymPy's singleton constants.
        if ctx.const.type == TeX2SymParser.PI:
            result='S.Pi'
        elif ctx.const.type == TeX2SymParser.IMAGINARY_UNIT:
            result='S.ImaginaryUnit'
        elif ctx.const.type == TeX2SymParser.NAPIER_CONSTANT:
            result='S.Exp1'
        elif ctx.const.type == TeX2SymParser.INFTY:
            result='oo'
        return result

    def visitEqual(self, ctx):
        left = self.visit(ctx.expr(0))
        right = self.visit(ctx.expr(1))
        return 'Eq({:s},{:s})'.format(left,right)

    def visitRelation(self, ctx):
        left = self.visit(ctx.expr(0))
        right = self.visit(ctx.expr(1))
        if ctx.op.type == TeX2SymParser.GT:
            return '{:s}>{:s}'.format(left,right)
        elif ctx.op.type == TeX2SymParser.LT:
            return '{:s}<{:s}'.format(left,right)
        elif ctx.op.type == TeX2SymParser.GEQQ:
            return '{:s}>={:s}'.format(left,right)
        elif ctx.op.type == TeX2SymParser.LEQQ:
            return '{:s}<={:s}'.format(left,right)
# Greek LaTeX commands are rewritten to doubled-letter aliases (\alpha ->
# aalpha, ...) before lexing so they tokenize as plain identifiers;
# mylatex() reverses the mapping when rendering results back to LaTeX.
greek_list = [['\\alpha', 'aalpha'], ['\\beta', 'bbeta'], ['\\gamma', 'ggamma'], ['\\delta', 'ddelta'], ['\\epsilon', 'eepsilon'],
['\\eta', 'eeta'], ['\\theta', 'ttheta'], ['\\iota', 'iiota'], ['\\kappa', 'kkappa'], ['\\lambda', 'llambda'], ['\\mu', 'mmu'],
['\\nu', 'nnu'], ['\\xi', 'xxi'], ['\\omicron', 'oomicron'], ['\\pi', 'pppi'], ['\\rho', 'rrho'], ['\\sigma', 'ssigma'],
['\\tau', 'ttau'], ['\\upsilon', 'uupsilon'], ['\\phi', 'pphi'], ['\\chi', 'cchi'], ['\\psi', 'ppsi'], ['\\omega', 'oomega']]
def tex2sym(texexpr):
    """Translate a LaTeX math expression into a SymPy source string.

    Returns '' for the empty input.
    """
    if texexpr == '':
        return ''
    # Rewrite greek commands to identifier-safe aliases before lexing.
    for tex_cmd, alias in greek_list:
        texexpr = texexpr.replace(tex_cmd, alias)
    stream = antlr4.InputStream(texexpr + '\n')
    lexer = TeX2SymLexer(stream)
    tokens = antlr4.CommonTokenStream(lexer)
    parser = TeX2SymParser(tokens)
    tree = parser.prog()
    return LaTeX2SymPyVisitor().visit(tree)
def mylatex(sympyexpr):
    """Render a SymPy expression as LaTeX, restoring greek command names
    from their doubled-letter aliases (see greek_list)."""
    rendered = latex(sympyexpr)
    for tex_cmd, alias in greek_list:
        rendered = rendered.replace(alias, tex_cmd + ' ')
    return rendered
def mylatexstyle(texexpr):
    """Apply presentation substitutions to a LaTeX string (\\ii, \\ee, \\ppi, \\C, \\P)."""
    substitutions = (('\\ii', ' i '), ('\\ee', ' e '), ('\\ppi', '\\pi '),
                     ('\\C', '\\mathrm{C}'), ('\\P', '\\mathrm{P}'))
    for old, new in substitutions:
        texexpr = texexpr.replace(old, new)
    return texexpr
def test(texexpr):
    """Print a LaTeX source expression next to its SymPy translation."""
    escaped = texexpr.replace('\\', '\\\\')
    print(escaped + ' --> ' + tex2sym(texexpr))
if __name__ == '__main__':
    # Smoke tests: print each LaTeX sample (backslash-escaped) together with
    # its SymPy translation.
    print('tex2sym: LaTeX math expression --> SymPy form')
    test('-2-3+4')
    test('2\\times3^4')
    test('0.5 \\times 3 \\div 5a\\cdot 4')
    test('2\\times3!')
    test('2ab^2(x+y)^3')
    test('\\sqrt{3x}')
    test('\\frac{2}{3}a')
    test('\\dfrac{2}{3}a')
    test('\\sin {\\ppi x}')
    test('\\log{\\ee^3}')
    test('\\frac{d}{dx}{x^5}')
    test('\\int{\\sin^{2}{\\theta} d\\theta}')
    test('\\sum_{k=1}^{n}{k^3}')
    test('2x^2+3x+4=0')
    test('3x^2-4x+5 \\geqq 0')
    test('\\frac{d^{2}}{dx^{2}}{f(x)}=-f(x)')
    test('\\alpha\\beta\\gamma\\delta\\epsilon\\eta\\theta\\iota\\kappa\\lambda\\mu\\nu\\xi\\pi\\rho\\sigma\\tau\\upsilon\\phi\\chi\\psi\\omega\\ppi')
    test('(a\\!aa\\,a\\:a\\;a~a)^3')
    test('\\{\\dfrac{1}{~2~}a-(\\dfrac{1}{~3~}b-\\dfrac{1}{~4~}c)\\}^2')
    test(r'\left\{\dfrac{1}{~2~}a-\left(\dfrac{1}{~3~}b-\dfrac{1}{~4~}c\right)\right\}^2')
| {
"repo_name": "AkiraHakuta/antlr4_tex2sym",
"path": "antlr4_tex2sym.py",
"copies": "1",
"size": "11716",
"license": "mit",
"hash": -5533862589729912000,
"line_mean": 31.6378830084,
"line_max": 153,
"alpha_frac": 0.516473199,
"autogenerated": false,
"ratio": 3.097012952683056,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41134861516830556,
"avg_score": null,
"num_lines": null
} |
# ANTLR Tree -> AST
# antlr4 -Dlanguage=Python3 Cstar.g4
# python3 main.py test.cx
# nLine = x.start.line
# How imports work:
# a record of the form {isa: import, unit: *ast, id: ID} is created in defs[]
# (essentially a link); it carries the ID declared in the importing module
# (import as ID) and points at the compiled AST/ASG.
# ASTs/ASGs themselves are identified by their FULL name.
# If the object already exists it is taken from the INDEX, which avoids repeated passes.
import os
import sys
from antlr4 import *
from CstarLexer import CstarLexer
from CstarParser import CstarParser
from antlr4.error.ErrorListener import ErrorListener
from CstarVisitor import CstarVisitor
#import printer_cstar as printer
#import prn.c.printer as printer
import prn.c.split_printer as printer
import proc
from error import error, set_error_file, set_error_data, get_error_data, fatal
BASE_MAKEFILE = """
CSRC=src
all:
cxc
gcc src/*.c
clean:
rm $(CSRC)/*.c $(CSRC)/*.h
"""
CXLIB = ""
BASE = ""
class MyErrorListener(ErrorListener):
    """ANTLR error listener that routes syntax errors into our error() reporter."""

    def __init__(self):
        super(MyErrorListener, self).__init__()

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        # Build the text-info record ('ti') that error() uses to point at the
        # offending span in the source file.
        span = offendingSymbol.stop - offendingSymbol.start
        info = {'ti': {'isa': 'ti', 'line': line, 'start': column, 'len': span}}
        error(msg.replace('\n', 'NL'), info)
# Every parsed AST is registered here (keyed by its absolute 'source' path),
# which prevents re-parsing the same file twice.
AST_INDEX = []
def get_ast_for_file(filename):
    """Lex, parse and visit *filename* and return the visitor's result.

    The global error-reporting context is switched to *filename* for the
    duration of the parse and restored afterwards.
    """
    # store & set new error context
    ed = get_error_data()
    set_error_file(filename)
    istream = FileStream(filename)
    lexer = CstarLexer(istream)
    stream = CommonTokenStream(lexer)
    parser = CstarParser(stream)
    # Replace ANTLR's default console listener with our own reporter.
    parser._listeners = [MyErrorListener()]
    tree = parser.unit()
    visitor = CstarVisitor()
    # restore error context
    set_error_data(ed)
    return visitor.visit(tree)
def ximport(text):
    """Turn an '#import name [as id]' comment line into an import link node.

    Returns {'isa': 'import', 'id': ..., 'unit': <ast>} or None when the
    imported unit cannot be parsed.
    """
    parts = text.split(" ")
    if len(parts) > 2:
        # '#import xxx as x'
        unit_name, local_id = parts[1], parts[3]
    else:
        # '#import xxx' — the id defaults to the last path component
        unit_name = parts[1]
        local_id = unit_name.split("/")[-1]
    unit_ast = parse(unit_name)
    if unit_ast is None:
        return None
    return {
        'isa': 'import',
        'id': local_id,
        'unit': unit_ast,
    }
def pragma(text, lines):
    """Turn a '#pragma <name>' comment block into a pragma node.

    *text* is the first line of the comment ('#pragma <name>'); *lines* is
    the whole comment split into lines.  Every line after the first is
    stripped of its two-character comment prefix and collected, newline
    terminated, as the pragma's payload.

    Returns {'isa': 'pragma', 'id': <name>, 'data': <payload>} or None when
    no pragma name is present.
    """
    parts = text.split(" ")
    # Guard against a bare '#pragma' with no name: the old code raised
    # IndexError on parts[1] instead of returning None.
    if len(parts) < 2 or parts[1] == "":
        return None
    data = ""
    for line in lines[1:]:
        data += line[2:] + '\n'
    return {
        'isa': 'pragma',
        'id': parts[1],
        'data': data,
    }
def xinclude(text):
    """Turn an '#include name' comment line into an include node.

    The included unit is parsed up front and a reference to its AST is kept
    on the node, so the processor can pick it up even when the unit is not
    in its index yet.
    """
    unit_name = text.split(" ")[1]
    unit_ast = parse(unit_name)
    if unit_ast is None:
        print("CANNOT IMPORT: " + unit_name)
        return None
    return {
        'isa': 'include',
        'id': unit_name,
        'unit': unit_ast,
    }
def parse_file(filename):
    """Parse *filename* into an AST, resolving #import/#include/#pragma.

    Parsed ASTs are cached in AST_INDEX by absolute path, so parsing the
    same source twice returns the cached tree.  Directive comments found in
    the unit are turned into nodes and prepended to ast['defs'].
    """
    fullname = os.path.abspath(filename)
    # Return the cached AST when this source was parsed before.
    for a in AST_INDEX:
        if a['source'] == fullname:
            return a
    print("parse: " + fullname)
    ast = get_ast_for_file(filename)
    ast['source'] = fullname
    AST_INDEX.append(ast)
    # Collect directive nodes separately: prepending to a list while
    # iterating over it would make the loop never terminate.
    new_elements = []
    for i in ast['defs']:
        if i['isa'] != 'comment':
            continue
        lines = i['value'].split('\n')
        for text in lines:
            # BUG FIX: the old code compared text[0:5] against '#package'
            # (an 8-character string), so that branch could never match.
            # startswith() preserves the other checks' semantics and fixes it.
            if text.startswith('#unit'):
                print("#UNIT ")
            elif text.startswith('#package'):
                print("#PACKAGE ")
            elif text.startswith('#import'):
                xi = ximport(text)
                if xi is not None:
                    new_elements.append(xi)
            elif text.startswith('#include'):
                xi = xinclude(text)
                if xi is not None:
                    new_elements.append(xi)
            elif text.startswith('#pragma'):
                xi = pragma(text, lines)
                if xi is not None:
                    new_elements.append(xi)
    # Prepend the external imports/includes to the unit's definitions.
    ast['defs'] = new_elements + ast['defs']
    return ast
def get_prefix(base, filename):
    """Derive a unit prefix from *filename* relative to *base*.

    The path below *base* is taken, a trailing 'main.cx' (plus its slash) is
    dropped, remaining slashes become underscores, and a '.cx' suffix is
    stripped.
    """
    rel = filename[len(base) + 1:]
    if rel.endswith('main.cx'):
        rel = rel[:-8]
    rel = rel.replace('/', '_')
    if rel.endswith('.cx'):
        rel = rel[:-3]
    return rel
def parse(name):
    """Resolve *name* to a unit and parse it, returning its AST (or None).

    *name* may be a .cx file (named without the extension), a directory
    containing main.cx, or a unit under the CXLIB library root.  The
    current working directory and the global BASE are temporarily switched
    while parsing so relative imports inside the unit resolve correctly.
    """
    global BASE
    # Work out what kind of thing the name refers to.
    if os.path.isfile(name + ".cx"):
        newd = os.path.dirname(name + ".cx")
        fname = os.path.basename(name + ".cx")
    elif os.path.isdir(name):
        newd = name
        fname = 'main.cx'
    elif os.path.exists(CXLIB + "/" + name) or os.path.exists(CXLIB + "/" + name + ".cx"):
        # Not found locally: retry relative to the library root, temporarily
        # swapping the working directory and BASE.
        oldd = os.getcwd()
        old_base = BASE
        BASE = CXLIB
        os.chdir(CXLIB)
        xxx = parse(name)
        # Put the working directory and BASE back.
        os.chdir(oldd)
        BASE = old_base
        return xxx
    else:
        return None
    # go into!
    oldd = os.getcwd()
    if newd != "":
        os.chdir(newd)
    ast = parse_file(fname)
    fullname = os.path.abspath(fname)
    prefix = get_prefix(BASE, fullname)
    ast['prefix'] = prefix
    name = prefix
    if prefix == "":
        #name = BASE.split("/")[-1]
        name = 'main'
    ast['name'] = name
    # old shit
    """#import os
    print("fullname> " + fullname)
    #print("fname> " + fname)
    #print("BASE> " + BASE)
    comm = os.path.commonpath([BASE, fullname])
    name = fullname[len(comm) + 1:-3]
    name = name.replace('/', '_')
    print("name> " + name)
    print("pref> " + ast['prefix'])
    """
    # go back
    os.chdir(oldd)
    return ast
def main(argv):
    """Compiler driver: parse the project, process it and emit C sources.

    With a command-line argument, the argument names the project directory;
    otherwise the current directory is the project root.  Requires the
    CXLIB environment variable to point at the standard library.
    """
    global BASE, CXLIB
    if 'CXLIB' not in os.environ:
        # Typo fix: the message used to read 'CXLIB ont defined'.
        print("error: CXLIB not defined")
        exit(1)
    CXLIB = os.environ['CXLIB']
    cwd = os.getcwd()
    # With an argument, compile that directory; otherwise the current one.
    if len(sys.argv) > 1:
        name = sys.argv[1]
        BASE = cwd + '/' + name
    else:
        name = os.path.basename(cwd)
        BASE = cwd
    os.chdir(BASE)
    print("parsing")
    ast = parse("main")
    if ast is None:
        # (The old second 'if ast == None' check further down was dead code:
        # this branch already terminates the process.)
        fatal("main unit not found")
        exit(1)
    ast['prefix'] = name
    print("processing")
    asg = proc.unit(ast)
    if not os.path.exists("src"):
        os.makedirs("src")
    if not os.path.isfile("Makefile"):
        with open('Makefile', 'w+') as makefile:
            makefile.write(BASE_MAKEFILE)
    text = printer.xprint(asg, file=("main" + '.c'))
    print("done.")
if __name__ == '__main__':
    main(sys.argv)
    # Propagate the number of reported errors as the process exit status.
    from error import errcnt
    exit(errcnt)
def print_defs(u):
    """Debug helper: print 'isa/id' for every non-comment definition in *u*."""
    for definition in u['defs']:
        if definition['isa'] == 'comment':
            continue
        print('%s/%s' % (definition['isa'], definition['id']))
| {
"repo_name": "ammaaim/cx",
"path": "main.py",
"copies": "1",
"size": "7652",
"license": "mit",
"hash": -2289755926452632000,
"line_mean": 19.060518732,
"line_max": 88,
"alpha_frac": 0.6002011205,
"autogenerated": false,
"ratio": 2.5470179290157335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8538872695247411,
"avg_score": 0.02166927085366448,
"num_lines": 347
} |
from __future__ import absolute_import, print_function, division
import array
import collections
import struct
import threading
import time
try:
# Python 3
import queue
except ImportError:
# Python 2
import Queue as queue
import logging
import usb.core
import usb.util
from .message import Message
from .commons import format_list
from .driver import find_driver
_logger = logging.getLogger("ant.base.ant")
class Ant():
    """Low-level ANT USB node.

    Owns the USB driver, a reader thread (_worker) that parses incoming
    messages and sorts them into an event queue, and a dispatch loop
    (_main) that hands queued events to the response/channel-event
    callbacks (replaced at runtime by higher-level code).
    """

    # Seconds to sleep after sending RESET_SYSTEM.
    _RESET_WAIT = 1

    def __init__(self):
        self._driver = find_driver()

        self._message_queue_cond = threading.Condition()
        self._message_queue = collections.deque()

        self._events = queue.Queue()

        self._buffer = array.array('B', [])
        self._burst_data = array.array('B', [])
        self._last_data = array.array('B', [])

        self._running = True

        self._driver.open()

        self._worker_thread = threading.Thread(target=self._worker, name="ant.base")
        self._worker_thread.start()

        self.reset_system()

    def start(self):
        """Run the dispatch loop on the calling thread (blocks until stop)."""
        self._main()

    def stop(self):
        """Stop the reader thread and the dispatch loop (no-op if stopped)."""
        if self._running:
            _logger.debug("Stoping ant.base")
            self._running = False
            self._worker_thread.join()

    def _on_broadcast(self, message):
        # Payload byte 0 is the channel number; the rest is user data.
        self._events.put(('event', (message._data[0],
                          Message.Code.EVENT_RX_BROADCAST, message._data[1:])))

    def _on_acknowledge(self, message):
        self._events.put(('event', (message._data[0],
                          Message.Code.EVENT_RX_ACKNOWLEDGED, message._data[1:])))

    def _on_burst_data(self, message):
        # Byte 0 packs the burst sequence number (high 3 bits) and the
        # channel number (low 5 bits).
        sequence = message._data[0] >> 5
        channel = message._data[0] & 0b00011111
        data = message._data[1:]

        # First sequence
        if sequence == 0:
            self._burst_data = data
        # Other
        else:
            self._burst_data.extend(data)

        # Last sequence (indicated by bit 3)
        if sequence & 0b100 != 0:
            self._events.put(('event', (channel,
                              Message.Code.EVENT_RX_BURST_PACKET, self._burst_data)))

    def _worker(self):
        """Reader loop: parse wire messages and route them into the queues."""
        _logger.debug("Ant runner started")

        while self._running:
            try:
                message = self.read_message()

                if message == None:
                    break

                # TODO: flag and extended for broadcast, acknowledge, and burst

                # Only do callbacks for new data. Resent data only indicates
                # a new channel timeslot.
                if not (message._id == Message.ID.BROADCAST_DATA and
                        message._data == self._last_data):

                    # Notifications
                    if message._id in [Message.ID.STARTUP_MESSAGE,
                                       Message.ID.SERIAL_ERROR_MESSAGE]:
                        self._events.put(('response', (None, message._id,
                                          message._data)))
                    # Response (no channel)
                    elif message._id in [Message.ID.RESPONSE_VERSION,
                                         Message.ID.RESPONSE_CAPABILITIES,
                                         Message.ID.RESPONSE_SERIAL_NUMBER]:
                        self._events.put(('response', (None, message._id,
                                          message._data)))
                    # Response (channel)
                    elif message._id in [Message.ID.RESPONSE_CHANNEL_STATUS,
                                         Message.ID.RESPONSE_CHANNEL_ID]:
                        self._events.put(('response', (message._data[0],
                                          message._id, message._data[1:])))
                    # Response (other)
                    elif (message._id == Message.ID.RESPONSE_CHANNEL
                          and message._data[1] != 0x01):
                        self._events.put(('response', (message._data[0],
                                          message._data[1], message._data[2:])))
                    # Channel event
                    elif message._id == Message.ID.BROADCAST_DATA:
                        self._on_broadcast(message)
                    elif message._id == Message.ID.ACKNOWLEDGE_DATA:
                        self._on_acknowledge(message)
                    elif message._id == Message.ID.BURST_TRANSFER_DATA:
                        self._on_burst_data(message)
                    elif message._id == Message.ID.RESPONSE_CHANNEL:
                        _logger.debug("Got channel event, %r", message)
                        self._events.put(('event', (message._data[0],
                                          message._data[1], message._data[2:])))
                    else:
                        _logger.warning("Got unknown message, %r", message)
                else:
                    _logger.debug("No new data this period")

                # Send messages in queue, on indicated time slot
                if message._id == Message.ID.BROADCAST_DATA:
                    time.sleep(0.1)
                    _logger.debug("Got broadcast data, examine queue to see if we should send anything back")
                    # Non-blocking acquire: if the queue is being modified we
                    # simply skip this time slot.
                    if self._message_queue_cond.acquire(blocking=False):
                        while len(self._message_queue) > 0:
                            m = self._message_queue.popleft()
                            self.write_message(m)
                            _logger.debug(" - sent message from queue, %r", m)

                            if m._id != Message.ID.BURST_TRANSFER_DATA or \
                               m._data[0] & 0b10000000:  # or m._data[0] == 0:
                                break
                        else:
                            _logger.debug(" - no messages in queue")
                        self._message_queue_cond.release()

                self._last_data = message._data

            except usb.USBError as e:
                _logger.warning("%s, %r", type(e), e.args)

        _logger.debug("Ant runner stopped")

    def _main(self):
        """Dispatch loop: pull events off the queue and invoke the callbacks."""
        while self._running:
            try:
                (event_type, event) = self._events.get(True, 1.0)
                self._events.task_done()
                (channel, event, data) = event

                if event_type == 'response':
                    self.response_function(channel, event, data)
                elif event_type == 'event':
                    self.channel_event_function(channel, event, data)
                else:
                    _logger.warning("Unknown message typ '%s': %r", event_type, event)
            except queue.Empty as e:
                pass

    def write_message_timeslot(self, message):
        """Queue *message* to be sent on the next broadcast time slot."""
        with self._message_queue_cond:
            self._message_queue.append(message)

    def write_message(self, message):
        """Write *message* to the USB driver immediately."""
        data = message.get()
        self._driver.write(data)
        _logger.debug("Write data: %s", format_list(data))

    def read_message(self):
        """Read one complete ANT message, buffering partial USB reads."""
        while self._running:
            # If we have a message in buffer already, return it
            if len(self._buffer) >= 5 and len(self._buffer) >= self._buffer[1] + 4:
                packet = self._buffer[:self._buffer[1] + 4]
                self._buffer = self._buffer[self._buffer[1] + 4:]
                return Message.parse(packet)
            # Otherwise, read some data and call the function again
            else:
                data = self._driver.read()
                self._buffer.extend(data)
                _logger.debug("Read data: %s (now have %s in buffer)",
                              format_list(data), format_list(self._buffer))

    # Ant functions

    def unassign_channel(self, channel):
        # NOTE(review): not implemented — no UNASSIGN_CHANNEL message is sent.
        pass

    def assign_channel(self, channel, channelType, networkNumber):
        message = Message(Message.ID.ASSIGN_CHANNEL, [channel, channelType, networkNumber])
        self.write_message(message)

    def open_channel(self, channel):
        message = Message(Message.ID.OPEN_CHANNEL, [channel])
        self.write_message(message)

    def set_channel_id(self, channel, deviceNum, deviceType, transmissionType):
        # deviceNum is a 16-bit little-endian field, hence the struct pack.
        data = array.array('B', struct.pack("<BHBB", channel, deviceNum, deviceType, transmissionType))
        message = Message(Message.ID.SET_CHANNEL_ID, data)
        self.write_message(message)

    def set_channel_period(self, channel, messagePeriod):
        data = array.array('B', struct.pack("<BH", channel, messagePeriod))
        message = Message(Message.ID.SET_CHANNEL_PERIOD, data)
        self.write_message(message)

    def set_channel_search_timeout(self, channel, timeout):
        message = Message(Message.ID.SET_CHANNEL_SEARCH_TIMEOUT, [channel, timeout])
        self.write_message(message)

    def set_channel_rf_freq(self, channel, rfFreq):
        message = Message(Message.ID.SET_CHANNEL_RF_FREQ, [channel, rfFreq])
        self.write_message(message)

    def set_network_key(self, network, key):
        message = Message(Message.ID.SET_NETWORK_KEY, [network] + key)
        self.write_message(message)

    # This function is a bit of a mystery. It is mentioned in libgant,
    # http://sportwatcher.googlecode.com/svn/trunk/libgant/gant.h and is
    # also sent from the official ant deamon on windows.
    def set_search_waveform(self, channel, waveform):
        message = Message(Message.ID.SET_SEARCH_WAVEFORM, [channel] + waveform)
        self.write_message(message)

    def reset_system(self):
        """Reset the stick and give it _RESET_WAIT seconds to come back up."""
        message = Message(Message.ID.RESET_SYSTEM, [0x00])
        self.write_message(message)
        time.sleep(self._RESET_WAIT)

    def request_message(self, channel, messageId):
        message = Message(Message.ID.REQUEST_MESSAGE, [channel, messageId])
        self.write_message(message)

    def send_acknowledged_data(self, channel, data):
        assert len(data) == 8
        message = Message(Message.ID.ACKNOWLEDGE_DATA,
                          array.array('B', [channel]) + data)
        self.write_message_timeslot(message)

    def send_burst_transfer_packet(self, channel_seq, data, first):
        # NOTE(review): 'first' is accepted but unused here — confirm intent.
        assert len(data) == 8
        message = Message(Message.ID.BURST_TRANSFER_DATA,
                          array.array('B', [channel_seq]) + data)
        self.write_message_timeslot(message)

    def send_burst_transfer(self, channel, data):
        """Split *data* (a multiple of 8 bytes) into sequenced burst packets."""
        assert len(data) % 8 == 0
        _logger.debug("Send burst transfer, chan %s, data %s", channel, data)
        packets = len(data) // 8
        for i in range(packets):
            # Sequence cycles 0,1,2,3,1,2,3,...; the last packet sets bit 2.
            sequence = ((i - 1) % 3) + 1
            if i == 0:
                sequence = 0
            elif i == packets - 1:
                sequence = sequence | 0b100
            channel_seq = channel | sequence << 5
            packet_data = data[i * 8:i * 8 + 8]
            _logger.debug("Send burst transfer, packet %d, seq %d, data %s", i, sequence, packet_data)
            self.send_burst_transfer_packet(channel_seq, packet_data, first=i == 0)

    def response_function(self, channel, event, data):
        # Callback hook; replaced by higher-level code at runtime.
        pass

    def channel_event_function(self, channel, event, data):
        # Callback hook; replaced by higher-level code at runtime.
        pass
| {
"repo_name": "jforge/openant",
"path": "ant/base/ant.py",
"copies": "1",
"size": "12367",
"license": "mit",
"hash": 6939613810685109000,
"line_mean": 38.7652733119,
"line_max": 109,
"alpha_frac": 0.5525188,
"autogenerated": false,
"ratio": 4.297081306462822,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5349600106462822,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, print_function
import array
import logging
try:
from functools import reduce
except ImportError:
pass
from .commons import format_list
_logger = logging.getLogger("ant.base.message")
class Message:
    """A single framed ANT message: sync byte, length, id, payload, checksum."""

    class ID:
        """ANT message identifiers."""
        INVALID = 0x00

        # Configuration messages
        UNASSIGN_CHANNEL = 0x41
        ASSIGN_CHANNEL = 0x42
        SET_CHANNEL_ID = 0x51
        SET_CHANNEL_PERIOD = 0x43
        SET_CHANNEL_SEARCH_TIMEOUT = 0x44
        SET_CHANNEL_RF_FREQ = 0x45
        SET_NETWORK_KEY = 0x46
        SET_TRANSMIT_POWER = 0x47
        SET_SEARCH_WAVEFORM = 0x49  # XXX: Not in official docs
        ADD_CHANNEL_ID = 0x59
        CONFIG_LIST = 0x5A
        SET_CHANNEL_TX_POWER = 0x60
        LOW_PRIORITY_CHANNEL_SEARCH_TIMOUT = 0x63
        SERIAL_NUMBER_SET_CHANNEL = 0x65
        ENABLE_EXT_RX_MESGS = 0x66
        ENABLE_LED = 0x68
        ENABLE_CRYSTAL = 0x6D
        LIB_CONFIG = 0x6E
        FREQUENCY_AGILITY = 0x70
        PROXIMITY_SEARCH = 0x71
        CHANNEL_SEARCH_PRIORITY = 0x75
        # SET_USB_INFO = 0xff

        # Notifications
        STARTUP_MESSAGE = 0x6F
        SERIAL_ERROR_MESSAGE = 0xAE

        # Control messags
        RESET_SYSTEM = 0x4A
        OPEN_CHANNEL = 0x4B
        CLOSE_CHANNEL = 0x4C
        OPEN_RX_SCAN_MODE = 0x5B
        REQUEST_MESSAGE = 0x4D
        SLEEP_MESSAGE = 0xC5

        # Data messages
        BROADCAST_DATA = 0x4E
        ACKNOWLEDGE_DATA = 0x4F
        BURST_TRANSFER_DATA = 0x50

        # Responses (from channel)
        RESPONSE_CHANNEL = 0x40

        # Responses (from REQUEST_MESSAGE, 0x4d)
        RESPONSE_CHANNEL_STATUS = 0x52
        RESPONSE_CHANNEL_ID = 0x51
        RESPONSE_VERSION = 0x3E
        RESPONSE_CAPABILITIES = 0x54
        RESPONSE_SERIAL_NUMBER = 0x61

    class Code:
        """Response/event codes carried in channel-response payloads."""
        RESPONSE_NO_ERROR = 0

        EVENT_RX_SEARCH_TIMEOUT = 1
        EVENT_RX_FAIL = 2
        EVENT_TX = 3
        EVENT_TRANSFER_RX_FAILED = 4
        EVENT_TRANSFER_TX_COMPLETED = 5
        EVENT_TRANSFER_TX_FAILED = 6
        EVENT_CHANNEL_CLOSED = 7
        EVENT_RX_FAIL_GO_TO_SEARCH = 8
        EVENT_CHANNEL_COLLISION = 9
        EVENT_TRANSFER_TX_START = 10

        CHANNEL_IN_WRONG_STATE = 21
        CHANNEL_NOT_OPENED = 22
        CHANNEL_ID_NOT_SET = 24
        CLOSE_ALL_CHANNELS = 25

        TRANSFER_IN_PROGRESS = 31
        TRANSFER_SEQUENCE_NUMBER_ERROR = 32
        TRANSFER_IN_ERROR = 33

        MESSAGE_SIZE_EXCEEDS_LIMIT = 39
        INVALID_MESSAGE = 40
        INVALID_NETWORK_NUMBER = 41
        INVALID_LIST_ID = 48
        INVALID_SCAN_TX_CHANNEL = 49
        INVALID_PARAMETER_PROVIDED = 51
        EVENT_SERIAL_QUE_OVERFLOW = 52
        EVENT_QUE_OVERFLOW = 53
        NVM_FULL_ERROR = 64
        NVM_WRITE_ERROR = 65
        USB_STRING_WRITE_FAIL = 112
        MESG_SERIAL_ERROR_ID = 174

        # Library-internal pseudo-codes (not wire values)
        EVENT_RX_BROADCAST = 1000
        EVENT_RX_FLAG_BROADCAST = 1001
        EVENT_RX_ACKNOWLEDGED = 2000
        EVENT_RX_FLAG_ACKNOWLEDGED = 2001
        EVENT_RX_BURST_PACKET = 3000
        EVENT_RX_FLAG_BURST_PACKET = 3001

        @staticmethod
        def lookup(event):
            """Return the symbolic name of a code value, or None if unknown."""
            for key, value in Message.Code.__dict__.items():
                if type(value) == int and value == event:
                    return key

    def __init__(self, mId, data):
        """Build a message with id *mId* and byte-iterable payload *data*."""
        self._sync = 0xa4
        self._length = len(data)
        self._id = mId
        self._data = data
        # XOR of all framing and payload bytes.  The explicit initializer 0
        # allows an empty payload; the previous reduce(...) call without an
        # initializer raised TypeError for data == [].
        self._checksum = (self._sync ^ self._length ^ self._id
                          ^ reduce(lambda x, y: x ^ y, data, 0))

    def __repr__(self):
        return str.format(
            "<ant.base.Message {0:02x}:{1} (s:{2:02x}, l:{3}, c:{4:02x})>",
            self._id, format_list(self._data), self._sync,
            self._length, self._checksum)

    def get(self):
        """Serialize to the on-wire byte array."""
        result = array.array('B', [self._sync, self._length, self._id])
        result.extend(self._data)
        result.append(self._checksum)
        return result

    @staticmethod
    def parse(buf):
        """
        Parse a message from an array
        """
        sync = buf[0]
        length = buf[1]
        mId = buf[2]
        data = buf[3:-1]
        checksum = buf[-1]

        assert sync == 0xa4
        assert length == len(data)
        assert checksum == reduce(lambda x, y: x ^ y, buf[:-1])

        return Message(mId, data)
| {
"repo_name": "jforge/openant",
"path": "ant/base/message.py",
"copies": "1",
"size": "5473",
"license": "mit",
"hash": -5153328611619358000,
"line_mean": 29.7471910112,
"line_max": 77,
"alpha_frac": 0.6069797186,
"autogenerated": false,
"ratio": 3.593565331582403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4700545050182403,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, print_function
import collections
import threading
import logging
try:
# Python 3
import queue
except ImportError:
# Python 2
import Queue as queue
from ant.base.ant import Ant
from ant.base.message import Message
from ant.easy.channel import Channel
from ant.easy.filter import wait_for_event, wait_for_response, wait_for_special
_logger = logging.getLogger("ant.easy.node")
class Node():
    """High-level ANT node: owns the Ant device and the set of Channels, and
    dispatches incoming data payloads to the owning channel object."""

    def __init__(self):
        self._responses_cond = threading.Condition()
        self._responses = collections.deque()
        self._event_cond = threading.Condition()
        self._events = collections.deque()
        self._datas = queue.Queue()

        self.channels = {}

        self.ant = Ant()

        self._running = True

        self._worker_thread = threading.Thread(target=self._worker, name="ant.easy")
        self._worker_thread.start()

    def new_channel(self, ctype):
        """Create, register and assign the next free channel of type *ctype*."""
        size = len(self.channels)
        channel = Channel(size, self, self.ant)
        self.channels[size] = channel
        channel._assign(ctype, 0x00)
        return channel

    def request_message(self, messageId):
        """Request *messageId* from the device and wait for its reply."""
        _logger.debug("requesting message %#02x", messageId)
        self.ant.request_message(0, messageId)
        _logger.debug("done requesting message %#02x", messageId)
        return self.wait_for_special(messageId)

    def set_network_key(self, network, key):
        """Set the network key and wait for the device's acknowledgement."""
        self.ant.set_network_key(network, key)
        return self.wait_for_response(Message.ID.SET_NETWORK_KEY)

    def wait_for_event(self, ok_codes):
        return wait_for_event(ok_codes, self._events, self._event_cond)

    def wait_for_response(self, event_id):
        return wait_for_response(event_id, self._responses, self._responses_cond)

    def wait_for_special(self, event_id):
        return wait_for_special(event_id, self._responses, self._responses_cond)

    def _worker_response(self, channel, event, data):
        # Runs on the Ant worker thread: queue the response and wake waiters.
        self._responses_cond.acquire()
        self._responses.append((channel, event, data))
        self._responses_cond.notify()
        self._responses_cond.release()

    def _worker_event(self, channel, event, data):
        # Broadcast/burst payloads go to the data queue; everything else is
        # treated as a channel event.
        if event == Message.Code.EVENT_RX_BURST_PACKET:
            self._datas.put(('burst', channel, data))
        elif event == Message.Code.EVENT_RX_BROADCAST:
            self._datas.put(('broadcast', channel, data))
        else:
            self._event_cond.acquire()
            self._events.append((channel, event, data))
            self._event_cond.notify()
            self._event_cond.release()

    def _worker(self):
        # Hook our queue-feeding callbacks into the Ant layer, then run it.
        self.ant.response_function = self._worker_response
        self.ant.channel_event_function = self._worker_event

        # TODO: check capabilities
        self.ant.start()

    def _main(self):
        # Pump the data queue, handing payloads to the owning channel.
        while self._running:
            try:
                (data_type, channel, data) = self._datas.get(True, 1.0)
                self._datas.task_done()

                if data_type == 'broadcast':
                    self.channels[channel].on_broadcast_data(data)
                elif data_type == 'burst':
                    self.channels[channel].on_burst_data(data)
                else:
                    _logger.warning("Unknown data type '%s': %r", data_type, data)
            except queue.Empty as e:
                pass

    def start(self):
        """Run the data dispatch loop on the calling thread (blocks)."""
        self._main()

    def stop(self):
        """Stop the dispatch loop and the underlying Ant device (idempotent)."""
        if self._running:
            _logger.debug("Stoping ant.easy")
            self._running = False
            self.ant.stop()
            self._worker_thread.join()
| {
"repo_name": "jforge/openant",
"path": "ant/easy/node.py",
"copies": "1",
"size": "4727",
"license": "mit",
"hash": 8332523864201892000,
"line_mean": 33.2536231884,
"line_max": 84,
"alpha_frac": 0.6445948805,
"autogenerated": false,
"ratio": 3.999153976311337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007596765939253641,
"num_lines": 138
} |
from __future__ import absolute_import, print_function
import logging
from ant.base.message import Message
from ant.easy.exception import AntException, TransferFailedException
_logger = logging.getLogger("ant.easy.filter")
def wait_for_message(match, process, queue, condition):
    """
    Wait for a specific message in the *queue* guarded by the *condition*
    matching the function *match* (which is a function that takes a
    message as a parameter and returns a boolean). The messages is
    processed by the *process* function before returning it.

    Polls up to 10 times, waiting at most 1 second each, so a message that
    never arrives raises AntException after roughly 10 seconds.  A queued
    transfer-failure event raises TransferFailedException instead.
    """
    _logger.debug("wait for message matching %r", match)
    condition.acquire()
    for _ in range(10):
        _logger.debug("looking for matching message in %r", queue)
        # _logger.debug("wait for response to %#02x, checking", mId)
        for message in queue:
            if match(message):
                _logger.debug(" - response found %r", message)
                queue.remove(message)
                condition.release()
                return process(message)
            elif message[1] == 1 and message[2][0] in [Message.Code.EVENT_TRANSFER_TX_FAILED,
                                                       Message.Code.EVENT_RX_FAIL_GO_TO_SEARCH]:
                # A queued failure event aborts the wait.
                # NOTE(review): 'message[1] == 1' looks like a magic value
                # for the event field here — confirm what it encodes.
                _logger.warning("Transfer send failed:")
                _logger.warning(message)
                queue.remove(message)
                condition.release()
                raise TransferFailedException()
        _logger.debug(" - could not find response matching %r", match)
        condition.wait(1.0)
    condition.release()
    raise AntException("Timed out while waiting for message")
def wait_for_event(ok_codes, queue, condition):
    """Block until an event whose first data byte is in *ok_codes* arrives."""
    def is_wanted(params):
        _, _, data = params
        return data[0] in ok_codes

    return wait_for_message(is_wanted, lambda params: params, queue, condition)
def wait_for_response(event_id, queue, condition):
    """
    Waits for a response to a specific message sent by the channel response
    message, 0x40. It's expected to return RESPONSE_NO_ERROR, 0x00.
    """
    def is_wanted(params):
        return params[1] == event_id

    def check(params):
        data = params[2]
        if data[0] == Message.Code.RESPONSE_NO_ERROR:
            return params
        raise Exception("Responded with error " + str(data[0])
                        + ":" + Message.Code.lookup(data[0]))

    return wait_for_message(is_wanted, check, queue, condition)
def wait_for_special(event_id, queue, condition):
    """
    Waits for special responses to messages such as Channel ID, ANT
    Version, etc. This does not throw any exceptions, besides timeouts.
    """
    return wait_for_message(lambda params: params[1] == event_id,
                            lambda params: params, queue, condition)
| {
"repo_name": "jforge/openant",
"path": "ant/easy/filter.py",
"copies": "1",
"size": "4099",
"license": "mit",
"hash": -6887332983168702000,
"line_mean": 36.6055045872,
"line_max": 96,
"alpha_frac": 0.6650402537,
"autogenerated": false,
"ratio": 4.2520746887966805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.541711494249668,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, print_function
import logging
from ant.base.message import Message
from ant.easy.exception import TransferFailedException
from ant.easy.filter import wait_for_event, wait_for_response, wait_for_special
_logger = logging.getLogger("ant.easy.channel")
class Channel():
    """A single ANT channel bound to a Node and its underlying Ant device."""

    class Type:
        BIDIRECTIONAL_RECEIVE = 0x00
        BIDIRECTIONAL_TRANSMIT = 0x10
        SHARED_BIDIRECTIONAL_RECEIVE = 0x20
        SHARED_BIDIRECTIONAL_TRANSMIT = 0x30
        UNIDIRECTIONAL_RECEIVE_ONLY = 0x40
        UNIDIRECTIONAL_TRANSMIT_ONLY = 0x50

    def __init__(self, id, node, ant):
        self.id = id
        self._node = node
        self._ant = ant

    def wait_for_event(self, ok_codes):
        return wait_for_event(ok_codes, self._node._events, self._node._event_cond)

    def wait_for_response(self, event_id):
        return wait_for_response(event_id, self._node._responses, self._node._responses_cond)

    def wait_for_special(self, event_id):
        return wait_for_special(event_id, self._node._responses, self._node._responses_cond)

    def _assign(self, channelType, networkNumber):
        self._ant.assign_channel(self.id, channelType, networkNumber)
        return self.wait_for_response(Message.ID.ASSIGN_CHANNEL)

    def _unassign(self):
        pass

    def open(self):
        self._ant.open_channel(self.id)
        return self.wait_for_response(Message.ID.OPEN_CHANNEL)

    def set_id(self, deviceNum, deviceType, transmissionType):
        self._ant.set_channel_id(self.id, deviceNum, deviceType, transmissionType)
        return self.wait_for_response(Message.ID.SET_CHANNEL_ID)

    def set_period(self, messagePeriod):
        self._ant.set_channel_period(self.id, messagePeriod)
        return self.wait_for_response(Message.ID.SET_CHANNEL_PERIOD)

    def set_search_timeout(self, timeout):
        self._ant.set_channel_search_timeout(self.id, timeout)
        return self.wait_for_response(Message.ID.SET_CHANNEL_SEARCH_TIMEOUT)

    def set_rf_freq(self, rfFreq):
        self._ant.set_channel_rf_freq(self.id, rfFreq)
        return self.wait_for_response(Message.ID.SET_CHANNEL_RF_FREQ)

    def set_search_waveform(self, waveform):
        self._ant.set_search_waveform(self.id, waveform)
        return self.wait_for_response(Message.ID.SET_SEARCH_WAVEFORM)

    def request_message(self, messageId):
        _logger.debug("requesting message %#02x", messageId)
        self._ant.request_message(self.id, messageId)
        _logger.debug("done requesting message %#02x", messageId)
        return self.wait_for_special(messageId)

    def send_acknowledged_data(self, data):
        """Send an 8-byte acknowledged payload, retrying until it succeeds.

        FIX: the retry used to be implemented by unbounded recursion, which
        could overflow the stack on a flaky link; it is now an iterative
        loop with identical retry-forever semantics.
        """
        while True:
            try:
                _logger.debug("send acknowledged data %s", self.id)
                self._ant.send_acknowledged_data(self.id, data)
                self.wait_for_event([Message.Code.EVENT_TRANSFER_TX_COMPLETED])
                _logger.debug("done sending acknowledged data %s", self.id)
                return
            except TransferFailedException:
                _logger.warning("failed to send acknowledged data %s, retrying", self.id)

    def send_burst_transfer_packet(self, channelSeq, data, first):
        _logger.debug("send burst transfer packet %s", data)
        self._ant.send_burst_transfer_packet(channelSeq, data, first)

    def send_burst_transfer(self, data):
        """Send a multi-packet burst, retrying the whole burst on failure.

        FIX: iterative retry replaces the previous unbounded recursion.
        """
        while True:
            try:
                _logger.debug("send burst transfer %s", self.id)
                self._ant.send_burst_transfer(self.id, data)
                self.wait_for_event([Message.Code.EVENT_TRANSFER_TX_START])
                self.wait_for_event([Message.Code.EVENT_TRANSFER_TX_COMPLETED])
                _logger.debug("done sending burst transfer %s", self.id)
                return
            except TransferFailedException:
                _logger.warning("failed to send burst transfer %s, retrying", self.id)
| {
"repo_name": "jforge/openant",
"path": "ant/easy/channel.py",
"copies": "1",
"size": "4994",
"license": "mit",
"hash": 2141833254105536500,
"line_mean": 40.6166666667,
"line_max": 93,
"alpha_frac": 0.6938325991,
"autogenerated": false,
"ratio": 3.637290604515659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4831123203615659,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, print_function
import struct
class Beacon:
    """Parsed ANT-FS beacon payload (8 bytes): status bytes, authentication
    type and a 4-byte descriptor."""

    class ClientDeviceState:
        LINK = 0x00            # 0b0000
        AUTHENTICATION = 0x01  # 0b0001
        TRANSPORT = 0x02       # 0b0010
        BUSY = 0x03            # 0b0011

    # First payload byte of every beacon.
    BEACON_ID = 0x43

    def is_data_available(self):
        return bool(self._status_byte_1 & 0x20)  # 0b00100000

    def is_upload_enabled(self):
        return bool(self._status_byte_1 & 0x10)  # 0b00010000

    def is_pairing_enabled(self):
        return bool(self._status_byte_1 & 0x08)  # 0b00001000

    def get_channel_period(self):
        return self._status_byte_1 & 0x07  # 0b00000111, TODO

    def get_client_device_state(self):
        return self._status_byte_2 & 0x0f  # 0b00001111, TODO

    def get_serial(self):
        """Descriptor interpreted as one 32-bit little-endian serial number."""
        return struct.unpack("<I", self._descriptor)[0]

    def get_descriptor(self):
        """Descriptor interpreted as two 16-bit little-endian values."""
        return struct.unpack("<HH", self._descriptor)

    @staticmethod
    def parse(data):
        """Parse an 8-byte beacon payload into a Beacon instance."""
        values = struct.unpack("<BBBB4x", data)

        # Consistency fix: compare against the named constant instead of a
        # duplicated magic 0x43.
        assert values[0] == Beacon.BEACON_ID

        beacon = Beacon()
        beacon._status_byte_1 = values[1]
        beacon._status_byte_2 = values[2]
        beacon._authentication_type = values[3]
        beacon._descriptor = data[4:]
        return beacon
| {
"repo_name": "jforge/openant",
"path": "ant/fs/beacon.py",
"copies": "1",
"size": "2391",
"license": "mit",
"hash": 3355720921504279000,
"line_mean": 33.1571428571,
"line_max": 77,
"alpha_frac": 0.6896695943,
"autogenerated": false,
"ratio": 3.7012383900928794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48909079843928793,
"avg_score": null,
"num_lines": null
} |
import array
import collections
import struct
import threading
import logging
from commons import format_list
_logger = logging.getLogger("garmin.ant.base.message")
class Message:
    """One ANT protocol frame: sync byte, length, id, payload, checksum.

    The wire format is [0xA4, len(data), id, *data, checksum] where the
    checksum is the XOR of every preceding byte.
    """

    class ID:
        """ANT message ids (see the ANT Message Protocol reference)."""
        INVALID = 0x00
        # Configuration messages
        UNASSIGN_CHANNEL = 0x41
        ASSIGN_CHANNEL = 0x42
        SET_CHANNEL_ID = 0x51
        SET_CHANNEL_PERIOD = 0x43
        SET_CHANNEL_SEARCH_TIMEOUT = 0x44
        SET_CHANNEL_RF_FREQ = 0x45
        SET_NETWORK_KEY = 0x46
        SET_TRANSMIT_POWER = 0x47
        SET_SEARCH_WAVEFORM = 0x49 # XXX: Not in official docs
        ADD_CHANNEL_ID = 0x59
        CONFIG_LIST = 0x5A
        SET_CHANNEL_TX_POWER = 0x60
        LOW_PRIORITY_CHANNEL_SEARCH_TIMOUT = 0x63
        SERIAL_NUMBER_SET_CHANNEL = 0x65
        ENABLE_EXT_RX_MESGS = 0x66
        ENABLE_LED = 0x68
        ENABLE_CRYSTAL = 0x6D
        LIB_CONFIG = 0x6E
        FREQUENCY_AGILITY = 0x70
        PROXIMITY_SEARCH = 0x71
        CHANNEL_SEARCH_PRIORITY = 0x75
        #SET_USB_INFO = 0xff
        # Notifications
        STARTUP_MESSAGE = 0x6F
        SERIAL_ERROR_MESSAGE = 0xAE
        # Control messages
        RESET_SYSTEM = 0x4A
        OPEN_CHANNEL = 0x4B
        CLOSE_CHANNEL = 0x4C
        OPEN_RX_SCAN_MODE = 0x5B
        REQUEST_MESSAGE = 0x4D
        SLEEP_MESSAGE = 0xC5
        # Data messages
        BROADCAST_DATA = 0x4E
        ACKNOWLEDGE_DATA = 0x4F
        BURST_TRANSFER_DATA = 0x50
        # Responses (from channel)
        RESPONSE_CHANNEL = 0x40
        # Responses (from REQUEST_MESSAGE, 0x4d)
        RESPONSE_CHANNEL_STATUS = 0x52
        RESPONSE_CHANNEL_ID = 0x51
        RESPONSE_VERSION = 0x3E
        RESPONSE_CAPABILITIES = 0x54
        RESPONSE_SERIAL_NUMBER = 0x61

    class Code:
        """Channel response/event codes, plus library-internal pseudo-codes
        (>= 1000) used for broadcast/acknowledged/burst dispatch."""
        RESPONSE_NO_ERROR = 0
        EVENT_RX_SEARCH_TIMEOUT = 1
        EVENT_RX_FAIL = 2
        EVENT_TX = 3
        EVENT_TRANSFER_RX_FAILED = 4
        EVENT_TRANSFER_TX_COMPLETED = 5
        EVENT_TRANSFER_TX_FAILED = 6
        EVENT_CHANNEL_CLOSED = 7
        EVENT_RX_FAIL_GO_TO_SEARCH = 8
        EVENT_CHANNEL_COLLISION = 9
        EVENT_TRANSFER_TX_START = 10
        CHANNEL_IN_WRONG_STATE = 21
        CHANNEL_NOT_OPENED = 22
        CHANNEL_ID_NOT_SET = 24
        CLOSE_ALL_CHANNELS = 25
        TRANSFER_IN_PROGRESS = 31
        TRANSFER_SEQUENCE_NUMBER_ERROR = 32
        TRANSFER_IN_ERROR = 33
        MESSAGE_SIZE_EXCEEDS_LIMIT = 39
        INVALID_MESSAGE = 40
        INVALID_NETWORK_NUMBER = 41
        INVALID_LIST_ID = 48
        INVALID_SCAN_TX_CHANNEL = 49
        INVALID_PARAMETER_PROVIDED = 51
        EVENT_SERIAL_QUE_OVERFLOW = 52
        EVENT_QUE_OVERFLOW = 53
        NVM_FULL_ERROR = 64
        NVM_WRITE_ERROR = 65
        USB_STRING_WRITE_FAIL = 112
        MESG_SERIAL_ERROR_ID = 174
        EVENT_RX_BROADCAST = 1000
        EVENT_RX_FLAG_BROADCAST = 1001
        EVENT_RX_ACKNOWLEDGED = 2000
        EVENT_RX_FLAG_ACKNOWLEDGED = 2001
        EVENT_RX_BURST_PACKET = 3000
        EVENT_RX_FLAG_BURST_PACKET = 3001

        @staticmethod
        def lookup(event):
            """Return the symbolic name for *event*, or None if unknown."""
            for key, value in Message.Code.__dict__.items():
                if isinstance(value, int) and value == event:
                    return key
            return None

    def __init__(self, mId, data):
        """Frame a message with id *mId* and iterable of payload bytes *data*."""
        self._sync = 0xa4
        self._length = len(data)
        self._id = mId
        # Checksum is the XOR of all framing and payload bytes.  Computed
        # with an explicit fold: the bare `reduce` builtin used previously
        # does not exist on Python 3.
        self._data = data
        checksum = self._sync ^ self._length ^ self._id
        for byte in data:
            checksum ^= byte
        self._checksum = checksum

    def __repr__(self):
        return str.format(
                "<ant.base.Message {0:02x}:{1} (s:{2:02x}, l:{3}, c:{4:02x})>",
                self._id, format_list(self._data), self._sync,
                self._length, self._checksum)

    def get(self):
        """Return the complete on-wire frame as an array of bytes."""
        result = array.array('B', [self._sync, self._length, self._id])
        result.extend(self._data)
        result.append(self._checksum)
        return result

    @staticmethod
    def parse(buf):
        """Parse a complete frame from *buf* (an array of bytes).

        Raises AssertionError on a bad sync byte, length or checksum.
        """
        sync     = buf[0]
        length   = buf[1]
        mId      = buf[2]
        data     = buf[3:-1]
        checksum = buf[-1]
        assert sync     == 0xa4
        assert length   == len(data)
        # Verify the XOR checksum over everything except the checksum byte.
        expected = 0
        for byte in buf[:-1]:
            expected ^= byte
        assert checksum == expected
        return Message(mId, data)
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/base/message.py",
"copies": "1",
"size": "6620",
"license": "mit",
"hash": -3803489629086382600,
"line_mean": 37.2658959538,
"line_max": 82,
"alpha_frac": 0.4954682779,
"autogenerated": false,
"ratio": 4.166142227816237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5161610505716236,
"avg_score": null,
"num_lines": null
} |
import array
import collections
import struct
import threading
import time
import queue
import logging
import usb.core
import usb.util
from .message import Message
from .commons import format_list
from .driver import find_driver
_logger = logging.getLogger("ant.base.ant")
class Ant:
    """Low-level ANT node (Python 3 dialect).

    Talks to the ANT stick through a driver, frames/unframes Message
    objects, and runs a background reader thread (_worker) that sorts
    incoming traffic into an internal event queue.  _main() drains that
    queue and dispatches to response_function / channel_event_function,
    which owners are expected to replace with their own callbacks.
    """
    # Seconds to sleep after RESET_SYSTEM so the device can restart.
    _RESET_WAIT = 1
    def __init__(self):
        """Open the driver, start the reader thread and reset the device."""
        self._driver = find_driver()
        # Outgoing messages waiting for a broadcast timeslot (see _worker);
        # guarded by the condition so the reader thread can drain safely.
        self._message_queue_cond = threading.Condition()
        self._message_queue = collections.deque()
        # ("response"|"event", payload) tuples handed to _main().
        self._events = queue.Queue()
        # Raw bytes read from the driver, not yet framed into a message.
        self._buffer = array.array("B", [])
        # Reassembly buffer for multi-packet burst transfers.
        self._burst_data = array.array("B", [])
        # Payload of the previous broadcast, used to drop resent data.
        self._last_data = array.array("B", [])
        self._running = True
        self._driver.open()
        self._worker_thread = threading.Thread(target=self._worker, name="ant.base")
        self._worker_thread.start()
        self.reset_system()
    def start(self):
        """Run the dispatch loop in the calling thread until stop()."""
        self._main()
    def stop(self):
        """Stop the dispatch loop and join the reader thread."""
        if self._running:
            _logger.debug("Stoping ant.base")
            self._running = False
            self._worker_thread.join()
    def _on_broadcast(self, message):
        """Queue a received broadcast as an EVENT_RX_BROADCAST event."""
        self._events.put(
            (
                "event",
                (message._data[0], Message.Code.EVENT_RX_BROADCAST, message._data[1:]),
            )
        )
    def _on_acknowledge(self, message):
        """Queue a received acknowledged-data frame as an event."""
        self._events.put(
            (
                "event",
                (
                    message._data[0],
                    Message.Code.EVENT_RX_ACKNOWLEDGED,
                    message._data[1:],
                ),
            )
        )
    def _on_burst_data(self, message):
        """Reassemble burst packets; emit one event when the last arrives."""
        # Byte 0 packs a 3-bit sequence number and the 5-bit channel number.
        sequence = message._data[0] >> 5
        channel = message._data[0] & 0b00011111
        data = message._data[1:]
        # First sequence
        if sequence == 0:
            self._burst_data = data
        # Other
        else:
            self._burst_data.extend(data)
        # Last sequence (indicated by bit 3)
        if sequence & 0b100 != 0:
            self._events.put(
                (
                    "event",
                    (channel, Message.Code.EVENT_RX_BURST_PACKET, self._burst_data),
                )
            )
    def _worker(self):
        """Reader loop (background thread).

        Classifies each incoming frame into ("response", ...) or
        ("event", ...) on self._events, and flushes the outgoing message
        queue on each broadcast timeslot.
        """
        _logger.debug("Ant runner started")
        while self._running:
            try:
                message = self.read_message()
                if message is None:
                    break
                # TODO: flag and extended for broadcast, acknowledge, and burst
                # Only do callbacks for new data. Resent data only indicates
                # a new channel timeslot.
                if not (
                    message._id == Message.ID.BROADCAST_DATA
                    and message._data == self._last_data
                ):
                    # Notifications
                    if message._id in [
                        Message.ID.STARTUP_MESSAGE,
                        Message.ID.SERIAL_ERROR_MESSAGE,
                    ]:
                        self._events.put(
                            ("response", (None, message._id, message._data))
                        )
                    # Response (no channel)
                    elif message._id in [
                        Message.ID.RESPONSE_ANT_VERSION,
                        Message.ID.RESPONSE_CAPABILITIES,
                        Message.ID.RESPONSE_SERIAL_NUMBER,
                        Message.ID.ENABLE_EXT_RX_MESGS,
                    ]:
                        self._events.put(
                            ("response", (None, message._id, message._data))
                        )
                    # Response (channel)
                    elif message._id in [
                        Message.ID.RESPONSE_CHANNEL_STATUS,
                        Message.ID.RESPONSE_CHANNEL_ID,
                    ]:
                        self._events.put(
                            (
                                "response",
                                (message._data[0], message._id, message._data[1:]),
                            )
                        )
                    # Response (other)
                    elif (
                        message._id == Message.ID.RESPONSE_CHANNEL
                        and message._data[1] != 0x01
                    ):
                        self._events.put(
                            (
                                "response",
                                (message._data[0], message._data[1], message._data[2:]),
                            )
                        )
                    # Channel event
                    elif (
                        message._id == Message.ID.RESPONSE_CHANNEL
                        and message._data[1] == 0x01
                    ):
                        _logger.debug("Got channel event, %r", message)
                        self._events.put(
                            (
                                "event",
                                (message._data[0], message._data[1], message._data[2:]),
                            )
                        )
                    elif message._id == Message.ID.BROADCAST_DATA:
                        self._on_broadcast(message)
                    elif message._id == Message.ID.ACKNOWLEDGED_DATA:
                        self._on_acknowledge(message)
                    elif message._id == Message.ID.BURST_TRANSFER_DATA:
                        self._on_burst_data(message)
                    else:
                        _logger.warning("Got unknown message, %r", message)
                else:
                    _logger.debug("No new data this period")
                # Send messages in queue, on indicated time slot
                if message._id == Message.ID.BROADCAST_DATA:
                    time.sleep(0.1)
                    _logger.debug(
                        "Got broadcast data, examine queue to see if we should send anything back"
                    )
                    if self._message_queue_cond.acquire(blocking=False):
                        while len(self._message_queue) > 0:
                            m = self._message_queue.popleft()
                            self.write_message(m)
                            _logger.debug(" - sent message from queue, %r", m)
                            # Stop after one message unless we are mid-burst
                            # (bit 7 of the channel/sequence byte marks the
                            # last burst packet).
                            if (
                                m._id != Message.ID.BURST_TRANSFER_DATA
                                or m._data[0] & 0b10000000
                            ):  # or m._data[0] == 0:
                                break
                        else:
                            _logger.debug(" - no messages in queue")
                        self._message_queue_cond.release()
                self._last_data = message._data
            except usb.USBError as e:
                _logger.warning("%s, %r", type(e), e.args)
        _logger.debug("Ant runner stopped")
    def _main(self):
        """Dispatch loop: drain self._events and invoke the callbacks."""
        while self._running:
            try:
                (event_type, event) = self._events.get(True, 1.0)
                self._events.task_done()
                (channel, event, data) = event
                if event_type == "response":
                    self.response_function(channel, event, data)
                elif event_type == "event":
                    self.channel_event_function(channel, event, data)
                else:
                    _logger.warning("Unknown message typ '%s': %r", event_type, event)
            except queue.Empty as e:
                pass
    def write_message_timeslot(self, message):
        """Queue *message* to be sent on the next broadcast timeslot."""
        with self._message_queue_cond:
            self._message_queue.append(message)
    def write_message(self, message):
        """Write *message* to the driver immediately."""
        data = message.get()
        self._driver.write(data)
        _logger.debug("Write data: %s", format_list(data))
    def read_message(self):
        """Block until a complete frame is available and return it parsed."""
        while self._running:
            # If we have a message in buffer already, return it
            if len(self._buffer) >= 5 and len(self._buffer) >= self._buffer[1] + 4:
                packet = self._buffer[: self._buffer[1] + 4]
                self._buffer = self._buffer[self._buffer[1] + 4 :]
                return Message.parse(packet)
            # Otherwise, read some data and call the function again
            else:
                data = self._driver.read()
                self._buffer.extend(data)
                _logger.debug(
                    "Read data: %s (now have %s in buffer)",
                    format_list(data),
                    format_list(self._buffer),
                )
    # Ant functions
    def unassign_channel(self, channel):
        """Not implemented."""
        pass
    def assign_channel(self, channel, channelType, networkNumber, ext_assign):
        """Assign *channel*; *ext_assign* adds the extended-assignment byte."""
        if ext_assign is None:
            message = Message(
                Message.ID.ASSIGN_CHANNEL, [channel, channelType, networkNumber]
            )
        else:
            message = Message(
                Message.ID.ASSIGN_CHANNEL,
                [channel, channelType, networkNumber, ext_assign],
            )
        self.write_message(message)
    def open_channel(self, channel):
        """Open a previously assigned channel."""
        message = Message(Message.ID.OPEN_CHANNEL, [channel])
        self.write_message(message)
    def open_rx_scan_mode(self):
        """Put the device in continuous RX scan mode on channel 0."""
        message = Message(Message.ID.OPEN_RX_SCAN_MODE, [0, 1])  # [0-Channel, 1-Enable]
        self.write_message(message)
    def close_channel(self, channel):
        """Close an open channel."""
        _logger.debug("Closing channel %d", channel)
        message = Message(Message.ID.CLOSE_CHANNEL, [channel])
        self.write_message(message)
    def set_channel_id(self, channel, deviceNum, deviceType, transmissionType):
        """Set the channel id (device number/type, transmission type)."""
        data = array.array(
            "B", struct.pack("<BHBB", channel, deviceNum, deviceType, transmissionType)
        )
        message = Message(Message.ID.SET_CHANNEL_ID, data)
        self.write_message(message)
    def set_channel_period(self, channel, messagePeriod):
        """Set the channel message period (16-bit, device units)."""
        data = array.array("B", struct.pack("<BH", channel, messagePeriod))
        message = Message(Message.ID.SET_CHANNEL_PERIOD, data)
        self.write_message(message)
    def set_channel_search_timeout(self, channel, timeout):
        """Set the channel search timeout."""
        message = Message(Message.ID.SET_CHANNEL_SEARCH_TIMEOUT, [channel, timeout])
        self.write_message(message)
    def set_channel_rf_freq(self, channel, rfFreq):
        """Set the channel RF frequency offset."""
        message = Message(Message.ID.SET_CHANNEL_RF_FREQ, [channel, rfFreq])
        self.write_message(message)
    def enable_extended_messages(self, channel, enable):
        """Enable or disable extended RX messages."""
        message = Message(Message.ID.ENABLE_EXT_RX_MESGS, [channel, enable])
        self.write_message(message)
    def set_network_key(self, network, key):
        """Set the network key; *key* is a list of key bytes."""
        message = Message(Message.ID.SET_NETWORK_KEY, [network] + key)
        self.write_message(message)
    # This function is a bit of a mystery. It is mentioned in libgant,
    # http://sportwatcher.googlecode.com/svn/trunk/libgant/gant.h and is
    # also sent from the official ant deamon on windows.
    def set_search_waveform(self, channel, waveform):
        """Set the (undocumented) search waveform; *waveform* is a byte list."""
        message = Message(Message.ID.SET_SEARCH_WAVEFORM, [channel] + waveform)
        self.write_message(message)
    def reset_system(self):
        """Reset the device and wait _RESET_WAIT seconds for it to restart."""
        message = Message(Message.ID.RESET_SYSTEM, [0x00])
        self.write_message(message)
        time.sleep(self._RESET_WAIT)
    def request_message(self, channel, messageId):
        """Request a specific message (the reply arrives via the callbacks)."""
        message = Message(Message.ID.REQUEST_MESSAGE, [channel, messageId])
        self.write_message(message)
    def send_broadcast_data(self, channel, data):
        """Send an 8-byte broadcast payload immediately."""
        assert len(data) == 8
        message = Message(Message.ID.BROADCAST_DATA, array.array("B", [channel]) + data)
        self.write_message(message)
    def send_acknowledged_data(self, channel, data):
        """Queue an 8-byte acknowledged payload for the next timeslot."""
        assert len(data) == 8
        message = Message(
            Message.ID.ACKNOWLEDGED_DATA, array.array("B", [channel]) + data
        )
        self.write_message_timeslot(message)
    def send_burst_transfer_packet(self, channel_seq, data, first):
        """Queue one 8-byte burst packet; *channel_seq* packs channel+sequence."""
        assert len(data) == 8
        message = Message(
            Message.ID.BURST_TRANSFER_DATA, array.array("B", [channel_seq]) + data
        )
        self.write_message_timeslot(message)
    def send_burst_transfer(self, channel, data):
        """Split *data* (multiple of 8 bytes) into sequenced burst packets."""
        assert len(data) % 8 == 0
        _logger.debug("Send burst transfer, chan %s, data %s", channel, data)
        packets = len(data) // 8
        for i in range(packets):
            # Sequence cycles 0,1,2,3,1,2,3,... with bit 2 set on the last
            # packet (so the final channel_seq has bit 7 of byte 0 set).
            sequence = ((i - 1) % 3) + 1
            if i == 0:
                sequence = 0
            elif i == packets - 1:
                sequence = sequence | 0b100
            channel_seq = channel | sequence << 5
            packet_data = data[i * 8 : i * 8 + 8]
            _logger.debug(
                "Send burst transfer, packet %d, seq %d, data %s",
                i,
                sequence,
                packet_data,
            )
            self.send_burst_transfer_packet(channel_seq, packet_data, first=i == 0)
    def response_function(self, channel, event, data):
        """Callback hook for responses; replaced by the owner."""
        pass
    def channel_event_function(self, channel, event, data):
        """Callback hook for channel events; replaced by the owner."""
        pass
| {
"repo_name": "Tigge/openant",
"path": "ant/base/ant.py",
"copies": "1",
"size": "14211",
"license": "mit",
"hash": -646820878127171300,
"line_mean": 35.7209302326,
"line_max": 98,
"alpha_frac": 0.5176271902,
"autogenerated": false,
"ratio": 4.409246044058331,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005674263674565066,
"num_lines": 387
} |
import array
import collections
import struct
import threading
import time
import Queue
import logging
import usb
from message import Message
from commons import format_list
from driver import find_driver
_logger = logging.getLogger("garmin.ant.base.ant")
class Ant():
    """Low-level ANT node (Python 2 dialect of ant/base/ant.py).

    A reader thread (_worker) frames incoming traffic and sorts it into an
    event queue; _main() drains the queue and dispatches to the
    response_function / channel_event_function callback hooks.
    """
    # Seconds to sleep after RESET_SYSTEM so the device can restart.
    _RESET_WAIT = 1
    def __init__(self):
        """Open the driver, start the reader thread and reset the device."""
        self._driver = find_driver()
        # Outgoing messages waiting for a broadcast timeslot.
        self._message_queue_cond = threading.Condition()
        self._message_queue = collections.deque()
        # ('response'|'event', payload) tuples handed to _main().
        self._events = Queue.Queue()
        # Raw unframed driver bytes / burst reassembly / last broadcast seen.
        self._buffer = array.array('B', [])
        self._burst_data = array.array('B', [])
        self._last_data = array.array('B', [])
        self._running = True
        self._driver.open()
        self._worker_thread = threading.Thread(target=self._worker, name="ant.base")
        self._worker_thread.start()
        self.reset_system()
    def start(self):
        """Run the dispatch loop in the calling thread until stop()."""
        self._main()
    def stop(self):
        """Stop the dispatch loop and join the reader thread."""
        if self._running:
            _logger.debug("Stoping ant.base")
            self._running = False
            self._worker_thread.join()
    def _on_broadcast(self, message):
        """Queue a received broadcast as an EVENT_RX_BROADCAST event."""
        self._events.put(('event', (message._data[0],
                Message.Code.EVENT_RX_BROADCAST, message._data[1:])))
    def _on_acknowledge(self, message):
        """Queue a received acknowledged-data frame as an event."""
        self._events.put(('event', (message._data[0],
                Message.Code.EVENT_RX_ACKNOWLEDGED, message._data[1:])))
    def _on_burst_data(self, message):
        """Reassemble burst packets; emit one event when the last arrives."""
        # Byte 0 packs a 3-bit sequence number and the 5-bit channel number.
        sequence = message._data[0] >> 5
        channel = message._data[0] & 0b00011111
        data = message._data[1:]
        # First sequence
        if sequence == 0:
            self._burst_data = data
        # Other
        else:
            self._burst_data.extend(data)
        # Last sequence (indicated by bit 3)
        if sequence & 0b100 != 0:
            self._events.put(('event', (channel,
                    Message.Code.EVENT_RX_BURST_PACKET, self._burst_data)))
    def _worker(self):
        """Reader loop (background thread): classify frames and flush the
        outgoing queue on each broadcast timeslot."""
        _logger.debug("Ant runner started")
        while self._running:
            try:
                message = self.read_message()
                if message == None:
                    break
                # TODO: flag and extended for broadcast, acknowledge, and burst
                # Only do callbacks for new data. Resent data only indicates
                # a new channel timeslot.
                if not (message._id == Message.ID.BROADCAST_DATA and
                        message._data == self._last_data):
                    # Notifications
                    if message._id in [Message.ID.STARTUP_MESSAGE, \
                            Message.ID.SERIAL_ERROR_MESSAGE]:
                        self._events.put(('response', (None, message._id,
                                message._data)))
                    # Response (no channel)
                    elif message._id in [Message.ID.RESPONSE_VERSION, \
                            Message.ID.RESPONSE_CAPABILITIES, \
                            Message.ID.RESPONSE_SERIAL_NUMBER]:
                        self._events.put(('response', (None, message._id,
                                message._data)))
                    # Response (channel)
                    elif message._id in [Message.ID.RESPONSE_CHANNEL_STATUS, \
                            Message.ID.RESPONSE_CHANNEL_ID]:
                        self._events.put(('response', (message._data[0],
                                message._id, message._data[1:])))
                    # Response (other)
                    elif (message._id == Message.ID.RESPONSE_CHANNEL \
                          and message._data[1] != 0x01):
                        self._events.put(('response', (message._data[0],
                                message._data[1], message._data[2:])))
                    # Channel event
                    elif message._id == Message.ID.BROADCAST_DATA:
                        self._on_broadcast(message)
                    elif message._id == Message.ID.ACKNOWLEDGE_DATA:
                        self._on_acknowledge(message)
                    elif message._id == Message.ID.BURST_TRANSFER_DATA:
                        self._on_burst_data(message)
                    elif message._id == Message.ID.RESPONSE_CHANNEL:
                        _logger.debug("Got channel event, %r", message)
                        self._events.put(('event', (message._data[0],
                                message._data[1], message._data[2:])))
                    else:
                        _logger.warning("Got unknown message, %r", message)
                else:
                    _logger.debug("No new data this period")
                # Send messages in queue, on indicated time slot
                if message._id == Message.ID.BROADCAST_DATA:
                    time.sleep(0.1)
                    _logger.debug("Got broadcast data, examine queue to see if we should send anything back")
                    if self._message_queue_cond.acquire(blocking=False):
                        while len(self._message_queue) > 0:
                            m = self._message_queue.popleft()
                            self.write_message(m)
                            _logger.debug(" - sent message from queue, %r", m)
                            # Stop after one message unless mid-burst (bit 7
                            # of byte 0 marks the last burst packet).
                            if(m._id != Message.ID.BURST_TRANSFER_DATA or \
                               m._data[0] & 0b10000000):# or m._data[0] == 0):
                                break
                        else:
                            _logger.debug(" - no messages in queue")
                        self._message_queue_cond.release()
                self._last_data = message._data
            except usb.USBError as e:
                _logger.warning("%s, %r", type(e), e.args)
        _logger.debug("Ant runner stopped")
    def _main(self):
        """Dispatch loop: drain self._events and invoke the callbacks."""
        while self._running:
            try:
                (event_type, event) = self._events.get(True, 1.0)
                self._events.task_done()
                (channel, event, data) = event
                if event_type == 'response':
                    self.response_function(channel, event, data)
                elif event_type == 'event':
                    self.channel_event_function(channel, event, data)
                else:
                    _logger.warning("Unknown message typ '%s': %r", event_type, event)
            except Queue.Empty as e:
                pass
    def write_message_timeslot(self, message):
        """Queue *message* to be sent on the next broadcast timeslot."""
        with self._message_queue_cond:
            self._message_queue.append(message)
    def write_message(self, message):
        """Write *message* to the driver immediately."""
        data = message.get()
        self._driver.write(data)
        _logger.debug("Write data: %s", format_list(data))
    def read_message(self):
        """Block until a complete frame is available and return it parsed."""
        while self._running:
            # If we have a message in buffer already, return it
            if len(self._buffer) >= 5 and len(self._buffer) >= self._buffer[1] + 4:
                packet = self._buffer[:self._buffer[1] + 4]
                self._buffer = self._buffer[self._buffer[1] + 4:]
                return Message.parse(packet)
            # Otherwise, read some data and call the function again
            else:
                data = self._driver.read()
                self._buffer.extend(data)
                if data:
                    _logger.debug("Read data: %s (now have %s in buffer)",
                                  format_list(data), format_list(self._buffer))
    # Ant functions
    def unassign_channel(self, channel):
        """Not implemented."""
        pass
    def assign_channel(self, channel, channelType, networkNumber):
        """Assign *channel* with the given type and network number."""
        message = Message(Message.ID.ASSIGN_CHANNEL, [channel, channelType, networkNumber])
        self.write_message(message)
    def open_channel(self, channel):
        """Open a previously assigned channel."""
        message = Message(Message.ID.OPEN_CHANNEL, [channel])
        self.write_message(message)
    def set_channel_id(self, channel, deviceNum, deviceType, transmissionType):
        """Set the channel id (device number/type, transmission type)."""
        data = array.array('B', struct.pack("<BHBB", channel, deviceNum, deviceType, transmissionType))
        message = Message(Message.ID.SET_CHANNEL_ID, data)
        self.write_message(message)
    def set_channel_period(self, channel, messagePeriod):
        """Set the channel message period (16-bit, device units)."""
        data = array.array('B', struct.pack("<BH", channel, messagePeriod))
        message = Message(Message.ID.SET_CHANNEL_PERIOD, data)
        self.write_message(message)
    def set_channel_search_timeout(self, channel, timeout):
        """Set the channel search timeout."""
        message = Message(Message.ID.SET_CHANNEL_SEARCH_TIMEOUT, [channel, timeout])
        self.write_message(message)
    def set_channel_rf_freq(self, channel, rfFreq):
        """Set the channel RF frequency offset."""
        message = Message(Message.ID.SET_CHANNEL_RF_FREQ, [channel, rfFreq])
        self.write_message(message)
    def set_network_key(self, network, key):
        """Set the network key; *key* is a list of key bytes."""
        message = Message(Message.ID.SET_NETWORK_KEY, [network] + key)
        self.write_message(message)
    # This function is a bit of a mystery. It is mentioned in libgant,
    # http://sportwatcher.googlecode.com/svn/trunk/libgant/gant.h and is
    # also sent from the official ant deamon on windows.
    def set_search_waveform(self, channel, waveform):
        """Set the (undocumented) search waveform; *waveform* is a byte list."""
        message = Message(Message.ID.SET_SEARCH_WAVEFORM, [channel] + waveform)
        self.write_message(message)
    def reset_system(self):
        """Reset the device and wait _RESET_WAIT seconds for it to restart."""
        message = Message(Message.ID.RESET_SYSTEM, [0x00])
        self.write_message(message)
        time.sleep(self._RESET_WAIT)
    def request_message(self, channel, messageId):
        """Request a specific message (the reply arrives via the callbacks)."""
        message = Message(Message.ID.REQUEST_MESSAGE, [channel, messageId])
        self.write_message(message)
    def send_acknowledged_data(self, channel, data):
        """Queue an 8-byte acknowledged payload for the next timeslot."""
        assert len(data) == 8
        message = Message(Message.ID.ACKNOWLEDGE_DATA,
                          array.array('B', [channel]) + data)
        self.write_message_timeslot(message)
    def send_burst_transfer_packet(self, channel_seq, data, first):
        """Queue one 8-byte burst packet; *channel_seq* packs channel+sequence."""
        assert len(data) == 8
        message = Message(Message.ID.BURST_TRANSFER_DATA,
                          array.array('B', [channel_seq]) + data)
        self.write_message_timeslot(message)
    def send_burst_transfer(self, channel, data):
        """Split *data* (multiple of 8 bytes) into sequenced burst packets."""
        assert len(data) % 8 == 0
        _logger.debug("Send burst transfer, chan %s, data %s", channel, data)
        # NOTE(review): integer division under Python 2; would need // if
        # this dialect were ever ported to Python 3.
        packets = len(data) / 8
        for i in range(packets):
            # Sequence cycles 0,1,2,3,1,2,3,... with bit 2 set on the last.
            sequence = ((i - 1) % 3) + 1
            if i == 0:
                sequence = 0
            elif i == packets - 1:
                sequence = sequence | 0b100
            channel_seq = channel | sequence << 5
            packet_data = data[i * 8:i * 8 + 8]
            _logger.debug("Send burst transfer, packet %d, seq %d, data %s", i, sequence, packet_data)
            self.send_burst_transfer_packet(channel_seq, packet_data, first=i==0)
    def response_function(self, channel, event, data):
        """Callback hook for responses; replaced by the owner."""
        pass
    def channel_event_function(self, channel, event, data):
        """Callback hook for channel events; replaced by the owner."""
        pass
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/base/ant.py",
"copies": "1",
"size": "12100",
"license": "mit",
"hash": 8895778030284559000,
"line_mean": 39.0662251656,
"line_max": 109,
"alpha_frac": 0.555785124,
"autogenerated": false,
"ratio": 4.245614035087719,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004000467308344818,
"num_lines": 302
} |
import collections
import threading
import logging
import Queue
from ant.base.ant import Ant
from ant.base.message import Message
from ant.easy.channel import Channel
from ant.easy.filter import wait_for_event, wait_for_response, wait_for_special
_logger = logging.getLogger("garmin.ant.easy.node")
class Node():
    """High-level ANT node built on ant.base.Ant (Python 2 dialect).

    Owns the Channel objects and a worker thread running the low-level
    message loop; broadcast/burst data is queued and dispatched to the
    owning channel from _main().
    """
    def __init__(self):
        """Create the queues, open the ANT device and start its worker."""
        self._responses_cond = threading.Condition()
        self._responses = collections.deque()
        self._event_cond = threading.Condition()
        self._events = collections.deque()
        # Broadcast/burst payloads waiting to be dispatched by _main().
        self._datas = Queue.Queue()
        self.channels = {}
        self.ant = Ant()
        self._running = True
        self._worker_thread = threading.Thread(target=self._worker, name="ant.easy")
        self._worker_thread.start()
    def new_channel(self, ctype):
        """Create, register and assign a channel of type *ctype*.

        NOTE(review): the channel number is hard-coded to 0, so only one
        channel is supported — a second call replaces the first.
        """
        channel = Channel(0, self, self.ant)
        self.channels[0] = channel
        channel._assign(ctype, 0x00)
        return channel
    def request_message(self, messageId):
        """Request *messageId* on channel 0 and wait for the reply."""
        _logger.debug("requesting message %#02x", messageId)
        self.ant.request_message(0, messageId)
        _logger.debug("done requesting message %#02x", messageId)
        return self.wait_for_special(messageId)
    def set_network_key(self, network, key):
        """Set the network key and wait for the device to confirm."""
        self.ant.set_network_key(network, key)
        return self.wait_for_response(Message.ID.SET_NETWORK_KEY)
    def wait_for_event(self, ok_codes):
        """Block until a channel event with a code in *ok_codes* arrives."""
        return wait_for_event(ok_codes, self._events, self._event_cond)
    def wait_for_response(self, event_id):
        """Block until a RESPONSE_NO_ERROR reply to *event_id* arrives."""
        return wait_for_response(event_id, self._responses, self._responses_cond)
    def wait_for_special(self, event_id):
        """Block until a special (requested) message *event_id* arrives."""
        return wait_for_special(event_id, self._responses, self._responses_cond)
    def _worker_response(self, channel, event, data):
        """Ant callback: store a response and wake any waiter."""
        self._responses_cond.acquire()
        self._responses.append((channel, event, data))
        self._responses_cond.notify()
        self._responses_cond.release()
    def _worker_event(self, channel, event, data):
        """Ant callback: route data events to _datas, others to _events."""
        if event == Message.Code.EVENT_RX_BURST_PACKET:
            self._datas.put(('burst', channel, data))
        elif event == Message.Code.EVENT_RX_BROADCAST:
            self._datas.put(('broadcast', channel, data))
        else:
            self._event_cond.acquire()
            self._events.append((channel, event, data))
            self._event_cond.notify()
            self._event_cond.release()
    def _worker(self):
        """Thread body: hook our callbacks into Ant and run its loop."""
        self.ant.response_function = self._worker_response
        self.ant.channel_event_function = self._worker_event
        # TODO: check capabilities
        self.ant.start()
    def _main(self):
        """Dispatch loop: hand queued broadcast/burst data to its channel."""
        while self._running:
            try:
                (data_type, channel, data) = self._datas.get(True, 1.0)
                self._datas.task_done()
                if data_type == 'broadcast':
                    self.channels[channel].on_broadcast_data(data)
                elif data_type == 'burst':
                    self.channels[channel].on_burst_data(data)
                else:
                    _logger.warning("Unknown data type '%s': %r", data_type, data)
            except Queue.Empty as e:
                pass
    def start(self):
        """Run the dispatch loop in the calling thread until stop()."""
        self._main()
    def stop(self):
        """Stop the dispatch loop, the underlying Ant, and join the worker."""
        if self._running:
            _logger.debug("Stoping ant.easy")
            self._running = False
            self.ant.stop()
            self._worker_thread.join()
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/easy/node.py",
"copies": "1",
"size": "4653",
"license": "mit",
"hash": 4643246813690936000,
"line_mean": 35.0697674419,
"line_max": 84,
"alpha_frac": 0.6303460133,
"autogenerated": false,
"ratio": 4.0673076923076925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5197653705607692,
"avg_score": null,
"num_lines": null
} |
import collections
import threading
import logging
from ant.base.message import Message
from ant.easy.exception import TransferFailedException
from ant.easy.filter import wait_for_event, wait_for_response, wait_for_special
_logger = logging.getLogger("garmin.ant.easy.channel")
class Channel():
    """One ANT channel: thin wrappers around the low-level Ant commands,
    each waiting for the device's confirmation before returning."""
    class Type:
        """ANT channel type codes (assignment parameter)."""
        BIDIRECTIONAL_RECEIVE = 0x00
        BIDIRECTIONAL_TRANSMIT = 0x10
        SHARED_BIDIRECTIONAL_RECEIVE = 0x20
        SHARED_BIDIRECTIONAL_TRANSMIT = 0x30
        UNIDIRECTIONAL_RECEIVE_ONLY = 0x40
        UNIDIRECTIONAL_TRANSMIT_ONLY = 0x50
    def __init__(self, id, node, ant):
        # id: channel number; node: owning Node (holds the wait queues);
        # ant: low-level Ant instance commands are sent through.
        self.id = id
        self._node = node
        self._ant = ant
    def wait_for_event(self, ok_codes):
        """Block until a channel event with a code in *ok_codes* arrives."""
        return wait_for_event(ok_codes, self._node._events, self._node._event_cond)
    def wait_for_response(self, event_id):
        """Block until a RESPONSE_NO_ERROR reply to *event_id* arrives."""
        return wait_for_response(event_id, self._node._responses, self._node._responses_cond)
    def wait_for_special(self, event_id):
        """Block until a special (requested) message *event_id* arrives."""
        return wait_for_special(event_id, self._node._responses, self._node._responses_cond)
    def _assign(self, channelType, networkNumber):
        """Assign this channel and wait for confirmation."""
        self._ant.assign_channel(self.id, channelType, networkNumber)
        return self.wait_for_response(Message.ID.ASSIGN_CHANNEL)
    def _unassign(self):
        """Not implemented."""
        pass
    def open(self):
        """Open the channel and wait for confirmation."""
        self._ant.open_channel(self.id)
        return self.wait_for_response(Message.ID.OPEN_CHANNEL)
    def set_id(self, deviceNum, deviceType, transmissionType):
        """Set the channel id and wait for confirmation."""
        self._ant.set_channel_id(self.id, deviceNum, deviceType, transmissionType)
        return self.wait_for_response(Message.ID.SET_CHANNEL_ID)
    def set_period(self, messagePeriod):
        """Set the channel message period and wait for confirmation."""
        self._ant.set_channel_period(self.id, messagePeriod)
        return self.wait_for_response(Message.ID.SET_CHANNEL_PERIOD)
    def set_search_timeout(self, timeout):
        """Set the channel search timeout and wait for confirmation."""
        self._ant.set_channel_search_timeout(self.id, timeout)
        return self.wait_for_response(Message.ID.SET_CHANNEL_SEARCH_TIMEOUT)
    def set_rf_freq(self, rfFreq):
        """Set the channel RF frequency and wait for confirmation."""
        self._ant.set_channel_rf_freq(self.id, rfFreq)
        return self.wait_for_response(Message.ID.SET_CHANNEL_RF_FREQ)
    def set_search_waveform(self, waveform):
        """Set the search waveform and wait for confirmation."""
        self._ant.set_search_waveform(self.id, waveform)
        return self.wait_for_response(Message.ID.SET_SEARCH_WAVEFORM)
    def request_message(self, messageId):
        """Request *messageId* on this channel and wait for the reply."""
        _logger.debug("requesting message %#02x", messageId)
        self._ant.request_message(self.id, messageId)
        _logger.debug("done requesting message %#02x", messageId)
        return self.wait_for_special(messageId)
    def send_acknowledged_data(self, data):
        """Send 8 bytes of acknowledged data, retrying on failure.

        NOTE(review): retries forever by recursing on each failure — there
        is no retry bound.
        """
        try:
            _logger.debug("send acknowledged data %s", self.id)
            self._ant.send_acknowledged_data(self.id, data)
            self.wait_for_event([Message.Code.EVENT_TRANSFER_TX_COMPLETED])
            _logger.debug("done sending acknowledged data %s", self.id)
        except TransferFailedException:
            _logger.warning("failed to send acknowledged data %s, retrying", self.id)
            self.send_acknowledged_data(data)
    def send_burst_transfer_packet(self, channelSeq, data, first):
        """Queue one burst packet via the low-level Ant."""
        _logger.debug("send burst transfer packet %s", data)
        self._ant.send_burst_transfer_packet(channelSeq, data, first)
    def send_burst_transfer(self, data):
        """Send *data* as a burst transfer, retrying whole transfer on failure.

        NOTE(review): like send_acknowledged_data, retries by unbounded
        recursion.
        """
        try:
            #self._last_call = (self.send_burst_transfer, [self.id, data])
            _logger.debug("send burst transfer %s", self.id)
            self._ant.send_burst_transfer(self.id, data)
            self.wait_for_event([Message.Code.EVENT_TRANSFER_TX_START])
            self.wait_for_event([Message.Code.EVENT_TRANSFER_TX_COMPLETED])
            _logger.debug("done sending burst transfer %s", self.id)
        except TransferFailedException:
            _logger.warning("failed to send burst transfer %s, retrying", self.id)
            self.send_burst_transfer(data)
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/easy/channel.py",
"copies": "1",
"size": "5112",
"license": "mit",
"hash": 7199680817698983000,
"line_mean": 41.2479338843,
"line_max": 93,
"alpha_frac": 0.6852503912,
"autogenerated": false,
"ratio": 3.6566523605150216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9737771975599276,
"avg_score": 0.020826155223149054,
"num_lines": 121
} |
import logging
from ant.base.message import Message
from ant.easy.exception import AntException, TransferFailedException
_logger = logging.getLogger("garmin.ant.easy.filter")
def wait_for_message(match, process, queue, condition):
    """
    Wait for a specific message in the *queue* guarded by the *condition*
    matching the function *match* (which is a function that takes a
    message as a parameter and returns a boolean). The message is
    processed by the *process* function before returning it.

    Raises TransferFailedException if a TX-failed channel event is seen
    first, and AntException after ~100 one-second waits with no match.
    """
    _logger.debug("wait for message matching %r", match)
    with condition:
        for _ in range(100):
            _logger.debug("looking for matching message in %r", queue)
            for candidate in queue:
                if match(candidate):
                    _logger.debug(" - response found %r", candidate)
                    queue.remove(candidate)
                    return process(candidate)
                failed = (candidate[1] == 1 and
                          candidate[2][0] == Message.Code.EVENT_TRANSFER_TX_FAILED)
                if failed:
                    _logger.warning("Transfer send failed:")
                    _logger.warning(candidate)
                    queue.remove(candidate)
                    raise TransferFailedException()
            _logger.debug(" - could not find response matching %r", match)
            condition.wait(1.0)
    raise AntException("Timed out while waiting for message")
def wait_for_event(ok_codes, queue, condition):
    """Wait for a channel event whose first data byte is in *ok_codes*.

    Python 3 removed tuple parameters (PEP 3113), so the (channel, event,
    data) triple is unpacked inside the helper bodies instead of in the
    parameter list; behaviour is unchanged on Python 2.
    """
    def match(message):
        channel, event, data = message
        return data[0] in ok_codes
    def process(message):
        channel, event, data = message
        return (channel, event, data)
    return wait_for_message(match, process, queue, condition)
def wait_for_response(event_id, queue, condition):
    """
    Waits for a response to a specific message sent by the channel response
    message, 0x40. It's expected to return RESPONSE_NO_ERROR, 0x00.

    Raises a generic Exception naming the error code otherwise.  Tuple
    parameters were removed in Python 3 (PEP 3113), so the triple is
    unpacked inside the helpers; behaviour is unchanged on Python 2.
    """
    def match(message):
        channel, event, data = message
        return event == event_id
    def process(message):
        channel, event, data = message
        if data[0] == Message.Code.RESPONSE_NO_ERROR:
            return (channel, event, data)
        else:
            raise Exception("Responded with error " + str(data[0])
                    + ":" + Message.Code.lookup(data[0]))
    return wait_for_message(match, process, queue, condition)
def wait_for_special(event_id, queue, condition):
    """
    Waits for special responses to messages such as Channel ID, ANT
    Version, etc. This does not throw any exceptions, besides timeouts.

    Tuple parameters were removed in Python 3 (PEP 3113), so the triple is
    unpacked inside the helper; behaviour is unchanged on Python 2.
    """
    def match(message):
        channel, event, data = message
        return event == event_id
    def process(event):
        return event
    return wait_for_message(match, process, queue, condition)
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/easy/filter.py",
"copies": "1",
"size": "3914",
"license": "mit",
"hash": 1324127365280454700,
"line_mean": 42.010989011,
"line_max": 77,
"alpha_frac": 0.673479816,
"autogenerated": false,
"ratio": 4.177161152614728,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004833486534253857,
"num_lines": 91
} |
import struct
class Beacon:
    """Decoded ANT-FS client beacon (8-byte broadcast payload)."""

    class ClientDeviceState:
        """Client state encoded in the low nibble of the second status byte."""
        LINK = 0x00 # 0b0000
        AUTHENTICATION = 0x01 # 0b0001
        TRANSPORT = 0x02 # 0b0010
        BUSY = 0x03 # 0b0011

    BEACON_ID = 0x43

    def is_data_available(self):
        """Whether the client advertises data ready for download (bit 5)."""
        return (self._status_byte_1 & 0x20) != 0 # 0b00100000

    def is_upload_enabled(self):
        """Whether the client accepts uploads (bit 4)."""
        return (self._status_byte_1 & 0x10) != 0 # 0b00010000

    def is_pairing_enabled(self):
        """Whether the client allows pairing (bit 3)."""
        return (self._status_byte_1 & 0x08) != 0 # 0b00001000

    def get_channel_period(self):
        """Channel-period field: the low three bits of status byte 1."""
        return self._status_byte_1 & 0x07 # 0b00000111, TODO

    def get_client_device_state(self):
        """Client state nibble (see ClientDeviceState)."""
        return self._status_byte_2 & 0x0f # 0b00001111, TODO

    def get_serial(self):
        """Descriptor interpreted as one little-endian 32-bit serial."""
        (serial,) = struct.unpack("<I", self._descriptor)
        return serial

    def get_descriptor(self):
        """Descriptor interpreted as two little-endian 16-bit fields."""
        return struct.unpack("<HH", self._descriptor)

    @staticmethod
    def parse(data):
        """Decode an 8-byte beacon payload into a Beacon instance."""
        beacon_id, status1, status2, auth_type = struct.unpack("<BBBB4x", data)
        assert beacon_id == 0x43
        beacon = Beacon()
        beacon._status_byte_1 = status1
        beacon._status_byte_2 = status2
        beacon._authentication_type = auth_type
        beacon._descriptor = data[4:]
        return beacon
| {
"repo_name": "ddboline/Garmin-Forerunner-610-Extractor_fork",
"path": "ant/fs/beacon.py",
"copies": "1",
"size": "2371",
"license": "mit",
"hash": -8777183386375832000,
"line_mean": 33.8676470588,
"line_max": 77,
"alpha_frac": 0.6773513286,
"autogenerated": false,
"ratio": 3.7875399361022364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49648912647022364,
"avg_score": null,
"num_lines": null
} |
import collections
import threading
import logging
try:
# Python 3
import queue
except ImportError:
# Python 2
import Queue as queue
from ant.base.ant import Ant
from ant.base.message import Message
from ant.easy.channel import Channel
from ant.easy.filter import wait_for_event, wait_for_response, wait_for_special
_logger = logging.getLogger("ant.easy.node")
class Node:
    """
    High-level ANT node: owns the low-level `Ant` device, the channels
    created on it, and a worker thread that pumps device messages into
    per-kind queues consumed by the wait_for_* helpers.
    """

    def __init__(self):
        # Response/event deques plus their condition variables; the
        # ant.easy.filter helpers scan and prune these under the lock.
        self._responses_cond = threading.Condition()
        self._responses = collections.deque()
        self._event_cond = threading.Condition()
        self._events = collections.deque()

        # Data messages (broadcast/burst/ack) drained by _main().
        self._datas = queue.Queue()

        self.channels = {}

        self.ant = Ant()

        self._running = True

        self._worker_thread = threading.Thread(target=self._worker, name="ant.easy")
        self._worker_thread.start()

    def new_channel(self, ctype, network_number=0x00, ext_assign=None):
        """Create the next channel, assign it on the device and return it."""
        size = len(self.channels)
        channel = Channel(size, self, self.ant)
        self.channels[size] = channel
        channel._assign(ctype, network_number, ext_assign)
        return channel

    def request_message(self, messageId):
        """Request *messageId* from the device and wait for its reply."""
        _logger.debug("requesting message %#02x", messageId)
        self.ant.request_message(0, messageId)
        _logger.debug("done requesting message %#02x", messageId)
        return self.wait_for_special(messageId)

    def set_network_key(self, network, key):
        """Set the network key and wait for the device acknowledgement."""
        self.ant.set_network_key(network, key)
        return self.wait_for_response(Message.ID.SET_NETWORK_KEY)

    def wait_for_event(self, ok_codes):
        return wait_for_event(ok_codes, self._events, self._event_cond)

    def wait_for_response(self, event_id):
        return wait_for_response(event_id, self._responses, self._responses_cond)

    def wait_for_special(self, event_id):
        return wait_for_special(event_id, self._responses, self._responses_cond)

    def _worker_response(self, channel, event, data):
        # Called from the device thread: queue the response, wake waiters.
        self._responses_cond.acquire()
        self._responses.append((channel, event, data))
        self._responses_cond.notify()
        self._responses_cond.release()

    def _worker_event(self, channel, event, data):
        # Data-carrying events go to the _datas queue for _main(); any
        # other code is a plain channel event for wait_for_event().
        if event == Message.Code.EVENT_RX_BURST_PACKET:
            self._datas.put(("burst", channel, data))
        elif event == Message.Code.EVENT_RX_BROADCAST:
            self._datas.put(("broadcast", channel, data))
        elif event == Message.Code.EVENT_TX:
            self._datas.put(("broadcast_tx", channel, data))
        elif event == Message.Code.EVENT_RX_ACKNOWLEDGED:
            self._datas.put(("acknowledge", channel, data))
        else:
            self._event_cond.acquire()
            self._events.append((channel, event, data))
            self._event_cond.notify()
            self._event_cond.release()

    def _worker(self):
        self.ant.response_function = self._worker_response
        self.ant.channel_event_function = self._worker_event

        # TODO: check capabilities
        self.ant.start()

    def _main(self):
        # Dispatch loop: hand each queued data message to its channel.
        while self._running:
            try:
                (data_type, channel, data) = self._datas.get(True, 1.0)
                self._datas.task_done()

                if data_type == "broadcast":
                    self.channels[channel].on_broadcast_data(data)
                elif data_type == "burst":
                    self.channels[channel].on_burst_data(data)
                elif data_type == "broadcast_tx":
                    self.channels[channel].on_broadcast_tx_data(data)
                elif data_type == "acknowledge":
                    self.channels[channel].on_acknowledge_data(data)
                else:
                    _logger.warning("Unknown data type '%s': %r", data_type, data)
            except queue.Empty:
                # The 1s timeout only exists to re-check self._running.
                pass

    def start(self):
        """Run the dispatch loop on the calling thread until stop()."""
        self._main()

    def stop(self):
        """Stop the dispatch loop and join the device worker thread."""
        if self._running:
            _logger.debug("Stopping ant.easy")  # typo fix: was "Stoping"
            self._running = False
            self.ant.stop()
            self._worker_thread.join()
| {
"repo_name": "Tigge/openant",
"path": "ant/easy/node.py",
"copies": "1",
"size": "5190",
"license": "mit",
"hash": 4628207201333385000,
"line_mean": 35.2937062937,
"line_max": 84,
"alpha_frac": 0.638150289,
"autogenerated": false,
"ratio": 3.9984591679506933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5136609456950693,
"avg_score": null,
"num_lines": null
} |
import logging
from ant.base.message import Message
from ant.easy.exception import AntException, TransferFailedException
_logger = logging.getLogger("ant.easy.filter")


def wait_for_message(match, process, queue, condition):
    """
    Wait for a specific message in the *queue* guarded by the *condition*
    matching the function *match* (which is a function that takes a
    message as a parameter and returns a boolean). The message is
    processed by the *process* function before returning it.

    Messages are (channel, event, data) tuples.  Retries up to 10 waits
    of 1.0 s before raising AntException; a transfer-failure event for a
    non-matching message raises TransferFailedException.
    """
    _logger.debug("wait for message matching %r", match)
    # `with` guarantees the condition lock is released even if match()
    # or process() raises — the original manual acquire()/release() pair
    # leaked the lock on any exception inside the loop.  Note process()
    # now runs while holding the lock; it is expected to be cheap.
    with condition:
        for _ in range(10):
            _logger.debug("looking for matching message in %r", queue)
            for message in queue:
                if match(message):
                    _logger.debug(" - response found %r", message)
                    queue.remove(message)
                    return process(message)
                elif message[1] == 1 and message[2][0] in [
                    Message.Code.EVENT_TRANSFER_TX_FAILED,
                    Message.Code.EVENT_RX_FAIL_GO_TO_SEARCH,
                ]:
                    _logger.warning("Transfer send failed:")
                    _logger.warning(message)
                    queue.remove(message)
                    raise TransferFailedException()
            _logger.debug(" - could not find response matching %r", match)
            condition.wait(1.0)
    raise AntException("Timed out while waiting for message")
def wait_for_event(ok_codes, queue, condition):
    """Block until a channel event whose first data byte is in *ok_codes*."""
    def is_ok(message):
        _, _, payload = message
        return payload[0] in ok_codes

    return wait_for_message(is_ok, lambda message: message, queue, condition)
def wait_for_response(event_id, queue, condition):
    """
    Block until the channel-response message (0x40) for *event_id*
    arrives; a code other than RESPONSE_NO_ERROR raises an Exception.
    """
    def match(params):
        return params[1] == event_id

    def process(params):
        code = params[2][0]
        if code != Message.Code.RESPONSE_NO_ERROR:
            raise Exception(
                "Responded with error "
                + str(code)
                + ":"
                + Message.Code.lookup(code)
            )
        return params

    return wait_for_message(match, process, queue, condition)
def wait_for_special(event_id, queue, condition):
    """
    Block until the special (non-0x40) reply *event_id* arrives — e.g.
    Channel ID or ANT Version.  Raises nothing except a timeout.
    """
    same_event = lambda params: params[1] == event_id
    identity = lambda params: params
    return wait_for_message(same_event, identity, queue, condition)
| {
"repo_name": "Tigge/openant",
"path": "ant/easy/filter.py",
"copies": "1",
"size": "4086",
"license": "mit",
"hash": 2913082721724383000,
"line_mean": 34.8421052632,
"line_max": 77,
"alpha_frac": 0.6566324033,
"autogenerated": false,
"ratio": 4.260688216892596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5417320620192596,
"avg_score": null,
"num_lines": null
} |
import logging
from ant.base.message import Message
from ant.easy.exception import TransferFailedException
from ant.easy.filter import wait_for_event, wait_for_response, wait_for_special
_logger = logging.getLogger("ant.easy.channel")
class Channel:
    """
    A single ANT channel on a node.  Thin wrapper around the low-level
    `Ant` driver: each configuration method issues the corresponding ANT
    message and then blocks on the node's queues until the matching
    response arrives.
    """

    class Type:
        # ANT channel type codes.
        BIDIRECTIONAL_RECEIVE = 0x00
        BIDIRECTIONAL_TRANSMIT = 0x10
        SHARED_BIDIRECTIONAL_RECEIVE = 0x20
        SHARED_BIDIRECTIONAL_TRANSMIT = 0x30
        UNIDIRECTIONAL_RECEIVE_ONLY = 0x40
        UNIDIRECTIONAL_TRANSMIT_ONLY = 0x50

    def __init__(self, id, node, ant):
        self.id = id
        self._node = node
        self._ant = ant

    def wait_for_event(self, ok_codes):
        """Block until one of *ok_codes* shows up in the node's event queue."""
        return wait_for_event(ok_codes, self._node._events, self._node._event_cond)

    def wait_for_response(self, event_id):
        """Block until the channel response for message *event_id* arrives."""
        return wait_for_response(
            event_id, self._node._responses, self._node._responses_cond
        )

    def wait_for_special(self, event_id):
        """Block until the special (non-response) reply *event_id* arrives."""
        return wait_for_special(
            event_id, self._node._responses, self._node._responses_cond
        )

    def _assign(self, channelType, networkNumber, ext_assign):
        self._ant.assign_channel(self.id, channelType, networkNumber, ext_assign)
        return self.wait_for_response(Message.ID.ASSIGN_CHANNEL)

    def _unassign(self):
        # Not implemented.
        pass

    def open(self):
        self._ant.open_channel(self.id)
        return self.wait_for_response(Message.ID.OPEN_CHANNEL)

    def open_rx_scan_mode(self):
        self._ant.open_rx_scan_mode()
        return self.wait_for_response(Message.ID.OPEN_RX_SCAN_MODE)

    def close(self):
        self._ant.close_channel(self.id)
        return self.wait_for_response(Message.ID.CLOSE_CHANNEL)

    def set_id(self, deviceNum, deviceType, transmissionType):
        self._ant.set_channel_id(self.id, deviceNum, deviceType, transmissionType)
        return self.wait_for_response(Message.ID.SET_CHANNEL_ID)

    def set_period(self, messagePeriod):
        self._ant.set_channel_period(self.id, messagePeriod)
        return self.wait_for_response(Message.ID.SET_CHANNEL_PERIOD)

    def set_search_timeout(self, timeout):
        self._ant.set_channel_search_timeout(self.id, timeout)
        return self.wait_for_response(Message.ID.SET_CHANNEL_SEARCH_TIMEOUT)

    def set_rf_freq(self, rfFreq):
        self._ant.set_channel_rf_freq(self.id, rfFreq)
        return self.wait_for_response(Message.ID.SET_CHANNEL_RF_FREQ)

    def enable_extended_messages(self, enable):
        self._ant.enable_extended_messages(self.id, enable)
        return self.wait_for_response(Message.ID.ENABLE_EXT_RX_MESGS)

    def set_search_waveform(self, waveform):
        self._ant.set_search_waveform(self.id, waveform)
        return self.wait_for_response(Message.ID.SET_SEARCH_WAVEFORM)

    def request_message(self, messageId):
        """Request *messageId* on this channel and wait for the reply."""
        _logger.debug("requesting message %#02x", messageId)
        self._ant.request_message(self.id, messageId)
        _logger.debug("done requesting message %#02x", messageId)
        return self.wait_for_special(messageId)

    def send_broadcast_data(self, data):
        """Fire-and-forget broadcast; no completion event is awaited."""
        _logger.debug("send broadcast data %s", self.id)
        self._ant.send_broadcast_data(self.id, data)

    def send_acknowledged_data(self, data):
        """
        Send *data* as an acknowledged message, retrying until the
        transfer completes.  Retries with a loop rather than recursion so
        a persistently failing link cannot exhaust the call stack (the
        original retried by calling itself).
        """
        while True:
            try:
                _logger.debug("send acknowledged data %s", self.id)
                self._ant.send_acknowledged_data(self.id, data)
                self.wait_for_event([Message.Code.EVENT_TRANSFER_TX_COMPLETED])
                _logger.debug("done sending acknowledged data %s", self.id)
                return
            except TransferFailedException:
                _logger.warning("failed to send acknowledged data %s, retrying", self.id)

    def send_burst_transfer_packet(self, channelSeq, data, first):
        _logger.debug("send burst transfer packet %s", data)
        self._ant.send_burst_transfer_packet(channelSeq, data, first)

    def send_burst_transfer(self, data):
        """
        Send *data* as a burst transfer, retrying until it completes.
        Loop instead of recursion for the same stack-safety reason as
        send_acknowledged_data().
        """
        while True:
            try:
                _logger.debug("send burst transfer %s", self.id)
                self._ant.send_burst_transfer(self.id, data)
                self.wait_for_event([Message.Code.EVENT_TRANSFER_TX_START])
                self.wait_for_event([Message.Code.EVENT_TRANSFER_TX_COMPLETED])
                _logger.debug("done sending burst transfer %s", self.id)
                return
            except TransferFailedException:
                _logger.warning("failed to send burst transfer %s, retrying", self.id)
| {
"repo_name": "Tigge/openant",
"path": "ant/easy/channel.py",
"copies": "1",
"size": "5602",
"license": "mit",
"hash": 6322932716671687000,
"line_mean": 39.5942028986,
"line_max": 85,
"alpha_frac": 0.6847554445,
"autogenerated": false,
"ratio": 3.609536082474227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4794291526974227,
"avg_score": null,
"num_lines": null
} |
import struct
class Beacon:
    """Parsed ANT-FS client beacon payload (8 bytes, ID byte 0x43)."""

    class ClientDeviceState:
        # Low nibble of status byte 2.
        LINK = 0x00            # 0b0000
        AUTHENTICATION = 0x01  # 0b0001
        TRANSPORT = 0x02       # 0b0010
        BUSY = 0x03            # 0b0011

    BEACON_ID = 0x43

    def __init__(self, status_byte_1, status_byte_2, authentication_type, descriptor):
        self._status_byte_1 = status_byte_1
        self._status_byte_2 = status_byte_2
        self._authentication_type = authentication_type
        self._descriptor = descriptor

    def is_data_available(self):
        # Bit 5 (0b00100000) of status byte 1.
        return bool(0x20 & self._status_byte_1)

    def is_upload_enabled(self):
        # Bit 4 (0b00010000) of status byte 1.
        return bool(0x10 & self._status_byte_1)

    def is_pairing_enabled(self):
        # Bit 3 (0b00001000) of status byte 1.
        return bool(0x08 & self._status_byte_1)

    def get_channel_period(self):
        # Low three bits of status byte 1.  TODO
        return 0x07 & self._status_byte_1

    def get_client_device_state(self):
        # Low nibble of status byte 2.  TODO
        return 0x0F & self._status_byte_2

    def get_serial(self):
        # Descriptor reinterpreted as one little-endian 32-bit serial.
        serial, = struct.unpack("<I", self._descriptor)
        return serial

    def get_descriptor(self):
        # Descriptor as two little-endian 16-bit halves.
        return struct.unpack("<HH", self._descriptor)

    @staticmethod
    def parse(data):
        """Build a Beacon from an 8-byte payload, checking the ID byte."""
        beacon_id, first_status, second_status, auth_type = struct.unpack(
            "<BBBB4x", data
        )
        assert beacon_id == Beacon.BEACON_ID
        return Beacon(first_status, second_status, auth_type, data[4:])
| {
"repo_name": "Tigge/openant",
"path": "ant/fs/beacon.py",
"copies": "1",
"size": "2548",
"license": "mit",
"hash": -7227692724954946000,
"line_mean": 34.8873239437,
"line_max": 86,
"alpha_frac": 0.6911302983,
"autogenerated": false,
"ratio": 3.7142857142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49054160125857144,
"avg_score": null,
"num_lines": null
} |
## Antony Philip - 01/10/2016 - Developed for Teaching
## ACSL 2012-2013 - Cells
def cell_divide(cell):
    """
    Duplicate the first four and the last four characters of *cell* and
    join the two doubled halves with ' and '.
    """
    head = cell[:4]   # at most the first 4 characters
    tail = cell[-4:]  # at most the last 4 characters
    # '{0}{0}' replicates each half, exactly like head + head.
    return '{0}{0} and {1}{1}'.format(head, tail)
def cell_addn(cell, n):
    """
    Replicate the first n characters of *cell*, prepend them, and delete
    the last n characters (keeping the overall length constant).

    Fixes the n == 0 edge case: the original returned '' because
    s[:-0] is the empty slice; zero bits added/removed must leave the
    cell unchanged.
    """
    if n == 0:
        return cell
    return (cell[:n] + cell)[:-n]
def cell_subtractn(cell, n):
    """
    Delete the first n characters of *cell* and append a replica of the
    last n characters (keeping the overall length constant).

    Fixes the n == 0 edge case: the original returned the cell doubled
    because cell[-0:] is the whole string; zero bits subtracted must
    leave the cell unchanged.
    """
    if n == 0:
        return cell
    return cell[n:] + cell[-n:]
"""
Main program starts here - Get 5 operations and cells to operate on
"""
ops = [] ## Gather all the operations (5 each)
cells = [] ## Gather all the cells to operate on
for i in range(5) :
userinput = raw_input()
data = userinput.split(', ')
ops.append(data[0].strip())
cells.append(data[1].strip())
for i in range(5) :
op = ops[i]
cell = cells[i]
result = ''
if op == 'DIVIDE' :
result = cell_divide(cell)
elif op.startswith('ADD'):
n = int(op[-1:])
result = cell_addn(cell, n)
elif op.startswith('SUBTRACT'):
n = int(op[-1:])
result = cell_subtractn(cell, n)
print result
"""
Results of execution input/output
>>>
DIVIDE, ABBCDFGG
ADD3, ABBCDFGG
SUBTRACT3, ABBCDFGG
SUBTRACT3, GGABBCDF
ADD3, GGABBCDF
ABBCABBC and DFGGDFGG
ABBABBCD
CDFGGFGG
BBCDFCDF
GGAGGABB
>>>
"""
| {
"repo_name": "antonyrp/teach-acsl",
"path": "contest2/cells.py",
"copies": "1",
"size": "1981",
"license": "mit",
"hash": -1310818381106106400,
"line_mean": 25.7702702703,
"line_max": 87,
"alpha_frac": 0.6158505805,
"autogenerated": false,
"ratio": 3.4037800687285222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4519630649228522,
"avg_score": null,
"num_lines": null
} |
## Antony Philip - 01/10/2016 - Developed for Teaching
## ACSL 2013-2014 - PRINT FORMATTING
def acsl_format(format_str, value_str):
    """
    Format *value_str* according to the ACSL picture *format_str* and
    return the result.  Picture characters: '&' digit placeholder
    ('*'-filled when unused), ',' thousands separator, '$' currency
    ('*$' star-fills up to the dollar sign), trailing '-' sign position.
    (Only change from the original: stray semicolons removed.)
    """
    pre_formatted_value = []  ## Array in which we accumulate the formatted value
    ## Lets get the length of the passed in strings, we need to process
    ## them from the back to front as the formatting is applied to the rightmost place first
    f_len = len(format_str) - 1
    v_len = len(value_str) - 1
    is_negative = False  ## Indicate whether the number is -ve or +ve
    if value_str[0] == '-':
        is_negative = True
    ## Define flags for special formatting requirements and initialize them
    add_comma = False
    add_dollar = False
    add_dollar_with_star_fill = False
    add_sign = False
    """
    Variable 'k' traverses the value string and i traverses the
    format string, both move from back to front
    Check the format string character value at position i
    1) &, if there are no more value characters available (k < 0)
    or if we have '-' sign in value at k=0 fill the place with, '*'
    else fill it with value character and decrement k, we used up 1 now
    2) ,. Mark it as we need to fill with , every third character from right
    3) $ only mark it as we are adding dollar with no * fills,
    *$ we fill with * on left of dollar
    4) -, we need to add a sign character at the end of number
    At the end of this processing, pre_formatted_value will have as
    many characters as format string length with value string in right
    and * filled to the left
    """
    k = v_len
    for i in range(f_len, -1, -1):
        x = format_str[i]  ## Current format character
        if x == '&':
            if (k < 0) or ((k == 0) and is_negative):
                pre_formatted_value.insert(0, '*')
            else:  ## i.e. (k > 0) or ((k == 0) and not is_negative)
                pre_formatted_value.insert(0, value_str[k])
                k = k - 1
        elif x == ',':
            add_comma = True
        elif x == '$':
            add_dollar = True
        elif x == '*' and add_dollar:  ## Check, if previous character is dollar
            add_dollar = False  ## Make the flag as add_dollar_with_star_fill
            add_dollar_with_star_fill = True
        elif x == '-':
            add_sign = True
    """
    Now we need to apply additional formatting to the string
    as we gathered earlier and generate a new array post_formatted_value
    i traverses from right to left on pre_formatted_value
    comma_pos is every third position from the left
    If we are the last place (first character in processing) and we need
    to add sign bit then add that
    If we are at comma_pos and we need to add comma then we add comma
    and set the next expected comma position (-3)
    If we have reached the fillers '*' then if we need to add $ we add that
    and exit the loop - because we no longer need to add anything
    If we need to add $ with fillers we add filler and reset the flag so
    that default condition of adding whatever in the pre_formatted_value
    will take care
    """
    post_formatted_value = []
    a_len = len(pre_formatted_value) - 1
    comma_pos = a_len - 3
    for i in range(a_len, -1, -1):
        x = pre_formatted_value[i]
        if add_sign and i == a_len:
            if is_negative:
                post_formatted_value.insert(0, '-')
            else:
                post_formatted_value.insert(0, '*')
        if add_comma and i == comma_pos and x != '*':
            post_formatted_value.insert(0, ',')
            comma_pos = comma_pos - 3
        if x == '*':
            if add_dollar:
                post_formatted_value.insert(0, '$')
                break
            elif add_dollar_with_star_fill:
                post_formatted_value.insert(0, '$')
                add_dollar_with_star_fill = False
        post_formatted_value.insert(0, x)
    ## Make a string with array of characters and return it to the caller
    return "".join(post_formatted_value)
"""
Main program starts here...
We read in 5 format strings and the value to be formatted and store them in arrays
[As per new guidelines ', ' (comma followed by blank) seperated, no numbering]
Then we call the acsl_format method defined above on these format and values and
print the formatted string
"""
format_string = []  # the five format pictures
value_string = []   # the five raw values to format
# Read five "FORMAT, VALUE" lines (Python 2: raw_input).
for i in range(5):
    userinput = raw_input()
    data = userinput.split(', ')
    format_string.append(data[0].strip())
    value_string.append(data[1].strip())
# Format and print each value using its picture.
for i in range(5):
    formatted_value = acsl_format(format_string[i], value_string[i])
    print formatted_value  # Python 2 print statement
"""
Results of execution input/output
>>>
&&&&&&, 456
&&&&&&,&, 1000000
$&&&&, 123
*$&&&&, 123
&&&&-, -123
***456
1,000,000
$123
*$123
*123-
>>>
"""
| {
"repo_name": "antonyrp/teach-acsl",
"path": "contest2/print_formatting.py",
"copies": "1",
"size": "5416",
"license": "mit",
"hash": -3929356945103358500,
"line_mean": 37.1408450704,
"line_max": 92,
"alpha_frac": 0.5781019202,
"autogenerated": false,
"ratio": 3.9161243673174257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49942262875174254,
"avg_score": null,
"num_lines": null
} |
## Antony Philip (01/17/2016) - Developed for Teaching
## ACSL 2011-2012 - Search
def find_match(pattern, value):
    """
    Return True when *pattern* (with wildcards) matches *value*.

    '?' matches exactly one character; '*' lets the matcher skip any
    number of value characters until the next pattern character matches.
    NOTE(review): only the pattern must be fully consumed, not the value —
    per the sample output below, '*ELL' matches 'BELLS' even though the
    trailing 'S' is never consumed.
    """
    p_len = len(pattern)
    v_len = len(value)
    i = 0  ## index over pattern
    j = 0  ## index over value
    lookaheadset = False  ## Lookahead is set when we encounter a '*'
    matched = True
    while j < v_len and i < p_len and matched:  ## As long there is match and we have characters
        p = pattern[i]
        v = value[j]
        if p == '?' or v == p:  ## Exact match or single-char wildcard: advance both
            i = i + 1
            j = j + 1
            lookaheadset = False  ## Look ahead is no longer in place because we might have matched char after '*'
        elif p == '*':
            lookaheadset = True  ## Set the lookahead to true to enable skipping
            i = i + 1
        elif lookaheadset:  ## Simply skip the value characters until one matches
            j = j + 1
        else:
            matched = False  ## No match
    if i < p_len:  ## Ran out of value characters before exhausting the pattern
        matched = False
    return matched
def match(pattern, stringdb):
    """
    Return every string in *stringdb* matched by *pattern*, or the
    single-element list ['No Match'] when nothing matches.
    """
    hits = [candidate for candidate in stringdb
            if find_match(pattern, candidate)]
    return hits if hits else ['No Match']
"""
Main program starts here
"""
search_data = []
patterns = []
## First get the search data / strings in the first line
## Comma separated strings
userinput = raw_input()
data = userinput.split(', ')
for d in data:
search_data.append(d.strip())
## Next 5 lines will contain the pattern matcher to use for searching
## in separate lines
for i in range(5) :
userinput = raw_input()
patterns.append(userinput.strip())
## We process them one at a time
for i in range(5) :
pattern = patterns[i]
matched_list = match(pattern, search_data)
print ', '.join(matched_list) ## Print the sequence as a string
"""
Results of execution input/output
>>>
BELLS, TELLS, DOLLS, DOLLIES
B*
D?LLS
*IES
?ELL
*ELL
BELLS
DOLLS
DOLLIES
BELLS, TELLS
BELLS, TELLS
>>>
"""
| {
"repo_name": "antonyrp/teach-acsl",
"path": "contest2/search.py",
"copies": "1",
"size": "2572",
"license": "mit",
"hash": 6328735338947809000,
"line_mean": 27.5888888889,
"line_max": 114,
"alpha_frac": 0.5948678072,
"autogenerated": false,
"ratio": 3.8388059701492536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4933673777349254,
"avg_score": null,
"num_lines": null
} |
## Antony Philip - 01/17/2016 - Initial version
## ACSL 2011-2012 - Bits
"""
Prefixes of a string are all substrings starting at 0.
For example prefix of a string 'ABCDE' are
['', 'A', 'AB','ABC', 'ABCD' , 'ABCDE']
Key observation in this problem is when we encounter
a '*' we replicate the prefix and add '0' and '1' to it
If it is not '*' we simply add the bit to the prefix.
"""
def process_prefix_for_asterix(prefix):
    """
    Expand a '*' by branching *prefix* into both possible bit endings:
    returns the two-element list [prefix+'0', prefix+'1'].
    """
    return [prefix + bit for bit in ('0', '1')]
def process_prefix_for_bit(prefix, bit):
    """
    Append the literal *bit* character to *prefix*; wrapped in a
    one-element list so it composes with the '*' expansion.
    """
    extended = prefix + bit
    return [extended]
def process_prefix_for_bit_or_asterix(prefix, b):
    """
    Dispatch on *b*: a '*' branches the prefix both ways, any other
    character is appended literally.
    """
    if b != '*':
        return process_prefix_for_bit(prefix, b)
    return process_prefix_for_asterix(prefix)
"""
Main program starts here - Get 5 bit expressions with *'s to work on
"""
bit_expressions = [] ## Gather all the expressions require processing
## We gather all the 5 given bit string inputs
for i in range(5) :
userinput = raw_input()
bit_expressions.append(userinput.strip())
## We process them one at a time
for i in range(5) :
bit_expr = bit_expressions[i] ## Get the current expression to process
results = [] ## Results of processing this particular bit expression
for b in bit_expr : ## for each bit (0 or 1) or '*' in the expression
if not results : ## Simple way to check whether a sequence [] is empty
results = process_prefix_for_bit_or_asterix('', b) ## We process assuming prefix is ''
else :
result_for_bit = [] ## Processed result for this particular bit
for r in results : ## process each of the prefix strings in results
result_for_bit = result_for_bit + process_prefix_for_bit_or_asterix(r, b)
results = result_for_bit ## This is the new results going forth
print ', '.join(results) ## Simple way to combine a sequence of strings as one string for printing
"""
Results of execution input/output
>>>
1*001
1*1*1
1**11
*1111
*111*
10001, 11001
10101, 10111, 11101, 11111
10011, 10111, 11011, 11111
01111, 11111
01110, 01111, 11110, 11111
>>>
"""
| {
"repo_name": "antonyrp/teach-acsl",
"path": "contest2/bits.py",
"copies": "1",
"size": "2582",
"license": "mit",
"hash": -7938319823977481000,
"line_mean": 32.1025641026,
"line_max": 102,
"alpha_frac": 0.6324554609,
"autogenerated": false,
"ratio": 3.7044476327116214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48369030936116214,
"avg_score": null,
"num_lines": null
} |
## Antony Philip - 01/18/2016 - Developed for Teaching
## ACSL 2014-2015 - LISP Expressions
class LispExpr(object):
    """
    Base class for LISP expressions.  An expression is either an Atom
    (single character here) or a parenthesised list of expressions —
    Atom is the recursion's base case, ListT the recursive case.
    Provides the static entry point for parsing a quoted expression.
    """

    def __init__(self):
        pass

    @staticmethod
    def parse(x):
        """Parse a quoted LISP expression string, returning the LispExpr."""
        quote_at = x.find("'")  # parsing starts just after the quote
        wrapper = ListT()       # temporary list that collects the result
        wrapper.parse(x, quote_at + 1)
        return wrapper.car()    # the parsed expression is its only member
class Atom(LispExpr):
    """
    Atom is derived class which is also a LispExpr (lisp expression)
    which has a single member value - Typically character or integer
    """
    def __init__(self, v):
        # v: the atom's payload; ListT.parse() passes a single character.
        self.value = v
    def __str__(self):
        # Atoms print as their bare value, with no parentheses.
        return self.value
class ListT(LispExpr):
    """
    ListT is a derived class which is also a LispExpr defined to
    capture LISP list.

    NOTE(review): car()/last() return LispExpr objects, while cdr() and
    exceptlast() return already-quoted *strings* — callers print them
    directly, so confirm before changing either to return a ListT.
    """
    def __init__(self):
        self.data = []  ## List contains a sequence of LispExpr objects
    def add(self, d):
        self.data.append(d)  ## Add a LispExpr to the list (Atom or ListT)
    def __str__(self):  ## String representation with parenthesis for printing
        return '(' + ''.join( map(lambda x : x.__str__(), (self.data))) + ')'
    def car(self):  ## First element of the list
        return self.data[0]
    def cdr(self):  ## Remaining elements of list, as a quoted string
        return '\'(' + ''.join( map(lambda x : x.__str__(), (self.data[1:]))) + ')'
    def last(self):  ## Last element of list
        return self.data[-1]
    def exceptlast(self):  ## All elements except the last one, as a quoted string
        return '\'(' + ''.join( map(lambda x : x.__str__(), (self.data[:-1]))) + ')'
    def nlists(self):  ## Number of list (ListT) elements in this list
        return sum(type(x) == ListT for x in self.data)
    def parse(self, s, i):
        ## Parse the list expression in s starting at index i; returns the
        ## index just past the portion consumed (so recursive calls can
        ## resume where the nested list ended).
        l = len(s)
        while (i < l):
            if s[i].isspace():  ## Skip spaces
                i = i + 1
            elif s[i].isalnum():  ## Add alphanumeric characters as Atoms
                self.add(Atom(s[i]))
                i = i + 1
            elif s[i] == '(':  ## We are starting to parse a new list to add to the current one
                v = ListT()
                i = v.parse(s, i + 1)  ## Recurse call to parse the enclosed list and update the index
                self.add(v)
            elif s[i] == ')':  ## Recursive call ended
                i = i + 1
                break
        return i  ## Return how many characters are parsed so far or current index position
"""
Main program starts here - Get 5 lisp expressions to work on
"""
lisp_expressions = [] ## Gather all the expressions which require processing
## We gather all the 5 given expressions
for i in range(5) :
userinput = raw_input()
lisp_expressions.append(userinput.strip())
## We process them one at a time
le = lisp_expressions[0]
print LispExpr.parse(le).car() ## We print the car of the first expression
le = lisp_expressions[1]
print LispExpr.parse(le).cdr() ## We print the cdr of the second expression
le = lisp_expressions[2] ## We print the last of the third expression
print LispExpr.parse(le).last()
le = lisp_expressions[3] ## We print everything except the last one for the 4th
print LispExpr.parse(le).exceptlast()
le = lisp_expressions[4] ## We print the number of lists in the 5 th expression
print LispExpr.parse(le).nlists()
"""
Results of execution input/output
>>>
'(A B C D)
'((A B C D E) 3)
'(A B (C D) E)
'((A B) (C D) E (F G))
'((A) (B C D) E F (G H I J))
A
'(3)
E
'((AB)(CD)E)
3
>>>
"""
| {
"repo_name": "antonyrp/teach-acsl",
"path": "contest2/lisp_expr.py",
"copies": "1",
"size": "4408",
"license": "mit",
"hash": 5432339831124417000,
"line_mean": 33.4375,
"line_max": 106,
"alpha_frac": 0.567831216,
"autogenerated": false,
"ratio": 3.7772065124250216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4845037728425021,
"avg_score": null,
"num_lines": null
} |
## Antony Philip (02/07/2016) - Developed for Teaching
## ACSL - 2014-2015 - ACSL Isola
class Board(object):
    """
    ACSL Isola board: a *rows* x *cols* grid of tiles numbered
    1..rows*cols from bottom-left to top-right.  _values[r][c] is True
    while the tile is present and False once it has been removed.

    Change from the original: the four near-identical directional scan
    loops are deduplicated into one _scan() helper, and the
    non-idiomatic `== True` comparisons are gone.  Public interface and
    results are unchanged.
    """

    def __init__(self, rows, cols, plus, cross, removedTiles):
        """
        rows, cols   -- board dimensions (7 x 7 for Isola)
        plus, cross  -- tile numbers of the two players
        removedTiles -- iterable of tile numbers already off the board
        """
        self._nRows = rows
        self._nCols = cols
        # Every tile starts out present; bools are immutable, so the
        # row-multiplication shortcut is safe.
        self._values = [[True] * cols for _ in range(rows)]
        self._plus = plus
        self._cross = cross
        for tile in removedTiles:
            self.RemoveTile(tile)

    def Plus(self):
        """Tile number of the Plus player."""
        return self._plus

    def Cross(self):
        """Tile number of the Cross player."""
        return self._cross

    def RemoveTile(self, tileNumber):
        """Take *tileNumber* off the board."""
        self._values[self.Row(tileNumber)][self.Column(tileNumber)] = False

    def Move(self, playerTileNumber, opponentTileNumber):
        """
        Longest straight run of open tiles from the player's position.
        Directions are tried in the order up, down, left, right; ties go
        to the first direction that reaches the maximum length.
        """
        nRow = self.Row(playerTileNumber)
        nCol = self.Column(playerTileNumber)
        allMoves = [
            self.UpMove(nRow, nCol, opponentTileNumber),
            self.DownMove(nRow, nCol, opponentTileNumber),
            self.LeftMove(nRow, nCol, opponentTileNumber),
            self.RightMove(nRow, nCol, opponentTileNumber),
        ]
        maxLength = max([len(x) for x in allMoves])
        return next((x for x in allMoves if len(x) == maxLength), [])

    def TileNumber(self, row, col):
        """Tile number (1-based) for 0-based (row, col)."""
        return self._nCols * row + col + 1

    def Row(self, tileNumber):
        """Row index (0-based; tiles are numbered bottom to top)."""
        return (tileNumber - 1) // self._nCols

    def Column(self, tileNumber):
        """Column index (0-based) of *tileNumber*."""
        return (tileNumber - 1) % self._nCols

    def _scan(self, nRow, nCol, dRow, dCol, opponentTileNumber):
        """
        Shared walker behind the four directional moves: collect tile
        numbers stepping by (dRow, dCol) from (nRow, nCol) until the
        edge, a removed tile, or the opponent blocks the path.
        """
        moves = []
        i = nRow + dRow
        j = nCol + dCol
        while (0 <= i < self._nRows and 0 <= j < self._nCols
               and self._values[i][j]
               and self.TileNumber(i, j) != opponentTileNumber):
            moves.append(self.TileNumber(i, j))
            i += dRow
            j += dCol
        return moves

    def UpMove(self, nRow, nCol, opponentTileNumber):
        """Run of open tiles moving north (increasing row)."""
        return self._scan(nRow, nCol, 1, 0, opponentTileNumber)

    def DownMove(self, nRow, nCol, opponentTileNumber):
        """Run of open tiles moving south (decreasing row)."""
        return self._scan(nRow, nCol, -1, 0, opponentTileNumber)

    def RightMove(self, nRow, nCol, opponentTileNumber):
        """Run of open tiles moving east (increasing column)."""
        return self._scan(nRow, nCol, 0, 1, opponentTileNumber)

    def LeftMove(self, nRow, nCol, opponentTileNumber):
        """Run of open tiles moving west (decreasing column)."""
        return self._scan(nRow, nCol, 0, -1, opponentTileNumber)
## Main script (NOTE: Python 2 only - raw_input and print statements).
## Each input line is: plus tile, cross tile, then removed tiles terminated by a 0.
boardConfigs = []
## Read in All the 5 board configurations
for i in range(5) :
    userinput = raw_input()
    data = userinput.split(', ')
    plus = int(data[0].strip())
    cross = int(data[1].strip())
    removedTiles = []
    for tile in data[2:]:
        x = tile.strip()
        if x == '0' :  ## '0' acts as the end-of-list sentinel for removed tiles
            break
        removedTiles.append(int(x))
    board = Board(7, 7, plus, cross, removedTiles)  ## 7x7 Isola board
    boardConfigs.append(board)
## Find the best move for Cross player
for board in boardConfigs :
    moves = board.Move(board.Cross(), board.Plus())
    if (not moves) :
        print "NONE"
    else :
        print ', '.join([str(x) for x in moves])
"""
Results of execution input/output
>>>
40, 42, 41, 49, 0
33, 32, 39, 25, 0
21, 29, 32, 43, 0
28, 26, 33, 25, 19, 0
25, 26, 33, 27, 19, 0
35, 28, 21, 14, 7
31, 30, 29
22, 15, 8, 1
27
NONE
>>>
"""
| {
"repo_name": "antonyrp/teach-acsl",
"path": "contest3/acsl_isola.py",
"copies": "1",
"size": "5701",
"license": "mit",
"hash": -8391748332474909000,
"line_mean": 30.5027624309,
"line_max": 100,
"alpha_frac": 0.5393790563,
"autogenerated": false,
"ratio": 3.88881309686221,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.492819215316221,
"avg_score": null,
"num_lines": null
} |
## Antony Philip (02/14/2016) - Developed for Teaching
## ACSL 2011-2012 - ACSL Grid Fit
class Board(object):
    """
    A rows x cols grid (5 x 5 for the ACSL Grid Fit problem) whose
    squares are labelled 1..rows*cols from the bottom row upward.
    A cell value of True means the square is occupied.
    User-facing labels are 1-based; the internal matrix is 0-based.
    """
    def __init__(self, rows, cols, filledGrids):
        """Create the board and occupy every square listed in filledGrids."""
        self._nRows = rows
        self._nCols = cols
        self._values = [[False] * self._nCols for _ in range(self._nRows)]
        for g in filledGrids:
            self.FillGrid(g)
    def FillGrid(self, gridNumber):
        """Occupy one square; expects a label in 1..rows*cols (no validation)."""
        self._values[self.Row(gridNumber)][self.Column(gridNumber)] = True
    def FitPattern(self, pattern):
        """Dispatch to the matching fit routine (1, 2 or 3); None otherwise."""
        handlers = {1: self.FitPattern1, 2: self.FitPattern2, 3: self.FitPattern3}
        handler = handlers.get(pattern)
        if handler is not None:
            return handler()
    def FitPattern1(self):
        """Place a single square at the lowest free label; return it, or -1."""
        for num in range(1, self._nRows * self._nCols + 1):
            r = self.Row(num)
            c = self.Column(num)
            if not self._values[r][c]:
                self._values[r][c] = True
                return num
        return -1
    def FitPattern2(self):
        """Place a vertical two-square piece at the lowest free label; -1 if impossible."""
        for num in range(1, self._nRows * self._nCols + 1):
            r = self.Row(num)
            c = self.Column(num)
            if (not self._values[r][c] and r + 1 < self._nRows
                    and not self._values[r + 1][c]):
                self._values[r][c] = True
                self._values[r + 1][c] = True
                return num
        return -1
    def FitPattern3(self):
        """Place a horizontal two-square piece at the lowest free label; -1 if impossible."""
        for num in range(1, self._nRows * self._nCols + 1):
            r = self.Row(num)
            c = self.Column(num)
            if (not self._values[r][c] and c + 1 < self._nCols
                    and not self._values[r][c + 1]):
                self._values[r][c] = True
                self._values[r][c + 1] = True
                return num
        return -1
    def GridNumber(self, row, col):
        """1-based label of the square at 0-based (row, col)."""
        return row * self._nCols + col + 1
    def Row(self, gridNumber):
        """0-based row index of a 1-based label."""
        return (gridNumber - 1) // self._nCols
    def Column(self, gridNumber):
        """0-based column index of a 1-based label."""
        return (gridNumber - 1) % self._nCols
## Main program starts here.. (NOTE: Python 2 only - raw_input / print statements)
## We need to read-in the initial grid squares which are already filled
## and construct the board to work with
userinput = raw_input()
data = userinput.split(', ')
if len(data) < 1 :
    print "Error"
nFilledSquares = int(data[0].strip())
filledSquares = [int(x.strip()) for x in data[1:]] ## List comprehension again!
if len(filledSquares) < 1 :
    print "Filled squares data given don't match the count given"
board = Board(5,5, filledSquares)
## Read in all the fill patterns for processing
fill_patterns = []
for i in range(5) :
    data = raw_input()
    fill_patterns.append(int(data.strip()))
## Fit the specified pattern to the board, if possible
## (board state persists between fits, so each answer builds on the last)
for pattern in fill_patterns :
    position = board.FitPattern(pattern)
    if position == -1 :
        print "NONE"
    else :
        print position
"""
Results of execution input/output
>>>
8, 1, 3, 5, 6, 7, 8, 10, 13
1
1
2
3
2
2
4
9
11
15
>>>
""" | {
"repo_name": "antonyrp/teach-acsl",
"path": "contest3/acsl_grid_fit.py",
"copies": "1",
"size": "4939",
"license": "mit",
"hash": 1962320094708977000,
"line_mean": 29.875,
"line_max": 112,
"alpha_frac": 0.5480866572,
"autogenerated": false,
"ratio": 4.065020576131687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.99717448246948,
"avg_score": 0.028272481727377348,
"num_lines": 160
} |
## Antony Philip (02/14/2016) - Developed for Teaching
## ACSL 2012-2013 - ACSL Chess Queen
class Board(object):
    """
    Square board (5 x 5 for the ACSL Chess Queen problem) tracking
    which cells a queen can reach.  User rows/columns are 1-based;
    the internal matrix is 0-based.
    """
    def __init__(self, rows, cols):
        """Create an empty board: False means the cell is not attacked."""
        self._nRows = rows
        self._nCols = cols
        self._values = [[False] * self._nCols for _ in range(self._nRows)]
    def MarkQueenAndItsRange(self, row, col, nsteps):
        """
        Mark the queen's own square plus every square within nsteps of
        it along its row, its column and both diagonals.
        """
        r0 = row - 1  # convert 1-based user coordinates to 0-based
        c0 = col - 1
        # one (dr, dc) per line of attack: horizontal, vertical, both diagonals
        directions = ((0, 1), (1, 0), (1, -1), (1, 1))
        for step in range(-nsteps, nsteps + 1):
            for dr, dc in directions:
                r = r0 + dr * step
                c = c0 + dc * step
                if 0 <= r < self._nRows and 0 <= c < self._nCols:
                    self._values[r][c] = True
    def GetUnMarkedCount(self):
        """Return the number of squares the queen cannot reach."""
        return sum(row.count(False) for row in self._values)
## Main program starts here (NOTE: Python 2 only - raw_input / print statements)
## As usual, read in 5 sets of data into variables and then process them it later
queen_rows = []
queen_cols = []
steps = []
## Read in All the 5 board configurations
## Each line is: queen row, queen column, number of steps
for i in range(5) :
    userinput = raw_input()
    data = userinput.split(', ')
    if len(data) < 3 :
        print 'Data error'
        break
    queen_rows.append(int(data[0].strip()))
    queen_cols.append(int(data[1].strip()))
    steps.append(int(data[2].strip()))
## A fresh 5x5 board is used for each configuration
for i in range(5) :
    board = Board(5,5)
    board.MarkQueenAndItsRange(queen_rows[i], queen_cols[i], steps[i])
    cnt = board.GetUnMarkedCount()
    print cnt
"""
Results of execution input/output
>>>
3, 3, 2
4, 1, 1
5, 3, 2
5, 4, 3
2, 3, 1
8
19
14
13
16
>>>
""" | {
"repo_name": "antonyrp/teach-acsl",
"path": "contest3/acsl_chess_queen.py",
"copies": "1",
"size": "3322",
"license": "mit",
"hash": 8062865132213596000,
"line_mean": 32.23,
"line_max": 131,
"alpha_frac": 0.552077062,
"autogenerated": false,
"ratio": 3.6030368763557483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.948469703658277,
"avg_score": 0.03408338035459573,
"num_lines": 100
} |
## Antony Philip (02/15/2016) - Developed for Teaching
## ACSL - 2010-2011 - ACSL Mancala
class Board(object):
    """
    Mancala board: 12 bowls of 4 stones plus one mancala per player.
    Player A owns bowls 1..6 (mancala A sits after bowl 6); player B
    owns bowls 7..12 (mancala B sits after bowl 12).  Bowl numbers
    are 1-based; the internal list is 0-based.
    """
    def __init__(self):
        """Start every bowl with 4 stones and both mancalas empty."""
        self._bowls = [4] * 12
        self._mancalaA = 0
        self._mancalaB = 0
    def Play(self, bowl):
        """
        Sow all stones from the chosen 1-based bowl, one per bowl in
        order, dropping an extra stone into the sowing player's own
        mancala as it is passed (the opponent's mancala is skipped).
        """
        src = bowl - 1            # 0-based source bowl
        pos = src + 1             # sowing starts at the adjacent bowl
        while self._bowls[src] > 0:
            target = pos % 12     # wrap around the 12 bowls
            if target == 6 and src < 6:
                # player A passes mancala A (between bowls 6 and 7)
                self._bowls[src] -= 1
                self._mancalaA += 1
            elif target == 0 and src >= 6:
                # player B passes mancala B (between bowls 12 and 1)
                self._bowls[src] -= 1
                self._mancalaB += 1
            if self._bowls[src] > 0:
                # place the next stone in the current bowl
                self._bowls[src] -= 1
                self._bowls[target] += 1
            pos += 1
    def NumberOfStones(self, bowl):
        """Return the stone count in the given 1-based bowl."""
        return self._bowls[bowl - 1]
## We gather the starting bowl number and
## the bowl number whose value we need to print
## (NOTE: Python 2 only - raw_input / print statements)
starting_bowl_number = []
print_bowl_number = []
## Construct a new Mancala board
board = Board()
## Read in All the 5 board configurations
for i in range(5) :
    userinput = raw_input()
    data = userinput.split(', ')
    if len(data) < 2 :
        print 'Data error'
        break
    starting_bowl_number.append(int(data[0].strip()))
    print_bowl_number.append(int(data[1].strip()))
## Play for each of the bowls and print the
## stone count at the specified bowl
## (the single board object carries its state across all 5 plays)
for i in range(5) :
    bowl = starting_bowl_number[i]
    cnt_bowl = print_bowl_number[i]
    board.Play(bowl)
    print board.NumberOfStones(cnt_bowl)
"""
Results of execution input/output
>>>
4, 6
12, 2
6, 12
8, 7
2, 4
5
5
0
6
1
>>>
""" | {
"repo_name": "antonyrp/teach-acsl",
"path": "contest3/acsl_mancala.py",
"copies": "1",
"size": "2784",
"license": "mit",
"hash": -6566919280171965000,
"line_mean": 30.6477272727,
"line_max": 107,
"alpha_frac": 0.5610632184,
"autogenerated": false,
"ratio": 3.2791519434628977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9222928586680921,
"avg_score": 0.023457315036395535,
"num_lines": 88
} |
## Antony Philip - 02/28/2016 - Developed in hacker-rank platform
## Normal distribution
from math import *
class normdist(object):
    """
    Normal distribution with mean mu and standard deviation sigma,
    providing pdf/cdf via a rational (Horner-form) approximation of
    the complementary error function.
    """
    def __init__(self, mu, sigma):
        """Store the distribution parameters."""
        self._mu = mu
        self._sigma = sigma
    def cdf(self, x):
        """Cumulative distribution P(X <= x), clamped to at most 1.0."""
        z = (x - float(self._mu)) / float(self._sigma)
        p = 0.5 * self.erfc(-z / sqrt(2.0))
        return min(p, 1.0)
    def pdf(self, x):
        """Probability density function evaluated at x."""
        u = (x - float(self._mu)) / float(self._sigma)
        return exp(-u * u / 2.0) / (sqrt(2.0 * pi) * float(self._sigma))
    def erfc(self, x):
        """Complementary error function (Horner-scheme approximation)."""
        z = abs(x)
        t = 1. / (1. + 0.5 * z)
        r = t * exp(-z * z - 1.26551223 + t * (1.00002368 + t * (.37409196 +
            t * (.09678418 + t * (-.18628806 + t * (.27886807 +
            t * (-1.13520398 + t * (1.48851587 + t * (-.82215223 +
            t * .17087277)))))))))
        # the approximation is for x >= 0; use erfc(-x) = 2 - erfc(x)
        return r if x >= 0. else 2. - r
## Example queries (NOTE: Python 2 print statements):
## P(X<=40), P(X>=21) and P(30<=X<=35) for X ~ N(30, 4)
n = normdist(30, 4)
a1 = n.cdf(40)
a2 = 1 - n.cdf(21)
a3 = n.cdf(35) - n.cdf(30)
print '%0.3f'% a1
print '%0.3f'% a2
print '%0.3f'% a3
| {
"repo_name": "antonyrp/misc-code",
"path": "normdist.py",
"copies": "1",
"size": "1246",
"license": "mit",
"hash": -152035282924132770,
"line_mean": 23.92,
"line_max": 68,
"alpha_frac": 0.4526484751,
"autogenerated": false,
"ratio": 2.756637168141593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37092856432415927,
"avg_score": null,
"num_lines": null
} |
## Antony Philip - 03/19/2016 - Developed for teaching
## ACSL Encryption 2010-2011
def CodeNumber(c):
    """
    Map a capital letter or blank to its code number:
    'A' -> 1 ... 'Z' -> 26 and ' ' -> 27.

    Relies on ord() preserving alphabet order, so the offset from
    'A' plus one is the code number.
    """
    return 27 if c == ' ' else ord(c) - ord('A') + 1
def CodeCharacter(n):
    """
    Inverse of CodeNumber: 1 -> 'A' ... 26 -> 'Z'; any multiple of 27
    (including 0) maps to ' '.  n is reduced modulo 27 first, so
    arbitrary non-negative dot products are accepted.
    """
    n %= 27
    if n == 0:
        return ' '
    return chr(ord('A') + n - 1)
def CodeVector(s):
    """
    Convert a short string into its code-number vector:
    'MA' -> [13, 1] because M -> 13 and A -> 1.
    """
    return [CodeNumber(ch) for ch in s]
def CodeVectors(s):
    """
    Split a string into consecutive length-2 code vectors, padding
    with a trailing blank when the length is odd:
    'MATH ' -> [[13, 1], [20, 8], [27, 27]].
    """
    if len(s) % 2:
        s += ' '
    return [CodeVector(s[i:i + 2]) for i in range(0, len(s), 2)]
def DotProduct(a, b):
    """
    Dot product of two equal-length numeric vectors:
    [0, 1] . [13, 1] -> 0*13 + 1*1 -> 1.
    """
    total = 0
    for i in range(len(a)):
        total += a[i] * b[i]
    return total
def EncodingVectors(v):
    """
    Fold a flat even-length sequence into consecutive pairs so it can
    serve as rows of a 2x2 encoding matrix:
    [0, 1, 1, 1] -> [[0, 1], [1, 1]].
    """
    return [list(v[i:i + 2]) for i in range(0, len(v), 2)]
def Encode(s, v):
    """
    Matrix-encrypt string s with the 2x2 encoding matrix given as the
    flat sequence v: 'MATH ROCKS', [0, 1, 1, 1] -> 'ANHARRCRSC'.
    Each message pair is multiplied by each matrix row and the dot
    products are mapped back to characters.
    """
    message = CodeVectors(s)
    rows = EncodingVectors(v)
    return ''.join(CodeCharacter(DotProduct(row, pair))
                   for pair in message for row in rows)
"""
Main Program Starts Here -
Read in 5 Lines of input Each line contain an alpha string
and 4 positive integers representing 2x2 encoding matrix
"""
## Main loop (NOTE: Python 2 only - raw_input, print statements,
## and map() returning a list rather than an iterator)
results = []
for i in range(5) :
    userinput = raw_input()
    data = userinput.split(', ')
    s = data[0].strip()                   ## the plaintext word
    v = map(lambda x : int(x), data[1:])  ## flat 2x2 encoding matrix
    results.append(Encode(s, v))
## Print the encoded words
for r in results :
    print r
"""
Results of execution input/output
>>>
MATH ROCKS, 0, 1, 1, 1
COMPUTER, 1, 1, 3, 2
MATH ROCKS, 0, 1, 1, 1
COMPUTER, 1, 1, 3, 2
MATH ROCKS, 0, 1, 1, 1
ANHARRCRSC
RLBQNVWX
ANHARRCRSC
RLBQNVWX
ANHARRCRSC
>>>
""" | {
"repo_name": "antonyrp/teach-acsl",
"path": "contest4/acsl_matrix_encryption.py",
"copies": "1",
"size": "3282",
"license": "mit",
"hash": -668750606430569100,
"line_mean": 25.0555555556,
"line_max": 75,
"alpha_frac": 0.5627666057,
"autogenerated": false,
"ratio": 3.3592630501535314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44220296558535316,
"avg_score": null,
"num_lines": null
} |
## Antony Philip - 03/30/2016 - Developed for ACSL teaching
## ACSL 2011-2012 - Infix to Prefix (PIP)
from curses.ascii import isspace
class Node:
    """
    Binary expression-tree node: a single-character value (operator or
    variable) plus optional left/right subtrees, both None for a leaf.
    """
    def __init__(self, value):
        """Create a leaf node holding value."""
        self._value = value
        self._left = None
        self._right = None
    def SetLeft(self, node):
        """Attach node as the left subtree."""
        self._left = node
    def SetRight(self, node):
        """Attach node as the right subtree."""
        self._right = node
class Tree:
    """
    Expression tree rooted at a Node-like object, offering a pre-order
    traversal plus small static operator helpers.
    """
    def __init__(self, root):
        """Remember the root node of the tree."""
        self._root = root
    def PreOrder(self):
        """
        Return the pre-order traversal of the tree as a string.
        The accumulator lives on the instance so the recursive worker
        can append to it.
        """
        self._traversal = []
        self.DoPreOrder(self._root)
        return ''.join(self._traversal)
    def DoPreOrder(self, node):
        """Recursive worker for PreOrder: value first, then left, then right."""
        if node is None:
            return
        self._traversal.append(node._value)
        self.DoPreOrder(node._left)
        self.DoPreOrder(node._right)
    @staticmethod
    def IsOperator(c):
        """Return True when c is one of the supported binary operators."""
        return c in ('+', '-', '*')
    @staticmethod
    def Compare(c1, c2):
        """Precedence comparison: 0 when equal, 1 when c1 binds tighter ('*'), else -1."""
        if c1 == c2:
            return 0
        return 1 if c1 == '*' else -1
class Stack:
    """Minimal LIFO stack backed by a Python list."""
    def __init__(self):
        """Start with an empty backing list."""
        self._stack = []
    def Push(self, elem):
        """Add elem on top of the stack."""
        self._stack.append(elem)
    def Pop(self):
        """Remove and return the top element, or None when empty."""
        return self._stack.pop() if self._stack else None
    def Peek(self):
        """Return the top element without removing it, or None when empty."""
        return self._stack[-1] if self._stack else None
    def Print(self):
        """Return the contents bottom-to-top as a comma-separated string."""
        return ', '.join(str(e) for e in self._stack)
"""
Main program starts here - Get 5 expressions
Construct expression tree for them preserving
operator precedence then do pre-order traversal
on the tree
I use two stacks here - one for the operators (opStack)
and one for the expression subtree nodes (valStack)
1) Scan the infix expression from left to right
a) If the character is space skip it
b) If the character is an alphabet create a tree node for the character
with left and right set to None. Push this node into value stack
c) If the character is operator, check it against the current operator
on top of the operator stack
- if it is same or lower precedence than the operator on the operator
stack simplify the stack
- pop the top of value stack as right node (r)
- pop the top again of value stack as left node (l)
- pop the operator on top of operator stack (o) construct a tree
node with the operator o as root and l and r nodes as
left and right sub trees .
- Push the new node into the value stack
d) Push the current scanned operator character onto operator stack
2) If no characters left for scanning and if the operator stack is not empty
simplify by doing the operations listed in c) until we are left with one
node which will be the entire expression tree.
3) Do the prefix traversal on this tree node - it can be a recursive call
as shown in the code - Add the value of the node and traverse left and
then right recursively
"""
## Infix -> expression-tree parser (NOTE: Python 2 only - raw_input / print)
## Uses a value stack (subtrees) and an operator stack; see the module
## docstring above for the full algorithm description.
expression_trees = []
## Parse and gather the trees for all the 5 expressions
for i in range(5) :
    userinput = raw_input()
    infix_expr = userinput.strip()
    valStack = Stack() ## Values stack
    opStack = Stack() ## Operators stack
    for c in infix_expr :
        if isspace(c) : ## Skip, if empty space
            continue
        if Tree.IsOperator(c) :
            if opStack.Peek() != None and Tree.Compare(c, opStack.Peek()) <= 0 : ## Simplify the top binop if possible
                n = Node(opStack.Pop())
                n.SetRight(valStack.Pop())
                n.SetLeft(valStack.Pop())
                valStack.Push(n)
            opStack.Push(c) ## Add the new operator to op-stack
        else :
            valStack.Push(Node(c)) ## It is value - convert it as node and push it to value stack
    c = opStack.Pop() ## Simplify the expressions until the operator stack is empty and we are left with one node
    while c != None :
        n = Node(c)
        n.SetRight(valStack.Pop())
        n.SetLeft(valStack.Pop())
        valStack.Push(n)
        c = opStack.Pop()
    tree = Tree(valStack.Pop())
    expression_trees.append(tree) ## Add the sole node/tree to our parsed expressions list
## Traverse and print the parsed expressions
for i in range(5) :
    tree = expression_trees[i]
    print tree.PreOrder() ## Do the preorder and print the expression
"""
Results of execution input/output
>>>
A+B
A+B+C
A * B + C
A + B * C
A - B + C * D
+AB
++ABC
+*ABC
+A*BC
+-AB*CD
>>>
""" | {
"repo_name": "antonyrp/teach-acsl",
"path": "contest4/acsl_pip.py",
"copies": "1",
"size": "6396",
"license": "mit",
"hash": 1689897935175814400,
"line_mean": 29.0328638498,
"line_max": 118,
"alpha_frac": 0.5587867417,
"autogenerated": false,
"ratio": 4.401927047487956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5460713789187956,
"avg_score": null,
"num_lines": null
} |
## Antony Philip - 03/31/2016 - Developed for ACSL teaching
## ACSL 2014-2015 - Quine-McClusky Algorithm (Simplify Boolean Expressions)
"""
Here some additional Python features are used for you to learn about
1) Ternary operator: https://www.python.org/dev/peps/pep-0308/
2) +=, -=, operators
3) lambda functions, map
4) Dictionaries https://docs.python.org/2/tutorial/datastructures.html
5) bin https://docs.python.org/2/library/functions.html#bin
6) zfill functions https://docs.python.org/2/library/string.html
"""
def Simplify(q1, q2):
    """
    Quine-McCluskey combining step for two 4-bit binary strings.

    If q1 and q2 differ in exactly one bit position, return the merged
    term: the differing position becomes the don't-care marker 'x' and
    every common bit is kept.  Otherwise (zero or more than one
    differing position, including q1 == q2) return 'NONE'.
    """
    merged = ''
    differing = 0
    for i in range(4):
        if q1[i] == q2[i]:
            # BUG FIX: the original appended the global t1[i] instead of
            # the parameter q1[i]; it only worked because the caller
            # happened to name its local variable t1.
            merged += q1[i]
        else:
            merged += 'x'
            differing += 1
    return merged if differing == 1 else 'NONE'
def Transform(m):
    """
    Convert a merged 4-bit term (with 'x' don't-cares) to letters:
    position i maps to 'ABCD'[i], upper-case for a 1 bit, lower-case
    for a 0 bit; 'x' positions are dropped entirely.
    """
    letters = []
    for bit, name in zip(m, 'ABCD'):
        if bit != 'x':
            letters.append(name if bit == '1' else name.lower())
    return ''.join(letters)
"""
Main program starts here
Check out the usage of lambda (to provide a transform function to work on sequence)
and map (to convert string to int)
bin converts string to binary
[2:] takes characters from 2'nd position to remove '0b' which is put in front by bin function
zfill method fills with 0 we need to have exactly 4 binary characters
"""
## Read in first line input and convert the comma separated numbers to integers
## (NOTE: Python 2 only - raw_input, print statements, list-returning map)
userinput = raw_input()
data = map(lambda x : int(x), userinput.split(', '))
quadruples = {} ## This is the way to define dictionary
for n in data :
    if n == -1 : ## -1 acts as terminator
        break
    quadruples[n] = bin(n)[2:].zfill(4) ## 4-bit binary string, e.g. 4 -> '0100'
## To accumulate results
results = []
## Read-in 5 pairs to check out
for i in range(5) :
    userinput = raw_input()
    data = map(lambda x : int(x), userinput.split(', '))
    t1 = quadruples.get(data[0])
    t2 = quadruples.get(data[1])
    m = Simplify(t1, t2) ## Simplify the terms
    if m == 'NONE' : ## If it cannot be simplified simply add NONE to result
        results.append(m)
    else :
        t = Transform(m) ## Transform the simplified binary string to alpha
        results.append( m + ', ' + t) ## Add the transformed thing also
## Print out all the 5 results
for r in results :
    print r
"""
Results of execution input/output
>>>
4, 8, 9, 10, 11, 12, 14, 15, -1
4, 12
9, 11
12, 14
9, 14
14, 15
x100, Bcd
10x1, AbD
11x0, ABd
NONE
111x, ABC
>>>
"""
| {
"repo_name": "antonyrp/teach-acsl",
"path": "contest4/quine_mcclusky.py",
"copies": "1",
"size": "3501",
"license": "mit",
"hash": 1009976901565686300,
"line_mean": 32.0283018868,
"line_max": 97,
"alpha_frac": 0.5995429877,
"autogenerated": false,
"ratio": 3.4089581304771177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4508501118177118,
"avg_score": null,
"num_lines": null
} |
## Antony Philip - 04/03/2016 - Developed for ACSL teaching
## ACSL 2012-2013 - ACSL Rummy
class Rummy :
    """
    Simple class wrapper to do ACSL Rummy thing
    NOTE: Python 2 only - uses tuple-parameter lambdas (lambda(x, y))
    and dict.has_key(), both removed in Python 3.
    """
    def __init__(self, draw):
        """
        Arrange the drawn 7 cards into sets and runs
        Using dictionaries to store card and suit ranks and their inverses (given rank what card or suit it is)
        Lambda or functions to store ordering functions for suit and card
        Find the sets and runs from the draws. Prune the runs and form all combinations of 3s or 4s
        """
        self._draw = set(draw)
        self._suitRank = {'S': 4, 'H' : 3, 'C' : 2, 'D' : 1}
        self._rankSuit = { y : x for (x,y) in self._suitRank.items()}
        self._cardRank = {'A': 1, '2' : 2, '3': 3, '4': 4,'5': 5, '6': 6, '7': 7, '8': 8,'9': 9, 'T' : 10, 'J': 11, 'Q': 12, 'K': 13}
        self._rankCard = { y : x for (x,y) in self._cardRank.items()}
        ## Three-way comparators; each receives a single (x, y) tuple argument
        self._lambdaSuitRank = lambda(x, y) : 0 if self._suitRank[x] == self._suitRank[y] else (1 if self._suitRank[x] > self._suitRank[y] else -1 )
        self._lambdaCardRank = lambda(x, y) : 0 if self._cardRank[x] == self._cardRank[y] else (1 if self._cardRank[x] > self._cardRank[y] else -1 )
        self._setDict = {}
        tempRunDict = {}
        for c in draw : ## Process each card of the draw
            v = c[0] ## Card value
            t = c[1] ## Card type or suit
            if not self._setDict.has_key(v) :
                self._setDict[v] = []
            self.InsertInOrder(self._setDict[v], t, self._lambdaSuitRank, True) ## Sort using suit rank - high to low
            if not tempRunDict.has_key(t) :
                tempRunDict[t] = []
            self.InsertInOrder(tempRunDict[t], v, self._lambdaCardRank) ## Sort using card rank low to high
        self._runDict = self.PruneRunDict(tempRunDict, self._cardRank)
        ## Prune the run list to remove < 3 cards and split into sequence of sequences for each suit
    def Process(self):
        """
        Process the suits and runs and left over cards and find the rummy arrangement
        Returns the arrangement as a comma-separated string: the chosen
        run/set group(s) first, then the leftover cards in display order.
        """
        fours = []
        threes = []
        ## Gather threes and fours from runs
        for rank in range(4, 0, -1) :
            k = self._rankSuit[rank]
            if self._runDict.has_key(k) :
                v = self._runDict[k]
                for r in v:
                    if len(r) == 4 :
                        fours.append(map(lambda c : c+k, r))
                    elif len(r) == 3 :
                        threes.append(map(lambda c : c+k, r))
        ## Gather threes and fours from sets
        for rank in range(1, 14) : ## Possible ranks for cards low to high
            k = self._rankCard[rank]
            if self._setDict.has_key(k) :
                v = self._setDict[k]
                if len(v) == 4 :
                    fours.append(map(lambda t : k+t, v))
                elif len(v) == 3 :
                    threes.append(map(lambda t : k+t, v))
        ## Check, if 4, 3 possible
        r = []
        if len(fours) > 0 :
            for f in fours :
                c = set(f)
                for t in threes :
                    if len(c.intersection(t)) == 0 :
                        r += f
                        r += t
                        break
                if len(r) > 0 :
                    break
        ## Check, if 3, 3 possible
        if len(r) == 0 :
            l = len(threes)
            for i in range(l-1) :
                c = set(threes[i])
                for j in range(i+1,l) :
                    if len(c.intersection(threes[j])) == 0 :
                        r += threes[i]
                        r += threes[j]
                        break
                if len(r) > 0 :
                    break
        ## Check, if one 4 or 3
        if len(r) == 0:
            if len (fours) > 0:
                r += fours[0]
            elif len(threes) > 0 :
                r += threes[0]
        ## Remove the run and/or set from the draw
        for x in r :
            self._draw.remove(x)
        ## While we have elements to insert into our order
        leftovers = []
        while len(self._draw) > 0 :
            self.InsertInOrder(leftovers, self._draw.pop(), lambda(x, y) : self.OrphanCardCompare(x,y))
        r += leftovers
        return ', '.join(r) ## Return the arrangement as a string
    def PruneRunDict(self, d, rank):
        """
        Prune run dictionary removing runs with less than 3 elements.
        Long runs with gaps, Change the runs into 3 and 4 combinations
        """
        pruned_runs = {}
        for (k,v) in d.items() :
            l = len(v)
            if l >= 3 :
                i = 0
                sta = 0  ## start index of the current consecutive segment
                while (i < l-1) :
                    if (rank[v[i+1]] != rank[v[i]]+1) :  ## gap ends the segment
                        if (i-sta) >= 3 :
                            if not pruned_runs.has_key(k) :
                                pruned_runs[k] = []
                            pruned_runs[k] += self.FindCombinations(v[sta:i+1])
                        sta = i+1
                    i += 1
                if (sta < i) and (i+1-sta) >= 3:  ## flush the trailing segment
                    if not pruned_runs.has_key(k) :
                        pruned_runs[k] = []
                    pruned_runs[k] += self.FindCombinations(v[sta:])
        return pruned_runs
    def FindCombinations(self, v) :
        """
        Given a sequence find 3 and 4 element combinations of that sequence
        in order ABCDE will return ABCD, BCDE, ABC,BCD,CDE
        """
        c = []
        if len(v) == 3 :
            c.append(v)
        else:
            ## Choose combinations of 4 and 3 (4 first and then 3)
            i = 0
            while(i+4 <= len(v)) :
                c.append(v[i:i+4])
                i+=1
            i=0
            while(i+3 <= len(v)) :
                c.append(v[i:i+3])
                i+=1
        return c
    def InsertInOrder(self, s, v, l, h2l=False):
        """
        Insert into sequence a card or suit using the respective ranking order
        provided by the lambda
        `l` is a comparator taking a single (existing, new) tuple; h2l
        selects high-to-low instead of low-to-high ordering.
        """
        i = 0
        cont = True
        while i < len(s) and cont:
            r = l((s[i], v))
            cont = (r >= 0 if h2l else r <= 0)
            if cont:
                i += 1
        s.insert(i, v) ## Insert in the correct place order
    def OrphanCardCompare(self, x, y) :
        """
        Compare cards using two ranking functions (card order and suit order)
        Orders leftover cards highest-card-first, breaking ties by suit.
        """
        c1 = x[0]
        c2 = y[0]
        t1 = x[1]
        t2 = y[1]
        r1 = self._lambdaCardRank((c1, c2))
        r2 = self._lambdaSuitRank((t1, t2))
        ## Highest suit and draw first
        if (r1 > 0) :
            return -1
        if (r1 == 0) and (r2 > 0) :
            return -1
        if (r1 < 0) : ## Smaller card later
            return 1
        if (r1 == 0) and (r2 < 0) :
            return 1
        return 0
"""
Main program starts here
we read in 5 draws of 7 cards each and arrange them in rummy order
"""
## (NOTE: Python 2 only - raw_input / print statements)
results = []
## Read-in draws
for i in range(5) :
    userinput = raw_input()
    cards = userinput.split(', ')  ## 7 two-character cards, e.g. '8H'
    r = Rummy(cards)
    results.append(r.Process())
## Print the results
for r in results :
    print r
"""
An Example execution taking 5 draws and then arranging them in order
>>>
8H, 8C, 8S, 2C, 7S, 9H, KD
3S, 4S, TD, 9S, KC, 5S, 6S
KS, JS, KH, JH, KC, JC, 6D
5H, TD, 6H, JD, 7H, QD, 3C
2C, 2D, 4S, 6C, 7H, 7S, 8C
8S, 8H, 8C, KD, 9H, 7S, 2C
3S, 4S, 5S, 6S, KC, TD, 9S
JS, JH, JC, KS, KH, KC, 6D
5H, 6H, 7H, TD, JD, QD, 3C
8C, 7S, 7H, 6C, 4S, 2C, 2D
>>>
""" | {
"repo_name": "antonyrp/teach-acsl",
"path": "contest4/acsl_rummy.py",
"copies": "1",
"size": "7942",
"license": "mit",
"hash": 8353015751837990000,
"line_mean": 34.6188340807,
"line_max": 148,
"alpha_frac": 0.450264417,
"autogenerated": false,
"ratio": 3.4560487380330724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9250046718613918,
"avg_score": 0.031253287283831074,
"num_lines": 223
} |
## Antony Philip - 04/17/2016 - Initial version
## ACSL 2015-2016 - ACSL ABC
class Board(object):
    """
    An N x N letter-placement puzzle board (N = number of alphabets).

    The board is a flat row-major list of N*N cells, each holding one of
    the given alphabets or '' when empty.  Pre-filled cells are set with
    SetGrid(); Play() fills the remaining cells by backtracking so that
    no letter repeats within a row or a column, then returns the whole
    board as one string.
    """
    def __init__(self, alphabets):
        """
        Board initializer.

        alphabets -- letters to place (e.g. ['A', 'B', 'C']); the board
        is square with one row and one column per letter.
        """
        self._alphabets = alphabets
        self._rows = len(alphabets)
        self._cols = self._rows
        self._count = self._rows * self._cols
        self._grid = ['' for i in range(self._count)]
        self._unfilled = []  # indices of non-prefilled cells, set by Play()
    def SetGrid(self, loc, c):
        """
        Pre-fill cell 'loc' with letter 'c'.  Grid locations are
        1-based, so subtract 1 for the internal 0-based list.
        """
        self._grid[loc-1] = c
    def Play(self):
        """
        Fill every empty cell via backtracking and return the resulting
        board as one row-major string.
        """
        self._unfilled = [i for i in range(self._count) if self._grid[i] == '']
        if len(self._unfilled) > 0 :
            self.Try(0)
        return ''.join(self._grid)
    def Try(self, pos):
        """
        Try placing an alphabet into the pos-th unfilled cell, recursing
        forward on success and clearing the previous placement when no
        letter fits (backtracking step).
        """
        if pos >= len(self._unfilled) :  # all unfilled cells are done
            return
        loc = self._unfilled[pos]        # grid index for this unfilled slot
        for letter in self._alphabets :
            if self.IsSafe(loc, letter) :
                self._grid[loc] = letter
                self.Try(pos+1)          # recursive forward move
        # NOTE(review): for pos == 0 this clears self._unfilled[-1];
        # kept exactly as the original backtracking behaved.
        if self._grid[loc] == '' and pos < len(self._unfilled) :
            self._grid[self._unfilled[pos-1]] = ''
    def IsSafe(self, loc, letter):
        """
        A letter may be placed at 'loc' only if the same letter does not
        already appear in that cell's row or column.
        """
        # Bug fix: use floor division.  The original 'loc/self._cols'
        # floor-divides under Python 2 but yields a float under
        # Python 3, which breaks the row index computation.
        r = loc // self._cols  # matrix row
        c = loc % self._cols   # matrix column
        cnt = self._rows       # square board: rows == cols
        isOK = True
        i = 0
        while i < cnt and isOK:
            if loc != r*cnt + i :  # skip when the scanned row cell is loc itself
                isOK = self._grid[r*cnt + i] != letter and self._grid[i*cnt + c] != letter
            i = i+1
        return isOK
"""
Main program starts here -
Alphabets are A, B and C
Number of pre-filled positions are the first element followed by pairs of
pre-filled positions and their alphabets
"""
alphabets = ['A', 'B', 'C']
boards = []
for i in range(5) :
b = Board(alphabets)
data = raw_input().split(', ')
n = int(data[0])
for i in range(n) :
offset = 1 + 2*i
loc = int(data[offset].strip())
offset = offset + 1
c = data[offset].strip()
b.SetGrid(loc, c)
boards.append(b)
for b in boards :
print b.Play()
"""
Results of execution input/output
>>>
3, 1, A, 3, C, 8, A
3, 1, A, 6, C, 8, B
3, 1, B, 6, B, 9, C
2, 1, C, 5, B
2, 3, B, 7, A
ABCBCACAB
ACBBACCBA
BCACABABC
CABABCBCA
CABBCAABC
>>>
"""
| {
"repo_name": "antonyrp/teach-acsl",
"path": "contest3/acsl_abc.py",
"copies": "1",
"size": "3505",
"license": "mit",
"hash": -9050889472236668000,
"line_mean": 31.462962963,
"line_max": 112,
"alpha_frac": 0.5446504993,
"autogenerated": false,
"ratio": 3.6358921161825726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9567576001333544,
"avg_score": 0.022593322829805643,
"num_lines": 108
} |
## Antony Philip (12/06/2015) - Developed for Teaching
## ACSL 2001-2002 - Wrap Around Code
import sys
def AlphabetToPosition(c):
    """
    Convert an upper-case letter to its 1-based alphabet position
    ('A' -> 1 ... 'Z' -> 26).  Returns None for any other input,
    matching the original if/elif chain's fall-through behaviour.

    Bug fix: the original chain tested lowercase 'c' where it meant
    'X', so 'X' returned None and 'c' wrongly returned 24.
    """
    # Equality-keyed dict reproduces the original chain's matching
    # semantics exactly (including None for unknown inputs).
    mapping = {chr(ord('A') + i): i + 1 for i in range(26)}
    return mapping.get(c)
def PositionToAlphabet(x):
    """
    Inverse of AlphabetToPosition: map a 1-based position (1..26) back
    to its upper-case letter.  Any other value yields None, mirroring
    the original chain's fall-through.
    """
    # Dict lookup keyed on equality behaves like the original chain of
    # '==' tests, including the implicit None for out-of-range values.
    lookup = {i + 1: chr(ord('A') + i) for i in range(26)}
    return lookup.get(x)
def LargestIntegerFactor(i):
    """
    Return the largest proper factor of i (the biggest divisor smaller
    than i itself); returns 1 when i is prime or < 4.

    Bug fix: use floor division.  The original 'i/2' floor-divides
    under Python 2 but yields a float under Python 3, making the scan
    start from a float and return wrong results for odd numbers.
    """
    factor = 1      # 1 divides every number
    k = i // 2      # a proper factor can be at most half the number
    while k > 1:    # scan downward so the first hit is the largest
        if i % k == 0:
            factor = k
            break
        k = k - 1
    return factor
def SumOfDigits(i):
    """
    Return the sum of the decimal digits of the non-negative integer i,
    e.g. SumOfDigits(23) == 5.
    """
    # Walk the decimal representation digit by digit.
    return sum(int(digit) for digit in str(i))
def Encode(c):
    """
    Encode one upper-case letter per the contest rules: each group of
    letters transforms its 1-based position differently, the result is
    wrapped back into 1..26 and converted back to a letter.

    Bug fix: floor division in the K..O branch.  The original '/ 4'
    floor-divides under Python 2 but produces a float under Python 3.
    """
    value = 0
    numerical_value = AlphabetToPosition(c)
    if c in ('A', 'B', 'C', 'D', 'E'):
        value = 2 * numerical_value                         # twice the value
    elif c in ('F', 'G', 'H', 'I', 'J'):
        value = (numerical_value % 3) * 5                   # (value mod 3) * 5
    elif c in ('K', 'L', 'M', 'N', 'O'):
        value = (numerical_value // 4) * 8                  # floor(value/4) * 8
    elif c in ('P', 'Q', 'R', 'S', 'T'):
        value = SumOfDigits(numerical_value) * 10           # digit sum * 10
    elif c in ('U', 'V', 'W', 'X', 'Y', 'Z'):
        value = LargestIntegerFactor(numerical_value) * 12  # largest factor * 12
    value = (value - 1) % 26 + 1     # wrap the result into 1..26
    c = PositionToAlphabet(value)    # back to a letter
    return c
## Read in 5 alphabets
# Python 2 interactive script: reads 5 single letters from stdin and
# prints their encoded counterparts.
print "Enter the input characters: "
data = []
for i in range(5) :
    data.append(sys.stdin.readline().strip())
## Encode the characters and print the values
print "Encoded characters: "
for x in data :
    print Encode(x) ## Print the encoded character
"""
Results of execution input/output
>>>
Enter the input characters:
E
H
M
P
V
Encoded characters:
J
J
X
R
B
>>>
"""
| {
"repo_name": "antonyrp/teach-acsl",
"path": "contest1/wrap_around.py",
"copies": "1",
"size": "5248",
"license": "mit",
"hash": -7842075675276503000,
"line_mean": 25.24,
"line_max": 100,
"alpha_frac": 0.5123856707,
"autogenerated": false,
"ratio": 3.5080213903743314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9400844278547775,
"avg_score": 0.02391255650531107,
"num_lines": 200
} |
## Antony Philip (12/06/2015) - Developed for Teaching
## ACSL 2010-2011 - ACSL Prints
## Finger number and its corresponding whorl number
# Parallel lists: finger k (1..10) carries whorl weight
# whorl_number[k-1], i.e. 16, 16, 8, 8, 4, 4, 2, 2, 1, 1.
finger_number = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
whorl_number = [16, 16, 8, 8, 4, 4, 2, 2, 1, 1]
def even(finger):
    """
    Return True when the finger number is even, False when it is odd.
    """
    return finger % 2 == 0
"""
Program starts here
We need to calculate numerator and denominator of Primary Group Ratio using
the given data for persons
"""
pgr_num = [] ## Numerator values for the 5 sets of data
pgr_den = [] ## Denominator values for the 5 sets of data
print "SAMPLE INPUT"
for person in range(1, 6) :
userinput = raw_input("%d. " %(person))
data = userinput.split(',')
num = 1 ## Initialize to 1 - At least the value is 1 (we have 1+)
den = 1 ## Initialize to 1 - At least the value is 1 (we have 1+)
for v in data :
finger = int(v) ## Get the finger
if finger == 0 : ## Termination of data
break;
if finger not in finger_number : ## Check if the data is valid
print "Finger number should be a number between 1 and 10 inclusive"
continue
## Get the whorl value for the finger - note the array index starts at 0
whorl_value_for_finger = whorl_number[finger-1]
## Check of the finger is odd or even and add the whorl value accordingly
if even(finger):
num = num + whorl_value_for_finger
else :
den = den + whorl_value_for_finger
## Add the numerator and denominator data for PGR to the list
pgr_num.append(num)
pgr_den.append(den)
## Output the results in the expected format
print "SAMPLE OUTPUT:"
for person in range(5) :
print "%d. %d/%d" % (person+1, pgr_num[person], pgr_den[person] )
"""
Results of execution input/output
>>>
SAMPLE INPUT
1. 8,7,2,0
2. 0
3. 8,7,2,0
4. 8,7,2,0
5. 0
SAMPLE OUTPUT:
1. 19/3
2. 1/1
3. 19/3
4. 19/3
5. 1/1
>>>
"""
| {
"repo_name": "antonyrp/teach-acsl",
"path": "contest1/fingerprintpgr.py",
"copies": "1",
"size": "2085",
"license": "mit",
"hash": 5565256886074213000,
"line_mean": 25.0625,
"line_max": 81,
"alpha_frac": 0.5976019185,
"autogenerated": false,
"ratio": 3.183206106870229,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9132039451360019,
"avg_score": 0.029753714802042003,
"num_lines": 80
} |
## Antony Philip (12/06/2015) - Developed for Teaching
## ACSL 2011-2012 - Draft Picks
## We need to import this library which has special values holding
## Smallest and largest floating point numbers
import sys
## We need to gather contract years and the contract salary for 5 players
# Python 2 interactive script: each line is "<years>, <salary in $M>".
contract_years = []
contract_salaries = []
print "SAMPLE INPUT"
for athelete in range(1, 6) :
    userinput = raw_input("%d. " %(athelete))
    data = userinput.split(',')
    yr = int(data[0]); ## First data is number of years
    salary = float(data[1]) * 1000000; ## second data is salary in millions converted to dollars
    contract_years.append(yr) ## Add the athlete info
    contract_salaries.append(salary) ## Add the athlete info
num_annual_salaries_more_than_10m = 0
sum_annual_salary = 0 ## To calculate average we need sum
# NOTE(review): float_info.min is the smallest *positive* float, not
# -infinity -- fine here because salaries are always positive.
lowest_salary_per_game_in_16_game = sys.float_info.max
sum_salary_per_game_in_16_game = 0 ## To calculate average we need sum
highest_salary_per_game_in_18_game = sys.float_info.min
sum_salary_per_game_in_18_game = 0 ## To calculate average we need sum
## Gather statistics by processing each of the athletes data
for athelete in range(5) :
    contract_salary = contract_salaries[athelete]
    contract_year = contract_years[athelete]
    ## Calculate the athletes annual salary and salary per game in a 16 and 18 games
    annual_salary = contract_salary/contract_year
    salary_per_game_16 = annual_salary / 16
    salary_per_game_18 = annual_salary / 18
    ## Calculate some additional data for the statistics using the above
    if annual_salary > 10000000 :
        num_annual_salaries_more_than_10m = num_annual_salaries_more_than_10m + 1
    sum_annual_salary = sum_annual_salary + annual_salary
    if salary_per_game_16 < lowest_salary_per_game_in_16_game :
        lowest_salary_per_game_in_16_game = salary_per_game_16
    sum_salary_per_game_in_16_game = sum_salary_per_game_in_16_game + salary_per_game_16
    if salary_per_game_18 > highest_salary_per_game_in_18_game :
        highest_salary_per_game_in_18_game = salary_per_game_18
    sum_salary_per_game_in_18_game = sum_salary_per_game_in_18_game + salary_per_game_18
avg_annual_salary = sum_annual_salary/5
avg_salary_per_game_in_16_game = sum_salary_per_game_in_16_game / 5
avg_salary_per_game_in_18_game = sum_salary_per_game_in_18_game / 5
print "SAMPLE OUTPUT"
print "1. ", num_annual_salaries_more_than_10m
print "2. ", int(round(avg_annual_salary))
print "3. ", int(round(lowest_salary_per_game_in_16_game))
print "4. ", int(round(highest_salary_per_game_in_18_game))
print "5. ", int(round(avg_salary_per_game_in_16_game - avg_salary_per_game_in_18_game))
"""
Results of execution input/output
>>>
SAMPLE INPUT
1. 5, 57.5
2. 6, 56.5
3. 6, 72
4. 6, 60
5. 5, 51
SAMPLE OUTPUT
1. 3
2. 10623333
3. 588542
4. 666667
5. 73773
>>>
""" | {
"repo_name": "antonyrp/teach-acsl",
"path": "contest1/draft_picks.py",
"copies": "1",
"size": "2898",
"license": "mit",
"hash": 3473372900018831000,
"line_mean": 35.6962025316,
"line_max": 97,
"alpha_frac": 0.6939268461,
"autogenerated": false,
"ratio": 2.8721506442021805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8940675202012451,
"avg_score": 0.025080457657945598,
"num_lines": 79
} |
## Antony Philip (12/06/2015) - Developed for Teaching
## ACSL 2012-2013 - Time Sheets
def get_time_code_as_integer(char_time_code):
    """
    Map a one-character time code onto a consecutive half-hour slot
    number: '1'..'9' -> 1..9 and 'A'..'H' -> 10..17.  Consecutive codes
    make elapsed-time arithmetic possible (e.g. 'A'-'1' spans 9 half
    hours).  Any other input maps to 0, exactly like the original
    if/elif chain's default.
    """
    # Equality-keyed table preserves the original matching semantics.
    slot_by_code = {code: slot for slot, code in enumerate("123456789ABCDEFGH", 1)}
    return slot_by_code.get(char_time_code, 0)
def get_hrs_worked(start_time_code, end_time_code):
    """
    Hours worked between two time codes: each code step represents half
    an hour, so the slot difference divided by 2.0 gives hours (float).
    """
    slots_elapsed = (get_time_code_as_integer(end_time_code)
                     - get_time_code_as_integer(start_time_code))
    return slots_elapsed / 2.0
def get_daily_pay(location, hours_worked):
    """
    Daily pay from the work location and hours worked:
      locations 1-9:   $10/hour flat, no overtime
      locations 10-19: $8/hour for the first 4 hours, $12/hour after
      locations 20-29: $12/hour for the first 4 hours, $24/hour after
    Any other location pays 0.
    """
    if location in range(1, 10):
        return 10 * hours_worked          # flat rate, no overtime
    if location in range(10, 20):
        regular_rate, overtime_rate = 8, 12
    elif location in range(20, 30):
        regular_rate, overtime_rate = 12, 24
    else:
        return 0                          # unknown location
    # Split the day into at most 4 regular hours plus overtime.
    overtime_hours = max(hours_worked - 4, 0)
    regular_hours = min(hours_worked, 4)
    return regular_rate * regular_hours + overtime_rate * overtime_hours
"""
Note main code starts here
"""
results = [] ## We get the pay for the employee in this list or array
all_employee_daily_pay_sum = 0 ## We sum up all the employees pay in this variable
print "SAMPLE INPUT:"
for employee in range(4) : ## We are doing this for 4 employees
userinput = raw_input("%d. " % (employee+1))
data = userinput.split(',')
## First data is location
location = int(data[0])
## Look at what chr and ord functions do
## I am converting the string into 1 letter character 1-9,A-H
start_time_code = chr(ord(data[1])) ## Start time code
end_time_code = chr(ord(data[2])) ## End time code
## Calculate the hours worked based on start and end time codes
hours_worked = get_hrs_worked(start_time_code, end_time_code)
## Calculate the daily pay based on location and hours worked
daily_pay = get_daily_pay(location, hours_worked)
## Add the pay to the results
results.append(daily_pay)
## Add this employees pay to the total sum
all_employee_daily_pay_sum = all_employee_daily_pay_sum + daily_pay
## Output the results in the expected format
print "SAMPLE OUTPUT:"
for employee in range(4) :
print "%d. $%.2f" % (employee+1, results[employee]) ## +1 - Note we need employee# as 1..
print "5. $%.2f" % all_employee_daily_pay_sum
"""
Results of execution input/output
>>>
SAMPLE INPUT:
1. 5,9,H
2. 11,1,7
3. 19,3,F
4. 25,2,B
SAMPLE OUTPUT:
1. $40.00
2. $24.00
3. $56.00
4. $60.00
5. $180.00
>>>
""" | {
"repo_name": "antonyrp/teach-acsl",
"path": "contest1/time_sheets.py",
"copies": "1",
"size": "4182",
"license": "mit",
"hash": 6770244559429494000,
"line_mean": 30.9312977099,
"line_max": 93,
"alpha_frac": 0.5973218556,
"autogenerated": false,
"ratio": 3.4110929853181076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45084148409181074,
"avg_score": null,
"num_lines": null
} |
## Antony Philip (12/06/2015) - Developed for Teaching
## ACSL 2013-2014 - ACSL Scrabble
def letter_value(letter):
"""
Given a letter this function returns its point value as per
defined rules
"""
points = 0
if letter in ['A', 'E'] :
points = 1
elif letter in ['D', 'R'] :
points = 2
elif letter in ['B', 'M'] :
points = 3
elif letter in ['V', 'Y'] :
points = 4
elif letter in ['J', 'X'] :
points = 8
else :
print "Invalid character given: ", letter
return points
"""
Main program starts here
"""
print "SAMPLE INPUT:"
## First input is the scrabble word
userinput = raw_input("1. ")
scrabble_word = userinput.split(',')
## Next 5 inputs are start positions for the above word
letter_start_positions = [] ## To hold the start positions specified for the word
for cnt in range(2,7) :
userinput = raw_input("%d. " %(cnt))
letter_start_positions.append(int(userinput))
## Word values will hold the calculated point values if the word is placed
## in the specified positions
word_values = []
## Do the word value calculations for all start positions
for current_start_position in letter_start_positions :
current_word_value = 0 ## Calculated word value, if we start at the position
current_word_multiplier = 1 ## Word multiplier, if needed
for letter_pos in range(4) : ## for all the 4 letter characters
letter = scrabble_word[letter_pos] ## Current letter of the scrabble word
lr_value = letter_value(letter) ## Value of the letter as per rules
pos = current_start_position + letter_pos ## Actual position of the letter in the board
letter_multiplier = 1 ## Default letter value multiplier
if ((pos % 3 == 0) and ((pos/3)%2 != 0)): ## If the place is divisible by 3 but not even occurrence
letter_multiplier = 2 ## multiply letter value by 2
elif (pos % 5) == 0: ## If the place is divisible by 5
letter_multiplier = 3 ## multiply letter value by 3
elif (pos % 7) == 0: ## If the place is divisible by 7
current_word_multiplier = 2 ## Multiply the word value by 2
elif (pos % 8) == 0: ## If the place is divisible by 8
current_word_multiplier = 3 ## Multiply the word value by 3
## Sum this letter value to the word value using letter multiplier
current_word_value = current_word_value + lr_value * letter_multiplier
## Calculate the word value by multiplying with word multiplier
current_word_value = current_word_value * current_word_multiplier
## Add the word value to our results list
word_values.append(current_word_value)
## Produce output for all start positions
print "SAMPLE OUTPUT:"
for position in range(5) :
print("%d. %d" % (position+1, word_values[position]))
"""
Results of execution input/output
>>>
SAMPLE INPUT:
1. J,A,V,A
2. 1
3. 2
4. 4
5. 12
6. 21
SAMPLE OUTPUT:
1. 18
2. 17
3. 32
4. 30
5. 66
>>>
""" | {
"repo_name": "antonyrp/teach-acsl",
"path": "contest1/acsl_scrabble.py",
"copies": "1",
"size": "3077",
"license": "mit",
"hash": -8259558007406861000,
"line_mean": 31.0625,
"line_max": 107,
"alpha_frac": 0.6272343191,
"autogenerated": false,
"ratio": 3.7800982800982803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.490733259919828,
"avg_score": null,
"num_lines": null
} |
## Antony Philip (12/06/2015) - Developed for Teaching
## ACSL 2014-2015 - Navigating ACSL Land
## We simply keep the distance for the current place from its previous place
## Note for place A it is 0
# distance_from_previous_place[i] is the distance from place i-1 to
# place i along the linear route A-B-C-D-E-F-G (index 0 = place A).
distance_from_previous_place = [0, 450, 140, 120, 320, 250, 80]
def place_position (location):
    """
    Return the 0-based index of a place letter ('A'..'G') along the
    linear route, for indexing into distance_from_previous_place.
    Unknown letters map to 0 (same as 'A'), matching the original
    chain's default.
    """
    ordered_places = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
    if location in ordered_places:
        return ordered_places.index(location)
    return 0
def distance_between(location1, location2):
    """
    Total distance travelling forward from location1 to location2 along
    the route: the sum of each intermediate hop's distance from its
    previous place.  Zero when location2 is not after location1.
    """
    start = place_position(location1)
    end = place_position(location2)
    # Hops start+1 .. end inclusive, i.e. slice [start+1 : end+1].
    return sum(distance_from_previous_place[start + 1:end + 1])
"""
Program starts here...
"""
print "SAMPLE INPUT:"
userinput = raw_input("1. ")
data = userinput.split(',')
miles_per_gallon_gas = float(data[0]) ## MPG
cost_per_gallon_gas = float(data[1]) ## Gas cost for a gallon
average_speed_miles_per_hour = float(data[2]) ## Average speed in miles per hour
## We gather distance travelled in all the given routes
distance_travelled_all_routes = []
for route in range(2,7) :
userinput = raw_input("%d. "%(route))
data = userinput.split(',')
start_location = data[0] ## Start location
end_location = data[1] ## End location
## Distance travelled in this route
distance_travelled = distance_between(start_location, end_location)
## Add this distance to all routes
distance_travelled_all_routes.append(distance_travelled)
print "SAMPLE OUTPUT:"
for route in range(5) :
distance_travelled = distance_travelled_all_routes[route]
## Travel time is distance/avg.speed
travel_time_hours = distance_travelled / average_speed_miles_per_hour
## Total gas consumed in gallons Distance in miles/ #miles per gallon
gas_consumed_gallons = distance_travelled / miles_per_gallon_gas
## Gas cost - total gas consumes times per gallon gas cost
gas_cost_dollars = gas_consumed_gallons * cost_per_gallon_gas
## We need to display the time in hh:mm format
travel_time_display_hrs = int(travel_time_hours)
travel_time_display_minutes = int(round((travel_time_hours - travel_time_display_hrs) * 60.0))
## Print route information in required format
print "%d. %d, %02d:%02d, $%.2f" %(route+1, int(distance_travelled), travel_time_hours, travel_time_display_minutes, gas_cost_dollars)
"""
Results of execution input/output
>>>
SAMPLE INPUT:
1. 25,3.79,50
2. A,C
3. E,F
4. C,G
5. B,E
6. A,G
SAMPLE OUTPUT:
1. 590, 11:48, $89.44
2. 250, 05:00, $37.90
3. 770, 15:24, $116.73
4. 580, 11:36, $87.93
5. 1360, 27:12, $206.18
>>>
"""
| {
"repo_name": "antonyrp/teach-acsl",
"path": "contest1/acsl_navigation.py",
"copies": "1",
"size": "3315",
"license": "mit",
"hash": -5713956966338857000,
"line_mean": 28.0789473684,
"line_max": 144,
"alpha_frac": 0.6398190045,
"autogenerated": false,
"ratio": 3.256385068762279,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9225266331560935,
"avg_score": 0.03418754834026872,
"num_lines": 114
} |
# ANT+ - Open Rx Scan Mode Example
#
# Open Rx demo working with OpenAnt Library (https://github.com/Tigge/openant)
# For further details on Open Rx Mode ("Continious Scann Mode"), check out the thisisant.com webpage
from ant.easy.node import Node
from ant.easy.channel import Channel
from ant.base.message import Message
from ant.base.commons import format_list
import logging
import struct
import threading
import sys
import time
# Definition of Variables
# ANT+ network key (8 bytes) passed to node.set_network_key() below.
NETWORK_KEY = [0xB9, 0xA5, 0x21, 0xFB, 0xBD, 0x72, 0xC3, 0x45]
def on_data_scan(data):
    """
    Broadcast/burst/ack callback: print device number, device type and
    the raw payload of a message received in open Rx scan mode.
    """
    # Extended-message layout: bytes 9-10 carry the device number
    # (low byte first), byte 11 the device type -- TODO confirm against
    # the ANT extended data format.
    deviceNumber = data[10] * 256 + data[9]
    deviceType = data[11]
    # NOTE(review): TimeProgramStart is assigned only as a *local*
    # inside main(), so this global read raises NameError when a
    # message arrives -- declare it global in main() to fix.
    ActualTime = time.time() - TimeProgramStart
    print(ActualTime, "RX:", deviceNumber, ", ", deviceType, ":", format_list(data))
def on_data_ack_scan(data):
    """
    Acknowledged-data callback: same decoding as on_data_scan but
    labelled "RX-Ack" in the output.
    """
    # Extended-message layout: bytes 9-10 = device number, byte 11 = type.
    deviceNumber = data[10] * 256 + data[9]
    deviceType = data[11]
    # NOTE(review): TimeProgramStart is only a local in main(); this
    # read raises NameError at runtime -- see on_data_scan.
    ActualTime = time.time() - TimeProgramStart
    print(ActualTime, "RX-Ack:", deviceNumber, ", ", deviceType, ":", format_list(data))
def main():
    """
    Configure an ANT+ channel in open Rx scan mode and print every
    received message until interrupted (Ctrl-C).
    """
    # Bug fix: the receive callbacks read TimeProgramStart at module
    # level; without this declaration the assignment below only created
    # a local, and every callback raised NameError.
    global TimeProgramStart
    print("ANT+ Open Rx Scan Mode Demo")
    logging.basicConfig(filename="example.log", level=logging.DEBUG)
    TimeProgramStart = time.time()  # reference point for message timestamps
    node = Node()
    node.set_network_key(0x00, NETWORK_KEY)  # 1. Set Network Key
    # CHANNEL CONFIGURATION
    channel = node.new_channel(
        Channel.Type.BIDIRECTIONAL_RECEIVE, 0x00, 0x00
    )  # 2. Assign channel
    channel.on_broadcast_data = on_data_scan
    channel.on_burst_data = on_data_scan
    channel.on_acknowledge = on_data_scan
    channel.on_acknowledge_data = on_data_ack_scan
    channel.set_id(0, 0, 0)  # 3. Set Channel ID (wildcards: any device)
    channel.set_period(0)  # 4. Set Channel Period
    channel.set_rf_freq(57)  # 5. Set Radio Frequency (2457 MHz, ANT+)
    channel.enable_extended_messages(
        1
    )  # 6. Enable Extended Messages, needed for OpenRxScanMode
    try:
        channel.open_rx_scan_mode()  # 7. OpenRxScanMode
        node.start()  # blocks until interrupted
    except KeyboardInterrupt:
        print("Closing ANT+ Channel")
        channel.close()
        # NOTE(review): node.stop() runs again in finally; presumably
        # openant tolerates the double stop -- confirm.
        node.stop()
    finally:
        node.stop()
        logging.shutdown()  # flush and close the log file
if __name__ == "__main__":
main()
| {
"repo_name": "Tigge/openant",
"path": "examples/continuous_scan.py",
"copies": "1",
"size": "2245",
"license": "mit",
"hash": 7214754451382281000,
"line_mean": 28.7534246575,
"line_max": 100,
"alpha_frac": 0.646325167,
"autogenerated": false,
"ratio": 3.3557548579970105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9498690155668321,
"avg_score": 0.0006779738657377483,
"num_lines": 73
} |
a = {
"tournament_id":"550d1d68cd7bd10003000003", # Id of the current tournament
"game_id":"550da1cb2d909006e90004b1", # Id of the current sit'n'go game. You can use this to link a
# sequence of game states together for logging purposes, or to
# make sure that the same strategy is played for an entire game
"round":0, # Index of the current round within a sit'n'go
"bet_index":0, # Index of the betting opportunity within a round
"small_blind": 10, # The small blind in the current round. The big blind is twice the
# small blind
"current_buy_in": 320, # The amount of the largest current bet from any one player
"pot": 400, # The size of the pot (sum of the player bets)
"minimum_raise": 240, # Minimum raise amount. To raise you have to return at least:
# current_buy_in - players[in_action][bet] + minimum_raise
"dealer": 1, # The index of the player on the dealer button in this round
# The first player is (dealer+1)%(players.length)
"orbits": 7, # Number of orbits completed. (The number of times the dealer
# button returned to the same player.)
"in_action": 1, # The index of your player, in the players array
"players": [ # An array of the players. The order stays the same during the
{ # entire tournament
"id": 0, # Id of the player (same as the index)
"name": "Albert", # Name specified in the tournament config
"status": "active", # Status of the player:
# - active: the player can make bets, and win the current pot
# - folded: the player folded, and gave up interest in
# the current pot. They can return in the next round.
# - out: the player lost all chips, and is out of this sit'n'go
"version": "Default random player", # Version identifier returned by the player
"stack": 1010, # Amount of chips still available for the player. (Not including
# the chips the player bet in this round.)
"bet": 320 # The amount of chips the player put into the pot
},
{
"id": 1, # Your own player looks similar, with one extension.
"name": "piton",
"status": "active",
"version": "Default random player",
"stack": 1590,
"bet": 80,
"hole_cards": [ # The cards of the player. This is only visible for your own player
# except after showdown, when cards revealed are also included.
{
"rank": "6", # Rank of the card. Possible values are numbers 2-10 and J,Q,K,A
"suit": "hearts" # Suit of the card. Possible values are: clubs,spades,hearts,diamonds
},
{
"rank": "K",
"suit": "hearts"
}
]
},
{
"id": 2,
"name": "Chuck",
"status": "out",
"version": "Default random player",
"stack": 0,
"bet": 0
}
],
"community_cards": [ # Finally the array of community cards.
{
"rank": "4",
"suit": "hearts"
},
{
"rank": "A",
"suit": "hearts"
},
{
"rank": "6",
"suit": "hearts"
}
]
}
# odds = {"hand": [
# [{"rank": "A"}, {"rank": "A"}, {"odds": 4}, {"suit": False}],
# [{"rank": "K"}, {"rank": "K"}, {"odds": 4}, {"suit": False}],
# [{"rank": "Q"}, {"rank": "Q"}, {"odds": 4}, {"suit": False}],
# [{"rank": "J"}, {"rank": "J"}, {"odds": 3}, {"suit": False}],
# [{"rank": "10"}, {"rank": "10"}, {"odds": 3}, {"suit": False}],
# [{"rank": "9"}, {"rank": "9"}, {"odds": 3}, {"suit": False}],
# [{"rank": "8"}, {"rank": "8"}, {"odds": 2}, {"suit": False}],
# [{"rank": "7"}, {"rank": "7"}, {"odds": 2}, {"suit": False}],
# [{"rank": "6"}, {"rank": "6"}, {"odds": 2}, {"suit": False}],
# [{"rank": "5"}, {"rank": "5"}, {"odds": 2}, {"suit": False}],
# [{"rank": "4"}, {"rank": "4"}, {"odds": 2}, {"suit": False}],
# [{"rank": "3"}, {"rank": "3"}, {"odds": 1}, {"suit": False}],
# [{"rank": "2"}, {"rank": "2"}, {"odds": 1}, {"suit": False}],
# [{"rank": "A"}, {"rank": "K"}, {"odds": 3}, {"suit": False}],
# [{"rank": "A"}, {"rank": "Q"}, {"odds": 3}, {"suit": False}],
# [{"rank": "A"}, {"rank": "J"}, {"odds": 2}, {"suit": False}],
# [{"rank": "A"}, {"rank": "10"}, {"odds": 2}, {"suit": False}],
# [{"rank": "A"}, {"rank": "9"}, {"odds": 2}, {"suit": False}],
# [{"rank": "A"}, {"rank": "8"}, {"odds": 2}, {"suit": False}],
# [{"rank": "A"}, {"rank": "7"}, {"odds": 1}, {"suit": False}],
# [{"rank": "A"}, {"rank": "6"}, {"odds": 1}, {"suit": False}],
# [{"rank": "A"}, {"rank": "5"}, {"odds": 1}, {"suit": False}],
# [{"rank": "A"}, {"rank": "4"}, {"odds": 1}, {"suit": False}],
# [{"rank": "A"}, {"rank": "3"}, {"odds": 1}, {"suit": False}],
# [{"rank": "A"}, {"rank": "2"}, {"odds": 1}, {"suit": False}],
# [{"rank": "A"}, {"rank": "K"}, {"odds": 4}, {"suit": True}],
# [{"rank": "A"}, {"rank": "Q"}, {"odds": 3}, {"suit": True}],
# [{"rank": "A"}, {"rank": "J"}, {"odds": 3}, {"suit": True}],
# [{"rank": "A"}, {"rank": "10"}, {"odds": 2}, {"suit": True}],
# [{"rank": "A"}, {"rank": "9"}, {"odds": 2}, {"suit": True}],
# [{"rank": "A"}, {"rank": "8"}, {"odds": 2}, {"suit": True}],
# [{"rank": "A"}, {"rank": "7"}, {"odds": 2}, {"suit": True}],
# [{"rank": "A"}, {"rank": "6"}, {"odds": 1}, {"suit": True}],
# [{"rank": "A"}, {"rank": "5"}, {"odds": 1}, {"suit": True}],
# [{"rank": "A"}, {"rank": "4"}, {"odds": 1}, {"suit": True}],
# [{"rank": "A"}, {"rank": "3"}, {"odds": 1}, {"suit": True}],
# [{"rank": "A"}, {"rank": "2"}, {"odds": 1}, {"suit": True}],
# [{"rank": "K"}, {"rank": "Q"}, {"odds": 2}, {"suit": True}],
# [{"rank": "K"}, {"rank": "J"}, {"odds": 2}, {"suit": True}],
# [{"rank": "K"}, {"rank": "10"}, {"odds": 1}, {"suit": True}],
# [{"rank": "K"}, {"rank": "9"}, {"odds": 1}, {"suit": True}],
# [{"rank": "K"}, {"rank": "8"}, {"odds": 1}, {"suit": True}],
# [{"rank": "K"}, {"rank": "7"}, {"odds": 1}, {"suit": True}],
# [{"rank": "K"}, {"rank": "6"}, {"odds": 1}, {"suit": True}],
# [{"rank": "K"}, {"rank": "5"}, {"odds": 1}, {"suit": True}],
# [{"rank": "K"}, {"rank": "4"}, {"odds": 1}, {"suit": True}],
# [{"rank": "K"}, {"rank": "3"}, {"odds": 1}, {"suit": True}],
# [{"rank": "Q"}, {"rank": "J"}, {"odds": 1}, {"suit": True}],
# [{"rank": "Q"}, {"rank": "10"}, {"odds": 1}, {"suit": True}],
# [{"rank": "Q"}, {"rank": "9"}, {"odds": 1}, {"suit": True}],
# [{"rank": "Q"}, {"rank": "8"}, {"odds": 1}, {"suit": True}],
# [{"rank": "Q"}, {"rank": "7"}, {"odds": 1}, {"suit": True}],
# [{"rank": "J"}, {"rank": "10"}, {"odds": 1}, {"suit": True}],
# [{"rank": "J"}, {"rank": "9"}, {"odds": 1}, {"suit": True}],
# [{"rank": "J"}, {"rank": "8"}, {"odds": 1}, {"suit": True}],
# [{"rank": "J"}, {"rank": "7"}, {"odds": 1}, {"suit": True}]
# ]}
class Player:
    """Minimal poker bot: raises big with two premium hole cards, folds otherwise."""

    VERSION = "kamikaze"

    # Rank groupings retained for future odds-based logic.
    low_cards = ["2", "3", "4", "5", "6", "7"]
    high_cards = ["10", "J", "Q", "K", "A"]
    # Hole-card ranks considered strong enough to raise with.
    PREMIUM_RANKS = ("A", "K", "Q")

    def __init__(self):
        # Per-instance state.  These used to be class-level lists, which were
        # shared between every Player instance and never cleared, so cards
        # from earlier hands leaked into later decisions.
        self.hand = []
        self.commun_cards = []

    def betRequest(self, game_state):
        """Return the amount to bet for the current game state."""
        return self.think(game_state)

    def showdown(self, game_state):
        """Called at showdown; this bot keeps no showdown statistics."""
        pass

    def communreturn(self, game_state):
        """Cache the community cards from *game_state* on the instance."""
        self.commun_cards = list(game_state["community_cards"])

    def think(self, game_state):
        """Bet 6000 when both hole cards are premium (A/K/Q), else fold (0).

        The hand is rebuilt from scratch on every call: the old code appended
        to a shared list, so ``hand[0]``/``hand[1]`` stayed pinned to the very
        first two cards ever seen and every later decision used stale cards.
        """
        self.hand = []
        for player in game_state["players"]:
            if player["name"] == "piton":
                self.hand.extend(player["hole_cards"])
        if (len(self.hand) >= 2
                and self.hand[0]["rank"] in self.PREMIUM_RANKS
                and self.hand[1]["rank"] in self.PREMIUM_RANKS):
            return 6000
        return 0
| {
"repo_name": "turbek/poker-player-piton",
"path": "player.py",
"copies": "1",
"size": "12359",
"license": "mit",
"hash": 2542364105092288000,
"line_mean": 47.8498023715,
"line_max": 121,
"alpha_frac": 0.380936969,
"autogenerated": false,
"ratio": 3.578170237405906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9447668860273608,
"avg_score": 0.002287669226459531,
"num_lines": 253
} |
"""ANTS Apply Transforms interface
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from builtins import range
import os
from .base import ANTSCommand, ANTSCommandInputSpec
from ..base import TraitedSpec, File, traits, isdefined, InputMultiPath
from ...utils.filemanip import split_filename
class WarpTimeSeriesImageMultiTransformInputSpec(ANTSCommandInputSpec):
    """Trait definitions mapping to WarpTimeSeriesImageMultiTransform CLI arguments."""
    dimension = traits.Enum(4, 3, argstr='%d', usedefault=True,
                            desc='image dimension (3 or 4)', position=1)
    input_image = File(argstr='%s', mandatory=True, copyfile=True,
                       desc=('image to apply transformation to (generally a '
                             'coregistered functional)'))
    # NOTE(review): the desc says "prepended", but _format_arg appends this
    # postfix to the input image's basename -- confirm wording upstream.
    out_postfix = traits.Str('_wtsimt', argstr='%s', usedefault=True,
                             desc=('Postfix that is prepended to all output '
                                   'files (default = _wtsimt)'))
    reference_image = File(argstr='-R %s', xor=['tightest_box'],
                           desc='reference image space that you wish to warp INTO')
    tightest_box = traits.Bool(argstr='--tightest-bounding-box',
                               desc=('computes tightest bounding box (overrided by '
                                     'reference_image if given)'),
                               xor=['reference_image'])
    reslice_by_header = traits.Bool(argstr='--reslice-by-header',
                                    desc=('Uses orientation matrix and origin encoded in '
                                          'reference image file header. Not typically used '
                                          'with additional transforms'))
    use_nearest = traits.Bool(argstr='--use-NN',
                              desc='Use nearest neighbor interpolation')
    use_bspline = traits.Bool(argstr='--use-Bspline',
                              desc='Use 3rd order B-Spline interpolation')
    transformation_series = InputMultiPath(File(exists=True), argstr='%s',
                                           desc='transformation file(s) to be applied',
                                           mandatory=True, copyfile=False)
    # 1-based indices counting only the Affine files inside
    # transformation_series; consumed by _format_arg.
    invert_affine = traits.List(traits.Int,
                                desc=('List of Affine transformations to invert. '
                                      'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines '
                                      'found in transformation_series'))
class WarpTimeSeriesImageMultiTransformOutputSpec(TraitedSpec):
    """Outputs produced by WarpTimeSeriesImageMultiTransform."""
    output_image = File(exists=True, desc='Warped image')
class WarpTimeSeriesImageMultiTransform(ANTSCommand):
    """Warps a time-series from one space to another

    Examples
    --------

    >>> from nipype.interfaces.ants import WarpTimeSeriesImageMultiTransform
    >>> wtsimt = WarpTimeSeriesImageMultiTransform()
    >>> wtsimt.inputs.input_image = 'resting.nii'
    >>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz'
    >>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt']
    >>> wtsimt.cmdline
    'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz \
ants_Affine.txt'
    """

    _cmd = 'WarpTimeSeriesImageMultiTransform'
    input_spec = WarpTimeSeriesImageMultiTransformInputSpec
    output_spec = WarpTimeSeriesImageMultiTransformOutputSpec

    def _format_arg(self, opt, spec, val):
        """Custom CLI formatting for out_postfix and transformation_series."""
        if opt == 'out_postfix':
            # The postfix is appended to the input image's basename.
            _, name, ext = split_filename(
                os.path.abspath(self.inputs.input_image))
            return name + val + ext
        if opt == 'transformation_series':
            series = []
            affine_counter = 0
            for transformation in val:
                if 'Affine' in transformation and \
                        isdefined(self.inputs.invert_affine):
                    affine_counter += 1
                    if affine_counter in self.inputs.invert_affine:
                        # BUG FIX: this used to read ``series += ['-i'],`` --
                        # a one-tuple, which appended a *list* element to
                        # `series` and made ' '.join() below raise TypeError
                        # whenever an affine inversion was requested.
                        series += ['-i']
                series += [transformation]
            return ' '.join(series)
        return super(WarpTimeSeriesImageMultiTransform, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        """The warped image lands in the CWD as <basename><out_postfix><ext>."""
        outputs = self._outputs().get()
        _, name, ext = split_filename(os.path.abspath(self.inputs.input_image))
        outputs['output_image'] = os.path.join(os.getcwd(),
                                               ''.join((name,
                                                        self.inputs.out_postfix,
                                                        ext)))
        return outputs

    def _run_interface(self, runtime, correct_return_codes=[0]):
        # The tool may exit 1 even on success, so accept both return codes
        # and verify completion from its stdout instead.
        runtime = super(WarpTimeSeriesImageMultiTransform, self)._run_interface(runtime, correct_return_codes=[0, 1])
        if "100 % complete" not in runtime.stdout:
            self.raise_exception(runtime)
        return runtime
class WarpImageMultiTransformInputSpec(ANTSCommandInputSpec):
    """Trait definitions mapping to WarpImageMultiTransform CLI arguments."""
    dimension = traits.Enum(3, 2, argstr='%d', usedefault=True,
                            desc='image dimension (2 or 3)', position=1)
    input_image = File(argstr='%s', mandatory=True,
                       desc=('image to apply transformation to (generally a '
                             'coregistered functional)'), position=2)
    output_image = File(genfile=True, hash_files=False, argstr='%s',
                        desc='name of the output warped image', position=3, xor=['out_postfix'])
    # NOTE(review): declared as a File trait although it holds a filename
    # suffix, and the desc says "prepended" while _gen_filename appends it --
    # confirm against upstream before changing.
    out_postfix = File("_wimt", usedefault=True, hash_files=False,
                       desc=('Postfix that is prepended to all output '
                             'files (default = _wimt)'), xor=['output_image'])
    reference_image = File(argstr='-R %s', xor=['tightest_box'],
                           desc='reference image space that you wish to warp INTO')
    tightest_box = traits.Bool(argstr='--tightest-bounding-box',
                               desc=('computes tightest bounding box (overrided by '
                                     'reference_image if given)'),
                               xor=['reference_image'])
    reslice_by_header = traits.Bool(argstr='--reslice-by-header',
                                    desc=('Uses orientation matrix and origin encoded in '
                                          'reference image file header. Not typically used '
                                          'with additional transforms'))
    use_nearest = traits.Bool(argstr='--use-NN',
                              desc='Use nearest neighbor interpolation')
    use_bspline = traits.Bool(argstr='--use-BSpline',
                              desc='Use 3rd order B-Spline interpolation')
    transformation_series = InputMultiPath(File(exists=True), argstr='%s',
                                           desc='transformation file(s) to be applied',
                                           mandatory=True, position=-1)
    # Consumed by _format_arg, which matches "affine" case-insensitively in
    # the transform filenames.
    invert_affine = traits.List(traits.Int,
                                desc=('List of Affine transformations to invert.'
                                      'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines '
                                      'found in transformation_series. Note that indexing '
                                      'starts with 1 and does not include warp fields. Affine '
                                      'transformations are distinguished '
                                      'from warp fields by the word "affine" included in their filenames.'))
class WarpImageMultiTransformOutputSpec(TraitedSpec):
    """Outputs produced by WarpImageMultiTransform."""
    output_image = File(exists=True, desc='Warped image')
class WarpImageMultiTransform(ANTSCommand):
    """Warps an image from one space to another

    Examples
    --------

    >>> from nipype.interfaces.ants import WarpImageMultiTransform
    >>> wimt = WarpImageMultiTransform()
    >>> wimt.inputs.input_image = 'structural.nii'
    >>> wimt.inputs.reference_image = 'ants_deformed.nii.gz'
    >>> wimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt']
    >>> wimt.cmdline
    'WarpImageMultiTransform 3 structural.nii structural_wimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz \
ants_Affine.txt'

    >>> wimt = WarpImageMultiTransform()
    >>> wimt.inputs.input_image = 'diffusion_weighted.nii'
    >>> wimt.inputs.reference_image = 'functional.nii'
    >>> wimt.inputs.transformation_series = ['func2anat_coreg_Affine.txt','func2anat_InverseWarp.nii.gz', \
'dwi2anat_Warp.nii.gz','dwi2anat_coreg_Affine.txt']
    >>> wimt.inputs.invert_affine = [1]
    >>> wimt.cmdline
    'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii \
-i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt'
    """

    _cmd = 'WarpImageMultiTransform'
    input_spec = WarpImageMultiTransformInputSpec
    output_spec = WarpImageMultiTransformOutputSpec

    def _gen_filename(self, name):
        # Only the warped-image filename is auto-generated; anything else
        # falls through to the default machinery (None).
        if name != 'output_image':
            return None
        _, base, ext = split_filename(
            os.path.abspath(self.inputs.input_image))
        return '%s%s%s' % (base, self.inputs.out_postfix, ext)

    def _format_arg(self, opt, spec, val):
        if opt != 'transformation_series':
            return super(WarpImageMultiTransform, self)._format_arg(opt, spec, val)
        # Prefix '-i' to each affine file whose 1-based position (counting
        # affine files only, matched case-insensitively) appears in
        # invert_affine.
        tokens = []
        affines_seen = 0
        for fname in val:
            if 'affine' in fname.lower() and isdefined(self.inputs.invert_affine):
                affines_seen += 1
                if affines_seen in self.inputs.invert_affine:
                    tokens.append('-i')
            tokens.append(fname)
        return ' '.join(tokens)

    def _list_outputs(self):
        outputs = self._outputs().get()
        chosen = (self.inputs.output_image
                  if isdefined(self.inputs.output_image)
                  else self._gen_filename('output_image'))
        outputs['output_image'] = os.path.abspath(chosen)
        return outputs
class ApplyTransformsInputSpec(ANTSCommandInputSpec):
    """Trait definitions mapping to antsApplyTransforms CLI arguments."""
    dimension = traits.Enum(2, 3, 4, argstr='--dimensionality %d',
                            desc=('This option forces the image to be treated '
                                  'as a specified-dimensional image. If not '
                                  'specified, antsWarp tries to infer the '
                                  'dimensionality from the input image.'))
    input_image_type = traits.Enum(0, 1, 2, 3,
                                   argstr='--input-image-type %d',
                                   desc=('Option specifying the input image '
                                         'type of scalar (default), vector, '
                                         'tensor, or time series.'))
    input_image = File(argstr='--input %s', mandatory=True,
                       desc=('image to apply transformation to (generally a '
                             'coregistered functional)'),
                       exists=True)
    # The actual --output fragment is assembled in
    # ApplyTransforms._get_output_warped_filename().
    output_image = traits.Str(argstr='--output %s', desc='output file name',
                              genfile=True, hash_files=False)
    out_postfix = traits.Str("_trans", usedefault=True,
                             desc=('Postfix that is appended to all output '
                                   'files (default = _trans)'))
    reference_image = File(argstr='--reference-image %s', mandatory=True,
                           desc='reference image space that you wish to warp INTO',
                           exists=True)
    interpolation = traits.Enum('Linear',
                                'NearestNeighbor',
                                'CosineWindowedSinc',
                                'WelchWindowedSinc',
                                'HammingWindowedSinc',
                                'LanczosWindowedSinc',
                                'MultiLabel',
                                'Gaussian',
                                'BSpline',
                                argstr='%s', usedefault=True)
    # Extra bracketed parameters for BSpline (order) or Gaussian/MultiLabel
    # (sigma, alpha); consumed by _format_arg, hence no argstr of its own.
    interpolation_parameters = traits.Either(traits.Tuple(traits.Int()),  # BSpline (order)
                                             traits.Tuple(traits.Float(),  # Gaussian/MultiLabel (sigma, alpha)
                                                          traits.Float())
                                             )
    transforms = InputMultiPath(
        File(exists=True), argstr='%s', mandatory=True, desc='transform files: will be applied in reverse order. For example, the last specified transform will be applied first')
    # Parallel to `transforms`; consumed by _get_transform_filenames().
    invert_transform_flags = InputMultiPath(traits.Bool())
    default_value = traits.Float(0.0, argstr='--default-value %g', usedefault=True)
    print_out_composite_warp_file = traits.Bool(False, requires=["output_image"],
                                                desc='output a composite warp file instead of a transformed image')
    # NOTE: trait name shadows the builtin `float`; kept to mirror the CLI
    # flag name (--float).
    float = traits.Bool(argstr='--float %d', default=False, desc='Use float instead of double for computations.')
class ApplyTransformsOutputSpec(TraitedSpec):
    """Outputs produced by ApplyTransforms."""
    output_image = File(exists=True, desc='Warped image')
class ApplyTransforms(ANTSCommand):
    """ApplyTransforms, applied to an input image, transforms it according to a
    reference image and a transform (or a set of transforms).

    Examples
    --------

    >>> from nipype.interfaces.ants import ApplyTransforms
    >>> at = ApplyTransforms()
    >>> at.inputs.dimension = 3
    >>> at.inputs.input_image = 'moving1.nii'
    >>> at.inputs.reference_image = 'fixed1.nii'
    >>> at.inputs.output_image = 'deformed_moving1.nii'
    >>> at.inputs.interpolation = 'Linear'
    >>> at.inputs.default_value = 0
    >>> at.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat']
    >>> at.inputs.invert_transform_flags = [False, False]
    >>> at.cmdline
    'antsApplyTransforms --default-value 0 --dimensionality 3 --input moving1.nii --interpolation Linear \
--output deformed_moving1.nii --reference-image fixed1.nii --transform [ ants_Warp.nii.gz, 0 ] \
--transform [ trans.mat, 0 ]'

    >>> at1 = ApplyTransforms()
    >>> at1.inputs.dimension = 3
    >>> at1.inputs.input_image = 'moving1.nii'
    >>> at1.inputs.reference_image = 'fixed1.nii'
    >>> at1.inputs.output_image = 'deformed_moving1.nii'
    >>> at1.inputs.interpolation = 'BSpline'
    >>> at1.inputs.interpolation_parameters = (5,)
    >>> at1.inputs.default_value = 0
    >>> at1.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat']
    >>> at1.inputs.invert_transform_flags = [False, False]
    >>> at1.cmdline
    'antsApplyTransforms --default-value 0 --dimensionality 3 --input moving1.nii --interpolation BSpline[ 5 ] \
--output deformed_moving1.nii --reference-image fixed1.nii --transform [ ants_Warp.nii.gz, 0 ] \
--transform [ trans.mat, 0 ]'
    """

    _cmd = 'antsApplyTransforms'
    input_spec = ApplyTransformsInputSpec
    output_spec = ApplyTransformsOutputSpec

    def _gen_filename(self, name):
        # Default the output image to <input basename><out_postfix><ext>
        # when output_image was not set explicitly.
        if name == 'output_image':
            output = self.inputs.output_image
            if not isdefined(output):
                _, name, ext = split_filename(self.inputs.input_image)
                output = name + self.inputs.out_postfix + ext
            return output
        return None

    def _get_transform_filenames(self):
        # Build the repeated ``--transform [ file, invert ]`` fragments;
        # invert_code 1 inverts the transform, 0 applies it as-is.
        retval = []
        for ii in range(len(self.inputs.transforms)):
            if isdefined(self.inputs.invert_transform_flags):
                if len(self.inputs.transforms) == len(self.inputs.invert_transform_flags):
                    invert_code = 1 if self.inputs.invert_transform_flags[
                        ii] else 0
                    retval.append("--transform [ %s, %d ]" %
                                  (self.inputs.transforms[ii], invert_code))
                else:
                    raise Exception(("ERROR: The useInverse list must have the same number "
                                     "of entries as the transformsFileName list."))
            else:
                retval.append("--transform %s" % self.inputs.transforms[ii])
        return " ".join(retval)

    def _get_output_warped_filename(self):
        # The bracketed --output form asks the tool for a composite warp
        # file instead of a resampled image.
        if isdefined(self.inputs.print_out_composite_warp_file):
            return "--output [ %s, %d ]" % (self._gen_filename("output_image"),
                                            int(self.inputs.print_out_composite_warp_file))
        else:
            return "--output %s" % (self._gen_filename("output_image"))

    def _format_arg(self, opt, spec, val):
        # Route the traits with non-trivial CLI syntax through the helpers.
        if opt == "output_image":
            return self._get_output_warped_filename()
        elif opt == "transforms":
            return self._get_transform_filenames()
        elif opt == 'interpolation':
            # Only these three interpolators accept bracketed parameters.
            if self.inputs.interpolation in ['BSpline', 'MultiLabel', 'Gaussian'] and \
                    isdefined(self.inputs.interpolation_parameters):
                return '--interpolation %s[ %s ]' % (self.inputs.interpolation,
                                                     ', '.join([str(param)
                                                                for param in self.inputs.interpolation_parameters]))
            else:
                return '--interpolation %s' % self.inputs.interpolation
        return super(ApplyTransforms, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        # Report the absolute path of the (possibly auto-generated) output.
        outputs = self._outputs().get()
        outputs['output_image'] = os.path.abspath(
            self._gen_filename('output_image'))
        return outputs
class ApplyTransformsToPointsInputSpec(ANTSCommandInputSpec):
    """Trait definitions mapping to antsApplyTransformsToPoints CLI arguments."""
    dimension = traits.Enum(2, 3, 4, argstr='--dimensionality %d',
                            desc=('This option forces the image to be treated '
                                  'as a specified-dimensional image. If not '
                                  'specified, antsWarp tries to infer the '
                                  'dimensionality from the input image.'))
    input_file = File(argstr='--input %s', mandatory=True,
                      desc=("Currently, the only input supported is a csv file with "
                            "columns including x,y (2D), x,y,z (3D) or x,y,z,t,label (4D) column headers."
                            "The points should be defined in physical space."
                            "If in doubt how to convert coordinates from your files to the space"
                            "required by antsApplyTransformsToPoints try creating/drawing a simple"
                            "label volume with only one voxel set to 1 and all others set to 0."
                            "Write down the voxel coordinates. Then use ImageMaths LabelStats to find"
                            "out what coordinates for this voxel antsApplyTransformsToPoints is"
                            "expecting."),
                      exists=True)
    # name_source/name_template derive the default output name from
    # input_file ('<stem>_transformed.csv').
    output_file = traits.Str(argstr='--output %s',
                             desc='Name of the output CSV file', name_source=['input_file'],
                             hash_files=False, name_template='%s_transformed.csv')
    # The CLI fragments for these two are assembled in
    # ApplyTransformsToPoints._get_transform_filenames().
    transforms = traits.List(File(exists=True), argstr='%s', mandatory=True,
                             desc='transforms that will be applied to the points')
    invert_transform_flags = traits.List(traits.Bool(),
                                         desc='list indicating if a transform should be reversed')
class ApplyTransformsToPointsOutputSpec(TraitedSpec):
    """Outputs produced by ApplyTransformsToPoints."""
    output_file = File(exists=True, desc='csv file with transformed coordinates')
class ApplyTransformsToPoints(ANTSCommand):
    """ApplyTransformsToPoints, applied to an CSV file, transforms coordinates
    using provided transform (or a set of transforms).

    Examples
    --------

    >>> from nipype.interfaces.ants import ApplyTransformsToPoints
    >>> at = ApplyTransformsToPoints()
    >>> at.inputs.dimension = 3
    >>> at.inputs.input_file = 'moving.csv'
    >>> at.inputs.transforms = ['trans.mat', 'ants_Warp.nii.gz']
    >>> at.inputs.invert_transform_flags = [False, False]
    >>> at.cmdline
    'antsApplyTransformsToPoints --dimensionality 3 --input moving.csv --output moving_transformed.csv \
--transform [ trans.mat, 0 ] --transform [ ants_Warp.nii.gz, 0 ]'
    """
    # BUG FIX: the doctest above used to import ApplyTransforms while
    # instantiating ApplyTransformsToPoints, so running the doctest raised
    # a NameError.

    _cmd = 'antsApplyTransformsToPoints'
    input_spec = ApplyTransformsToPointsInputSpec
    output_spec = ApplyTransformsToPointsOutputSpec

    def _get_transform_filenames(self):
        """Build the repeated ``--transform [ file, invert ]`` CLI fragments.

        Raises when invert_transform_flags is set but its length does not
        match the number of transforms.
        """
        retval = []
        for ii in range(len(self.inputs.transforms)):
            if isdefined(self.inputs.invert_transform_flags):
                if len(self.inputs.transforms) == len(self.inputs.invert_transform_flags):
                    # 1 inverts the transform, 0 applies it as-is.
                    invert_code = 1 if self.inputs.invert_transform_flags[
                        ii] else 0
                    retval.append("--transform [ %s, %d ]" %
                                  (self.inputs.transforms[ii], invert_code))
                else:
                    raise Exception(("ERROR: The useInverse list must have the same number "
                                     "of entries as the transformsFileName list."))
            else:
                retval.append("--transform %s" % self.inputs.transforms[ii])
        return " ".join(retval)

    def _format_arg(self, opt, spec, val):
        """Route the transforms trait through the custom CLI formatter."""
        if opt == "transforms":
            return self._get_transform_filenames()
        return super(ApplyTransformsToPoints, self)._format_arg(opt, spec, val)
| {
"repo_name": "FCP-INDI/nipype",
"path": "nipype/interfaces/ants/resampling.py",
"copies": "6",
"size": "21783",
"license": "bsd-3-clause",
"hash": 2337702489026583000,
"line_mean": 49.5406032483,
"line_max": 178,
"alpha_frac": 0.5647982372,
"autogenerated": false,
"ratio": 4.340972499003587,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7905770736203587,
"avg_score": null,
"num_lines": null
} |
# ANT+ - Stride Based Speed and Distance Sensor Example
#
# SDM demo working with OpenAnt Library (https://github.com/Tigge/openant)
# using feature of:
# - acting as Transmitting Device (TX-Broadcast)
# - gracefully close ANT-Channels
#
# For further details on Speed & Distance Sensor, check out the thisisant.com webpage
import array
import threading
import logging
import time
from ant.easy.node import Node
from ant.easy.channel import Channel
from ant.base.message import Message
from ant.base.commons import format_list
import struct
import sys
# Definition of Variables
NETWORK_KEY = [0xB9, 0xA5, 0x21, 0xFB, 0xBD, 0x72, 0xC3, 0x45]
Device_Type = 124 # 124 = Stride & Distance Sensor
Device_Number = 12345 # Change if you need.
Channel_Period = 8134
Channel_Frequency = 57
# Fictive Config of Treadmill
TreadmillSpeed = 2.777 # m/s => 10km/h
TreadmillCadence = 160
##########################################################################
class AntSendDemo:
    """Broadcasts simulated ANT+ Stride-Based Speed & Distance (SDM) pages.

    Acts as a transmitting (master) device: page 80 (manufacturer info) is
    sent at startup, page 81 (product info) mid-cycle, and page 1
    (time/distance/speed/strides) otherwise, driven by the fictive treadmill
    constants defined at module level.
    """

    def __init__(self):
        self.ANTMessageCount = 0
        self.ANTMessagePayload = array.array("B", [0, 0, 0, 0, 0, 0, 0, 0])

        # Accumulators for the SDM data-page fields.
        self.LastStrideTime = 0
        self.StridesDone = 0
        self.DistanceAccu = 0
        self.Speed_Last = 0
        # BUG FIX: Distance_Last was never initialized, so the first call to
        # Create_Next_DataPage raised AttributeError when comparing it below.
        self.Distance_Last = 0
        self.TimeRollover = 0
        # Defensive defaults in case the time bytes are read before the
        # first speed/distance change computes them.
        self.TimeRollover_H = 0
        self.TimeRollover_L_HEX = 0
        self.TimeProgramStart = time.time()
        self.LastTimeEvent = time.time()

    def Create_Next_DataPage(self):
        """Build and return the next 8-byte ANT+ broadcast payload."""
        UpdateLatency_7 = 0
        self.ANTMessageCount += 1

        # Wall-clock time elapsed since the previous TX event.
        self.ElapsedSseconds = time.time() - self.LastTimeEvent
        self.LastTimeEvent = time.time()
        UpdateLatency_7 += self.ElapsedSseconds  # 1 second / 32 = 0.03125
        UL_7 = int(UpdateLatency_7 / 0.03125)

        # Stride count: incremented once for every two footfalls.
        StrideCountUpValue = 60.0 / (TreadmillCadence / 2.0)  # 0.75 in this demo
        while self.LastStrideTime > StrideCountUpValue:
            self.StridesDone += 1
            self.LastStrideTime -= StrideCountUpValue
        self.LastStrideTime += self.ElapsedSseconds
        if self.StridesDone > 255:
            # NOTE(review): ANT+ byte rollover is normally modulo 256; the
            # original subtracted 255 -- confirm against the SDM profile.
            self.StridesDone -= 255

        # DISTANCE: accumulated metres with byte rollover.
        self.DistanceBetween = self.ElapsedSseconds * TreadmillSpeed
        self.DistanceAccu += (
            self.DistanceBetween
        )  # add distance between two ANT+ ticks to the accumulator
        if self.DistanceAccu > 255:
            self.DistanceAccu -= 255
        self.distance_H = int(self.DistanceAccu)  # integer part only
        self.DistanceLow_HEX = int((self.DistanceAccu - self.distance_H) * 16)

        # SPEED: integer part plus 1/256 fractional part.
        self.var_speed_ms_H = int(TreadmillSpeed)
        self.var_speed_ms_L = int(TreadmillSpeed * 1000) - (self.var_speed_ms_H * 1000)
        self.var_speed_ms_L_HEX = int((TreadmillSpeed - self.var_speed_ms_H) * 256)

        # TIME: only advanced while speed or distance is changing
        # (see the SDM specification).
        if self.Speed_Last != TreadmillSpeed or self.Distance_Last != self.DistanceAccu:
            self.TimeRollover += self.ElapsedSseconds
            if self.TimeRollover > 255:
                self.TimeRollover -= 255
            self.TimeRollover_H = int(self.TimeRollover)
            # integer part only, clamped to one byte
            if self.TimeRollover_H > 255:
                self.TimeRollover_H = 255
            self.TimeRollover_L_HEX = int((self.TimeRollover - self.TimeRollover_H) * 200)
            if self.TimeRollover_L_HEX > 255:
                self.TimeRollover_L_HEX -= 255
        self.Speed_Last = TreadmillSpeed
        self.Distance_Last = self.DistanceAccu

        if self.ANTMessageCount < 3:
            # Data Page 80: manufacturer information (sent at cycle start).
            self.ANTMessagePayload[0] = 80
            self.ANTMessagePayload[1] = 0xFF
            self.ANTMessagePayload[2] = 0xFF  # reserved
            self.ANTMessagePayload[3] = 1  # HW revision
            self.ANTMessagePayload[4] = 1
            self.ANTMessagePayload[5] = 1  # manufacturer ID
            self.ANTMessagePayload[6] = 1
            self.ANTMessagePayload[7] = 1  # model number
        elif self.ANTMessageCount > 64 and self.ANTMessageCount < 67:
            # Data Page 81: product information (sent mid-cycle).
            self.ANTMessagePayload[0] = 81
            self.ANTMessagePayload[1] = 0xFF
            self.ANTMessagePayload[2] = 0xFF  # reserved
            self.ANTMessagePayload[3] = 1  # SW revision
            self.ANTMessagePayload[4] = 0xFF
            self.ANTMessagePayload[5] = 0xFF  # serial number
            self.ANTMessagePayload[6] = 0xFF
            self.ANTMessagePayload[7] = 0xFF  # serial number
        else:
            # Data Page 1: time / distance / speed / stride count.
            self.ANTMessagePayload[0] = 0x01
            self.ANTMessagePayload[1] = self.TimeRollover_L_HEX
            self.ANTMessagePayload[2] = self.TimeRollover_H
            self.ANTMessagePayload[3] = self.distance_H  # accumulated distance, integer
            # Byte 4 packs the distance fraction (high nibble) with the
            # integer speed (low nibble).
            self.ANTMessagePayload[4] = (
                self.DistanceLow_HEX * 16 + self.var_speed_ms_H
            )
            self.ANTMessagePayload[
                5
            ] = self.var_speed_ms_L_HEX  # instantaneous speed, fractional
            self.ANTMessagePayload[6] = self.StridesDone  # stride count
            self.ANTMessagePayload[7] = UL_7  # update latency

        # Restart the page-interleave cycle.
        if self.ANTMessageCount > 131:
            self.ANTMessageCount = 0

        return self.ANTMessagePayload

    def on_event_tx(self, data):
        """TX-event callback: build the next page and broadcast it."""
        ANTMessagePayload = self.Create_Next_DataPage()
        self.ActualTime = time.time() - self.TimeProgramStart

        # Final call for broadcasting data.
        self.channel.send_broadcast_data(
            self.ANTMessagePayload
        )
        print(
            self.ActualTime,
            "TX:",
            Device_Number,
            ",",
            Device_Type,
            ":",
            format_list(ANTMessagePayload),
        )

    def OpenChannel(self):
        """Configure and open the ANT+ master channel, then run the node."""
        self.node = Node()  # initialize the ANT+ device as node

        # CHANNEL CONFIGURATION
        self.node.set_network_key(0x00, NETWORK_KEY)  # set network key
        self.channel = self.node.new_channel(
            Channel.Type.BIDIRECTIONAL_TRANSMIT, 0x00, 0x00
        )  # set channel, master TX
        self.channel.set_id(
            Device_Number, Device_Type, 5
        )  # channel id: <Device Number, Device Type, Transmission Type>
        self.channel.set_period(Channel_Period)
        self.channel.set_rf_freq(Channel_Frequency)

        # Callback invoked for each TX event.
        self.channel.on_broadcast_tx_data = self.on_event_tx

        try:
            self.channel.open()  # open the ANT channel with this configuration
            self.node.start()
        except KeyboardInterrupt:
            print("Closing ANT+ Channel...")
            self.channel.close()
            self.node.stop()
        finally:
            print("Final checking...")
            # not sure if there is anything else we should check?! :)
###########################################################################################################################
def main():
    """Entry point: configure file logging and run the broadcast demo."""
    print("ANT+ Send Broadcast Demo")
    # Debug-level log file; drop or reduce verbosity in a live version.
    logging.basicConfig(
        filename="example.log", level=logging.DEBUG
    )
    demo = AntSendDemo()
    try:
        demo.OpenChannel()  # start broadcasting
    except KeyboardInterrupt:
        print("Closing ANT+ Channel!")
    finally:
        print("Finally...")
        logging.shutdown()  # shut the logger down cleanly
    print("Close demo...")


if __name__ == "__main__":
    main()
| {
"repo_name": "Tigge/openant",
"path": "examples/speed_and_distance.py",
"copies": "1",
"size": "8403",
"license": "mit",
"hash": -973838366314077400,
"line_mean": 35.5133928571,
"line_max": 123,
"alpha_frac": 0.5884803047,
"autogenerated": false,
"ratio": 3.790257104194858,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48787374088948576,
"avg_score": null,
"num_lines": null
} |
# A number chain is created by continuously adding the square
# of the digits in a number to form a new number until it has
# been seen before.
# For example,
# 44 -> 32 -> 13 -> 10 -> 1 -> 1
# 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89
# Therefore any chain that arrives at 1 or 89 will become stuck
# in an endless loop. What is most amazing is that EVERY starting
# number will eventually arrive at 1 or 89.
# How many starting numbers below ten million will arrive at 89?
from collections import Counter
from math import factorial
from itertools import combinations_with_replacement
def is_ended_eighty_nine(num):
    """Return True if the square-digit chain starting at *num* reaches 89.

    Every positive starting number eventually gets stuck at 1 or 89; zero is
    treated as never reaching 89.
    """
    if num == 0:
        return False
    # '!=' (not the Python-2-only '<>') keeps this portable to Python 3.
    while num != 89 and num != 1:
        num = sum(int(digit) ** 2 for digit in str(num))
    return num == 89


# Count starting numbers below ten million (all 7-digit strings, leading
# zeros allowed) whose chain arrives at 89.  Rather than walking all 10**7
# numbers, enumerate digit multisets: the chain outcome depends only on the
# multiset of digits, and each multiset covers 7! / prod(count!) distinct
# numbers (the multinomial coefficient).
ans = 0
for nums in combinations_with_replacement(range(10), 7):
    if is_ended_eighty_nine(sum(x ** 2 for x in nums)):
        counts = list(Counter(nums).values())
        perms = factorial(sum(counts))
        for c in counts:
            # // replaces the Python-2-only bare `reduce` + `/` combo; the
            # division is exact because multinomial coefficients are integers.
            perms //= factorial(c)
        ans += perms
print(ans)
| {
"repo_name": "cloudzfy/euler",
"path": "src/92.py",
"copies": "1",
"size": "1031",
"license": "mit",
"hash": 5958949826176366000,
"line_mean": 29.3235294118,
"line_max": 90,
"alpha_frac": 0.6779825412,
"autogenerated": false,
"ratio": 3.191950464396285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9296502639528481,
"avg_score": 0.014686073213560594,
"num_lines": 34
} |
# A number chain is created by continuously adding the square of the digits in a number to form a new number until it has been seen before.
#
# For example,
#
# 44 → 32 → 13 → 10 → 1 → 1
# 85 → 89 → 145 → 42 → 20 → 4 → 16 → 37 → 58 → 89
#
# Therefore any chain that arrives at 1 or 89 will become stuck in an endless loop. What is most amazing is that EVERY starting number will eventually arrive at 1 or 89.
#
# How many starting numbers below ten million will arrive at 89?
def f(n):
    """Return the sum of the squares of the decimal digits of *n*."""
    return sum(int(digit) ** 2 for digit in str(n))
def not_cycled_yet(n):
    """True while *n* has not yet reached either fixed point (1 or 89)."""
    return n not in (1, 89)
def f_seq(n):
    """Return the square-digit chain from *n*, ending at the first 1 or 89.

    Zero gets an empty chain, since its sequence never reaches either
    fixed point.
    """
    if not n:
        return []
    chain = [n]
    current = n
    while not_cycled_yet(current):
        current = f(current)
        chain.append(current)
    return chain
def calc_terminations(seq, precalced):
    """Memoize the chain's terminal value for each new element of *seq*.

    Stops at the first element already present in *precalced*, because the
    rest of the chain was cached by an earlier run.  Returns the terminal
    value (the last element of *seq*).
    """
    terminal = seq[-1]
    for element in seq:
        if element in precalced:
            break
        precalced[element] = terminal
    return terminal
def calc(UB):
    """Count the integers in [1, UB] whose square-digit chain ends at 89.

    Chain terminals are memoized across starting numbers, so shared chain
    suffixes are only walked once.
    """
    terminations = {}
    eightynines = 0
    # range() replaces the Python-2-only xrange(); iterating 1..UB directly
    # also removes the confusing in-loop mutation of the loop variable.
    for n in range(1, UB + 1):
        seq = f_seq(n)
        terminations[n] = calc_terminations(seq, terminations)
        if terminations[n] == 89:
            eightynines += 1
    return eightynines
"repo_name": "bgwines/project-euler",
"path": "src/solved/problem92.py",
"copies": "1",
"size": "1172",
"license": "bsd-3-clause",
"hash": 5078906206434369000,
"line_mean": 22.8541666667,
"line_max": 169,
"alpha_frac": 0.6582167832,
"autogenerated": false,
"ratio": 2.4923747276688455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8305231288486037,
"avg_score": 0.06907204447656186,
"num_lines": 48
} |
# a "number" class with fuzzy values
class FuzzNum(object):
    """A floating-point number with a fuzzy value: a median plus a variance.

    Arithmetic applies the operator independently to the medians and to the
    variances (the constructor clamps a negative variance to zero).  Two
    fuzzy numbers compare equal when their medians are within the combined
    variance of one another; strict ordering compares medians only.

    The operator methods used to be generated with exec() on format strings;
    they are written out explicitly here so the class is introspectable,
    debuggable and lint-friendly, with identical behavior.
    """

    def __init__(self, median=0., variance=0.):
        self.med = median
        # The fuzz radius must be zero or positive.
        self.var = max(variance, 0)

    def __str__(self):
        return "{0} +- {1}".format(self.med, self.var)

    # --- arithmetic: medians combine with medians, variances with variances

    def __add__(self, other): return FuzzNum(self.med + other.med, self.var + other.var)
    def __radd__(self, other): return FuzzNum(other.med + self.med, other.var + self.var)
    def __sub__(self, other): return FuzzNum(self.med - other.med, self.var - other.var)
    def __rsub__(self, other): return FuzzNum(other.med - self.med, other.var - self.var)
    def __mul__(self, other): return FuzzNum(self.med * other.med, self.var * other.var)
    def __rmul__(self, other): return FuzzNum(other.med * self.med, other.var * self.var)
    def __truediv__(self, other): return FuzzNum(self.med / other.med, self.var / other.var)
    def __rtruediv__(self, other): return FuzzNum(other.med / self.med, other.var / self.var)
    def __floordiv__(self, other): return FuzzNum(self.med // other.med, self.var // other.var)
    def __rfloordiv__(self, other): return FuzzNum(other.med // self.med, other.var // self.var)
    def __mod__(self, other): return FuzzNum(self.med % other.med, self.var % other.var)
    def __rmod__(self, other): return FuzzNum(other.med % self.med, other.var % self.var)
    def __pow__(self, other): return FuzzNum(self.med ** other.med, self.var ** other.var)
    def __rpow__(self, other): return FuzzNum(other.med ** self.med, other.var ** self.var)

    # --- comparisons

    def __eq__(self, other):
        # Equal when the medians differ by no more than the combined fuzz.
        # (As in the original, no __hash__ is defined, so instances are
        # unhashable under Python 3.)
        diff = abs(self.med - other.med)
        slop = self.var + other.var
        return diff <= slop

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other): return self.med < other.med
    def __gt__(self, other): return self.med > other.med

    # Compound comparisons: fuzzy-equal or strictly ordered by median.
    def __le__(self, other): return self == other or self < other
    def __ge__(self, other): return self == other or self > other
#----------------------------------------------------
# some test code below
#----------------------------------------------------
# Smoke-test: evaluate each expression against two sample fuzzy numbers
# and print the expression text alongside its result.
test_stuff = ("a", "b", "a == b", "a < b", "a >= b", "a > b", "a + b",
              "a - b", "b - a", "a - a", "a / b", "a // b", "a * b",
              "b / a", "b // a")
a = FuzzNum(3, 2.5)
b = FuzzNum(7.1, 3)
for expression in test_stuff:
    # eval() the expression text; the label and the computed value print
    # exactly as the old exec()-built statement did.
    print("{0}:".format(expression), eval(expression))
| {
"repo_name": "dudecon/Gatheling",
"path": "Old Reference Files/Fuzzy_Number.py",
"copies": "1",
"size": "2094",
"license": "unlicense",
"hash": -51167928862811070,
"line_mean": 40.0588235294,
"line_max": 95,
"alpha_frac": 0.5210124164,
"autogenerated": false,
"ratio": 3.519327731092437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9442178380469046,
"avg_score": 0.01963235340467814,
"num_lines": 51
} |
# A number is Sparse if there are no two adjacent 1s in its binary representation.
# Given a number n, find the smallest Sparse number which greater than or equal to n.
# eg. 5 (binary representation: 101) is sparse, but 6 (binary representation: 110) is not sparse.
# Example
# Given n = 6, return 8
# Next Sparse Number is 8
# Given n = 4, return 4
# Next Sparse Number is 4
# Given n = 38, return 40
# Next Sparse Number is 40
# Given n = 44, return 64
# Next Sparse Number is 64
# idea:
# <------
# 01010001011101
# ||
# 01010001100000
# ||
# 01010010000000
class Solution:
    """
    @param: : a number
    @return: return the next sparse number behind x
    """
    def nextSparseNum(self, x):
        """Return the smallest sparse number >= x.

        A number is sparse when its binary representation has no two
        adjacent 1 bits.  Scan the bits LSB-first; whenever a pair of
        adjacent 1s is found, carry a 1 into the next higher bit and
        clear every bit at or below the pair (those can no longer
        contribute to a smaller valid result).

        Fixes in this revision:
        * ``len()`` was called on a ``reversed`` iterator (TypeError);
        * the bit list was never reversed back to MSB order before
          ``int(..., 2)`` (e.g. 38 produced 5 instead of 40);
        * no padding bit existed for a carry out of the MSB (6 -> 8).
        """
        # Bits in LSB-first order, padded with one extra 0 so a carry out
        # of the current most-significant bit has somewhere to land.
        bits = [int(b) for b in reversed(bin(x)[2:])] + [0]
        last_final = 0  # everything below this index is already final
        for i in range(1, len(bits) - 1):
            if bits[i] == 1 and bits[i - 1] == 1 and bits[i + 1] == 0:
                bits[i + 1] = 1  # carry the adjacent pair upward
                # Clear all lower bits down to the last finalized position.
                for j in range(i, last_final - 1, -1):
                    bits[j] = 0
                last_final = i + 1
        # Reverse back to MSB-first before converting to an integer.
        return int("".join(str(b) for b in reversed(bits)), 2)
"repo_name": "AtlantisFox/Green-Point-Challenge",
"path": "leetcode/next-sparse-number.py",
"copies": "1",
"size": "1114",
"license": "mit",
"hash": -3192023976232195000,
"line_mean": 25.5476190476,
"line_max": 97,
"alpha_frac": 0.5897666068,
"autogenerated": false,
"ratio": 3.0189701897018972,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4108736796501897,
"avg_score": null,
"num_lines": null
} |
"""A number of GUIDs with invalid or missing referents
were found during the mongo -> postgres migration.
These GUIDS were parsed from the migration logs and written to scripts/orphaned_guids.json.
This script adds a field, `is_orphaned` to these GUIDS and sets it to True so that they
can be skipped during the mongo -> postgres migration.
"""
import json
import sys
import os
import logging
from scripts import utils as script_utils
from framework.mongo import database
logger = logging.getLogger(__name__)
HERE = os.path.dirname(os.path.abspath(__file__))
def main(dry=True):
    """Flag every GUID listed in orphaned_guids.json as orphaned.

    Reads the per-collection GUID lists produced from the migration logs
    and, unless *dry* is true, sets ``is_orphaned: True`` on the matching
    documents so the mongo -> postgres migration can skip them.
    """
    with open(os.path.join(HERE, 'orphaned_guids.json'), 'r') as fp:
        orphaned_guids = json.load(fp)

    # .items() instead of the Python-2-only .iteritems() keeps the script
    # runnable under both Python 2 and Python 3.
    for collection_name, guids in orphaned_guids.items():
        logger.info('Updating {} GUIDs that point to the collection: {}'.format(
            len(guids), collection_name
        ))
        if not dry:
            database.guid.update(
                {'_id': {'$in': guids}},
                {'$set': {'is_orphaned': True}},
                multi=True
            )
if __name__ == '__main__':
    # --dry performs a read-only run: log what would change, touch nothing.
    dry = '--dry' in sys.argv
    # Route log records to stderr with a compact "[name] LEVEL: message" format.
    handler = logging.StreamHandler()
    formatter = logging.Formatter(
        '[%(name)s] %(levelname)s: %(message)s',
    )
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    if not dry:
        # Real runs additionally get a persistent file log for auditing.
        script_utils.add_file_logger(logger, __file__)
    main(dry=dry)
| {
"repo_name": "cwisecarver/osf.io",
"path": "scripts/mark_orphaned_guids.py",
"copies": "7",
"size": "1452",
"license": "apache-2.0",
"hash": -8794698365640142000,
"line_mean": 29.25,
"line_max": 91,
"alpha_frac": 0.6391184573,
"autogenerated": false,
"ratio": 3.7714285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7910547028728571,
"avg_score": null,
"num_lines": null
} |
"""A number of hard-to-fit functions taken from Wikipedia:
http://en.wikipedia.org/wiki/Test_functions_for_optimization
The number of iterations for convergence may differ, so if these tests
fail, carefully check if this is simply because the test fit has not
converged yet. (A.k.a. tests need to be improved.)
"""
import mpyfit
import unittest
import numpy
class SphereFunction(unittest.TestCase):
@staticmethod
def func(p):
return p[0]**2 + p[1]**2
def test_fit(self):
p = [1, 1]
p, result = mpyfit.fit(self.func, p)
self.assertEqual(result['status'][0], 5)
self.assertAlmostEqual(p[0], 0)
self.assertAlmostEqual(p[1], 0)
def test_fit10000iterations(self):
p = [1, 1]
p, result = mpyfit.fit(self.func, p, maxiter=50000)
self.assertEqual(result['status'][0], 4)
self.assertAlmostEqual(p[0], 0)
self.assertAlmostEqual(p[1], 0)
class RosenbrockFunction(unittest.TestCase):
@staticmethod
def func(p):
return (1 - p[0])**2 + 100 * (p[1] - p[0]*p[0])**2
def test_fit(self):
p = [0, 0]
p, result = mpyfit.fit(self.func, p)
# Does not even come close
self.assertNotAlmostEqual(round(p[0], 1), 1.0)
self.assertNotAlmostEqual(round(p[1], 1), 1.0)
def test_fit50000iterations(self):
p = [0, 0]
p, result = mpyfit.fit(self.func, p, maxiter=50000)
self.assertAlmostEqual(p[0], 1., places=4)
self.assertAlmostEqual(p[1], 1., places=4)
class BealesFunction(unittest.TestCase):
@staticmethod
def func(p):
return ((1.5 - p[0] + p[0]*p[1])**2 +
(2.25 - p[0] + p[0]*p[1]*p[1])**2 +
(2.625 - p[0] + p[0]*p[1]*p[1]*p[1])**2)
def test_fit(self):
p = [0, 0]
p, result = mpyfit.fit(self.func, p, maxiter=10000)
self.assertEqual(result['status'][0], 2)
self.assertAlmostEqual(p[0], 3.0, places=5)
self.assertAlmostEqual(p[1], 0.5, places=5)
if __name__ == '__main__':
    # Discover and run every TestCase class defined in this module.
    unittest.main()
| {
"repo_name": "evertrol/mpyfit",
"path": "test/testfunctions.py",
"copies": "1",
"size": "2078",
"license": "bsd-2-clause",
"hash": -6800201053774186000,
"line_mean": 27.0810810811,
"line_max": 70,
"alpha_frac": 0.5866217517,
"autogenerated": false,
"ratio": 3.0922619047619047,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4178883656461905,
"avg_score": null,
"num_lines": null
} |
"""A number of useful helper functions to automate common tasks."""
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.admin.sites import NotRegistered
from django.utils.encoding import force_text
from reversion.admin import VersionAdmin
def patch_admin(model, admin_site=None):
    """
    Enables version control with full admin integration for a model that has
    already been registered with the django admin site.

    This is excellent for adding version control to existing Django contrib
    applications.
    """
    site = admin_site or admin.site
    # Look up the currently registered options class for the model.
    registration = site._registry.get(model)
    if registration is None:
        raise NotRegistered("The model {model} has not been registered with the admin site.".format(
            model=model,
        ))
    ModelAdmin = registration.__class__
    # Drop the existing registration, then re-register under a subclass
    # that mixes VersionAdmin in ahead of the original admin options.
    site.unregister(model)

    class PatchedModelAdmin(VersionAdmin, ModelAdmin):
        pass

    site.register(model, PatchedModelAdmin)
# Patch generation methods, only available if the google-diff-match-patch
# library is installed.
#
# http://code.google.com/p/google-diff-match-patch/
try:
    from diff_match_patch import diff_match_patch
except ImportError:
    pass
else:
    dmp = diff_match_patch()

    def generate_diffs(old_version, new_version, field_name, cleanup):
        """Generates a diff array for the named field between the two versions."""
        # Missing/None field values diff as empty strings.
        old_text = old_version.field_dict[field_name] or ""
        new_text = new_version.field_dict[field_name] or ""
        diffs = dmp.diff_main(force_text(old_text), force_text(new_text))
        # Optional post-processing pass over the raw diff.
        if cleanup == "semantic":
            dmp.diff_cleanupSemantic(diffs)
        elif cleanup == "efficiency":
            dmp.diff_cleanupEfficiency(diffs)
        elif cleanup is not None:
            raise ValueError("cleanup parameter should be one of 'semantic', 'efficiency' or None.")
        return diffs

    def generate_patch(old_version, new_version, field_name, cleanup=None):
        """
        Generates a text patch of the named field between the two versions.

        The cleanup parameter can be None, "semantic" or "efficiency" to clean up the diff
        for greater human readibility.
        """
        diffs = generate_diffs(old_version, new_version, field_name, cleanup)
        return dmp.patch_toText(dmp.patch_make(diffs))

    def generate_patch_html(old_version, new_version, field_name, cleanup=None):
        """
        Generates a pretty html version of the differences between the named
        field in two versions.

        The cleanup parameter can be None, "semantic" or "efficiency" to clean up the diff
        for greater human readibility.
        """
        diffs = generate_diffs(old_version, new_version, field_name, cleanup)
        return dmp.diff_prettyHtml(diffs)
| {
"repo_name": "Glasgow2015/team-10",
"path": "env/lib/python2.7/site-packages/reversion/helpers.py",
"copies": "13",
"size": "3062",
"license": "apache-2.0",
"hash": -1127671526485623900,
"line_mean": 34.6046511628,
"line_max": 100,
"alpha_frac": 0.6694970607,
"autogenerated": false,
"ratio": 4.21763085399449,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A number of useful helper functions to automate common tasks."""
from django.contrib import admin
from django.contrib.admin.sites import NotRegistered
from reversion.admin import VersionAdmin
def patch_admin(model, admin_site=None):
    """
    Enables version control with full admin integration for a model that has
    already been registered with the django admin site.

    This is excellent for adding version control to existing Django contrib
    applications.
    """
    admin_site = admin_site or admin.site
    try:
        ModelAdmin = admin_site._registry[model].__class__
    except KeyError:
        # Call-style raise works on both Python 2 and 3; the old
        # "raise Exc, msg" comma form is a SyntaxError on Python 3.
        raise NotRegistered("The model %r has not been registered with the admin site." % model)
    # Unregister existing admin class.
    admin_site.unregister(model)
    # Register patched admin class.
    class PatchedModelAdmin(VersionAdmin, ModelAdmin):
        pass
    admin_site.register(model, PatchedModelAdmin)
# Patch generation methods, only available if the google-diff-match-patch
# library is installed.
#
# http://code.google.com/p/google-diff-match-patch/
try:
    from diff_match_patch import diff_match_patch
except ImportError:
    pass
else:
    dmp = diff_match_patch()

    def generate_diffs(old_version, new_version, field_name):
        """Generates a diff array for the named field between the two versions."""
        # Pull the stored field text out of each serialized version.
        return dmp.diff_main(old_version.field_dict[field_name],
                             new_version.field_dict[field_name])

    def generate_patch(old_version, new_version, field_name):
        """
        Generates a text patch of the named field between the two versions.
        """
        diffs = generate_diffs(old_version, new_version, field_name)
        return dmp.patch_toText(dmp.patch_make(diffs))

    def generate_patch_html(old_version, new_version, field_name):
        """
        Generates a pretty html version of the differences between the named
        field in two versions.
        """
        return dmp.diff_prettyHtml(
            generate_diffs(old_version, new_version, field_name))
| {
"repo_name": "empty/django-reversion",
"path": "reversion/helpers.py",
"copies": "1",
"size": "2254",
"license": "bsd-3-clause",
"hash": 8538974544560545000,
"line_mean": 32.1470588235,
"line_max": 96,
"alpha_frac": 0.6805678793,
"autogenerated": false,
"ratio": 4.174074074074074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01347500969737963,
"num_lines": 68
} |
# An unbelievably disgusting compatibility layer because I'm being rushed.
# ---AG
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import dossier.web as web
import yakonfig
class Folders(web.Folders):
    """Compatibility shim exposing the old Folders API on top of
    :class:`dossier.web.Folders`."""

    DEFAULT_ANNOTATOR_ID = 'unknown'

    def __init__(self, *args, **kwargs):
        # Pull configuration from the global yakonfig store, falling back
        # to an empty dict when none has been loaded.
        try:
            config = yakonfig.get_global_config('dossier.folders')
            if 'prefix' in config:
                # Older configs used 'prefix' for what is now 'namespace'.
                config['namespace'] = config.pop('prefix')
        except KeyError:
            config = {}
        merged = dict(config, **kwargs)
        super(Folders, self).__init__(*args, **merged)

    @staticmethod
    def name_to_id(v):
        # Identity mapping, kept only for API compatibility.
        return v

    @staticmethod
    def id_to_name(v):
        return v

    def folders(self, *args, **kwargs):
        return [entry.name for entry in self.list('/') if entry.is_folder()]

    def subfolders(self, fid, *args, **kwargs):
        return [entry.name for entry in self.list(fid) if entry.is_folder()]

    def items(self, fid, subid, *args, **kwargs):
        pairs = []
        for entry in self.list(fid + '/' + subid):
            name = entry.name
            if '@' in name:
                # "cid@subid" names split into an encoded tuple.
                pairs.append(tuple(part.encode('utf-8')
                                   for part in name.split('@')))
            else:
                pairs.append((name.encode('utf-8'), None))
        return pairs

    def grouped_items(self, fid, subid, *args, **kwargs):
        grouped = defaultdict(list)
        for cid, sub in self.items(fid, subid):
            grouped[cid].append(sub)
        return grouped

    def parent_subfolders(self, ident, *args, **kwargs):
        cid, _ = normalize_ident(ident)
        # There is no reverse index from a content id to its containers,
        # so exhaustively scan every folder/subfolder/item.
        found = []
        for folder in list(self.list('/')):
            if not folder.is_folder():
                continue
            for subfolder in list(self.list(folder.path)):
                if not subfolder.is_folder():
                    continue
                for item in list(self.list(subfolder.path)):
                    if item.is_item() and cid in item.name.split('@'):
                        found.append((folder.name, subfolder.name))
        return found
def normalize_ident(ident):
    '''Splits a generic identifier.

    If ``ident`` is a 2-tuple, it is returned as
    ``(content_id, subtopic_id)``.  Otherwise ``(ident, None)`` is
    returned.
    '''
    is_pair = isinstance(ident, tuple) and len(ident) == 2
    if is_pair:
        content_id, subtopic_id = ident
        return content_id, subtopic_id
    return ident, None
| {
"repo_name": "dossier/dossier.models",
"path": "dossier/models/folder.py",
"copies": "1",
"size": "2756",
"license": "mit",
"hash": 105541709091413090,
"line_mean": 31.4235294118,
"line_max": 75,
"alpha_frac": 0.5504354136,
"autogenerated": false,
"ratio": 3.865357643758766,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49157930573587655,
"avg_score": null,
"num_lines": null
} |
# An undercomplete autoencoder on MNIST dataset
from __future__ import division, print_function, absolute_import
import tensorflow.contrib.layers as lays
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from skimage import transform
from tensorflow.examples.tutorials.mnist import input_data
batch_size = 500 # Number of samples in each batch
epoch_num = 5 # Number of epochs to train the network
lr = 0.001 # Learning rate
def resize_batch(imgs):
    """Resize a batch of flattened 28x28 MNIST images to 32x32.

    Args:
        imgs: a numpy array of size [batch_size, 28 X 28].
    Returns:
        a numpy array of size [batch_size, 32, 32, 1].
    """
    imgs = imgs.reshape((-1, 28, 28, 1))
    batch = imgs.shape[0]
    resized = np.zeros((batch, 32, 32, 1))
    for idx in range(batch):
        resized[idx, ..., 0] = transform.resize(imgs[idx, ..., 0], (32, 32))
    return resized
def autoencoder(inputs):
    """Build the undercomplete convolutional autoencoder graph.

    Encoder: 32x32x1 -> 16x16x32 -> 8x8x16 -> 2x2x8
    Decoder mirrors it back: 2x2x8 -> 8x8x16 -> 16x16x32 -> 32x32x1
    """
    # Encoder: three strided convolutions shrink the spatial dims.
    encoded = inputs
    for filters, stride in ((32, 2), (16, 2), (8, 4)):
        encoded = lays.conv2d(encoded, filters, [5, 5], stride=stride,
                              padding='SAME')
    # Decoder: transposed convolutions restore the original resolution;
    # the final layer uses tanh to bound pixel values.
    decoded = lays.conv2d_transpose(encoded, 16, [5, 5], stride=4,
                                    padding='SAME')
    decoded = lays.conv2d_transpose(decoded, 32, [5, 5], stride=2,
                                    padding='SAME')
    decoded = lays.conv2d_transpose(decoded, 1, [5, 5], stride=2,
                                    padding='SAME', activation_fn=tf.nn.tanh)
    return decoded
# read MNIST dataset
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# calculate the number of batches per epoch
batch_per_ep = mnist.train.num_examples // batch_size

ae_inputs = tf.placeholder(tf.float32, (None, 32, 32, 1))  # input to the network (MNIST images)
ae_outputs = autoencoder(ae_inputs)  # create the Autoencoder network

# calculate the loss and optimize the network
loss = tf.reduce_mean(tf.square(ae_outputs - ae_inputs))  # calculate the mean square error loss
train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)

# initialize the network
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for ep in range(epoch_num):  # epochs loop
        for batch_n in range(batch_per_ep):  # batches loop
            batch_img, batch_label = mnist.train.next_batch(batch_size)  # read a batch
            batch_img = batch_img.reshape((-1, 28, 28, 1))  # reshape each sample to an (28, 28) image
            batch_img = resize_batch(batch_img)  # reshape the images to (32, 32)
            _, c = sess.run([train_op, loss], feed_dict={ae_inputs: batch_img})
            print('Epoch: {} - cost= {:.5f}'.format((ep + 1), c))

    # test the trained network
    batch_img, batch_label = mnist.test.next_batch(50)
    batch_img = resize_batch(batch_img)
    recon_img = sess.run([ae_outputs], feed_dict={ae_inputs: batch_img})[0]

    # plot the reconstructed images and their ground truths (inputs)
    plt.figure(1)
    plt.title('Reconstructed Images')
    for i in range(50):
        plt.subplot(5, 10, i+1)
        plt.imshow(recon_img[i, ..., 0], cmap='gray')
    plt.figure(2)
    plt.title('Input Images')
    for i in range(50):
        plt.subplot(5, 10, i+1)
        plt.imshow(batch_img[i, ..., 0], cmap='gray')
    plt.show()
| {
"repo_name": "astorfi/TensorFlow-World",
"path": "codes/3-neural_networks/undercomplete-autoencoder/code/autoencoder.py",
"copies": "1",
"size": "3507",
"license": "mit",
"hash": -1064150865968228100,
"line_mean": 37.9666666667,
"line_max": 115,
"alpha_frac": 0.626746507,
"autogenerated": false,
"ratio": 3.095322153574581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4222068660574581,
"avg_score": null,
"num_lines": null
} |
"""An unidirectional pipe.
Your average example::
@coro
def iterator():
iterator = yield Iterate(producer)
while 1:
val = yield iterator
if val is sentinel:
break
# do something with val
@coro
def producer():
for i in xrange(100):
yield chunk(i)
"""
from __future__ import with_statement
from cogen.core import events, coroutines
class IterationStopped(Exception):
    """Raised inside the producer coroutine when the consumer aborts the
    iteration early (see ``IterateToken.stop``)."""
    pass
class IteratedCoroutineInstance(coroutines.CoroutineInstance):
    """Coroutine instance whose yielded ``chunk`` objects are routed to
    the consuming iteration token instead of being treated as ordinary
    operations."""
    __slots__ = ('iter_token',)

    def run_op(self, op, sched):
        result = super(IteratedCoroutineInstance, self).run_op(op, sched)
        if not isinstance(result, chunk):
            return result
        # A chunk was produced: hand it to the consumer's token, if one
        # is still attached (stop() detaches it).
        if self.iter_token:
            self.iter_token.data = result
            return self.iter_token
        return result
class IterateToken(events.Operation):
    """Operation shuttled between the consumer and producer coroutines:
    it carries one produced chunk at a time and mediates start/stop of
    the iteration."""

    def __init__(self, iterator):
        self.iterator = iterator
        self.abort = False    # set by stop(): consumer wants to end early
        self.started = False
        self.ended = False
        self.data = None      # the chunk currently in transit
        coro, args, kwargs = iterator.iterated_coro
        # Force the producer to be instantiated with the chunk-aware
        # CoroutineInstance subclass so its yielded chunks reach this token.
        coro.constructor = IteratedCoroutineInstance
        self.coro = coro(*args, **kwargs)
        self.coro.iter_token = self

    def finalize(self, sched):
        # First finalization starts the iteration and returns the token
        # itself; later ones deliver the chunk payload that
        # IteratedCoroutineInstance.run_op stored in self.data.
        if self.started:
            assert self.data
            data = self.data.value
            self.data = None
            return data
        else:
            self.started = True
            return self

    #~ from cogen.core.util import debug
    #~ @debug(0)
    def process(self, sched, coro):
        # Returns (operation, coroutine) pairs consumed by the scheduler.
        if self.abort:
            if self.ended:
                return self.iterator, coro
            else:
                # Tear down: detach the consumer from the producer and
                # push an IterationStopped exception into the producer.
                self.ended = True
                self.coro.remove_waiter(coro, self.iterator)
                sched.active.appendleft((
                    coroutines.CoroutineException(
                        IterationStopped,
                        IterationStopped(),
                        None
                    ),
                    self.coro
                ))
                return self.iterator, coro
        else:
            if self.ended:
                return None, coro
            else:
                # Ping-pong scheduling: the producer wakes the consumer's
                # iterator coroutine, the consumer schedules the producer.
                if coro is self.coro:
                    return self, self.iterator.coro
                else:
                    return None, self.coro

    def stop(self):
        # Request early termination; detach so no further chunks arrive.
        self.abort = True
        self.coro.iter_token = None
        return self
class chunk(object):
    """Lightweight wrapper marking a value produced for an iterating
    consumer (see module docstring)."""
    __slots__ = ('value',)

    def __init__(self, data):
        # Store the wrapped payload.
        self.value = data
class chunk_sentinel(object):
    """Marker type whose instance signals the end of an iteration."""
    pass

# Default end-of-iteration marker handed to the consumer when the
# producer coroutine finishes.
sentinel = end_sentinel = chunk_sentinel()
class Iterate(events.Operation):
    """Consumer-side operation: yield ``Iterate(producer)`` to obtain an
    ``IterateToken`` which is then yielded repeatedly to pull chunks."""

    # NOTE(review): kwargs={} is a shared mutable default -- safe only
    # because it is never mutated here; confirm before changing.
    def __init__(self, coro, args=(), kwargs={}, sentinel=sentinel):
        super(Iterate, self).__init__()
        # The producer coroutine is instantiated lazily by the token.
        self.iterated_coro = coro, args, kwargs
        self.started = False
        self.sentinel = sentinel  # value returned when the producer ends
        self.chunk = IterateToken(self)

    def finalize(self, sched):
        # Reached when the producer finishes: mark the token done and
        # hand the sentinel value to the consumer.
        self.chunk.ended = True
        return self.sentinel

    def process(self, sched, coro):
        assert not self.started
        self.started = True
        self.coro = coro
        # Wake the consumer when the producer coroutine terminates.
        self.chunk.coro.add_waiter(coro, self)
        return self.chunk, self.coro
| {
"repo_name": "ionelmc/python-cogen",
"path": "cogen/core/pipe.py",
"copies": "4",
"size": "3383",
"license": "mit",
"hash": -3571057870388024300,
"line_mean": 24.8492063492,
"line_max": 70,
"alpha_frac": 0.5184747266,
"autogenerated": false,
"ratio": 4.4222222222222225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6940696948822223,
"avg_score": null,
"num_lines": null
} |
"""An unofficial library to access the Cleverbot service."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from builtins import str # pylint: disable=redefined-builtin
from builtins import object # pylint: disable=redefined-builtin
import collections
import hashlib
import requests
from requests.compat import urlencode
from future.backports.html import parser
# Only use the instance method `unescape` of entity_parser. (I wish it was a
# static method or public function; it never uses `self` anyway)
entity_parser = parser.HTMLParser()
class Cleverbot(object):
    """Handles a conversation with Cleverbot.

    Example usage:

        >>> from cleverbot import Cleverbot
        >>> cb = Cleverbot('my-example-bot')
        >>> cb.ask("Hi. How are you?")
        "I'm good, thanks. How are you?"
    """

    # NOTE(review): despite the Cleverbot naming, this build points at
    # boibot.com -- confirm which service is actually intended.
    HOST = "www.boibot.com"
    PROTOCOL = "http://"
    RESOURCE = "/webservicemin?uc=UseOfficialAPI&t=7295&re=yes&"

    def __init__(self, botapi, uc='3210'):
        """Cleverbot requests that bots identify themselves when
        connecting to the service. You must pass an identifying string
        for your bot when you create the connection.

        For example:

        >> cb = Cleverbot('my-app')

        and *not*:

        >> cb = Cleverbot()

        See: http://www.cleverbot.com/apis
        """
        self.botapi = botapi
        self.uc = uc
        # NOTE(review): RESOURCE already carries a '?' query string, so this
        # appends a second '?' -- the service appears to tolerate it; confirm
        # before "fixing", as changing the URL changes the request bytes.
        self.SERVICE_URL = self.PROTOCOL + self.HOST + self.RESOURCE + \
            "?uc=" + self.uc + "&botapi=" + self.botapi
        # Browser-like headers; the service rejects unknown clients.
        self.headers = {
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)',
            'Accept': 'text/html,application/xhtml+xml,'
                      'application/xml;q=0.9,*/*;q=0.8',
            'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
            'Accept-Language': 'en-us,en;q=0.8,en-us;q=0.5,en;q=0.3',
            'Cache-Control': 'no-cache',
            'Host': self.HOST,
            'Referer': self.PROTOCOL + self.HOST + '/',
            'Pragma': 'no-cache'
        }

        """ The data that will get passed to Cleverbot """
        # Ordering matters: _send() computes the icognocheck token over a
        # slice of the url-encoded form, so fields must stay in this order.
        self.data = collections.OrderedDict(
            (
                # must be the first pairs
                ('stimulus', ''),
                ('cb_settings_language', ''),
                ('cb_settings_scripting', 'no'),
                ('islearning', 1),      # Never modified
                ('icognoid', 'wsf'),    # Never modified
                ('icognocheck', ''),
                ('start', 'y'),         # Never modified
                ('sessionid', ''),
                ('vText8', ''),
                ('vText7', ''),
                ('vText6', ''),
                ('vText5', ''),
                ('vText4', ''),
                ('vText3', ''),
                ('vText2', ''),
                ('fno', 0),             # Never modified
                ('prevref', ''),
                ('emotionaloutput', ''),    # Never modified
                ('emotionalhistory', ''),   # Never modified
                ('asbotname', ''),          # Never modified
                ('ttsvoice', ''),           # Never modified
                ('typing', ''),             # Never modified
                ('lineref', ''),
                ('sub', 'Say'),             # Never modified
                ('cleanslate', False),      # Never modified
            )
        )

        # the log of our conversation with Cleverbot
        self.conversation = []

        # get the main page to get a cookie (see bug #13)
        self.session = requests.Session()
        self.session.get(Cleverbot.PROTOCOL + Cleverbot.HOST)

    def ask(self, question):
        """Asks Cleverbot a question.

        Maintains message history.

        :param question: The question to ask
        :return Cleverbot's answer
        """
        # Set the current question
        self.data['stimulus'] = question

        # Connect to Cleverbot and remember the response
        resp = self._send()

        # Add the current question to the conversation log
        self.conversation.append(question)

        parsed = self._parse(resp.text)

        # Set data as appropriate
        # NOTE(review): this only refreshes sessionid once it is already
        # non-empty, so the first conversation id is never stored --
        # looks inverted; confirm against the live service.
        if self.data['sessionid'] != '':
            self.data['sessionid'] = parsed['conversation_id']

        # Add Cleverbot's reply to the conversation log
        self.conversation.append(parsed['answer'])

        # The service answers latin-1-framed UTF-8; re-decode it.
        return parsed['answer'].encode('latin-1').decode('utf-8')

    def _send(self):
        """POST the user's question and all required information to the
        Cleverbot service

        Cleverbot obfuscates how it generates the 'icognocheck' token. The token
        is currently the md5 checksum of the 10th through 36th characters of the
        encoded data. This may change in the future.
        """
        # Set data as appropriate
        # History is written newest-first into vText2..vText8 (at most
        # seven previous lines).
        if self.conversation:
            linecount = 1
            for line in reversed(self.conversation):
                linecount += 1
                self.data['vText' + str(linecount)] = line
                if linecount == 8:
                    break

        # Generate the token
        enc_data = urlencode(self.data)
        # Magic slice [9:35]: skips "stimulus=" and covers the question
        # payload the server checksums.
        digest_txt = enc_data[9:35]
        token = hashlib.md5(digest_txt.encode('utf-8')).hexdigest()
        self.data['icognocheck'] = token

        # POST the data to Cleverbot and return
        return self.session.post(self.SERVICE_URL,
                                 data=self.data,
                                 headers=self.headers)

    @staticmethod
    def _parse(resp_text):
        """Parses Cleverbot's response"""
        resp_text = entity_parser.unescape(resp_text)

        # Records are separated by six CRs; fields within a record by one.
        parsed = [
            item.split('\r') for item in resp_text.split('\r\r\r\r\r\r')[:-1]
        ]

        if parsed[0][1] == 'DENIED':
            raise CleverbotServiceError()

        parsed_dict = {
            'answer': parsed[0][0],
            'conversation_id': parsed[0][1],
        }
        try:
            parsed_dict['unknown'] = parsed[1][-1]
        except IndexError:
            parsed_dict['unknown'] = None
        return parsed_dict
class CleverbotServiceError(Exception):
    """The Cleverbot service returned an error"""
| {
"repo_name": "Beefywhale/WeenieBot",
"path": "modules/custom_cleverbot/cleverbot.py",
"copies": "1",
"size": "6201",
"license": "mit",
"hash": -1214166607860077600,
"line_mean": 31.9840425532,
"line_max": 80,
"alpha_frac": 0.5360425738,
"autogenerated": false,
"ratio": 4.005813953488372,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00020625460141991957,
"num_lines": 188
} |
# An "updater" determines when assets should automatically be recreated.
import os
from courant.core.assets.conf import settings
def get_updater(name=None):
    """Return a callable(output, sources) that returns True if the file
    ``output``, based on the files in the list ``sources`` needs to be
    recreated.

    See the ``ASSETS_UPDATER`` setting for more information.
    """
    # Unspecified/falsy names fall back to the project setting.
    if not name:
        name = settings.ASSETS_UPDATER
    registry = {
        None: update_never,
        False: update_never,
        "never": update_never,
        "timestamp": update_by_timestamp,
        "hash": update_by_hash,
        "interval": update_by_interval,
        "always": update_always,
    }
    if name not in registry:
        raise ValueError('Updater "%s" is not valid.' % name)
    return registry[name]
def update_never(*args):
    """Updater that never triggers a rebuild of the output asset."""
    return False
def update_always(*args):
    """Updater that rebuilds the output asset on every request."""
    return True
def update_by_timestamp(output, sources):
    """Rebuild when any source file is newer than the output file."""
    output_mtime = os.stat(output).st_mtime
    newest_source = max(os.stat(path).st_mtime for path in sources)
    # TODO: What about using != - could that potentially be more solid?
    return newest_source > output_mtime
def update_by_hash(output, sources):
    # Placeholder: content-hash comparison is not implemented yet.
    raise NotImplementedError()
def update_by_interval(output, sources):
    # Placeholder: time-interval based rebuilding is not implemented yet.
    raise NotImplementedError()
| {
"repo_name": "maxcutler/Courant-News",
"path": "courant/core/assets/updater.py",
"copies": "1",
"size": "1372",
"license": "bsd-3-clause",
"hash": -1410696102093851000,
"line_mean": 24.9019607843,
"line_max": 72,
"alpha_frac": 0.6166180758,
"autogenerated": false,
"ratio": 4.083333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5199951409133333,
"avg_score": null,
"num_lines": null
} |
"""An upgrade card."""
from csrv.model import actions
from csrv.model import events
from csrv.model import game_object
from csrv.model import timing_phases
from csrv.model.cards import installable_card
from csrv.model.cards import card_info
class Upgrade(installable_card.InstallableCard):
    """An installable, rezzable corp upgrade card."""

    TYPE = card_info.UPGRADE
    REZZABLE = True

    # Maps a timing phase to the method that provides choices in it.
    WHEN_IN_HAND_PROVIDES_CHOICES_FOR = {
        timing_phases.CorpTurnActions: 'install_actions',
    }
    WHEN_INSTALLED_PROVIDES_CHOICES_FOR = {
        timing_phases.CorpRezCards: 'rez_actions',
    }
    WHEN_ACCESSED_PROVIDES_CHOICES_FOR = {
        timing_phases.AccessCard: 'trash_on_access_actions',
    }

    def build_actions(self):
        installable_card.InstallableCard.build_actions(self)
        self.install_action = actions.InstallUpgrade(
            self.game, self.player, self)
        self._rez_action = actions.RezAssetUpgrade(
            self.game, self.player, self)

    def install_actions(self):
        return [self.install_action]

    def rez_actions(self):
        # Rezzing is only offered while the card is still unrezzed.
        return [] if self.is_rezzed else [self._rez_action]

    def on_install(self):
        installable_card.InstallableCard.on_install(self)
        self.trigger_event(
            events.InstallAgendaAssetUpgrade(self.game, self.player))
| {
"repo_name": "mrroach/CentralServer",
"path": "csrv/model/cards/upgrade.py",
"copies": "1",
"size": "1210",
"license": "apache-2.0",
"hash": -4127110076934129700,
"line_mean": 26.5,
"line_max": 80,
"alpha_frac": 0.7297520661,
"autogenerated": false,
"ratio": 3.3241758241758244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45539278902758246,
"avg_score": null,
"num_lines": null
} |
""" An utility module containing utility functions used by the grader module
and some useful pre-test hooks.
"""
import json
import traceback
def import_module(path, name=None):
    """Import the Python source file at *path* and return the module.

    If *name* is omitted the path itself is used as the module name.
    Uses the spec-based import machinery because
    ``SourceFileLoader.load_module()`` is deprecated and was removed in
    Python 3.12.
    """
    if name is None:
        name = path
    import importlib.util
    import sys
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    # Register before executing, matching load_module()'s behaviour and
    # allowing self-referential imports inside the loaded module.
    sys.modules[name] = module
    spec.loader.exec_module(module)
    return module
def is_function(value):
    """Return True if *value* is callable (function, method, lambda, ...).

    ``callable()`` performs the same ``__call__`` probe the old
    implementation did, and it cannot raise, so the broad bare
    ``except:`` clause is unnecessary.
    """
    return callable(value)
## Function descriptions
def beautifyDescription(description):
    """ Converts docstring of a function to a test description
        by removing excess whitespace and joining the answer on one
        line """
    stripped = (fragment.strip() for fragment in description.split('\n'))
    non_empty = [fragment for fragment in stripped if fragment]
    return " ".join(non_empty)
def setDescription(function, description):
    """Replace *function*'s registered test description.

    Removes the old testcase entry (if any), stores the beautified
    description as the function docstring, and re-registers the test.
    """
    import grader
    previous = grader.get_test_name(function)
    if previous in grader.testcases:
        grader.testcases.remove(previous)
    pretty = beautifyDescription(description)
    function.__doc__ = pretty
    grader.testcases.add(pretty, function)
## Json managing
def load_json(json_string):
    """Parse *json_string* and return the resulting object (usually a dict)."""
    return json.loads(json_string)
def dump_json(ordered_dict):
    """Serialize *ordered_dict* to a 4-space-indented JSON string."""
    return json.dumps(ordered_dict, indent=4)
def get_error_message(exception):
    """Return ``'ExceptionType: message'`` for *exception*."""
    return "{}: {}".format(type(exception).__name__, str(exception))
def get_traceback(exception):
    """Return the full formatted traceback string for *exception*."""
    parts = traceback.format_exception(
        type(exception), exception, exception.__traceback__)
    return "".join(parts)
def read_code(path):
    """Return the source text of *path*, honouring its declared encoding
    (PEP 263 coding cookies) via ``tokenize.open``."""
    import tokenize
    with tokenize.open(path) as source_file:
        return source_file.read()
| {
"repo_name": "macobo/python-grader",
"path": "grader/utils.py",
"copies": "1",
"size": "1858",
"license": "mit",
"hash": -5235387913198303000,
"line_mean": 25.9275362319,
"line_max": 76,
"alpha_frac": 0.6899892357,
"autogenerated": false,
"ratio": 4.021645021645021,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026399545325376528,
"num_lines": 69
} |
# Uses Python 2.7
# Script for capturing images
#
# sudo apt-get install python-picamera python-requests pika
#
# Arduino URL
# http://intranet.strongest.se/gympassv4/index.php/doorpassV5/rmq_store_message_to_queue/doorpass_skagget_picture/tag_en_bild
#
import requests
import MySQLdb
import time
import picamera
import pika
passage_point = "SKAGGET"
send_url = 'http://intranet.strongest.se/gympassv4/index.php/DoorpassWebcam/storewebcamsnap'
rmq_queue_name = 'doorpass_skagget_picture'
def getpassageno():
    """Fetch the id of the most recent passage at this passage point."""
    response = requests.get('http://intranet.strongest.se/gympassv4/index.php/DoorpassWebcam/getlastpassage/' + passage_point, auth=('gym', 'muskler'))
    # The endpoint answers "<label>:<id>"; keep only the id part.
    return response.text.split(":")[1]
def picam_snaps():
    """Capture a burst of five snapshots, lighting the camera LED and
    printing the capture duration in milliseconds."""
    camera.led = True
    print("Capturing")
    started = int(round(time.time() * 1000))
    filenames = ['pycam_image_%02d.jpg' % n for n in range(1, 6)]
    camera.capture_sequence(filenames)
    print(int(round(time.time() * 1000)) - started)
    camera.led = False
def send_files(id):
    """Upload the five captured snapshots for passage *id* to the server.

    Prints the server response and the elapsed upload time.
    """
    millis_start = int(round(time.time() * 1000))
    print("Sending images to server")
    # Open all five images; close them afterwards even if the POST fails
    # (the original implementation leaked every file handle).
    files = dict((num, open('pycam_image_%s.jpg' % num, 'rb'))
                 for num in ('01', '02', '03', '04', '05'))
    try:
        r = requests.post(send_url, files=files, auth=('gym', 'muskler'),
                          data={"passage_id": str(id)})
    finally:
        for handle in files.values():
            handle.close()
    millis_end = int(round(time.time() * 1000))
    print(r.text)
    print("Time to send:" + str(millis_end - millis_start) + "ms")
# Callback to take a snap
def callback_mq(ch, method, properties, body):
    """RabbitMQ consumer callback: capture and upload pictures for the
    latest passage at this passage point."""
    passage_id = getpassageno()
    print ("Detected passage with ID:" + str(passage_id))
    picam_snaps()
    send_files(passage_id)
# Setup RabbitMQ: connect to the broker and subscribe callback_mq to the
# picture-request queue (no_ack: messages are not redelivered on failure).
credentials = pika.PlainCredentials('doorpass', 'doorpass')
connection = pika.BlockingConnection(pika.ConnectionParameters('virtualpt.se', 5672,'/',credentials))
channel = connection.channel()

channel.queue_declare(queue=rmq_queue_name)
channel.basic_consume(callback_mq,
                      queue=rmq_queue_name,
                      no_ack=True)

# Setup camera: flipped both ways for the mounting orientation.
camera = picamera.PiCamera()
camera.hflip = True
camera.vflip = True
camera.resolution = (1296, 972)
#camera.resolution = (2592, 1944)
camera.framerate = 5

last_passageid = 0

print ("Starting to look for passage")
# Blocks forever, dispatching callback_mq for each queued message.
channel.start_consuming()
| {
"repo_name": "declint/gympassv5",
"path": "gympass_camera.py",
"copies": "1",
"size": "2525",
"license": "apache-2.0",
"hash": -3893323866615433000,
"line_mean": 25.8510638298,
"line_max": 141,
"alpha_frac": 0.6953248811,
"autogenerated": false,
"ratio": 2.711063372717508,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3906388253817508,
"avg_score": null,
"num_lines": null
} |
# A
def _react(polymer):
    """Fully react *polymer*: adjacent pairs of the same letter in opposite
    case annihilate until no such pair remains.

    A single stack pass is O(n); the original rescan-and-reslice loop was
    quadratic.  Polymer reduction is confluent (the order in which pairs are
    removed does not change the final string), so the result is identical.
    """
    stack = []
    for unit in polymer:
        if stack and stack[-1] != unit and stack[-1].lower() == unit.lower():
            stack.pop()  # opposite-case pair: both units annihilate
        else:
            stack.append(unit)
    return ''.join(stack)


with open("day5.txt", "r") as fh:
    l = fh.readline().strip()

l = _react(l)
print("A", len(l))
#11590
# B
def _fully_react(polymer):
    """Stack-based full polymer reduction (same-letter, opposite-case pairs
    annihilate).  O(n) per candidate instead of the original quadratic
    rescan-and-reslice loop; confluence guarantees the same final length."""
    stack = []
    for unit in polymer:
        if stack and stack[-1] != unit and stack[-1].lower() == unit.lower():
            stack.pop()
        else:
            stack.append(unit)
    return stack


with open("day5.txt", "r") as fh:
    l = fh.readline().strip()

units = 'qwertzuiopasdfghjklyxcvbnm'
min_length = float('inf')
for f in units:
    # Remove one unit type entirely (both cases), then react what is left.
    stri = l.replace(f, '').replace(f.upper(), '')
    min_length = min(min_length, len(_fully_react(stri)))
print("B", min_length)
#4504
| {
"repo_name": "Dakror/AdventOfCode",
"path": "aoc18/day5.py",
"copies": "1",
"size": "1033",
"license": "apache-2.0",
"hash": 469934941815241660,
"line_mean": 21,
"line_max": 76,
"alpha_frac": 0.5469506292,
"autogenerated": false,
"ratio": 2.51338199513382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35603326243338196,
"avg_score": null,
"num_lines": null
} |
"""An XBlock providing thumbs-up/thumbs-down voting."""
import logging
import pkg_resources
from web_fragments.fragment import Fragment
from xblock.core import XBlock, XBlockAside
from xblock.fields import Boolean, Integer, Scope
# Module-level logger named after this module, per logging convention.
log = logging.getLogger(__name__)
class ThumbsBlockBase:
    """
    Mixin implementing thumbs-up/thumbs-down voting.

    Vote totals are shared by all students (user_state_summary scope) while
    each student's has-voted flag is per-user state.  Demonstrates multiple
    data scopes and a json ajax handler.
    """

    upvotes = Integer(help="Number of up votes", default=0, scope=Scope.user_state_summary)
    downvotes = Integer(help="Number of down votes", default=0, scope=Scope.user_state_summary)
    voted = Boolean(help="Has this student voted?", default=False, scope=Scope.user_state)

    def student_view(self, context=None):  # pylint: disable=W0613
        """
        Build the student-facing fragment (HTML + CSS + JS).

        `context` is accepted for interface compatibility but unused.
        Returns a `Fragment` with the rendered template and its assets.
        """
        def resource(path):
            # Static assets ship inside this package; decode them to text.
            return pkg_resources.resource_string(__name__, path).decode('utf-8')

        frag = Fragment(resource("static/html/thumbs.html").format(block=self))
        frag.add_css(resource("static/css/thumbs.css"))
        frag.add_javascript(resource("static/js/src/thumbs.js"))
        frag.initialize_js('ThumbsBlock')
        return frag

    problem_view = student_view

    @XBlock.json_handler
    def vote(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Update the vote count in response to a user action.
        """
        # A real block would reject repeat votes via self.voted; the check is
        # deliberately left out so the workbench demo stays clickable.
        choice = data['voteType']
        if choice not in ('up', 'down'):
            log.error('error!')
            return None
        if choice == 'up':
            self.upvotes += 1
        else:
            self.downvotes += 1
        self.voted = True
        return {'up': self.upvotes, 'down': self.downvotes}

    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("three thumbs at once",
             """\
                <vertical_demo>
                    <thumbs/>
                    <thumbs/>
                    <thumbs/>
                </vertical_demo>
             """)
        ]
class ThumbsBlock(ThumbsBlockBase, XBlock):
    """
    An XBlock with thumbs-up/thumbs-down voting.
    Vote totals are stored for all students to see. Each student is recorded
    as has-voted or not.
    This demonstrates multiple data scopes and ajax handlers.
    """
    # All behavior lives in ThumbsBlockBase; this subclass only binds the
    # mixin to the XBlock runtime type.
class ThumbsAside(ThumbsBlockBase, XBlockAside):
    """
    An XBlockAside with thumbs-up/thumbs-down voting.

    Same shared vote totals / per-student voted flag as the block variant.

    NOTE: Asides aren't ready yet, so this is currently not being installed
    in setup.py.  When work on asides resumes, a more sophisticated
    mechanism will enable this for developers who want to see it.
    """

    @XBlockAside.aside_for('student_view')
    def student_view_aside(self, block, context=None):  # pylint: disable=unused-argument
        """Render the thumbs voting widget as an aside attached to `block`."""
        frag = self.student_view(context)
        frag.initialize_js('ThumbsAside')
        return frag
| {
"repo_name": "stvstnfrd/xblock-sdk",
"path": "sample_xblocks/thumbs/thumbs.py",
"copies": "2",
"size": "4235",
"license": "apache-2.0",
"hash": -3476643052535740000,
"line_mean": 31.5769230769,
"line_max": 95,
"alpha_frac": 0.6061393152,
"autogenerated": false,
"ratio": 4.151960784313726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001289691507215138,
"num_lines": 130
} |
"""An XBlock providing thumbs-up/thumbs-down voting.
This is a completely artifical test case for the filesystem field type. It
behaves just like the sample_xblocks/thumbs example, except it uses filesystem
fields.
Votes are stored in a JSON object in the file system, and up/down arrow PNGs
are constructed as files on-the-fly.
These uses are not great demonstrations of what you can do with a filesystem
field. They should be used for storage of file-like data, usually with
varying file names.
This code is duplicative of much of the thumbs example. If you are interested
in filesystem fields, examining the differences between this block and the
thumbs block will be instructive.
"""
import json
import logging
import pkg_resources
import png
from web_fragments.fragment import Fragment
from xblock.core import XBlock
from xblock.fields import Boolean, Scope
from xblock.reference.plugins import Filesystem
# Module-level logger named after this module, per logging convention.
log = logging.getLogger(__name__)
# 5x7 one-bit bitmap of an arrow (rows top to bottom); rendered as a
# greyscale PNG, so 1 pixels are white and 0 pixels black.  The down arrow
# is produced elsewhere by reversing the rows.
ARROW = [
    [int(bit) for bit in row]
    for row in (
        '11011',
        '10001',
        '00000',
        '11011',
        '11011',
        '11011',
        '10001',
    )
]
@XBlock.needs('fs')
class FileThumbsBlock(XBlock):
    """
    An XBlock with thumbs-up/thumbs-down voting, persisted on a filesystem.

    Vote totals are stored for all students in a JSON document on the
    pluggable 'fs' service; each student's has-voted flag is a regular
    per-user XBlock field.  Demonstrates filesystem fields alongside
    ordinary scopes and ajax handlers.
    """

    # In-memory mirrors of the persisted tallies -- deliberately NOT XBlock
    # fields; the JSON file on `fs` is the source of truth.
    upvotes = 0
    downvotes = 0
    voted = Boolean(help="Has this student voted?", default=False, scope=Scope.user_state)
    fs = Filesystem(help="File system", scope=Scope.user_state_summary)  # pylint: disable=invalid-name

    # Name of the JSON document holding {'up': int, 'down': int}.
    VOTES_FILE = 'thumbsvotes.json'

    def _read_votes(self):
        """Return the persisted tally dict, creating the file on first use.

        Fix: the original passed ``self.fs.open(...)`` straight to
        ``json.load`` and leaked the handle; it is now closed via ``with``.
        """
        if not self.fs.exists(self.VOTES_FILE):
            self._write_votes({'up': 0, 'down': 0})
        with self.fs.open(self.VOTES_FILE) as handle:
            return json.load(handle)

    def _write_votes(self, votes):
        """Persist the tally dict as JSON on the filesystem service."""
        with self.fs.open(self.VOTES_FILE, 'wb') as handle:
            handle.write(json.dumps(votes).encode())

    def student_view(self, context=None):  # pylint: disable=W0613
        """
        Create a fragment used to display the XBlock to a student.

        `context` is a dictionary used to configure the display (unused).
        Returns a `Fragment` object specifying the HTML, CSS, and JavaScript
        to display.
        """
        # Load the HTML fragment from within the package.
        html_str = pkg_resources.resource_string(
            __name__, "static/html/thumbs.html").decode('utf-8')
        frag = Fragment(html_str)

        votes = self._read_votes()
        self.upvotes = votes['up']
        self.downvotes = votes['down']

        # Load the CSS and JavaScript fragments from within the package.
        css_str = pkg_resources.resource_string(
            __name__, "static/css/thumbs.css").decode('utf-8')
        frag.add_css(css_str)
        js_str = pkg_resources.resource_string(
            __name__, "static/js/src/thumbs.js").decode('utf-8')
        frag.add_javascript(js_str)

        # Render the 1-bit arrow PNGs on the fly; the down arrow is the
        # up-arrow bitmap with its rows reversed.
        with self.fs.open('uparrow.png', 'wb') as file_output:
            png.Writer(len(ARROW[0]), len(ARROW), greyscale=True, bitdepth=1).write(file_output, ARROW)
        with self.fs.open('downarrow.png', 'wb') as file_output:
            png.Writer(len(ARROW[0]), len(ARROW), greyscale=True, bitdepth=1).write(file_output, ARROW[::-1])

        frag.initialize_js('FileThumbsBlock', {'up': self.upvotes,
                                               'down': self.downvotes,
                                               'voted': self.voted,
                                               'uparrow': self.fs.get_url('uparrow.png'),
                                               'downarrow': self.fs.get_url('downarrow.png')})
        return frag

    problem_view = student_view

    @XBlock.json_handler
    def vote(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Update the vote count in response to a user action.
        """
        # Here is where we would prevent a student from voting twice, but then
        # we couldn't click more than once in the demo!
        #
        # if self.voted:
        #     log.error("cheater!")
        #     return
        # Robustness fix: _read_votes creates the file if student_view has
        # not run yet (the original assumed it already existed).
        votes = self._read_votes()
        self.upvotes = votes['up']
        self.downvotes = votes['down']
        if data['voteType'] not in ('up', 'down'):
            log.error('error!')
            return None
        if data['voteType'] == 'up':
            self.upvotes += 1
        else:
            self.downvotes += 1
        self._write_votes({'up': self.upvotes, 'down': self.downvotes})
        self.voted = True
        return {'up': self.upvotes, 'down': self.downvotes}

    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("filethumbs",
             """\
                <vertical_demo>
                    <filethumbs/>
                    <filethumbs/>
                    <filethumbs/>
                </vertical_demo>
             """)
        ]
| {
"repo_name": "stvstnfrd/xblock-sdk",
"path": "sample_xblocks/filethumbs/filethumbs.py",
"copies": "2",
"size": "5295",
"license": "apache-2.0",
"hash": 4030897286463813600,
"line_mean": 32.5126582278,
"line_max": 109,
"alpha_frac": 0.5765816808,
"autogenerated": false,
"ratio": 3.9992447129909365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012777742077957508,
"num_lines": 158
} |
# )A)Nx&E_@@YPDXoV
import string
import random
from base64 import b64encode, b64decode
# Placeholder flag; the real value is substituted at challenge-build time.
FLAG = 'flag{xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx}'
# Names of the encoding functions below; encode() picks from these at random
# and dispatches via globals().
enc_ciphers = ['rot13', 'b64e', 'caesar']
# dec_ciphers = ['rot13', 'b64d', 'caesard']
def rot13(s):
    """Return *s* with every ASCII letter rotated 13 places.

    NOTE(review): ``string.maketrans``/``string.translate`` are the
    Python 2 APIs (removed in Python 3); this module targets Python 2
    (see ``xrange`` in encode()).
    """
    _rot13 = string.maketrans(
        "ABCDEFGHIJKLMabcdefghijklmNOPQRSTUVWXYZnopqrstuvwxyz",
        "NOPQRSTUVWXYZnopqrstuvwxyzABCDEFGHIJKLMabcdefghijklm")
    return string.translate(s, _rot13)
def b64e(s):
    """Base64-encode *s*; thin wrapper so encode() can look it up by name."""
    encoded = b64encode(s)
    return encoded
def caesar(plaintext, shift=3):
    """Caesar-shift the lowercase letters of *plaintext* by *shift* places.

    Uppercase letters and non-letters pass through unchanged.
    NOTE(review): ``string.maketrans`` here is the Python 2 256-byte-table
    form consumed by ``str.translate``; not valid under Python 3.
    """
    alphabet = string.ascii_lowercase
    shifted_alphabet = alphabet[shift:] + alphabet[:shift]
    table = string.maketrans(alphabet, shifted_alphabet)
    return plaintext.translate(table)
def encode(pt, cnt=50):
    """Wrap *pt* in *cnt* layers of randomly chosen encodings.

    Each layer's output is '<i><ciphertext>' where i is the 1-based index
    into enc_ciphers of the cipher used, so a decoder can peel the layers
    back off.  The innermost layer is always base64 (prefix '2').

    Fix: the original reused ``cnt`` as the loop variable, clobbering the
    parameter mid-function (harmless only because xrange evaluates its
    argument once); a throwaway name is used instead.
    """
    tmp = '2{}'.format(b64encode(pt))
    for _ in xrange(cnt):
        c = random.choice(enc_ciphers)
        i = enc_ciphers.index(c) + 1
        _tmp = globals()[c](tmp)
        tmp = '{}{}'.format(i, _tmp)
    return tmp
# if __name__ == '__main__':
# print encode(FLAG, cnt=?)
def decode_rot13(pt):
    """Print *pt* decoded with the Python 2 'rot13' str codec.

    NOTE(review): the 'rot13' codec is unavailable on str in Python 3.
    """
    print(pt.decode('rot13'))
# def uncaeser(plaintext, shift=3):
# alphabet = string.ascii_lowercase
# shifted_alphabet = alphabet[:shift] + alphabet[shift:]
| {
"repo_name": "frankcash/ctfs",
"path": "csaw2015/foo.py",
"copies": "2",
"size": "1194",
"license": "mit",
"hash": -1151768246741732700,
"line_mean": 24.4042553191,
"line_max": 60,
"alpha_frac": 0.6783919598,
"autogenerated": false,
"ratio": 2.9336609336609336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9504403774477452,
"avg_score": 0.021529823796696313,
"num_lines": 47
} |
# Plain-text body of the recurring "anxiety" email.  Positional slot 0 is
# the recipient's name and slots 1-4 are four message lines; `domain` and
# `uid` are keyword slots used to build the opt-out links.
ANXIETY_PLAIN = """
Dear {0},
{1}
{2}
{3}
{4}
Sincerely,
Your Anxiety
P.S You can deactivate your account anytime at {domain}/deactivate?uuid={uid}, or just straight up delete it at {domain}/delete?uuid={uid}
"""

# HTML variant of the same message with identical format slots.
ANXIETY_HTML= """
Dear {0},<br><br>
{1} <br>
{2} <br>
{3} <br>
{4} <br><br>
Sincerely, <br> Your Anxiety <br>
<br>
P.S, you can <a href="{domain}/deactivate?uuid={uid}">deactivate</a> your account anytime, or just<a href="{domain}/delete?uuid={uid}">delete</a> it, by clicking either of those.
"""
# Plain-text signup-confirmation email.  Slot 0 is the recipient's name;
# `domain` and `uid` build the activation link.
ACTIVATION_TEMPLATE = """
Dear {0},
You've asked us to fill up an Anxiety Flask for you.
To confirm that, click this link:
{domain}/activate?uuid={uid}
Don't worry. If it gets overwhelming, each email will have a link to deactivate or delete your account in one click. Or, you can do it any time at
{domain}/deactivate
{domain}/delete
Sincerely,
Your Anxiety
"""

# HTML variant.  Fix: the deactivate anchor was missing the closing quote
# on its href attribute ('href="{domain}/deactivate>'), which swallowed the
# link text and broke the markup.
ACTIVATION_HTML = """
Dear {0}, <br>
You've asked us to fill up an Anxiety Flask for you. <br>
To confirm that, click <a href="{domain}/activate?uuid={uid}"> here</a>. <br>
Don't worry, if it gets overwhelming you can always <a href="{domain}/deactivate">deactivate</a> or <a href="{domain}/delete">delete</a> your account.<br>
Sincerely, <br> Your Anxiety
"""
# One-line failure notice rendered per bounced email (both variants take
# `email`, `code` and a retry `link` as keyword slots).
FAILURE_PLAIN = """
Sending to {email} failed with error: {code}. Retry? {link}
"""

FAILURE_HTML = """
Sending to {email} failed with error: {code}. <a href="{link}">Retry?</a><br>
"""
# Digest sent to the admin listing messages that failed to send.  Slot 0 is
# the admin's name; `emails` is a pre-rendered list of failure lines.
# Fix: both variants read "Dear{0}," -- missing the space after "Dear".
ADMIN_PLAIN = """
Dear {0},
The following emails failed to send. You can review each one below:
{emails}
Sincerely,
The Anxiety Flask
"""

ADMIN_HTML = """
Dear {0},<br>
The following emails failed to send. You can review each one below:<br>
{emails}
<br>
Sincerely,<br>
The Anxiety Flask
"""
| {
"repo_name": "padraic-padraic/AnxietyFlask",
"path": "AnxietyFlask/emails.py",
"copies": "1",
"size": "1730",
"license": "mit",
"hash": 5352549597242694000,
"line_mean": 21.7631578947,
"line_max": 178,
"alpha_frac": 0.6780346821,
"autogenerated": false,
"ratio": 2.750397456279809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3928432138379809,
"avg_score": null,
"num_lines": null
} |
"""An XML Reader is the SAX 2 name for an XML parser. XML Parsers
should be based on this code. """
from . import handler
from ._exceptions import SAXNotSupportedException, SAXNotRecognizedException
# ===== XMLREADER =====
class XMLReader:
    """Interface for reading an XML document using callbacks.

    This is the interface a SAX2 parser driver implements: applications use
    it to register event handlers, query and set features and properties,
    and initiate a document parse.

    All SAX interfaces are synchronous -- parse() does not return until the
    document has been consumed, and the reader waits for each handler
    callback to return before reporting the next event.
    """

    def __init__(self):
        # Start with no-op default handlers; applications replace them
        # through the setter methods below.
        self._cont_handler = handler.ContentHandler()
        self._dtd_handler = handler.DTDHandler()
        self._ent_handler = handler.EntityResolver()
        self._err_handler = handler.ErrorHandler()

    def parse(self, source):
        """Parse an XML document from a system identifier or an InputSource."""
        raise NotImplementedError("This method must be implemented!")

    def getContentHandler(self):
        """Return the current ContentHandler."""
        return self._cont_handler

    def setContentHandler(self, handler):
        """Register a new object to receive document content events."""
        self._cont_handler = handler

    def getDTDHandler(self):
        """Return the current DTD handler."""
        return self._dtd_handler

    def setDTDHandler(self, handler):
        """Register an object to receive basic DTD-related events."""
        self._dtd_handler = handler

    def getEntityResolver(self):
        """Return the current EntityResolver."""
        return self._ent_handler

    def setEntityResolver(self, resolver):
        """Register an object to resolve external entities."""
        self._ent_handler = resolver

    def getErrorHandler(self):
        """Return the current ErrorHandler."""
        return self._err_handler

    def setErrorHandler(self, handler):
        """Register an object to receive error-message events."""
        self._err_handler = handler

    def setLocale(self, locale):
        """Allow an application to set the locale for errors and warnings.

        Localization is optional; a reader that cannot honour the requested
        locale must raise a SAX exception.  A locale change may be requested
        in the middle of a parse.
        """
        raise SAXNotSupportedException("Locale support not implemented")

    def getFeature(self, name):
        """Look up and return the state of a SAX2 feature."""
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def setFeature(self, name, state):
        """Set the state of a SAX2 feature."""
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def getProperty(self, name):
        """Look up and return the value of a SAX2 property."""
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)

    def setProperty(self, name, value):
        """Set the value of a SAX2 property."""
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)
class IncrementalParser(XMLReader):
    """XMLReader extended with feed/close/reset for incremental parsing.

    Support for this interface is optional, since not every underlying XML
    parser can parse incrementally.  A freshly constructed parser accepts
    feed() data immediately; after close(), reset() must be called before
    parsing again (via feed or parse).  None of these methods may be called
    while a parse() is in progress.

    The default parse() is implemented on top of feed/close as a
    convenience to SAX 2.0 driver writers.
    """

    def __init__(self, bufsize=2**16):
        self._bufsize = bufsize
        XMLReader.__init__(self)

    def parse(self, source):
        from . import saxutils
        source = saxutils.prepare_input_source(source)
        self.prepareParser(source)
        # Prefer the character stream; fall back to the byte stream.
        stream = source.getCharacterStream()
        if stream is None:
            stream = source.getByteStream()
        chunk = stream.read(self._bufsize)
        while chunk:
            self.feed(chunk)
            chunk = stream.read(self._bufsize)
        self.close()

    def feed(self, data):
        """Hand raw XML *data* to the parser, emitting events as it goes.

        XML constructs may be split across several feed() calls.
        May raise SAXException.
        """
        raise NotImplementedError("This method must be implemented!")

    def prepareParser(self, source):
        """Called by parse() so the SAX 2.0 driver can ready itself."""
        raise NotImplementedError("prepareParser must be overridden!")

    def close(self):
        """Signal that the whole document has been fed.

        Lets the parser run its final checks and drain its internal buffer.
        The parser is not ready for another document until reset() is
        called.  May raise SAXException.
        """
        raise NotImplementedError("This method must be implemented!")

    def reset(self):
        """Make the parser ready for a new document after close().

        Calling parse or feed after close without reset is undefined.
        """
        raise NotImplementedError("This method must be implemented!")
# ===== LOCATOR =====
class Locator:
    """Associates a SAX event with a document location.

    A locator's results are valid only during DocumentHandler callbacks; at
    any other time they are unpredictable.  This base implementation
    reports no location information.
    """

    def getColumnNumber(self):
        """Column number where the current event ends (-1 here)."""
        return -1

    def getLineNumber(self):
        """Line number where the current event ends (-1 here)."""
        return -1

    def getPublicId(self):
        """Public identifier for the current event (None here)."""
        return None

    def getSystemId(self):
        """System identifier for the current event (None here)."""
        return None
# ===== INPUTSOURCE =====
class InputSource:
    """Bundle of the information an XMLReader needs to read an entity.

    May carry a public identifier, a system identifier, a byte stream
    (optionally with a declared character encoding) and/or a character
    stream.  Applications create these for XMLReader.parse and for
    returning from EntityResolver.resolveEntity.

    An InputSource belongs to the application; a reader never modifies one
    passed to it (though it may copy and modify the copies).
    """

    def __init__(self, system_id=None):
        self.__system_id = system_id
        self.__public_id = None
        self.__encoding = None
        self.__bytefile = None
        self.__charfile = None

    def setPublicId(self, public_id):
        """Set the public identifier of this InputSource."""
        self.__public_id = public_id

    def getPublicId(self):
        """Return the public identifier of this InputSource."""
        return self.__public_id

    def setSystemId(self, system_id):
        """Set the system identifier of this InputSource."""
        self.__system_id = system_id

    def getSystemId(self):
        """Return the system identifier of this InputSource."""
        return self.__system_id

    def setEncoding(self, encoding):
        """Set the character encoding of this InputSource.

        Must be a string acceptable in an XML encoding declaration (XML
        recommendation section 4.3.3).  Ignored when the InputSource also
        carries a character stream.
        """
        self.__encoding = encoding

    def getEncoding(self):
        """Return the character encoding of this InputSource."""
        return self.__encoding

    def setByteStream(self, bytefile):
        """Set the byte stream: a file-like object that performs no
        byte-to-character conversion.

        Ignored if a character stream is also set, but preferred over the
        reader opening a URI connection itself.  Declare the encoding with
        setEncoding() if it is known.
        """
        self.__bytefile = bytefile

    def getByteStream(self):
        """Return the byte stream, or None.

        getEncoding() gives its declared character encoding, or None if
        unknown.
        """
        return self.__bytefile

    def setCharacterStream(self, charfile):
        """Set the character stream: a file-like object yielding Unicode
        strings.

        When present, the reader ignores any byte stream and does not open
        a URI connection to the system identifier.
        """
        self.__charfile = charfile

    def getCharacterStream(self):
        """Return the character stream, or None."""
        return self.__charfile
# ===== ATTRIBUTESIMPL =====
class AttributesImpl:
    """Non-namespace-aware attribute collection.

    Wraps a plain {name: value} mapping; with no namespaces, qualified
    names and plain names coincide.
    """

    def __init__(self, attrs):
        """*attrs* should be of the form {name : value}."""
        self._attrs = attrs

    def getLength(self):
        """Number of attributes."""
        return len(self._attrs)

    def getType(self, name):
        # Every attribute reports type CDATA in this implementation.
        return "CDATA"

    def getValue(self, name):
        """Value of attribute *name*; raises KeyError if absent."""
        return self._attrs[name]

    def getValueByQName(self, name):
        # Qualified-name lookup degenerates to plain lookup here.
        return self._attrs[name]

    def getNameByQName(self, name):
        if name in self._attrs:
            return name
        raise KeyError(name)

    def getQNameByName(self, name):
        if name in self._attrs:
            return name
        raise KeyError(name)

    def getNames(self):
        return list(self._attrs)

    def getQNames(self):
        return list(self._attrs)

    def __len__(self):
        return len(self._attrs)

    def __getitem__(self, name):
        return self._attrs[name]

    def keys(self):
        return list(self._attrs)

    def __contains__(self, name):
        return name in self._attrs

    def get(self, name, alternative=None):
        return self._attrs.get(name, alternative)

    def copy(self):
        # Shallow copy sharing the same underlying mapping, as before.
        return self.__class__(self._attrs)

    def items(self):
        return list(self._attrs.items())

    def values(self):
        return list(self._attrs.values())
# ===== ATTRIBUTESNSIMPL =====
class AttributesNSImpl(AttributesImpl):
    """Namespace-aware attribute collection.

    Keys are (ns_uri, localname) pairs; a parallel mapping supplies the
    qualified name for each pair.
    """

    def __init__(self, attrs, qnames):
        """*attrs* maps (ns_uri, lname) -> value;
        *qnames* maps (ns_uri, lname) -> qname."""
        self._attrs = attrs
        self._qnames = qnames

    def getValueByQName(self, name):
        # Linear scan: the qname mapping is keyed the other way round.
        for ns_name, qname in self._qnames.items():
            if qname == name:
                return self._attrs[ns_name]
        raise KeyError(name)

    def getNameByQName(self, name):
        for ns_name, qname in self._qnames.items():
            if qname == name:
                return ns_name
        raise KeyError(name)

    def getQNameByName(self, name):
        return self._qnames[name]

    def getQNames(self):
        return list(self._qnames.values())

    def copy(self):
        return self.__class__(self._attrs, self._qnames)
def _test():
    # Smoke check: the interface classes must at least instantiate.
    for cls in (XMLReader, IncrementalParser, Locator):
        cls()


if __name__ == "__main__":
    _test()
| {
"repo_name": "ms-iot/python",
"path": "cpython/Lib/xml/sax/xmlreader.py",
"copies": "53",
"size": "12684",
"license": "bsd-3-clause",
"hash": -7144981601337528000,
"line_mean": 32.3789473684,
"line_max": 78,
"alpha_frac": 0.6615421003,
"autogenerated": false,
"ratio": 4.649560117302053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010419385618485341,
"num_lines": 380
} |
"""An XML Reader is the SAX 2 name for an XML parser. XML Parsers
should be based on this code. """
import handler
from _exceptions import SAXNotSupportedException, SAXNotRecognizedException
# ===== XMLREADER =====
class XMLReader:
    """Interface for reading an XML document using callbacks.

    XMLReader is the interface an XML parser's SAX2 driver must implement:
    applications use it to register event handlers, query and set features
    and properties, and initiate a document parse.

    All SAX interfaces are assumed synchronous -- parse() must not return
    until parsing is complete, and the reader waits for each handler
    callback to return before reporting the next event.
    """

    def __init__(self):
        # Start with no-op default handlers; applications replace them
        # through the setter methods below.
        self._cont_handler = handler.ContentHandler()
        self._dtd_handler = handler.DTDHandler()
        self._ent_handler = handler.EntityResolver()
        self._err_handler = handler.ErrorHandler()

    def parse(self, source):
        """Parse an XML document from a system identifier or an InputSource."""
        raise NotImplementedError("This method must be implemented!")

    def getContentHandler(self):
        """Return the current ContentHandler."""
        return self._cont_handler

    def setContentHandler(self, handler):
        """Register a new object to receive document content events."""
        self._cont_handler = handler

    def getDTDHandler(self):
        """Return the current DTD handler."""
        return self._dtd_handler

    def setDTDHandler(self, handler):
        """Register an object to receive basic DTD-related events."""
        self._dtd_handler = handler

    def getEntityResolver(self):
        """Return the current EntityResolver."""
        return self._ent_handler

    def setEntityResolver(self, resolver):
        """Register an object to resolve external entities."""
        self._ent_handler = resolver

    def getErrorHandler(self):
        """Return the current ErrorHandler."""
        return self._err_handler

    def setErrorHandler(self, handler):
        """Register an object to receive error-message events."""
        self._err_handler = handler

    def setLocale(self, locale):
        """Allow an application to set the locale for errors and warnings.

        Localization is optional; a parser that cannot honour the requested
        locale must raise a SAX exception.  A locale change may be requested
        in the middle of a parse.
        """
        raise SAXNotSupportedException("Locale support not implemented")

    def getFeature(self, name):
        """Look up and return the state of a SAX2 feature."""
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def setFeature(self, name, state):
        """Set the state of a SAX2 feature."""
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def getProperty(self, name):
        """Look up and return the value of a SAX2 property."""
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)

    def setProperty(self, name, value):
        """Set the value of a SAX2 property."""
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)
class IncrementalParser(XMLReader):
    """XMLReader extended with feed/close/reset for incremental parsing.

    Support for this interface is optional, since not every underlying XML
    parser can parse incrementally.  A freshly constructed parser accepts
    feed() data immediately; after close(), reset() must be called before
    parsing again (via feed or parse).  None of these methods may be called
    while a parse() is in progress.

    The default parse() is implemented on top of feed/close as a
    convenience to SAX 2.0 driver writers.
    """

    def __init__(self, bufsize=2**16):
        self._bufsize = bufsize
        XMLReader.__init__(self)

    def parse(self, source):
        import saxutils
        source = saxutils.prepare_input_source(source)
        self.prepareParser(source)
        stream = source.getByteStream()
        chunk = stream.read(self._bufsize)
        while chunk != "":
            self.feed(chunk)
            chunk = stream.read(self._bufsize)
        self.close()

    def feed(self, data):
        """Hand raw XML *data* to the parser, emitting events as it goes.

        XML constructs may be split across several feed() calls.
        May raise SAXException.
        """
        raise NotImplementedError("This method must be implemented!")

    def prepareParser(self, source):
        """Called by parse() so the SAX 2.0 driver can ready itself."""
        raise NotImplementedError("prepareParser must be overridden!")

    def close(self):
        """Signal that the whole document has been fed.

        Lets the parser run its final checks and drain its internal buffer.
        The parser is not ready for another document until reset() is
        called.  May raise SAXException.
        """
        raise NotImplementedError("This method must be implemented!")

    def reset(self):
        """Make the parser ready for a new document after close().

        Calling parse or feed after close without reset is undefined.
        """
        raise NotImplementedError("This method must be implemented!")
# ===== LOCATOR =====
class Locator:
    """Associates a SAX event with a document location.

    A locator's results are valid only during DocumentHandler callbacks; at
    any other time they are unpredictable.  This base implementation
    reports no location information.
    """

    def getColumnNumber(self):
        """Column number where the current event ends (-1 here)."""
        return -1

    def getLineNumber(self):
        """Line number where the current event ends (-1 here)."""
        return -1

    def getPublicId(self):
        """Public identifier for the current event (None here)."""
        return None

    def getSystemId(self):
        """System identifier for the current event (None here)."""
        return None
# ===== INPUTSOURCE =====
class InputSource:
    """Bundle of the information an XMLReader needs to read an entity.

    May carry a public identifier, a system identifier, a byte stream
    (possibly with its character encoding) and/or a character stream.
    Applications build InputSource objects for XMLReader.parse and
    return them from EntityResolver.resolveEntity.

    An InputSource belongs to the application; an XMLReader may copy,
    but must never modify, one handed to it by the application.
    """

    def __init__(self, system_id=None):
        self.__public_id = None
        self.__system_id = system_id
        self.__encoding = None
        self.__bytefile = None
        self.__charfile = None

    def setPublicId(self, public_id):
        "Sets the public identifier of this InputSource."
        self.__public_id = public_id

    def getPublicId(self):
        "Returns the public identifier of this InputSource."
        return self.__public_id

    def setSystemId(self, system_id):
        "Sets the system identifier of this InputSource."
        self.__system_id = system_id

    def getSystemId(self):
        "Returns the system identifier of this InputSource."
        return self.__system_id

    def setEncoding(self, encoding):
        """Sets the character encoding of this InputSource.

        The value must be acceptable in an XML encoding declaration
        (section 4.3.3 of the XML recommendation).  It is ignored when
        the InputSource also carries a character stream.
        """
        self.__encoding = encoding

    def getEncoding(self):
        "Get the character encoding of this InputSource."
        return self.__encoding

    def setByteStream(self, bytefile):
        """Attach a byte stream (a file-like object that performs no
        byte-to-character conversion).

        Ignored by SAX parsers when a character stream is also present,
        but preferred over opening a URI connection.  If the encoding
        of the bytes is known, record it with setEncoding.
        """
        self.__bytefile = bytefile

    def getByteStream(self):
        """Get the byte stream for this input source.

        getEncoding reports the stream's character encoding, or None
        when it is unknown.
        """
        return self.__bytefile

    def setCharacterStream(self, charfile):
        """Attach a character stream (a Unicode-wrapped file-like
        object yielding Unicode strings).

        When a character stream is present, SAX parsers ignore any
        byte stream and never open a URI connection themselves.
        """
        self.__charfile = charfile

    def getCharacterStream(self):
        "Get the character stream for this input source."
        return self.__charfile
# ===== ATTRIBUTESIMPL =====
class AttributesImpl:
    """Non-namespace-aware implementation of the SAX Attributes interface.

    Wraps a plain {name: value} mapping and exposes both the SAX
    accessor methods and a read-only mapping interface.
    """

    def __init__(self, attrs):
        """attrs should be of the form {name : value}."""
        self._attrs = attrs

    def getLength(self):
        "Number of attributes."
        return len(self._attrs)

    def getType(self, name):
        # every attribute is reported as CDATA in this implementation
        return "CDATA"

    def getValue(self, name):
        "Value of attribute `name`; raises KeyError when absent."
        return self._attrs[name]

    def getValueByQName(self, name):
        # without namespaces, the qualified name IS the name
        return self._attrs[name]

    def getNameByQName(self, name):
        if name not in self._attrs:
            # `raise KeyError, name` in the original is Python-2-only
            # syntax; this spelling is equivalent on Py2 and valid on Py3
            raise KeyError(name)
        return name

    def getQNameByName(self, name):
        if name not in self._attrs:
            raise KeyError(name)
        return name

    def getNames(self):
        return self._attrs.keys()

    def getQNames(self):
        return self._attrs.keys()

    def __len__(self):
        return len(self._attrs)

    def __getitem__(self, name):
        return self._attrs[name]

    def keys(self):
        return self._attrs.keys()

    def has_key(self, name):
        # kept for interface compatibility with existing callers
        return name in self._attrs

    def __contains__(self, name):
        # dict.has_key() was removed in Python 3; `in` is equivalent
        return name in self._attrs

    def get(self, name, alternative=None):
        return self._attrs.get(name, alternative)

    def copy(self):
        return self.__class__(self._attrs)

    def items(self):
        return self._attrs.items()

    def values(self):
        return self._attrs.values()
# ===== ATTRIBUTESNSIMPL =====
class AttributesNSImpl(AttributesImpl):
    """Namespace-aware implementation of the SAX Attributes interface."""

    def __init__(self, attrs, qnames):
        """attrs should be of the form {(ns_uri, lname): value, ...}.
        qnames of the form {(ns_uri, lname): qname, ...}."""
        self._attrs = attrs
        self._qnames = qnames

    def getValueByQName(self, name):
        # linear scan of the qname table: qnames are not a dict key here
        for nsname, qname in self._qnames.items():
            if qname == name:
                return self._attrs[nsname]
        # Python-2-only `raise KeyError, name` replaced by the portable form
        raise KeyError(name)

    def getNameByQName(self, name):
        for nsname, qname in self._qnames.items():
            if qname == name:
                return nsname
        raise KeyError(name)

    def getQNameByName(self, name):
        return self._qnames[name]

    def getQNames(self):
        return self._qnames.values()

    def copy(self):
        return self.__class__(self._attrs, self._qnames)
def _test():
    # Smoke test: the abstract interface classes must instantiate cleanly.
    for interface in (XMLReader, IncrementalParser, Locator):
        interface()

if __name__ == "__main__":
    _test()
| {
"repo_name": "valkjsaaa/sl4a",
"path": "python/src/Lib/xml/sax/xmlreader.py",
"copies": "60",
"size": "12638",
"license": "apache-2.0",
"hash": 1993893840719387000,
"line_mean": 32.1706036745,
"line_max": 78,
"alpha_frac": 0.6615761988,
"autogenerated": false,
"ratio": 4.653166421207659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001589118191096791,
"num_lines": 381
} |
""" an XML-RPC server to allow remote control of PyMol
Author: Greg Landrum (glandrum@users.sourceforge.net)
Created: January 2002
$LastChangedDate$
License: PyMol
Requires:
- a python xmlrpclib distribution containing the SimpleXMLRPCServer
module (1.0 or greater should be fine)
- python with threading enabled
RD Version: $Rev$
"""
import SimpleXMLRPCServer
import threading,sys,time,types,os,tempfile
from pymol import cmd,cgo
# initial port to try for the server
_xmlPort=9123
# number of alternate ports to try if the first fails
_nPortsToTry=5
def rpcCmd(cmdText):
    """Execute a PyMol API command string via cmd.do.

    Returns the command's result, or '' when the command returned None
    (XML-RPC cannot marshal None).
    """
    outcome = cmd.do(cmdText)
    if outcome is None:
        return ''
    return outcome
def rpcQuit():
    """Shut PyMol down; always returns 1."""
    cmd.quit()
    return 1
def rpcZoom(what=''):
    """Run cmd.zoom(what); always returns 1."""
    cmd.zoom(what)
    return 1
def rpcSet(prop, val, obj):
    """Execute a PyMol `set` command.

    Returns cmd.set's result, or '' when it returned None.
    """
    outcome = cmd.set(prop, val, obj)
    if outcome is None:
        return ''
    return outcome
def rpcGet(prop, obj):
    """Execute a PyMol `get` command.

    Returns cmd.get's result, or '' when it returned None.
    """
    outcome = cmd.get(prop, obj)
    if outcome is None:
        return ''
    return outcome
def rpcPing():
    """Liveness probe; always returns 1.

    Call this right after connecting to make sure the server is alive
    and the link works.
    """
    return 1
def rpcLabel(pos, labelText, id='lab1', color=(1, 1, 1)):
    """ create a text label
    Arguments:
      pos: a 3 tuple with the position of the label
      text: a string with the label
      color: a 3 tuple with the color of the label. (1,1,1) is white
      id: (OPTIONAL) the name of the object to be created
    NOTE:
      at the moment this is, how you say, a hack
    """
    x, y, z = pos
    # Build a one-atom mol block at the requested position; the label is then
    # attached to that dummy atom and the atom itself is hidden.
    # NOTE(review): the mol-block column layout below looks whitespace-mangled
    # (V2000 mol files are column-sensitive) -- verify the literal's spacing
    # against the upstream file before relying on it.
    text = """
Atom
1 0 0 0 0 0 0 0 0 0999 V2000
% 10.4f% 10.4f%10.4f C 0 0 0 0 0 0 0 0 0 0 0 0
M END""" % (x, y, z)
    cmd.read_molstr(text, id)
    cmd.label("%s" % (id), '"%s"' % labelText)
    # hide the dummy atom itself so only the label text shows
    cmd.hide("nonbonded", id)
    cmd.set_color("%s-color" % id, color)
    cmd.color("%s-color" % id, id)
    return 1
def rpcResetCGO(id):
    """Remove a CGO from the local dictionary.

    id: name of the stored CGO to drop; "*" clears all stored CGOs.
    Returns 1 on success, 0 when no CGO with that name exists.
    """
    global cgoDict
    if id == "*":
        cgoDict = {}
        res = 1
    elif id in cgoDict:
        # dict.has_key() in the original is Python-2-only; `in` is equivalent
        del cgoDict[id]
        res = 1
    else:
        res = 0
    return res
def rpcSphere(pos, rad, color, id='cgo', extend=1,
              transparent=0, transparency=0.5):
    """Add a sphere CGO to the object `id`.

    pos: 3-sequence with the sphere center
    rad: sphere radius
    color: RGB 3-sequence; (1,1,1) is white
    id: name of the CGO object to create/extend
    extend: when true (default), append to any CGO already stored under
      `id`; otherwise start a fresh primitive list
    transparent: make the object transparent
    transparency: fraction of transparency (alpha = 1 - transparency)
    """
    red, green, blue = color
    cx, cy, cz = pos
    if extend:
        prims = cgoDict.get(id, [])
    else:
        prims = []
    if transparent:
        newPrims = [cgo.ALPHA, 1 - transparency]
    else:
        newPrims = []
    newPrims.extend([cgo.COLOR, red, green, blue,
                     cgo.SPHERE, cx, cy, cz, rad])
    prims.extend(newPrims)
    cgoDict[id] = prims
    cmd.load_cgo(prims, id, 1)
    return 1
def rpcRenderCGO(cgoV, id='cgo', extend=1):
    """Render a raw CGO float vector as the object `id`.

    cgoV: a vector of floats
    id: name of the CGO object to create/extend
    extend: when true (default), append to any CGO already stored under
      `id`; otherwise render cgoV on its own
    """
    if extend:
        prims = cgoDict.get(id, [])
    else:
        prims = []
    prims.extend(cgoV)
    # note: unlike the sphere/cylinder helpers, the combined list is NOT
    # written back into cgoDict here
    cmd.load_cgo(prims, id, 1)
    return 1
def rpcSpheres(sphereD, id='cgo', extend=1):
    """Add a batch of sphere CGOs to the object `id`.

    sphereD: sequence of (pos, rad, color, transparent, transparency)
      tuples, one per sphere
    id: name of the CGO object to create/extend
    extend: when true (default), append to any CGO already stored under
      `id`; otherwise start a fresh primitive list
    """
    if extend:
        prims = cgoDict.get(id, [])
    else:
        prims = []
    for pos, rad, color, transparent, transparency in sphereD:
        red, green, blue = color
        cx, cy, cz = pos
        if transparent:
            newPrims = [cgo.ALPHA, 1 - transparency]
        else:
            newPrims = []
        newPrims.extend([cgo.COLOR, red, green, blue,
                         cgo.SPHERE, cx, cy, cz, rad])
        prims.extend(newPrims)
    cgoDict[id] = prims
    cmd.load_cgo(prims, id, 1)
    return 1
def rpcCylinder(end1, end2, rad, color1, id='cgo', color2=None, extend=1,
                transparent=0, transparency=0.5):
    """Add a cylinder CGO to the object `id`.

    end1, end2: 3-sequences with the cylinder endpoints
    rad: cylinder radius
    color1: RGB 3-sequence for the end1 color; (1,1,1) is white
    id: name of the CGO object to create/extend
    color2: RGB for the end2 color; defaults to color1
    extend: when true (default), append to any CGO already stored under
      `id`; otherwise start a fresh primitive list
    transparent: make the object transparent
    transparency: fraction of transparency (alpha = 1 - transparency)

    color2 follows id because callers are expected to set the id more
    often than a second color.
    """
    global cgoDict
    if color2 is None:
        color2 = color1
    r1, g1, b1 = color1
    r2, g2, b2 = color2
    x1, y1, z1 = end1
    x2, y2, z2 = end2
    if extend:
        prims = cgoDict.get(id, [])
    else:
        prims = []
    if transparent:
        newPrims = [cgo.ALPHA, 1 - transparency]
    else:
        newPrims = []
    newPrims.extend([cgo.CYLINDER, x1, y1, z1, x2, y2, z2, rad,
                     r1, g1, b1, r2, g2, b2])
    prims.extend(newPrims)
    cgoDict[id] = prims
    cmd.load_cgo(prims, id, 1)
    return 1
def rpcShow(objs):
    """Show (enable) an object or a sequence of objects.

    Returns 1 on success, 0 if enabling any object failed.
    """
    # isinstance replaces the Python-2-only types.ListType/TupleType check
    if not isinstance(objs, (list, tuple)):
        objs = (objs,)
    # the original only assigned res inside the loop, so an empty sequence
    # raised UnboundLocalError; an empty request trivially succeeds
    res = 1
    for objName in objs:
        try:
            cmd.enable(objName)
        except Exception:
            # a bare `except:` would also swallow KeyboardInterrupt/SystemExit
            res = 0
            break
    return res
def rpcHide(objs):
    """Hide (disable) an object or a sequence of objects.

    Returns 1 on success, 0 if disabling any object failed.
    """
    # isinstance replaces the Python-2-only types.ListType/TupleType check
    if not isinstance(objs, (list, tuple)):
        objs = (objs,)
    # the original only assigned res inside the loop, so an empty sequence
    # raised UnboundLocalError; an empty request trivially succeeds
    res = 1
    for objName in objs:
        try:
            cmd.disable(objName)
        except Exception:
            # a bare `except:` would also swallow KeyboardInterrupt/SystemExit
            res = 0
            break
    return res
def rpcDeleteObject(objName):
    """Delete an object; returns 1 on success, 0 on failure."""
    try:
        cmd.delete(objName)
    except Exception:
        # narrowed from the original bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit (the second copy of this file in the
        # repo already uses `except Exception:`)
        return 0
    return 1
def rpcDeleteAll():
    """Delete every object via cmd.delete('all').

    Returns cmd.delete's result, or '' when it returned None.
    """
    outcome = cmd.delete('all')
    if outcome is None:
        return ''
    return outcome
def colorObj(objName, colorScheme):
    """Apply a color scheme to a molecule.

    objName: the object (molecule) to recolor
    colorScheme: 'std' for the built-in gray-carbon scheme, or the name
      of a coloring function (should be either 'std' or one of the color
      schemes defined in pymol.utils)
    Returns 1 when a scheme name was given, 0 otherwise.
    """
    if colorScheme:
        if colorScheme == 'std':
            # adaptation of the cbag scheme from util.py, but with a
            # gray carbon
            cmd.color("magenta", "(" + objName + ")", quiet=1)
            cmd.color("oxygen", "(elem O and " + objName + ")", quiet=1)
            cmd.color("nitrogen", "(elem N and " + objName + ")", quiet=1)
            cmd.color("sulfur", "(elem S and " + objName + ")", quiet=1)
            cmd.color("hydrogen", "(elem H and " + objName + ")", quiet=1)
            cmd.color("gray", "(elem C and " + objName + ")", quiet=1)
        elif hasattr(utils, colorScheme):
            # NOTE(review): `utils` is never imported in this module (other
            # functions import pymol's `util`), so this branch raises
            # NameError as written -- confirm the intended module before
            # relying on named color schemes.
            fn = getattr(utils, colorScheme)
            fn(objName, quiet=1)
        res = 1
    else:
        res = 0
    return res
def rpcLoadPDB(data, objName, colorScheme='', replace=1):
    """Load a molecule from a PDB-format string.

    data: the PDB text
    objName: name of the object to create
    colorScheme: optional scheme name handed to colorObj ('std' or one
      of the color schemes defined in pymol.utils)
    replace: when true (default), delete any same-named object first
    Returns cmd.read_pdbstr's result, or '' when it returned None.
    """
    from pymol import util
    if replace:
        cmd.delete(objName)
    outcome = cmd.read_pdbstr(data, objName)
    colorObj(objName, colorScheme)
    if outcome is None:
        return ''
    return outcome
def rpcLoadMolBlock(data, objName, colorScheme='', replace=1):
    """Load a molecule from a mol block.

    data: the mol block text
    objName: name of the object to create
    colorScheme: optional scheme name handed to colorObj ('std' or one
      of the color schemes defined in pymol.utils)
    replace: when true (default), delete any same-named object first
    Returns cmd.read_molstr's result, or '' when it returned None.
    """
    from pymol import util
    if replace:
        cmd.delete(objName)
    outcome = cmd.read_molstr(data, objName)
    colorObj(objName, colorScheme)
    if outcome is None:
        return ''
    return outcome
def rpcLoadFile(fileName, objName='', format='', colorScheme='', replace=1):
    """Load an object from a file on disk.

    fileName: file to load
    objName: optional object name; defaults to the file's base name
    format: optional input-format hint passed to cmd.load
    colorScheme: optional scheme name handed to colorObj
    replace: when true (default), delete any same-named object first
    Returns cmd.load's result, or '' when it returned None.
    """
    if not objName:
        objName = fileName.split('.')[0]
    if replace:
        cmd.delete(objName)
    outcome = cmd.load(fileName, objName, format=format)
    colorObj(objName, colorScheme)
    if outcome is None:
        return ''
    return outcome
def rpcLoadSurface(fileName, objName='', format='', surfaceLevel=1.0):
    """Load grid data from a file and add an isosurface.

    fileName: the file to load
    objName: (OPTIONAL) name of the surface object to create; defaults
      to the file's base name (the docstring already promised this, but
      the parameter previously had no default)
    format: (OPTIONAL) the format of the input file
    surfaceLevel: (OPTIONAL) the isosurface level
    Returns cmd.load's result, or '' when it returned None.
    """
    if not objName:
        objName = fileName.split('.')[0]
    gridName = 'grid-%s' % objName
    # the original passed format='' here, silently discarding the caller's
    # format argument
    res = cmd.load(fileName, gridName, format=format)
    cmd.isosurface(objName, gridName, level=surfaceLevel)
    if res is None:
        return ''
    return res
def rpcLoadSurfaceData(data, objName='surface', format='', surfaceLevel=1.0):
    """Load grid data from a string and add an isosurface.

    data: the grid data to load
    objName: (OPTIONAL) name of the surface object to create
    format: (OPTIONAL) the format of the input data
    surfaceLevel: (OPTIONAL) the isosurface level
    Returns rpcLoadSurface's result, or '' when it returned None.
    """
    # it would be nice if we didn't have to go by way of the temporary file,
    # but at the moment pymol will only read shapes from files.
    # tempfile.mkstemp replaces the race-prone tempfile.mktemp used before,
    # and the handle is now closed explicitly instead of being leaked.
    fd, tempnm = tempfile.mkstemp('.grd')
    outF = os.fdopen(fd, 'w+')
    try:
        outF.write(data)
    finally:
        outF.close()
    try:
        # the original passed format='' here, discarding the caller's format;
        # it also leaked the temp file if loading raised
        res = rpcLoadSurface(tempnm, objName, format=format,
                             surfaceLevel=surfaceLevel)
    finally:
        os.unlink(tempnm)
    if res is None:
        return ''
    return res
def rpcSave(filename, objName='all', state=0, format=''):
    """Run a cmd.save command.

    filename: output filename
    objName: (OPTIONAL) object(s) to be saved
    state: (OPTIONAL) state selector forwarded to cmd.save
    format: (OPTIONAL) output format
    Returns cmd.save's result, or '' when it returned None.
    """
    outcome = cmd.save(filename, objName, state, format)
    if outcome is None:
        return ''
    return outcome
def rpcRotate(vect, objName='', state=-1):
    """Rotate objects about the x, y and z axes.

    vect: sequence with the x, y and z rotation angles
    objName: (OPTIONAL) object to be rotated
    state: (OPTIONAL) 0 rotates only visible states; -1 (default)
      rotates all states
    """
    for axisIdx, axis in enumerate(('x', 'y', 'z')):
        cmd.rotate(axis, vect[axisIdx], objName, state=state)
    return 1
def rpcTranslate(vect, objName='all', state=-1):
    """Translate objects.

    vect: sequence with the x, y and z translations
    objName: (OPTIONAL) object to be translated
    state: (OPTIONAL) 0 translates only visible states; -1 (default)
      translates all states
    """
    # the original passed the misspelled name `objNAme`, which raised
    # NameError on every call
    cmd.translate(vect, objName, state=state)
    return 1
# -- thin XML-RPC wrappers around PyMol's query commands --

def rpcGetNames(what='selections', enabledOnly=1):
    """ returns the results of cmd.get_names(what) """
    # enabledOnly is forwarded as cmd.get_names' enabled_only keyword
    return cmd.get_names(what, enabled_only=enabledOnly)

def rpcIdentify(what='all', mode=0):
    """ returns the results of cmd.identify(what,mode) """
    return cmd.identify(what, mode=mode)

def rpcIndex(what='all'):
    """ returns the results of cmd.index(what) """
    return cmd.index(what)

def rpcCountAtoms(what='all'):
    """ returns the results of cmd.count_atoms(what) """
    return cmd.count_atoms(what)

def rpcIdAtom(what='all', mode=0):
    """ returns the results of cmd.id_atom(what) """
    return cmd.id_atom(what, mode=mode)

def rpcGetAtomCoords(what='all', state=0):
    """ returns the results of cmd.get_atom_coords(what,state) """
    return cmd.get_atom_coords(what, state=state)
def rpcHelp(what=''):
    """Return the list of exposed commands, or help for one command.

    what: (OPTIONAL) name of a registered command; when empty, the
      names of all registered commands are returned.
    Returns a signature+docstring string, the name listing, or
    'Command Not Found'.
    """
    global serv
    res = 'Command Not Found'
    if not what:
        res = serv.funcs.keys()
    else:
        funcs = serv.funcs
        # `funcs.has_key(what)` in the original is Python-2-only
        if what in funcs:
            fn = funcs[what]
            res = "Function: %s(" % what
            # __defaults__/__code__/__doc__ replace the Python-2-only
            # func_defaults/func_code/func_doc aliases; they name the
            # same objects on Python 2.6+
            defs = fn.__defaults__
            if defs:
                code = fn.__code__
                nDefs = len(defs)
                args = []
                i = -1
                # positional (non-defaulted) argument names first
                for i in range(code.co_argcount - nDefs):
                    args.append(code.co_varnames[i])
                # then the defaulted arguments with their default values
                for j in range(nDefs):
                    vName = code.co_varnames[j + i + 1]
                    args.append("%s=%s" % (vName, repr(defs[j])))
                res += ','.join(args)
            res += ')\n'
            if fn.__doc__:
                res += fn.__doc__
    return res
def launch_XMLRPC(hostname='', port=_xmlPort, nToTry=_nPortsToTry):
    """Launch the xmlrpc server into a separate (daemon) thread.

    hostname: (OPTIONAL) host name for the server; defaults to the
      PYMOL_RPCHOST environment variable or 'localhost'
    nToTry: (OPTIONAL) number of consecutive ports to try if the first
      cannot be opened
    port: (OPTIONAL) the first port to try for the server
    """
    if not hostname:
        import os
        hostname = os.environ.get('PYMOL_RPCHOST', '')
        if not hostname or hostname.upper() == 'LOCALHOST':
            hostname = 'localhost'
        else:
            import socket
            hostname = socket.gethostbyname(socket.gethostname())
    global cgoDict, serv
    cgoDict = {}
    for i in range(nToTry):
        try:
            serv = SimpleXMLRPCServer.SimpleXMLRPCServer((hostname, port + i),
                                                         logRequests=0)
        except Exception:
            # the original's bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit
            serv = None
        else:
            break
    if serv:
        # single-argument print(...) emits identical output under Python 2's
        # print statement and is valid Python 3 syntax (the original used the
        # Python-2-only statement form)
        print('xml-rpc server running on host %s, port %d' % (hostname, port + i))
        serv.register_function(rpcCmd, 'do')
        serv.register_function(rpcQuit, 'quit')
        serv.register_function(rpcSet, 'set')
        serv.register_function(rpcGet, 'get')
        serv.register_function(rpcPing, 'ping')
        serv.register_function(rpcResetCGO, 'resetCGO')
        serv.register_function(rpcRenderCGO, 'renderCGO')
        serv.register_function(rpcSphere, 'sphere')
        serv.register_function(rpcSpheres, 'spheres')
        serv.register_function(rpcCylinder, 'cylinder')
        serv.register_function(rpcHide, 'hide')
        serv.register_function(rpcShow, 'show')
        serv.register_function(rpcZoom, 'zoom')
        serv.register_function(rpcDeleteObject, 'deleteObject')
        serv.register_function(rpcDeleteAll, 'deleteAll')
        serv.register_function(rpcLoadPDB, 'loadPDB')
        serv.register_function(rpcLoadMolBlock, 'loadMolBlock')
        serv.register_function(rpcLoadSurface, 'loadSurface')
        serv.register_function(rpcLoadSurfaceData, 'loadSurfaceData')
        serv.register_function(rpcLoadFile, 'loadFile')
        serv.register_function(rpcSave, 'save')
        serv.register_function(rpcLabel, 'label')
        serv.register_function(rpcRotate, 'rotate')
        serv.register_function(rpcTranslate, 'translate')
        serv.register_function(rpcGetNames, 'getNames')
        serv.register_function(rpcIdentify, 'identify')
        serv.register_function(rpcIndex, 'index')
        serv.register_function(rpcCountAtoms, 'countAtoms')
        serv.register_function(rpcIdAtom, 'idAtom')
        serv.register_function(rpcHelp, 'help')
        serv.register_function(rpcGetAtomCoords, 'getAtomCoords')
        serv.register_introspection_functions()
        t = threading.Thread(target=serv.serve_forever)
        t.setDaemon(1)
        t.start()
    else:
        print('xml-rpc server could not be started')
| {
"repo_name": "rdkit/rdkit-orig",
"path": "External/pymol/modules/pymol/rpc.py",
"copies": "1",
"size": "16968",
"license": "bsd-3-clause",
"hash": -5787077171439044000,
"line_mean": 27.2329450915,
"line_max": 83,
"alpha_frac": 0.6523455917,
"autogenerated": false,
"ratio": 3.4362089914945324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45885545831945324,
"avg_score": null,
"num_lines": null
} |
""" an XML-RPC server to allow remote control of PyMol
Author: Greg Landrum (glandrum@users.sourceforge.net)
Created: January 2002
$LastChangedDate$
License: This file is part of the RDKit. The contents are covered by the terms of
the BSD license which is included in the file license.txt, found at the
root of the RDKit source tree.
Requires:
- a python xmlrpclib distribution containing the SimpleXMLRPCServer
module (1.0 or greater should be fine)
- python with threading enabled
RD Version: $Rev$
"""
from __future__ import print_function
import SimpleXMLRPCServer
import threading,sys,time,types,os,tempfile
from pymol import cmd,cgo
# initial port to try for the server
_xmlPort=9123
# number of alternate ports to try if the first fails
_nPortsToTry=5
# -- thin XML-RPC endpoint wrappers around the PyMol cmd API --
# XML-RPC cannot marshal None, so None results are mapped to ''.

def rpcCmd(cmdText):
    """ executes a PyMol API command
    return value is either the result of the command or the empty string
    """
    res = cmd.do(cmdText)
    if res is not None:
        return res
    else:
        return ''

def rpcQuit():
    """ causes PyMol to quit """
    cmd.quit()
    return 1

def rpcZoom(what=''):
    """ executes cmd.zoom(what) """
    cmd.zoom(what)
    return 1

def rpcSet(prop, val, obj):
    """ executes a PyMol set command
    return value is either the result of the command or the empty string
    """
    res = cmd.set(prop, val, obj)
    if res is not None:
        return res
    else:
        return ''

def rpcGet(prop, obj):
    """ executes a PyMol get command
    return value is either the result of the command or the empty string
    """
    res = cmd.get(prop, obj)
    if res is not None:
        return res
    else:
        return ''
def rpcPing():
    """Liveness probe; always returns 1.

    Call this right after connecting to make sure the server is alive
    and the link works.
    """
    return 1
def rpcLabel(pos,labelText,id='lab1',color=(1,1,1)):
""" create a text label
Arguments:
pos: a 3 tuple with the position of the label
text: a string with the label
color: a 3 tuple with the color of the label. (1,1,1) is white
id: (OPTIONAL) the name of the object to be created
NOTE:
at the moment this is, how you say, a hack
"""
x,y,z = pos
text="""
Atom
1 0 0 0 0 0 0 0 0 0999 V2000
% 10.4f% 10.4f%10.4f C 0 0 0 0 0 0 0 0 0 0 0 0
M END"""%(x,y,z)
cmd.read_molstr(text,id)
cmd.label("%s"%(id),'"%s"'%labelText)
cmd.hide("nonbonded",id)
cmd.set_color("%s-color"%id,color)
cmd.color("%s-color"%id,id)
return 1
def rpcResetCGO(id):
    """Remove a CGO from the local dictionary.

    id: name of the stored CGO to drop; "*" clears all stored CGOs.
    Returns 1 on success, 0 when no CGO with that name exists.
    """
    global cgoDict
    if id == "*":
        cgoDict = {}
        res = 1
    elif id in cgoDict:
        # dict.has_key() in the original is Python-2-only; `in` is equivalent
        del cgoDict[id]
        res = 1
    else:
        res = 0
    return res
def rpcSphere(pos,rad,color,id='cgo',extend=1,
transparent=0,transparency=0.5):
""" create a sphere
Arguments:
pos: a 3 tuple with the position of the sphere
rad: a float with the radius
color: a 3 tuple with the color of the sphere. (1,1,1) is white
id: (OPTIONAL) the name of the object to be created
extend: (OPTIONAL) if this is nonzero, the object will be cleared
before adding the new sphere. Otherwise the sphere is appended
to the ojbect
transparent: (OPTIONAL) sets the object to be transparent
transparency: (OPTIONAL) the percent transparency of the object
"""
r,g,b = color
x,y,z = pos
if extend:
obj = cgoDict.get(id,[])
else:
obj = []
if not transparent:
o = []
else:
o = [cgo.ALPHA,1-transparency]
o.extend([cgo.COLOR,r,g,b,cgo.SPHERE,x,y,z,rad])
obj.extend(o)
cgoDict[id] = obj
cmd.load_cgo(obj,id,1)
return 1
def rpcRenderCGO(cgoV,id='cgo',extend=1):
""" renders a CGO vector
Arguments:
cgoV: a vector of floats
id: (OPTIONAL) the name of the object to be created
extend: (OPTIONAL) if this is nonzero, the object will be cleared
before adding the new sphere. Otherwise the sphere is appended
to the ojbect
"""
if extend:
obj = cgoDict.get(id,[])
else:
obj = []
obj.extend(cgoV)
cmd.load_cgo(obj,id,1)
return 1
def rpcSpheres(sphereD,id='cgo',extend=1):
""" create a sphere
Arguments:
sphereD: a series of (pos,rad,color,transparent,transparency) tuples
id: (OPTIONAL) the name of the object to be created
extend: (OPTIONAL) if this is nonzero, the object will be cleared
before adding the new sphere. Otherwise the sphere is appended
to the ojbect
"""
if extend:
obj = cgoDict.get(id,[])
else:
obj = []
for pos,rad,color,transparent,transparency in sphereD:
r,g,b = color
x,y,z = pos
if not transparent:
o = []
else:
o = [cgo.ALPHA,1-transparency]
o.extend([cgo.COLOR,r,g,b,cgo.SPHERE,x,y,z,rad])
obj.extend(o)
cgoDict[id] = obj
cmd.load_cgo(obj,id,1)
return 1
def rpcCylinder(end1,end2,rad,color1,id='cgo',color2=None,extend=1,
transparent=0,transparency=0.5):
""" create a cylinder
Arguments:
end1: a 3 tuple with the position of end1 of the sphere
end2: a 3 tuple with the position of end1 of the sphere
rad: a float with the radius
color1: a 3 tuple with the color of end1 of the sphere. (1,1,1) is white
id: (OPTIONAL) the name of the object to be created
color2: (OPTIONAL) a 3 tuple with the color of end2 of the sphere. (1,1,1)
is white
extend: (OPTIONAL) if this is nonzero, the object will be cleared
before adding the new sphere. Otherwise the sphere is appended
to the ojbect
transparent: (OPTIONAL) sets the object to be transparent
transparency: (OPTIONAL) the percent transparency of the object
NOTE: the reason that color2 follows id is that I think clients are
going to be interested in setting the id more often than they are going
to care about the second color.
"""
global cgoDict
if color2 is None: color2 = color1
r1,g1,b1 = color1
r2,g2,b2 = color2
x1,y1,z1 = end1
x2,y2,z2 = end2
if extend:
obj = cgoDict.get(id,[])
else:
obj = []
if not transparent:
o = []
else:
o = [cgo.ALPHA,1-transparency]
o.extend([cgo.CYLINDER,x1,y1,z1,x2,y2,z2,rad,r1,g1,b1,r2,g2,b2,])
obj.extend(o)
cgoDict[id] = obj
cmd.load_cgo(obj,id,1)
return 1
def rpcShow(objs):
    """Show (enable) an object or a sequence of objects.

    Returns 1 on success, 0 if enabling any object failed.
    """
    # isinstance replaces the Python-2-only types.ListType/TupleType check
    if not isinstance(objs, (list, tuple)):
        objs = (objs,)
    # the original only assigned res inside the loop, so an empty sequence
    # raised UnboundLocalError; an empty request trivially succeeds
    res = 1
    for objName in objs:
        try:
            cmd.enable(objName)
        except Exception:
            res = 0
            break
    return res
def rpcHide(objs):
    """Hide (disable) an object or a sequence of objects.

    Returns 1 on success, 0 if disabling any object failed.
    """
    # isinstance replaces the Python-2-only types.ListType/TupleType check
    if not isinstance(objs, (list, tuple)):
        objs = (objs,)
    # the original only assigned res inside the loop, so an empty sequence
    # raised UnboundLocalError; an empty request trivially succeeds
    res = 1
    for objName in objs:
        try:
            cmd.disable(objName)
        except Exception:
            res = 0
            break
    return res
def rpcDeleteObject(objName):
    """Delete an object; returns 1 on success, 0 on failure."""
    try:
        cmd.delete(objName)
    except Exception:
        return 0
    return 1
def rpcDeleteAll():
    """ deletes all objects """
    # None results are mapped to '' because XML-RPC cannot marshal None
    res = cmd.delete('all')
    if res is not None:
        return res
    else:
        return ''
def colorObj(objName, colorScheme):
    """Apply a color scheme to a molecule.

    objName: the object (molecule) to recolor
    colorScheme: 'std' for the built-in gray-carbon scheme, or the name
      of a coloring function (should be either 'std' or one of the color
      schemes defined in pymol.utils)
    Returns 1 when a scheme name was given, 0 otherwise.
    """
    if colorScheme:
        if colorScheme == 'std':
            # adaptation of the cbag scheme from util.py, but with a
            # gray carbon
            cmd.color("magenta", "(" + objName + ")", quiet=1)
            cmd.color("oxygen", "(elem O and " + objName + ")", quiet=1)
            cmd.color("nitrogen", "(elem N and " + objName + ")", quiet=1)
            cmd.color("sulfur", "(elem S and " + objName + ")", quiet=1)
            cmd.color("hydrogen", "(elem H and " + objName + ")", quiet=1)
            cmd.color("gray", "(elem C and " + objName + ")", quiet=1)
        elif hasattr(utils, colorScheme):
            # NOTE(review): `utils` is never imported in this module (other
            # functions import pymol's `util`), so this branch raises
            # NameError as written -- confirm the intended module before
            # relying on named color schemes.
            fn = getattr(utils, colorScheme)
            fn(objName, quiet=1)
        res = 1
    else:
        res = 0
    return res
def rpcLoadPDB(data,objName,colorScheme='',replace=1):
""" loads a molecule from a pdb string
Arguments:
data: the mol block
objName: name of the object to create
colorScheme: (OPTIONAL) name of the color scheme to use
for the molecule (should be either 'std' or one of the
color schemes defined in pymol.utils)
replace: (OPTIONAL) if an object with the same name already
exists, delete it before adding this one
"""
from pymol import util
if replace:
cmd.delete(objName)
res = cmd.read_pdbstr(data,objName)
colorObj(objName,colorScheme)
if res is not None:
return res
else:
return ''
def rpcLoadMolBlock(data,objName,colorScheme='',replace=1):
""" loads a molecule from a mol block
Arguments:
data: the mol block
objName: name of the object to create
colorScheme: (OPTIONAL) name of the color scheme to use
for the molecule (should be either 'std' or one of the
color schemes defined in pymol.utils)
replace: (OPTIONAL) if an object with the same name already
exists, delete it before adding this one
"""
from pymol import util
if replace:
cmd.delete(objName)
res = cmd.read_molstr(data,objName)
colorObj(objName,colorScheme)
if res is not None:
return res
else:
return ''
def rpcLoadFile(fileName,objName='',format='',colorScheme='',replace=1):
""" loads an object from a file
Arguments:
fileName: the file to load
objName: (OPTIONAL) name of the object to create
format: (OPTIONAL) the format of the input file
colorScheme: (OPTIONAL) name of the color scheme to use
for the object (should be either 'std' or one of the
color schemes defined in pymol.utils)
replace: (OPTIONAL) if an object with the same name already
exists, delete it before adding this one
"""
if not objName:
objName = fileName.split('.')[0]
if replace:
cmd.delete(objName)
res = cmd.load(fileName,objName,format=format)
colorObj(objName,colorScheme)
if res is not None:
return res
else:
return ''
def rpcLoadSurface(fileName, objName='', format='', surfaceLevel=1.0):
    """Load grid data from a file and add an isosurface.

    fileName: the file to load
    objName: (OPTIONAL) name of the surface object to create; defaults
      to the file's base name (the docstring already promised this, but
      the parameter previously had no default)
    format: (OPTIONAL) the format of the input file
    surfaceLevel: (OPTIONAL) the isosurface level
    Returns cmd.load's result, or '' when it returned None.
    """
    if not objName:
        objName = fileName.split('.')[0]
    gridName = 'grid-%s' % objName
    # the original passed format='' here, silently discarding the caller's
    # format argument
    res = cmd.load(fileName, gridName, format=format)
    cmd.isosurface(objName, gridName, level=surfaceLevel)
    if res is None:
        return ''
    return res
def rpcLoadSurfaceData(data, objName='surface', format='', surfaceLevel=1.0):
    """Load grid data from a string and add an isosurface.

    data: the grid data to load
    objName: (OPTIONAL) name of the surface object to create
    format: (OPTIONAL) the format of the input data
    surfaceLevel: (OPTIONAL) the isosurface level
    Returns rpcLoadSurface's result, or '' when it returned None.
    """
    # it would be nice if we didn't have to go by way of the temporary file,
    # but at the moment pymol will only read shapes from files.
    # tempfile.mkstemp replaces the race-prone tempfile.mktemp used before,
    # and the handle is now closed explicitly instead of being leaked.
    fd, tempnm = tempfile.mkstemp('.grd')
    outF = os.fdopen(fd, 'w+')
    try:
        outF.write(data)
    finally:
        outF.close()
    try:
        # the original passed format='' here, discarding the caller's format;
        # it also leaked the temp file if loading raised
        res = rpcLoadSurface(tempnm, objName, format=format,
                             surfaceLevel=surfaceLevel)
    finally:
        os.unlink(tempnm)
    if res is None:
        return ''
    return res
def rpcSave(filename, objName='all', state=0, format=''):
    """ executes a cmd.save command
    Arguments:
     - filename: output filename
     - objName: (OPTIONAL) object(s) to be saved
     - state: (OPTIONAL)
     - format: (OPTIONAL) output format
    """
    # None results are mapped to '' because XML-RPC cannot marshal None
    res = cmd.save(filename, objName, state, format)
    if res is not None:
        return res
    else:
        return ''

def rpcRotate(vect, objName='', state=-1):
    """ rotates objects
    Arguments:
     - vect: a sequence with x y and z rotations
     - objName: (OPTIONAL) object to be rotated
     - state: (OPTIONAL) if zero only visible states are rotated,
       if -1 (the default), all states are rotated
    """
    # one cmd.rotate call per axis; vect must provide at least 3 entries
    cmd.rotate('x', vect[0], objName, state=state)
    cmd.rotate('y', vect[1], objName, state=state)
    cmd.rotate('z', vect[2], objName, state=state)
    return 1
def rpcTranslate(vect, objName='all', state=-1):
    """Translate objects.

    vect: sequence with the x, y and z translations
    objName: (OPTIONAL) object to be translated
    state: (OPTIONAL) 0 translates only visible states; -1 (default)
      translates all states
    """
    # the original passed the misspelled name `objNAme`, which raised
    # NameError on every call
    cmd.translate(vect, objName, state=state)
    return 1
# -- thin XML-RPC wrappers around PyMol's query commands --

def rpcGetNames(what='selections', enabledOnly=1):
    """ returns the results of cmd.get_names(what) """
    # enabledOnly is forwarded as cmd.get_names' enabled_only keyword
    return cmd.get_names(what, enabled_only=enabledOnly)

def rpcIdentify(what='all', mode=0):
    """ returns the results of cmd.identify(what,mode) """
    return cmd.identify(what, mode=mode)

def rpcIndex(what='all'):
    """ returns the results of cmd.index(what) """
    return cmd.index(what)

def rpcCountAtoms(what='all'):
    """ returns the results of cmd.count_atoms(what) """
    return cmd.count_atoms(what)

def rpcIdAtom(what='all', mode=0):
    """ returns the results of cmd.id_atom(what) """
    return cmd.id_atom(what, mode=mode)

def rpcGetAtomCoords(what='all', state=0):
    """ returns the results of cmd.get_atom_coords(what,state) """
    return cmd.get_atom_coords(what, state=state)
def rpcHelp(what=''):
    """ returns general help text or help on a particular command

    Arguments:
      - what: (OPTIONAL) name of a registered RPC function; when empty,
        the list of all registered function names is returned instead
    """
    global serv
    res = 'Command Not Found'
    if not what:
        # return a plain list: dict views do not marshal over XML-RPC on Python 3
        res = list(serv.funcs.keys())
    else:
        funcs = serv.funcs
        if what in funcs:  # dict.has_key() was removed in Python 3
            fn = funcs[what]
            res = "Function: %s("%what
            # __defaults__/__code__/__doc__ replace the Python-2-only
            # func_defaults/func_code/func_doc aliases (available since 2.6)
            defs = fn.__defaults__
            if defs:
                code = fn.__code__
                nDefs = len(defs)
                args = []
                i = -1
                for i in range(code.co_argcount - nDefs):
                    args.append(code.co_varnames[i])
                for j in range(nDefs):
                    vName = code.co_varnames[j+i+1]
                    args.append("%s=%s"%(vName,repr(defs[j])))
                res += ','.join(args)
            # NOTE(review): arguments are only listed when the function has
            # defaults -- preserved from the original behavior.
            res += ')\n'
            if fn.__doc__:
                res += fn.__doc__
    return res
def launch_XMLRPC(hostname='',port=_xmlPort,nToTry=_nPortsToTry):
    """ launches the xmlrpc server into a separate thread

    Arguments:
      hostname: (OPTIONAL) name of the host for the server
        (defaults to be the name of the localhost)
      port: (OPTIONAL) the first port to try for the server
      nToTry: (OPTIONAL) the number of possible ports to try
        (in case the first can't be opened)
    """
    if not hostname:
        import os
        # allow overriding the bind host via the PYMOL_RPCHOST env variable
        hostname = os.environ.get('PYMOL_RPCHOST','')
    if not hostname or hostname.upper()=='LOCALHOST':
        hostname = 'localhost'
    else:
        import socket
        # resolve the local machine name to an IP address
        hostname=socket.gethostbyname(socket.gethostname())
    global cgoDict,serv
    cgoDict = {}
    # probe nToTry consecutive ports until one can be bound
    for i in range(nToTry):
        try:
            serv = SimpleXMLRPCServer.SimpleXMLRPCServer((hostname,port+i),logRequests=0)
        except Exception:
            serv = None
        else:
            break
    if serv:
        print('xml-rpc server running on host %s, port %d'%(hostname,port+i))
        # expose the rpc* wrappers under their public XML-RPC names
        serv.register_function(rpcCmd,'do')
        serv.register_function(rpcQuit,'quit')
        serv.register_function(rpcSet,'set')
        serv.register_function(rpcGet,'get')
        serv.register_function(rpcPing,'ping')
        serv.register_function(rpcResetCGO,'resetCGO')
        serv.register_function(rpcRenderCGO,'renderCGO')
        serv.register_function(rpcSphere,'sphere')
        serv.register_function(rpcSpheres,'spheres')
        serv.register_function(rpcCylinder,'cylinder')
        serv.register_function(rpcHide,'hide')
        serv.register_function(rpcShow,'show')
        serv.register_function(rpcZoom,'zoom')
        serv.register_function(rpcDeleteObject,'deleteObject')
        serv.register_function(rpcDeleteAll,'deleteAll')
        serv.register_function(rpcLoadPDB,'loadPDB')
        serv.register_function(rpcLoadMolBlock,'loadMolBlock')
        serv.register_function(rpcLoadSurface,'loadSurface')
        serv.register_function(rpcLoadSurfaceData,'loadSurfaceData')
        serv.register_function(rpcLoadFile,'loadFile')
        serv.register_function(rpcSave,'save')
        serv.register_function(rpcLabel,'label')
        serv.register_function(rpcRotate,'rotate')
        serv.register_function(rpcTranslate,'translate')
        serv.register_function(rpcGetNames,'getNames')
        serv.register_function(rpcIdentify,'identify')
        serv.register_function(rpcIndex,'index')
        serv.register_function(rpcCountAtoms,'countAtoms')
        serv.register_function(rpcIdAtom,'idAtom')
        serv.register_function(rpcHelp,'help')
        serv.register_function(rpcGetAtomCoords,'getAtomCoords')
        serv.register_introspection_functions()
        # serve in a daemon thread so the interpreter can exit cleanly
        t = threading.Thread(target=serv.serve_forever)
        t.setDaemon(1)
        t.start()
    else:
        print('xml-rpc server could not be started')
| {
"repo_name": "adalke/rdkit",
"path": "External/pymol/modules/pymol/rpc.py",
"copies": "1",
"size": "17243",
"license": "bsd-3-clause",
"hash": -3496266979421166000,
"line_mean": 27.548013245,
"line_max": 84,
"alpha_frac": 0.6534245781,
"autogenerated": false,
"ratio": 3.4534348087322253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9135631819887181,
"avg_score": 0.09424551338900883,
"num_lines": 604
} |
"""Any auxiliary code, that we might need."""
import os
import json
# Identifiers for the runtime environments this app can be deployed to.
ENV_DEV = "dev"
"""When run on localhots (dev env)."""
ENV_BM = "BM"
"""When run on IBM Bluemix."""
def select_run_location():
    """Decide where is the server running.

    Returns one of the ENV_* constants; currently hard-coded to ENV_DEV.
    """
    # TODO: for now, this is not "smart", so lets assume, that it's dev:
    return ENV_DEV
def get_server_info():
    """Get host and port for the app (server), dispatching on environment."""
    location = select_run_location()
    if location == ENV_DEV:
        return get_server_info_on_local()
    # TODO: continue with other envs (falls through to None for now):
def get_redis_info():
    """Get credentials and host for redis, dispatching on environment."""
    location = select_run_location()
    if location == ENV_DEV:
        return get_redis_login_on_local()
    # TODO: continue with other envs (falls through to None for now):
def get_server_info_on_local():
    """Get host and port for the app (server) on localhost in a dictionary."""
    host_and_port = dict(host="127.0.0.1", port=5000)
    return host_and_port
def get_redis_login_on_local():
    """Get credentials and host for redis on localhost in a list."""
    # TODO: read the password from a config file automatically...
    return ["localhost", 6379]
def get_server_info_on_bluemix():
    """Get host and port for app (server) on IBM Bluemix in a dictionary.

    Falls back to localhost defaults when the VCAP_APPLICATION variable
    (or an expected key inside it) is missing.
    """
    fallback = {"host": "127.0.0.1", "port": 5000}
    try:
        vcap = json.loads(os.environ["VCAP_APPLICATION"])
        return {"host": vcap["host"], "port": vcap["port"]}
    except KeyError:
        # env variable or key absent: assume we are on localhost
        return fallback
def get_redis_login_on_bluemix():
    """Get credentials and host for redis on IBM Bluemix in a list.

    Returns [host, port, password], or an empty list when the
    VCAP_SERVICES variable (or an expected key inside it) is missing.
    """
    try:
        vcap = json.loads(os.environ["VCAP_SERVICES"])
        return [vcap["host"], vcap["port"], vcap["password"]]
    except KeyError:
        # env variable or key absent: no usable credentials
        return []
def get_redis_login_on_aws():
    """Get credentials and host for redis on AWS in dictionary."""
    # Not implemented yet; callers receive an empty dictionary.
    return {}
| {
"repo_name": "FiB3/pyWebRedis",
"path": "src/auxil.py",
"copies": "1",
"size": "2408",
"license": "mit",
"hash": 4189479809760018400,
"line_mean": 26.3636363636,
"line_max": 82,
"alpha_frac": 0.5884551495,
"autogenerated": false,
"ratio": 3.840510366826156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9928280959918708,
"avg_score": 0.00013691128148959474,
"num_lines": 88
} |
# Metacharacters of this small regex dialect.
ANYCHAR = '.'; ESCAPE = '\\'
REGIONA = '['; REGIONO = ']'; RANGE = '-'; COMPLEMENT = '^'
GROUPA = '(' ; GROUPO = ')'; ALTERNATIVE = '|'
PERHAPS = '?' ; STAR = '*'; JUST_ONE_and_STAR = '+'
EXTENSION = '(?'; SKIPSTORE = '?:'

def match_region(ch, pattern):
    """ region: a list of comparation chars and ranges in [] or [^]

    Returns whether ch belongs to the character class in pattern
    (e.g. '[a-z]'); the result is inverted when the class starts with '^'.
    """
    if pattern[1]==COMPLEMENT: booly=False; booln=True; i=2
    else: booly=True; booln=False; i=1
    # BUG FIX: iterate only up to (not including) the trailing ']'.  The
    # original loop also compared the closing bracket itself, so e.g.
    # match_region(']', '[a-z]') incorrectly returned True.
    while i < len(pattern)-1:
        if pattern[i]==ESCAPE:
            if pattern[i+1]==ch: return booly
            else: i+=2
        elif i<len(pattern)-2 and pattern[i+1]==RANGE:
            if pattern[i]<=ch<=pattern[i+2]: return booly
            else: i+=2
        elif pattern[i]==ch: return booly
        else: i+=1
    return booln
def match_simple_token(sarg, i, token):
    """Try to match one non-group token against sarg at position i.

    On success sets the global MATCHLEN to the number of characters
    consumed (1 except for back-references) and returns True; on failure
    returns False and leaves MATCHLEN untouched.
    """
    global MATCHLEN
    whitespace = (' ', '\t', '\r', '\n', '\f', '\v')
    lead = token[0]
    current = sarg[i]
    if lead == ESCAPE:
        escaped = token[1]
        if escaped == 's' and current in whitespace:
            MATCHLEN = 1
            return True
        elif escaped == 'S' and current not in whitespace:
            MATCHLEN = 1
            return True
        elif '0' <= escaped <= '9':
            # \N back-reference: compare against the N-th captured group
            captured = xGroups[int(escaped)]
            if sarg[i:i + len(captured)] == captured:
                MATCHLEN = len(captured)
                return True
        elif current == escaped:
            # note: this branch also lets e.g. '\s' match a literal 's'
            MATCHLEN = 1
            return True
    elif lead == REGIONA:
        if match_region(current, token):
            MATCHLEN = 1
            return True
    elif lead == ANYCHAR or lead == current:
        MATCHLEN = 1
        return True
    return False
def strip_groupattris(s):
    """Strip the group parentheses (and any '?:', '?=' or '?!' attribute)
    from a group token, returning the bare inner pattern."""
    prefixes = (EXTENSION + ':', EXTENSION + '=', EXTENSION + '!')
    if s.startswith(prefixes):
        inner = s[3:]
    else:
        inner = s[1:]
    if inner.endswith(')'):
        return inner[:-1]
    # token carried a trailing character after ')': drop both
    return inner[:-2]
# Module-level state: TokenListCache memoizes parsed patterns/groups;
# xGroups accumulates the most recently captured group texts.
TokenListCache = {}; xGroups = []
def parse_pattern(pattern, nested):
    """
    tokens are:
      1. patterns included in brackets (parsing is recursive)
      2. regions
      3. \\ together with the escaped character
      4. periods
      5. simple characters
    All paired to 2-tuples with their trailing quantifiers or None
    """
    # memoized: the same pattern string is only ever parsed once
    if pattern in TokenListCache.keys(): return TokenListCache[pattern]
    tokens=[]; i=0; pL = len(pattern)
    while i < pL:
        c = pattern[i]
        if c==REGIONA:
            # scan for the unescaped closing bracket of the region
            k = pattern.find(REGIONO, i)
            if k==-1: raise ValueError('Unmatched '+REGIONA+' in '+pattern)
            while pattern[k-1] == ESCAPE:
                k = pattern.find(REGIONO, k+1)
                if k==-1: raise ValueError('Unmatched '+REGIONA+' in '+pattern)
            tokens.append(pattern[i:k+1]); i=k+1
        elif c == ANYCHAR: tokens.append(ANYCHAR); i+=1
        elif c == ESCAPE:
            # keep the backslash together with the escaped character
            if i<pL-1: tokens.append(pattern[i:i+2]); i+=2
            else: raise ValueError('Trailing '+ESCAPE)
        elif nested and c==GROUPA:
            # collect a (possibly nested) group up to its balancing ')'
            resu = GROUPA; k=i+1; lv=1
            while lv > 0:
                cc = pattern[k]
                if cc == ESCAPE: resu+=cc; resu+=pattern[k+1]; k+=2; continue
                if cc == GROUPA: lv+=1
                elif cc == GROUPO: lv-=1
                resu+=cc; k+=1
            tokens.append(resu); i=k; kore=strip_groupattris(resu)
            if resu not in TokenListCache.keys():
                TokenListCache[resu] = []
                # groups are parsed to lists of token lists, each an alternative from '|'
                if kore[0] != GROUPA:
                    for s in kore.split(ALTERNATIVE):
                        TokenListCache[resu].append(parse_pattern(s, True))
                else:
                    TokenListCache[resu].append(parse_pattern(kore, True))
        else: tokens.append(c); i+=1
        # attach the trailing quantifier; '+' becomes "one, then star"
        if i<pL:
            if pattern[i]==PERHAPS: tokens[-1]=(tokens[-1],PERHAPS); i+=1
            elif pattern[i]==STAR: tokens[-1]=(tokens[-1],STAR); i+=1
            elif pattern[i]==JUST_ONE_and_STAR:
                tokens.append((tokens[-1],STAR)); tokens[-2]=(tokens[-2],None); i+=1
            else: tokens[-1] = (tokens[-1],None)
        else: tokens[-1] = (tokens[-1],None)
    TokenListCache[pattern]=tokens; return tokens
def try_index(sarg, tokens, ns):
    """Try to match the token list against sarg starting at the global
    MATCHEND; on success MATCHEND is advanced past the matched text.

    sarg: subject string; tokens: list of (token, quantifier) pairs from
    parse_pattern; ns: whether nested (group) syntax is enabled.
    """
    tkL=len(tokens)-1; L=len(sarg); global MATCHEND; compix=MATCHEND
    for tix in range(tkL+1):
        # out of input: succeed only if every remaining token is optional
        if compix==L: return any([pair[1] for pair in tokens[tix:]])
        ctk, qua = tokens[tix]
        # quantified token: first try skipping it entirely
        if qua and tix<tkL and try_index(sarg, tokens[tix+1:], ns): return True
        if ns and ctk[0] == GROUPA:
            # group token: try each '|' alternative in turn
            if any([try_index(sarg, t, True) for t in TokenListCache[ctk]]):
                if ctk[1:3] != SKIPSTORE: xGroups.append(sarg[compix:MATCHEND])
                # '(?=' lookahead consumes no input; a matching '(?!' fails
                if ctk.startswith(EXTENSION+'='): continue
                elif ctk.startswith(EXTENSION+'!'): return False
                compix=MATCHEND
                if qua==STAR:
                    T = TokenListCache[ctk]
                    # repetition: try the remaining tokens, else consume once more
                    while compix<L:
                        if tix<tkL and try_index(sarg, tokens[tix+1:], ns): return True
                        if not any([try_index(sarg, t, ns) for t in T]): break
                        compix=MATCHEND
            else:
                # a '(?!' group that fails to match is satisfied
                if ctk.startswith(EXTENSION+'!'): continue
                if tix<tkL or not qua: return False
        elif match_simple_token(sarg, compix, ctk):
            compix+=MATCHLEN; MATCHEND=compix
            if qua==STAR:
                # repetition: try the remaining tokens, else consume once more
                while compix<L:
                    if tix<tkL and try_index(sarg, tokens[tix+1:], ns): return True
                    if not match_simple_token(sarg, compix, ctk): break
                    compix+=MATCHLEN; MATCHEND=compix
        elif tix<tkL or not qua: return False
    return True
def xsearch(sarg, pattern, nested=False, start=0):
    """Search sarg for pattern from position start; return the matched
    (begin, end) span, or () when no match is found."""
    global MATCHEND
    tokens = parse_pattern(pattern, nested)
    if nested:
        global xGroups
        xGroups = []
    length = len(sarg)
    while start < length:
        MATCHEND = start
        if try_index(sarg, tokens, nested):
            return (start, MATCHEND)
        start += 1
    return ()
def xfinditer(sarg, pattern, nested=False, start=0):
    """Generator over non-overlapping (begin, end) match spans of
    pattern in sarg, starting from position start."""
    tokens = parse_pattern(pattern, nested); n=0; L=len(sarg); global MATCHEND
    if nested: global xGroups; xGroups=[]
    while start<L:
        # resume from the end of the previous match, if any
        if n: start=n
        MATCHEND=start
        if try_index(sarg, tokens, nested): n=MATCHEND; yield (start, MATCHEND)
        else: n=0; start+=1
    # BUG FIX: the original ended with 'raise StopIteration()', which is a
    # RuntimeError inside a generator since Python 3.7 (PEP 479); simply
    # falling off the end (or 'return') terminates the iteration.
    return
def xmatch(sarg, pattern, nested=False):
    """ checks, whether sarg as the whole matches the pattern """
    global MATCHEND
    tokens = parse_pattern(pattern, nested)
    MATCHEND = 0
    if nested:
        global xGroups
        xGroups = []
    return bool(try_index(sarg, tokens, nested) and MATCHEND == len(sarg))
def xsplit(sarg, pattern, nested=False):
    """Split sarg on every occurrence of pattern, returning the list of
    pieces between matches (including the trailing piece).

    BUG FIX: the original appended sarg[:match_start] for every match --
    so each later piece repeated all earlier text -- and finally
    evaluated `list + str`, which raises TypeError on every call.
    Pieces now start at the end of the previous match, and the trailing
    residue is appended to the result list.
    """
    pieces = []
    residue = 0
    span = xsearch(sarg, pattern, nested=nested)
    while span:
        pieces.append(sarg[residue:span[0]])
        residue = span[1]
        span = xsearch(sarg, pattern, nested, span[1])
    pieces.append(sarg[residue:])
    return pieces
def xreplace(sarg, pattern, subst, nested=False):
    """Replace every match of pattern in sarg with subst.  With nested
    patterns, \\0, \\1, ... in subst are expanded from captured groups."""
    spans = []
    span = xsearch(sarg, pattern, nested)
    while span:
        spans.append(span)
        span = xsearch(sarg, pattern, nested, span[1])
    if nested:
        for idx, captured in enumerate(xGroups):
            subst = subst.replace(ESCAPE + str(idx), captured)
    result = sarg
    # splice right-to-left so earlier spans keep their offsets
    for begin, end in reversed(spans):
        result = result[:begin] + subst + result[end:]
    return result
def xfuncreplace(sarg, pattern, f, nested=False):
    """Replace every match of pattern in sarg with f(matched_text)."""
    spans = []
    span = xsearch(sarg, pattern, nested)
    while span:
        spans.append(span)
        span = xsearch(sarg, pattern, nested, span[1])
    result = sarg
    # splice right-to-left so earlier spans keep their offsets
    for begin, end in reversed(spans):
        result = result[:begin] + f(result[begin:end]) + result[end:]
    return result
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/577251_Simple_regex_engine_elementary_/recipe-577251.py",
"copies": "1",
"size": "7508",
"license": "mit",
"hash": -8155037987477256000,
"line_mean": 41.6590909091,
"line_max": 90,
"alpha_frac": 0.5663292488,
"autogenerated": false,
"ratio": 3.2558542931483085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43221835419483084,
"avg_score": null,
"num_lines": null
} |
from dummyprobe import DummyProbe
import subprocess
import platform
import re
import com.xhaus.jyson.JysonCodec as json
import logging
logger = logging.getLogger(__name__)
class ExecProbe(DummyProbe):
    """Probe that runs a shell command and emits its output lines.

    Optional input properties: "regex" (named-group pattern applied to
    each output line), "metrics" (group names parsed as floats), "terms"
    (group names emitted as strings) and "decimalMark" (character
    replaced by '.' before float parsing).  NOTE: this is Jython /
    Python-2 source (uses the "except Exception, ex" syntax).
    """
    def initialize(self):
        # Compile the parsing regex (if any) and index metric/term names
        # into dicts for fast membership tests in tick().
        if self.getInputProperty("regex") != None:
            logger.info("Got regex for parsing: %s", self.getInputProperty("regex"))
            self.groupRe = re.compile(self.getInputProperty("regex"))
        else:
            self.groupRe = None
        self.metrics = {}
        if self.getInputProperty("metrics") != None:
            for metric in self.getInputProperty("metrics"):
                self.metrics[metric] = 1
        self.terms = {}
        if self.getInputProperty("terms") != None:
            for term in self.getInputProperty("terms"):
                self.terms[term] = 1
    def tick(self):
        # Run the configured command and feed each output line through
        # processData, either raw or decomposed via the regex groups.
        stream = subprocess.Popen(self.getInputProperty("command"), shell=True, bufsize=0, stdout=subprocess.PIPE)
        for line in stream.stdout:
            line = line.rstrip()
            out = {}
            if self.groupRe == None:
                # no regex configured: emit the whole line as the value
                self.processData({ "@timestamp": self.nowDt(), "value" : line })
            else:
                out["@timestamp"] = self.nowDt()
                out["command"] = self.getInputProperty("command")
                out["host"] = platform.node()
                out["class"] = "exec"
                matches = re.match(self.groupRe, line)
                metrics = {}
                terms = {}
                if matches:
                    # split named groups into metrics, terms and plain fields
                    for key in matches.groupdict():
                        if key in self.metrics:
                            metrics[key] = matches.group(key)
                        elif key in self.terms:
                            terms[key] = matches.group(key)
                        else:
                            out[key] = matches.group(key)
                    self.processData(out)
                    # emit one record per metric, parsed as a float
                    for key in metrics:
                        try:
                            out["metric"] = key
                            if self.getInputProperty("decimalMark"):
                                metrics[key] = metrics[key].replace(self.getInputProperty("decimalMark"), ".")
                            out["value"] = float(metrics[key])
                            self.processData(out)
                        except Exception, ex:
                            logger.warning("Failure to parse %s as float for metric %s", key, metrics[key])
                    # drop any float value before emitting the term records
                    if 'value' in out:
                        del out['value']
                    for key in terms:
                        out["metric"] = key
                        out["term"] = str(terms[key])
                        self.processData(out)
                else:
                    logger.debug("Discarding line \"%s\", no match", line)
| {
"repo_name": "filipealmeida/probespawner",
"path": "execprobe.py",
"copies": "1",
"size": "2263",
"license": "unlicense",
"hash": 6497893528812113000,
"line_mean": 30.4305555556,
"line_max": 108,
"alpha_frac": 0.6482545294,
"autogenerated": false,
"ratio": 3.2514367816091956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9158977401659247,
"avg_score": 0.048142781869989534,
"num_lines": 72
} |
from __future__ import division
import os
import shutil
import subprocess
import tempfile
import numpy as np
from .coords import angstrom
__all__ = ['MopacSolver']
def MopacSolver(cmd='mopac', method='PM7', workdir=None):
    """
    Create a solver that wraps `MOPAC <http://openmopac.net>`_.

    Mopac needs to be installed on the system.

    :param str cmd: MOPAC executable
    :param str method: model to calculate energy
    :param workdir: optional existing directory to run MOPAC in; when
        None a temporary directory is created and removed on close

    This is a coroutine: send in ``(atoms, lattice)`` pairs and receive
    ``(energy, gradients)`` back (converted from MOPAC's kcal/mol).
    """
    kcal = 1 / 627.503  # conversion factor: kcal/mol -> atomic units
    tmpdir = workdir or tempfile.mkdtemp()
    try:
        atoms, lattice = yield
        while True:
            # 1SCF: single-point calculation; GRADIENTS: request forces
            mopac_input = '{} 1SCF GRADIENTS\n\n\n'.format(method) + '\n'.join(
                '{} {} 1 {} 1 {} 1'.format(el, *coord) for el, coord in atoms
            )
            if lattice is not None:
                # 'Tv' rows are the translation vectors for periodic systems
                mopac_input += '\n' + '\n'.join(
                    'Tv {} 1 {} 1 {} 1'.format(*vec) for vec in lattice
                )
            input_file = os.path.join(tmpdir, 'job.mop')
            with open(input_file, 'w') as f:
                f.write(mopac_input)
            subprocess.check_call([cmd, input_file])
            with open(os.path.join(tmpdir, 'job.out')) as f:
                # heat of formation is reported in kcal/mol
                energy = float(
                    next(l for l in f if 'FINAL HEAT OF FORMATION' in l).split()[5]
                )
                next(l for l in f if 'FINAL POINT AND DERIVATIVES' in l)
                next(f)
                next(f)
                # one gradient component per line: 3 per atom, plus 3 rows
                # for the lattice vectors when the system is periodic
                gradients = np.array(
                    [
                        [float(next(f).split()[6]) for _ in range(3)]
                        for _ in range(len(atoms) + (0 if lattice is None else 3))
                    ]
                )
            atoms, lattice = yield energy * kcal, gradients * kcal / angstrom
    finally:
        # only remove the scratch directory if we created it ourselves
        if tmpdir != workdir:
            shutil.rmtree(tmpdir)
def GenericSolver(f, *args, **kwargs):
    """Coroutine solver computing gradients of ``f`` by five-point
    finite differences.

    Send ``(atoms, lattice)`` pairs in; receive ``(energy, gradients)``
    back.  The keyword ``delta`` (default 1e-3) sets the step size; all
    other arguments are forwarded to ``f(atoms, lattice, ...)``.
    """
    delta = kwargs.pop('delta', 1e-3)
    sample_steps = (-2, -1, 1, 2)
    atoms, lattice = yield
    while True:
        energy = f(atoms, lattice, *args, **kwargs)
        species = [sp for sp, _ in atoms]
        coords = np.array([xyz for _, xyz in atoms])
        gradients = np.zeros(coords.shape)
        # displace each atomic coordinate and sample f around it
        for idx in range(coords.shape[0]):
            for axis in range(3):
                samples = {}
                for step in sample_steps:
                    displaced = coords.copy()
                    displaced[idx, axis] += step * delta
                    samples[step] = f(list(zip(species, displaced)), lattice,
                                      *args, **kwargs)
                gradients[idx, axis] = _diff5(samples, delta)
        if lattice is not None:
            # same scheme applied to the three lattice vectors
            lattice_grads = np.zeros((3, 3))
            for vec in range(3):
                for axis in range(3):
                    samples = {}
                    for step in sample_steps:
                        displaced = lattice.copy()
                        displaced[vec, axis] += step * delta
                        samples[step] = f(atoms, displaced, *args, **kwargs)
                    lattice_grads[vec, axis] = _diff5(samples, delta)
            gradients = np.vstack((gradients, lattice_grads))
        atoms, lattice = yield energy, gradients / angstrom
def _diff5(x, delta):
return (1 / 12 * x[-2] - 2 / 3 * x[-1] + 2 / 3 * x[1] - 1 / 12 * x[2]) / delta
| {
"repo_name": "azag0/pyberny",
"path": "src/berny/solvers.py",
"copies": "1",
"size": "3411",
"license": "mpl-2.0",
"hash": 1405582267500833300,
"line_mean": 36.0760869565,
"line_max": 83,
"alpha_frac": 0.4986807388,
"autogenerated": false,
"ratio": 3.6716899892357375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46703707280357376,
"avg_score": null,
"num_lines": null
} |
from __future__ import unicode_literals
from chatbot_reply import Script, rule
class HokeyPokeyScript(Script):
    """Chatbot script: greetings, moods, knock-knock jokes and an
    interactive hokey pokey dance that cycles through body parts.

    Persistent state lives in self.botvars: "mood" ('good' or 'bad'),
    "bodypart" (the next part to dance with) and "danced" (whether the
    bot has danced yet).
    """
    def setup(self):
        self.botvars["mood"] = "good"
        self.botvars["bodypart"] = "right foot"
        self.botvars["danced"] = False
        # dance order; next_body_part() cycles through this list
        self.bodyparts = ['right foot', 'left foot', 'right arm',
                          'left arm', 'whole self']
    @rule("help (hokey pokey|fun stuff|knock knock|jokes)")
    def rule_help_fun_stuff(self):
        return ["Tell me a knock knock joke please!",
                "Do you know any knock knock jokes? Tell me one.",
                "Ask me if I can do the hokey pokey."]
    @rule("how are you doing")
    def rule_how_are_you_doing(self):
        mood = self.botvars["mood"]
        return "I'm in a {0} mood.".format(mood)
    @rule("get grumpy")
    def rule_get_grumpy(self):
        self.botvars["mood"] = "bad"
        return "Now I'm grouchy."
    @rule("get happy")
    def rule_get_happy(self):
        self.botvars["mood"] = "good"
        return "I feel much better."
    @rule("hey [there]")
    def rule_hey_opt(self):
        # reply depends on the current mood
        if self.botvars["mood"] == "good":
            return "<hello>"
        else:
            return "Hay is for horses."
    @rule("knock knock")
    def rule_knock_knock(self):
        return "Who's there?"
    @rule("_*", previous_reply="whos there")
    def rule_star_prev_who_is_there(self):
        return "{raw_match0} who?"
    @rule("_*", previous_reply="* who")
    def rule_star_prev_star_who(self):
        return "Lol {raw_match0}! That's a good one!"
    @rule("put your _* in")
    def rule_put_your_star_in(self):
        return ("I put my {match0} in, I put my {match0} out, "
                "I shake it all about!")
    @rule("where are you in the dance")
    def rule_where_are_you_in_the_dance(self):
        return "I'm about to use my {0}.".format(self.botvars["bodypart"])
    @rule("back to the right foot")
    def rule_back_to_the_right_foot(self):
        # reset the dance cycle to its starting body part
        self.botvars["bodypart"] = "right foot"
        return "OK, I'm back on the right foot."
    @rule("what would the next one be")
    def rule_what_would_the_next_one_be(self):
        next_part = self.next_body_part(self.botvars["bodypart"])
        return "After {0} comes {1}.".format(self.botvars["bodypart"],
                                             next_part)
    @rule("skip to the next one")
    def rule_skip_to_the_next_one(self):
        self.botvars["bodypart"] = self.next_body_part(self.botvars["bodypart"])
        return "OK, when I dance I'll use my {0}.".format(self.botvars["bodypart"])
    @rule("[*] do the hokey pokey")
    def rule_do_the_hokey_pokey(self):
        # dance with the current body part, then advance to the next one
        self.botvars["danced"] = True
        bodypart = self.botvars["bodypart"]
        self.botvars["bodypart"] = self.next_body_part(bodypart)
        return "<put your {0} in>".format(bodypart)
    @rule("(have you done|did you do) the hokey pokey")
    def rule_have_you_done_the_hokey_pokey(self):
        if self.botvars["danced"]:
            return "Yes!"
        else:
            return "No, but I'd like to!"
    @rule("do you know [how to do] the hokey pokey")
    def rule_can_you_do_the_hokey_pokey(self):
        if self.botvars["danced"]:
            return "Yes!"
        else:
            return "I think so! I'd like to try!"
    def next_body_part(self, bodypart):
        # cycle through self.bodyparts, wrapping around at the end
        return self.bodyparts[(self.bodyparts.index(bodypart) + 1)
                              % len(self.bodyparts)]
| {
"repo_name": "gazally/indigo-chatbot",
"path": "example_scripts/dance.py",
"copies": "1",
"size": "3626",
"license": "mpl-2.0",
"hash": 3315244519027177500,
"line_mean": 34.2038834951,
"line_max": 83,
"alpha_frac": 0.5730832874,
"autogenerated": false,
"ratio": 3.2903811252268604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43634644126268607,
"avg_score": null,
"num_lines": null
} |
from __future__ import unicode_literals
from chatbot_reply import Script, rule
class ValveScript(Script):
    """Chatbot script controlling a simulated water system: a main
    shutoff valve, a drain valve and a water leak sensor.

    Per-user hardware state is kept in self.uservars
    ("mainvalvestatus", "drainvalvestatus", "leaksensorstatus").
    """
    def setup(self):
        # Pattern alternates usable in rules as %a:mainvalve etc.
        self.alternates = {}
        self.alternates["mainvalve"] = \
            "((shutoff|shut off|main|main water|city water) valve)"
        self.alternates["drainvalve"] = "([water] drain valve)"
        self.alternates["anyvalve"] = \
            "({0}|{1})".format(self.alternates["mainvalve"],
                               self.alternates["drainvalve"])
    def setup_user(self, user):
        # Initial simulated hardware state for each user.
        self.uservars["mainvalvestatus"] = "open"
        self.uservars["drainvalvestatus"] = "closed"
        self.uservars["leaksensorstatus"] = "dry"
    @rule("status")
    def rule_status(self):
        return ("Here is where I would tell you everything I know about "
                "the shutoff valve and the drain valve, as well as the water "
                "sensors.")
    @rule("valve status")
    def rule_valve_status(self):
        return "<shutoff valve status> <drain valve status> <water sensor status>"
    @rule("_%a:mainvalve status")
    def rule_what_is_the_mainvalve_status(self):
        return "The {{match0}} is {0}.".format(self.mainvalvestatus())
    @rule("_%a:drainvalve status")
    def rule_what_is_the_drainvalve_status(self):
        return "The {{match0}} is {0}.".format(self.drainvalvestatus())
    @rule("(tell me about the|how is the|what is [the]) _%a:anyvalve [status]")
    def rule_what_is_the_anyvalve_status(self):
        return "<{match0} status>"
    @rule("is the _%a:anyvalve (open|closed)")
    def rule_is_the_anyvalve_open_or_closed(self):
        return "<{match0} status>"
    @rule("open [the] _%a:mainvalve")
    def rule_open_the_mainvalve(self):
        # refuse to open while draining or while a leak is detected
        if self.drainvalvestatus() == "open":
            return "The drain valve is open. Please close it before opening the {match0}."
        if self.leaksensorstatus() == "wet":
            return "<leak sensor status> Please dry it and reset it before opening the {match0}."
        self.tellmainvalve("open")
        return "I'll tell the {match0} to open" + self.stall_for_time()
    @rule("open [the] _%a:drainvalve")
    def rule_open_the_drainvalve(self):
        if self.mainvalvestatus() == "open":
            return "The shutoff valve is open. Please close it first."
        else:
            self.telldrainvalve("open")
            return "I'll tell the {match0} to open" + self.stall_for_time()
    @rule("close [the] _%a:mainvalve")
    def rule_close_the_mainvalve(self):
        self.tellmainvalve("close")
        return "I'll tell the {match0} to close" + self.stall_for_time()
    @rule("close [the] _%a:drainvalve")
    def rule_close_the_drainvalve(self):
        self.telldrainvalve("close")
        return "I will tell the {match0} to close" + self.stall_for_time()
    def stall_for_time(self):
        # Random reply suffix so repeated commands don't sound canned.
        return self.choose([" and get back to you shortly.",
                            ". Give me just a moment.",
                            ". I'll check back with you shortly.",
                            ". I'll check back with you in a moment.",
                            " and get back to you in a moment.",
                            " and get back to you in just a moment."])
    @rule("_(open|close) it", previous_reply="* shutoff valve * drain valve *")
    def rule_open_close_it_previous_both_valves(self):
        # "it" is ambiguous when the previous reply mentioned both valves
        return "What do you want me to {match0}?"
    @rule("_(open|close) it", previous_reply="* _%a:anyvalve [*]")
    def rule_open_close_it_previous_any_valve(self):
        return "<{match0} the {botmatch0}>"
    @rule("_(open|close) [it]")
    def rule_open_close_it(self):
        return "What do you want me to {match0}?"
    @rule("[the] _%a:anyvalve", previous_reply="what do you want me to _(open|close)")
    def rule_the_anyvalve_with_previous_whaddayawant(self):
        return "OK, <{botmatch0} the {match0}>"
    @rule("[turn [the]] water on")
    def rule_turn_the_water_on(self):
        if self.mainvalvestatus() == "open":
            return "It's already on."
        if self.leaksensorstatus() == "wet":
            return "<leak sensor status> Please dry it and reset it before turning the water on."
        if self.drainvalvestatus() == "open":
            # close the drain first, then redirect to the open rule
            self.telldrainvalve("close")
            return "I closed the drain valve and <open shutoff valve>"
        else:
            return "<open shutoff valve>"
    @rule("[turn [the]] water off")
    def rule_turn_the_water_off(self):
        return "<close shutoff valve>"
    @rule("drain [the] (water|house)")
    def rule_drain_the_house(self):
        if self.drainvalvestatus() == "open":
            return "It's already drained."
        if self.mainvalvestatus() == "open":
            # shut off the supply first, then redirect to the open rule
            self.tellmainvalve("close")
            return "I closed the main valve and <open drain valve>"
        else:
            return "<open drain valve>"
    @rule("(water|leak) sensor status")
    def rule_water_sensor_status(self):
        return "The water leak sensor is {0}.".format(self.leaksensorstatus())
    @rule("sensor wet")
    def rule_sensor_wet(self):
        # test hook: simulate the leak sensor getting wet
        self.uservars["leaksensorstatus"] = "wet"
        return "Now the leak sensor is wet."
    @rule("sensor dry")
    def rule_sensor_dry(self):
        # test hook: simulate drying/resetting the leak sensor
        self.uservars["leaksensorstatus"] = "dry"
        return "Now the leak sensor is dry."
    def mainvalvestatus(self):
        # current shutoff-valve state: "open" or "closed"
        return self.uservars["mainvalvestatus"]
    def drainvalvestatus(self):
        # current drain-valve state: "open" or "closed"
        return self.uservars["drainvalvestatus"]
    def tellmainvalve(self, todo):
        # Simulated actuator for the shutoff valve; todo is "open"/"close".
        if todo == "close":
            newstate = "closed"
        else:
            newstate = "open"
        self.uservars["mainvalvestatus"] = newstate
    def telldrainvalve(self, todo):
        # Simulated actuator for the drain valve; todo is "open"/"close".
        if todo == "close":
            newstate = "closed"
        else:
            newstate = "open"
        self.uservars["drainvalvestatus"] = newstate
    def leaksensorstatus(self):
        # current leak-sensor state: "wet" or "dry"
        return self.uservars["leaksensorstatus"]
| {
"repo_name": "gazally/indigo-chatbot",
"path": "test/test_scripts/valves.py",
"copies": "1",
"size": "6135",
"license": "mpl-2.0",
"hash": 8341321681530088000,
"line_mean": 36.8703703704,
"line_max": 97,
"alpha_frac": 0.5926650367,
"autogenerated": false,
"ratio": 3.548293811451706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9630097547611578,
"avg_score": 0.002172260108025662,
"num_lines": 162
} |
from __future__ import unicode_literals
import string
from chatbot_reply import Script, rule
class TutorialScript(Script):
    """Example script demonstrating chatbot_reply rule features:
    alternates, wildcards, weights, optionals, user variables and
    <...> redirects."""
    def setup(self):
        # %a:colors alternate usable inside rule patterns
        self.alternates = {"colors": "(red|yellow|orange|green|blue|indigo|violet)"}
        self.help_ideas = ["valve", "eliza", "fun stuff"]
    @rule("random help")
    def rule_random_help(self):
        # redirect to the help rule for a randomly chosen topic
        return "<help {0}>".format(self.choose(self.help_ideas))
    @rule("*")
    def rule_star(self):
        # catch-all for anything no other rule matches
        return ["I don't understand that. <random help>",
                "Let's change the subject. <random help>"]
    @rule("hello robot")
    def rule_hello_robot(self):
        return "Hello, carbon-based life form!"
    @rule("how are you", weight=2)
    def rule_how_are_you(self):
        return ["I'm great, how are you?",
                "Doing awesome, you?",
                "Great! You?",
                "I'm fine, thanks for asking!"]
    @rule("say something random")
    def rule_say_something_random(self):
        word = self.choose(["it's fun", "potato"])
        return "I like being random because {0}.".format(word)
    @rule("greetings")
    def rule_greetings(self):
        # weighted random replies: (text, weight) tuples
        return [("Hello!", 20),
                ("Buenas dias!", 25),
                ("Buongiorno!", 1)]
    @rule("_* told me to say _*")
    def rule_star2_told_me_to_say_star(self):
        return ['Why would {raw_match0} tell you to say "{match1}"?',
                'Are you just saying "{match1}" because {raw_match0} told you to?']
    @rule("i am _#1 years old")
    def rule_i_am_number1_years_old(self):
        return "{match0} isn't old at all!"
    @rule("who is _*")
    def rule_who_is_star(self):
        return "I don't know who {match0} is."
    @rule("i am @~3 years old")
    def rule_i_am_atsign3_years_old(self):
        return "Tell me that again, but with a number this time."
    @rule("i am * years old")
    def rule_i_am_star_years_old(self):
        return "Can you use a number instead?"
    @rule("are you a (bot|robot|computer|machine)")
    def rule_are_you_a_alt(self):
        return "Darn! You got me!"
    @rule("i am _(so|really|very) excited")
    def rule_i_am_alt_excited(self):
        return "What are you {match0} excited about?"
    @rule("i _(like|love) the color _*")
    def rule_i_alt_the_color_star(self):
        return ["What a coincidence! I {match0} that color too!",
                "The color {match1} is one of my favorites",
                "Really? I {match0} the color {match1} too!",
                "Oh I {match0} {match1} too!"]
    @rule("how [are] you")
    def rule_how_opt_you(self):
        return "I'm great, you?"
    @rule("what is your (home|office|cell) [phone] number")
    def rule_what_is_your_alt_opt_number(self):
        return "You can reach me at: 1 (800) 555-1234."
    @rule("i have a [red|green|blue] car")
    def rule_i_have_a_optalt_car(self):
        return "I bet you like your car a lot."
    @rule("[*] the matrix [*]")
    def rule_optstar_the_matrix_optstar(self):
        return "How do you know about the matrix?"
    @rule("what color is my _(red|blue|green|yellow) _*")
    def rule_what_color_is_my_alt_star(self):
        return "According to you, your {match1} is {match0}."
    @rule("my _* is _%a:colors")
    def rule_my_star_is_arrcolors(self):
        return "I've always wanted a {match1} {match0}."
    @rule("google _*", weight=10)
    def rule_google_star(self):
        return "OK, I'll google it. Jk, I'm not Siri."
    @rule("_* or whatever", weight=100)
    def rule_star_or_whatever(self):
        # strip "or whatever" and re-route the rest through matching
        return "Whatever. <{match0}>"
    @rule("hello")
    def rule_hello(self):
        return ["Hi there!", "Hey!", "Howdy!"]
    @rule("hi")
    def rule_hi(self):
        return "<hello>"
    @rule("my name is _@~3")
    def rule_my_name_is_star(self):
        # remember the user's name, stripping trailing punctuation
        name = self.match["raw_match0"].rstrip(string.punctuation)
        self.uservars["name"] = name
        return "It's nice to meet you, {0}.".format(name)
    @rule("what is my name")
    def rule_what_is_my_name(self):
        if "name" not in self.uservars:
            return "You never told me your name."
        else:
            return ["Your name is {0}.".format(self.uservars["name"]),
                    "You told me your name is {0}.".format(self.uservars["name"])]
    @rule("is my name %u:name")
    def rule_is_my_name(self):
        return "That's what you told me!"
| {
"repo_name": "gazally/indigo-chatbot",
"path": "example_scripts/examples.py",
"copies": "1",
"size": "4555",
"license": "mpl-2.0",
"hash": 8235274292606235000,
"line_mean": 32.0072463768,
"line_max": 84,
"alpha_frac": 0.5732162459,
"autogenerated": false,
"ratio": 3.2699210337401294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9309409705029585,
"avg_score": 0.006745514922108766,
"num_lines": 138
} |
# Creates the precomplete file containing the remove and rmdir application
# update instructions which is used to remove files and directories that are no
# longer present in a complete update. The current working directory is used for
# the location to enumerate and to create the precomplete file.
import sys
import os
def get_build_entries(root_path):
    """ Iterates through the root_path, creating a list for each file and
        directory. Excludes any file paths ending with channel-prefs.js or
        update-settings.ini and anything under a distribution/ directory.

        Returns (files, dirs): relative, '/'-separated paths, each list
        sorted in reverse order; directory entries carry a trailing '/'.
    """
    file_paths = set()
    dir_paths = set()
    for base, dir_names, file_names in os.walk(root_path):
        # path of this directory relative to root_path ('' at the root)
        rel_base = base[len(root_path) + 1:]
        for name in file_names:
            rel = os.path.join(rel_base, name).replace("\\", "/")
            excluded = (rel.endswith("channel-prefs.js")
                        or rel.endswith("update-settings.ini")
                        or "distribution/" in rel)
            if not excluded:
                file_paths.add(rel)
        for name in dir_names:
            rel = os.path.join(rel_base, name).replace("\\", "/") + "/"
            if "distribution/" not in rel:
                dir_paths.add(rel)
    return (sorted(file_paths, reverse=True), sorted(dir_paths, reverse=True))
def generate_precomplete(root_path):
    """ Creates the precomplete file containing the remove and rmdir
        application update instructions. The given directory is used
        for the location to enumerate and to create the precomplete file.
    """
    rel_path_precomplete = "precomplete"
    # If inside a Mac bundle use the root of the bundle for the path.
    if os.path.basename(root_path) == "Resources":
        root_path = os.path.abspath(os.path.join(root_path, '../../'))
        rel_path_precomplete = "Contents/Resources/precomplete"
    precomplete_file_path = os.path.join(root_path, rel_path_precomplete)
    # Open the file before enumerating so the precomplete file itself is
    # included in the generated entries, and use binary mode to prevent
    # OS-specific line endings.  BUG FIX: the original wrote str objects
    # to the binary-mode file, which raises TypeError on Python 3; the
    # instructions are now encoded explicitly (works on Python 2 as well),
    # and the file handle is closed deterministically via `with`.
    with open(precomplete_file_path, "wb") as precomplete_file:
        rel_file_path_list, rel_dir_path_list = get_build_entries(root_path)
        for rel_file_path in rel_file_path_list:
            precomplete_file.write(("remove \"" + rel_file_path + "\"\n").encode("utf-8"))
        for rel_dir_path in rel_dir_path_list:
            precomplete_file.write(("rmdir \"" + rel_dir_path + "\"\n").encode("utf-8"))
if __name__ == "__main__":
    # Script entry point: generate the precomplete file for the current
    # working directory.
    generate_precomplete(os.getcwd())
| {
"repo_name": "kostaspl/SpiderMonkey38",
"path": "config/createprecomplete.py",
"copies": "4",
"size": "2980",
"license": "mpl-2.0",
"hash": -4383991119488843000,
"line_mean": 43.4776119403,
"line_max": 80,
"alpha_frac": 0.6573825503,
"autogenerated": false,
"ratio": 3.5182998819362457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6175682432236246,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.