| Unnamed: 0 (int64, 0-10k) | function (string, length 79-138k) | label (string, 20 classes) | info (string, length 42-261) |
|---|---|---|---|
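Each row below pairs a Python `function` in which a single exception class has been masked as `__HOLE__`, a `label` naming the masked class, and an `info` string locating the sample in the ETHPy150Open corpus. Below is a minimal sketch of how such a row might be consumed, assuming rows arrive as plain dicts matching the schema above; the helper names (`fill_hole`, `check_sample`) are illustrative, not part of the dataset:

```python
HOLE = "__HOLE__"

def fill_hole(function_src, label):
    """Substitute the masked exception class back into the function body."""
    assert HOLE in function_src, "each sample carries at least one hole"
    return function_src.replace(HOLE, label)

def check_sample(row):
    """A sample is well-formed once the filled code names its label and no hole remains."""
    filled = fill_hole(row["function"], row["label"])
    return row["label"] in filled and HOLE not in filled

if __name__ == "__main__":
    row = {  # row 6,422 from this table, abridged
        "function": ("def is_int(self, text):\n"
                     "    try:\n"
                     "        int(text)\n"
                     "        return True\n"
                     "    except __HOLE__:\n"
                     "        return False\n"),
        "label": "ValueError",
        "info": "dataset/ETHPy150Open cloudera/hue/apps/oozie/src/oozie/models.py/Dataset.is_int",
    }
    print(check_sample(row))  # True
```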
6,400
|
def main():
'''Command line version of tool'''
import sys
import optparse
import json
class MyParser(optparse.OptionParser):
def format_epilog(self, formatter):
return self.epilog
usage = "%prog [options] filename(s)"
parser = MyParser(usage=usage, version ="%prog " + __version__, epilog =
"""
More information:
The ``--style`` option takes a file name. See the file `citekey-style.json` in the
examples directory: The file must include only a valid JSON structure of the form:
{
"name_template" : "v{_}_|l{}",
"max_names" : "2",
"name_name_sep" : "+",
"etal" : "etal",
"anonymous" : "anon",
"lower_name" : "False",
"article" : "%(names)s:%(year)s",
"book" : "%(names)s:%(year)s",
"misc" : "%(names)s:%(year)s",
"default_type" : "%(names)s:%(year)s"
}
""")
parser.add_option("-s", "--style", action="store", type="string", \
dest="style", default = '', help="File with label format (json)")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="Print INFO messages to stdout, default=%default")
# :TODO: Add an options group style_opts and an option for each style item
# in the style dictionary.
# get options
(options, args) = parser.parse_args()
if options.verbose:
biblabel_logger.setLevel(logging.INFO)
# update label style
if options.style:
try :
new_style = json.load(open(options.style))
citekey_label_style.update(new_style)
except __HOLE__:
biblabel_logger.error("Missing style file: %s" % options.style, exc_info=True)
exit(1)
except ValueError:
biblabel_logger.error("Invalid style file: %s. Style file should be JSON format dictionary" % options.style, exc_info=True)
exit(1)
# get database as text from .bib file(s) or stdin
if len(args) > 0 :
try :
src = ''.join(open(f).read() for f in args)
except :
biblabel_logger.error( 'Error in filelist')
else :
src = sys.stdin.read()
bfile = bibfile.BibFile()
bibgrammar.Parse(src, bfile)
used_citekeys = [] # stores created keys
for entry in bfile.entries:
label = entry.make_citekey(used_citekeys, citekey_label_style)
entry.citekey = label
used_citekeys.insert(0,label) # prepend to take advantage (in
# make_entry_citekey) of possibly sorted
# bfile
for entry in bfile.entries:
print entry
|
IOError
|
dataset/ETHPy150Open dschwilk/bibstuff/scripts/biblabel.py/main
|
6,401
|
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, __HOLE__):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
|
TypeError
|
dataset/ETHPy150Open django/django/django/db/backends/sqlite3/base.py/_sqlite_date_extract
|
6,402
|
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_timestamp(dt)
except (__HOLE__, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
|
ValueError
|
dataset/ETHPy150Open django/django/django/db/backends/sqlite3/base.py/_sqlite_date_trunc
|
6,403
|
def _sqlite_datetime_parse(dt, tzname):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, __HOLE__):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
return dt
|
TypeError
|
dataset/ETHPy150Open django/django/django/db/backends/sqlite3/base.py/_sqlite_datetime_parse
|
6,404
|
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, __HOLE__):
return None
return getattr(dt, lookup_type)
|
TypeError
|
dataset/ETHPy150Open django/django/django/db/backends/sqlite3/base.py/_sqlite_time_extract
|
6,405
|
def _sqlite_format_dtdelta(conn, lhs, rhs):
"""
LHS and RHS can be either:
- An integer number of microseconds
- A string representing a timedelta object
- A string representing a datetime
"""
try:
if isinstance(lhs, six.integer_types):
lhs = str(decimal.Decimal(lhs) / decimal.Decimal(1000000))
real_lhs = parse_duration(lhs)
if real_lhs is None:
real_lhs = backend_utils.typecast_timestamp(lhs)
if isinstance(rhs, six.integer_types):
rhs = str(decimal.Decimal(rhs) / decimal.Decimal(1000000))
real_rhs = parse_duration(rhs)
if real_rhs is None:
real_rhs = backend_utils.typecast_timestamp(rhs)
if conn.strip() == '+':
out = real_lhs + real_rhs
else:
out = real_lhs - real_rhs
except (ValueError, __HOLE__):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(out)
|
TypeError
|
dataset/ETHPy150Open django/django/django/db/backends/sqlite3/base.py/_sqlite_format_dtdelta
|
6,406
|
def parse_bytes(s):
if isinstance(s, six.integer_types + (float,)):
return s
if len(s) == 0:
return 0
if s[-2:-1].isalpha() and s[-1].isalpha():
if s[-1] == "b" or s[-1] == "B":
s = s[:-1]
units = BYTE_UNITS
suffix = s[-1].lower()
# Check if the variable is a string representation of an int
# without a units part. Assuming that the units are bytes.
if suffix.isdigit():
digits_part = s
suffix = 'b'
else:
digits_part = s[:-1]
if suffix in units.keys() or suffix.isdigit():
try:
digits = longint(digits_part)
except __HOLE__:
raise errors.DockerException(
'Failed converting the string value for memory ({0}) to'
' an integer.'.format(digits_part)
)
# Reconvert to long for the final result
s = longint(digits * units[suffix])
else:
raise errors.DockerException(
'The specified value for memory ({0}) should specify the'
' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
)
return s
|
ValueError
|
dataset/ETHPy150Open docker/docker-py/docker/utils/utils.py/parse_bytes
|
6,407
|
def get_server_number(ipport, ipport2server):
server_number = ipport2server[ipport]
server, number = server_number[:-1], server_number[-1:]
try:
number = int(number)
except __HOLE__:
# probably the proxy
return server_number, None
return server, number
|
ValueError
|
dataset/ETHPy150Open openstack/swift/test/probe/common.py/get_server_number
|
6,408
|
def get_policy(**kwargs):
kwargs.setdefault('is_deprecated', False)
# go through the policies and make sure they match the
# requirements of kwargs
for policy in POLICIES:
# TODO: for EC, pop policy type here and check it first
matches = True
for key, value in kwargs.items():
try:
if getattr(policy, key) != value:
matches = False
except __HOLE__:
matches = False
if matches:
return policy
raise SkipTest('No policy matching %s' % kwargs)
|
AttributeError
|
dataset/ETHPy150Open openstack/swift/test/probe/common.py/get_policy
|
6,409
|
def read(self, amount):
if len(self.buff) < amount:
try:
self.buff += next(self)
except __HOLE__:
pass
rv, self.buff = self.buff[:amount], self.buff[amount:]
return rv
|
StopIteration
|
dataset/ETHPy150Open openstack/swift/test/probe/common.py/Body.read
|
6,410
|
def reloadConfig():
reloadCommand = []
if args.command:
reloadCommand = shlex.split(args.command)
else:
logger.debug("No reload command provided, trying to find out how to" +
" reload the configuration")
if os.path.isfile('/etc/init/haproxy.conf'):
logger.debug("we seem to be running on an Upstart based system")
reloadCommand = ['reload', 'haproxy']
elif (os.path.isfile('/usr/lib/systemd/system/haproxy.service') or
os.path.isfile('/etc/systemd/system/haproxy.service')):
logger.debug("we seem to be running on systemd based system")
reloadCommand = ['systemctl', 'reload', 'haproxy']
elif os.path.isfile('/etc/init.d/haproxy'):
logger.debug("we seem to be running on a sysvinit based system")
reloadCommand = ['/etc/init.d/haproxy', 'reload']
else:
# if no haproxy exists (maybe running in a container)
logger.debug("no haproxy detected. won't reload.")
reloadCommand = None
if reloadCommand:
logger.info("reloading using %s", " ".join(reloadCommand))
try:
start_time = time.time()
pids = get_haproxy_pids()
subprocess.check_call(reloadCommand, close_fds=True)
# Wait until the reload actually occurs
while pids == get_haproxy_pids():
time.sleep(0.1)
logger.debug("reload finished, took %s seconds",
time.time() - start_time)
except __HOLE__ as ex:
logger.error("unable to reload config using command %s",
" ".join(reloadCommand))
logger.error("OSError: %s", ex)
except subprocess.CalledProcessError as ex:
logger.error("unable to reload config using command %s",
" ".join(reloadCommand))
logger.error("reload returned non-zero: %s", ex)
|
OSError
|
dataset/ETHPy150Open mesosphere/marathon-lb/marathon_lb.py/reloadConfig
|
6,411
|
def compareWriteAndReloadConfig(config, config_file):
# See if the last config on disk matches this, and if so don't reload
# haproxy
runningConfig = str()
try:
logger.debug("reading running config from %s", config_file)
with open(config_file, "r") as f:
runningConfig = f.read()
except __HOLE__:
logger.warning("couldn't open config file for reading")
if runningConfig != config:
logger.info(
"running config is different from generated config - reloading")
if writeConfigAndValidate(config, config_file):
reloadConfig()
else:
logger.warning("skipping reload: config not valid")
|
IOError
|
dataset/ETHPy150Open mesosphere/marathon-lb/marathon_lb.py/compareWriteAndReloadConfig
|
6,412
|
def return_detail(self, key):
"""
This will attempt to match a "detail" to look for in the room.
Args:
key (str): A detail identifier.
Returns:
detail (str or None): A detail matching the given key.
Notes:
A detail is a way to offer more things to look at in a room
without having to add new objects. For this to work, we
require a custom `look` command that allows for `look
<detail>` - the look command should defer to this method on
the current location (if it exists) before giving up on
finding the target.
Details are not season-sensitive, but are parsed for timeslot
markers.
"""
try:
detail = self.db.details.get(key.lower(), None)
except __HOLE__:
# this happens if no attribute details is set at all
return None
if detail:
season, timeslot = self.get_time_and_season()
detail = self.replace_timeslots(detail, timeslot)
return detail
return None
|
AttributeError
|
dataset/ETHPy150Open evennia/evennia/evennia/contrib/extended_room.py/ExtendedRoom.return_detail
|
6,413
|
@patch('beets.plugins.find_plugins')
def test_listener_params(self, mock_find_plugins):
test = self
class DummyPlugin(plugins.BeetsPlugin):
def __init__(self):
super(DummyPlugin, self).__init__()
for i in itertools.count(1):
try:
meth = getattr(self, 'dummy{0}'.format(i))
except __HOLE__:
break
self.register_listener('event{0}'.format(i), meth)
def dummy1(self, foo):
test.assertEqual(foo, 5)
def dummy2(self, foo=None):
test.assertEqual(foo, 5)
def dummy3(self):
# argument cut off
pass
def dummy4(self, bar=None):
# argument cut off
pass
def dummy5(self, bar):
test.assertFalse(True)
# more complex examples
def dummy6(self, foo, bar=None):
test.assertEqual(foo, 5)
test.assertEqual(bar, None)
def dummy7(self, foo, **kwargs):
test.assertEqual(foo, 5)
test.assertEqual(kwargs, {})
def dummy8(self, foo, bar, **kwargs):
test.assertFalse(True)
def dummy9(self, **kwargs):
test.assertEqual(kwargs, {"foo": 5})
d = DummyPlugin()
mock_find_plugins.return_value = d,
plugins.send('event1', foo=5)
plugins.send('event2', foo=5)
plugins.send('event3', foo=5)
plugins.send('event4', foo=5)
with self.assertRaises(TypeError):
plugins.send('event5', foo=5)
plugins.send('event6', foo=5)
plugins.send('event7', foo=5)
with self.assertRaises(TypeError):
plugins.send('event8', foo=5)
plugins.send('event9', foo=5)
|
AttributeError
|
dataset/ETHPy150Open beetbox/beets/test/test_plugins.py/ListenersTest.test_listener_params
|
6,414
|
def PILExporter(image, file_handle, extension='', **kwargs):
r"""
Given a file handle to write in to (which should act like a Python `file`
object), write out the image data. No value is returned.
Uses PIL to save the image and so supports most commonly used image
formats.
Parameters
----------
image : :map:`Image` or subclass
The image data to write out.
file_handle : `file`-like object
The file to write in to
"""
from PIL.Image import EXTENSION
# The extensions are only filled out when save or open are called - which
# may not have been called before we reach here. So let's make sure that
# pillow is properly initialised.
if not EXTENSION:
from PIL.Image import init, preinit
preinit()
init()
pil_image = image.as_PILImage()
# Also, the format kwarg of PIL/Pillow is a bit confusing and actually
# refers to the underlying algorithm and not the extension. Therefore,
# we need to reach into PIL/Pillow and grab the correct format for our
# given extension.
try:
pil_extension = EXTENSION[extension]
except __HOLE__:
raise ValueError('PIL/Pillow does not support the provided '
'extension: ({})'.format(extension))
pil_image.save(file_handle, format=pil_extension)
|
KeyError
|
dataset/ETHPy150Open menpo/menpo/menpo/io/output/image.py/PILExporter
|
6,415
|
def makeTodo(value):
"""
Return a L{Todo} object built from C{value}.
If C{value} is a string, return a Todo that expects any exception with
C{value} as a reason. If C{value} is a tuple, the second element is used
as the reason and the first element as the excepted error(s).
@param value: A string or a tuple of C{(errors, reason)}, where C{errors}
is either a single exception class or an iterable of exception classes.
@return: A L{Todo} object.
"""
if isinstance(value, str):
return Todo(reason=value)
if isinstance(value, tuple):
errors, reason = value
try:
errors = list(errors)
except __HOLE__:
errors = [errors]
return Todo(reason=reason, errors=errors)
|
TypeError
|
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/trial/unittest.py/makeTodo
|
6,416
|
def getTimeout(self):
"""
Returns the timeout value set on this test. Checks on the instance
first, then the class, then the module, then packages. As soon as it
finds something with a C{timeout} attribute, returns that. Returns
L{util.DEFAULT_TIMEOUT_DURATION} if it cannot find anything. See
L{TestCase} docstring for more details.
"""
timeout = util.acquireAttribute(self._parents, 'timeout',
util.DEFAULT_TIMEOUT_DURATION)
try:
return float(timeout)
except (__HOLE__, TypeError):
# XXX -- this is here because sometimes people will have methods
# called 'timeout', or set timeout to 'orange', or something
# Particularly, test_news.NewsTestCase and ReactorCoreTestCase
# both do this.
warnings.warn("'timeout' attribute needs to be a number.",
category=DeprecationWarning)
return util.DEFAULT_TIMEOUT_DURATION
|
ValueError
|
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/trial/unittest.py/TestCase.getTimeout
|
6,417
|
def decorate(test, decorator):
"""
Decorate all test cases in C{test} with C{decorator}.
C{test} can be a test case or a test suite. If it is a test suite, then the
structure of the suite is preserved.
L{decorate} tries to preserve the class of the test suites it finds, but
assumes the presence of the C{_tests} attribute on the suite.
@param test: The C{TestCase} or C{TestSuite} to decorate.
@param decorator: A unary callable used to decorate C{TestCase}s.
@return: A decorated C{TestCase} or a C{TestSuite} containing decorated
C{TestCase}s.
"""
try:
tests = iter(test)
except __HOLE__:
return decorator(test)
# At this point, we know that 'test' is a test suite.
_clearSuite(test)
for case in tests:
test.addTest(decorate(case, decorator))
return test
|
TypeError
|
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/trial/unittest.py/decorate
|
6,418
|
def _iterateTests(testSuiteOrCase):
"""
Iterate through all of the test cases in C{testSuiteOrCase}.
"""
try:
suite = iter(testSuiteOrCase)
except __HOLE__:
yield testSuiteOrCase
else:
for test in suite:
for subtest in _iterateTests(test):
yield subtest
# Support for Python 2.3
|
TypeError
|
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/trial/unittest.py/_iterateTests
|
6,419
|
def read(filename):
PID = 0
CVE = 1
CVSS = 2
RISK = 3
HOST = 4
PROTOCOL = 5
PORT = 6
NAME = 7
SYNOPSIS = 8
DESCRIPTION = 9
SOLUTION = 10
OUTPUT = 11
host_to_vulns = {}
vuln_to_hosts = {}
id_to_name = {}
host_to_ip = {}
id_to_severity = {}
try:
with open(filename, 'rb') as csvfile:
scanreader = csv.reader(csvfile, delimiter=",", quotechar="\"")
for row in scanreader:
# first get the IP address mapping (if not already discovered) and put it in host_to_ip
if row[HOST] not in host_to_ip:
try:
host_to_ip[row[HOST]] = socket.getaddrinfo(row[HOST], 4444)[0][4][0]
except:
host_to_ip[row[HOST]] = "IP N/A"
# create id_to_name entries if they don't already exist
if row[PID] not in id_to_name:
id_to_name[row[PID]] = row[NAME]
# create id_to_severity entries if they don't already exist
if row[PID] not in id_to_severity:
id_to_severity[row[PID]] = row[RISK]
# add entry to host_to_vulns for this host and this vuln
if row[HOST] not in host_to_vulns:
host_to_vulns[row[HOST]] = set()
host_to_vulns[row[HOST]].add(row[PID])
# add entry to vuln_to_hosts for this host and this vuln
if row[PID] not in vuln_to_hosts:
vuln_to_hosts[row[PID]] = set()
vuln_to_hosts[row[PID]].add(row[HOST])
return ScanData(host_to_vulns, vuln_to_hosts, id_to_name, host_to_ip, id_to_severity)
except __HOLE__:
print "Error! CSV file was not successfully read."
exit(1)
except:
print "An unknown error occurred during parsing!"
exit(1)
|
IOError
|
dataset/ETHPy150Open maxburkhardt/nessus-parser/util/reader.py/read
|
6,420
|
def createPortItem(self, port, x, y):
""" createPortItem(port: Port, x: int, y: int) -> QGraphicsPortItem
Create an item from the port spec
"""
# pts = [(0,2),(0,-2), (2,None), (-2,None),
# (None,-2), (None,2), (-2,0), (2,0)]
# pts = [(0,0.2), (0, 0.8), (0.2, None), (0.8, None),
# (None, 0.8), (None, 0.2), (0.8,0), (0.2, 0)]
# portShape = QGraphicsPortPolygonItem(x, y, self.ghosted, self,
# port.optional, port.min_conns,
# port.max_conns, points=pts)
# portShape = QGraphicsPortTriangleItem(x, y, self.ghosted, self,
# port.optional, port.min_conns,
# port.max_conns, angle=0)
# portShape = QGraphicsPortDiamondItem(x, y, self.ghosted, self,
# port.optional, port.min_conns,
# port.max_conns)
port_klass = QGraphicsPortRectItem
kwargs = {}
shape = port.shape()
if shape is not None:
if isinstance(shape, basestring):
if shape.startswith("triangle"):
port_klass = QGraphicsPortTriangleItem
try:
kwargs['angle'] = int(shape[8:])
except __HOLE__:
kwargs['angle'] = 0
elif shape == "diamond":
port_klass = QGraphicsPortDiamondItem
elif shape == "circle" or shape == "ellipse":
port_klass = QGraphicsPortEllipseItem
else:
try:
iter(shape)
except TypeError:
pass
else:
port_klass = QGraphicsPortPolygonItem
kwargs['points'] = shape
portShape = port_klass(port, x, y, self.ghosted, self, **kwargs)
# portShape = QGraphicsPortRectItem(port, x, y, self.ghosted, self)
portShape.controller = self.controller
portShape.port = port
# do not show as invalid in search mode
if not port.is_valid and not (self.controller and self.controller.search):
portShape.setInvalid(True)
return portShape
|
ValueError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/gui/pipeline_view.py/QGraphicsModuleItem.createPortItem
|
6,421
|
def set_module_computing(self, moduleId):
""" set_module_computing(moduleId: int) -> None
Post an event to the scene (self) for updating the module color
"""
p = self.controller.progress
if p is not None:
self.check_progress_canceled()
pipeline = self.controller.current_pipeline
try:
module = pipeline.get_module_by_id(moduleId)
except __HOLE__:
# Module does not exist in pipeline
return
p.setLabelText(module.name)
QtGui.QApplication.postEvent(self,
QModuleStatusEvent(moduleId, 4, ''))
QtCore.QCoreApplication.processEvents()
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/gui/pipeline_view.py/QPipelineScene.set_module_computing
|
6,422
|
def is_int(self, text):
try:
int(text)
return True
except __HOLE__:
return False
|
ValueError
|
dataset/ETHPy150Open cloudera/hue/apps/oozie/src/oozie/models.py/Dataset.is_int
|
6,423
|
def has_installed(dependency):
try:
importlib.import_module(dependency)
return True
except __HOLE__:
return False
|
ImportError
|
dataset/ETHPy150Open python-thumbnails/python-thumbnails/tests/utils.py/has_installed
|
6,424
|
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except __HOLE__:
return default
if callable(attr):
# Check func_code.co_argcount rather than try/excepting the
# function and catching the TypeError, because something inside
# the function may raise the TypeError. This technique is more
# accurate.
if hasattr(attr, 'func_code'):
argcount = attr.func_code.co_argcount
else:
argcount = attr.__call__.func_code.co_argcount
if argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
|
AttributeError
|
dataset/ETHPy150Open llazzaro/django-scheduler/schedule/feeds/atom.py/Feed.__get_dynamic_attr
|
6,425
|
def get_feed(self, extra_params=None):
if extra_params:
try:
obj = self.get_object(extra_params.split('/'))
except (__HOLE__, LookupError):
raise LookupError('Feed does not exist')
else:
obj = None
feed = AtomFeed(
atom_id=self.__get_dynamic_attr('feed_id', obj),
title=self.__get_dynamic_attr('feed_title', obj),
updated=self.__get_dynamic_attr('feed_updated', obj),
icon=self.__get_dynamic_attr('feed_icon', obj),
logo=self.__get_dynamic_attr('feed_logo', obj),
rights=self.__get_dynamic_attr('feed_rights', obj),
subtitle=self.__get_dynamic_attr('feed_subtitle', obj),
authors=self.__get_dynamic_attr('feed_authors', obj, default=[]),
categories=self.__get_dynamic_attr('feed_categories', obj, default=[]),
contributors=self.__get_dynamic_attr('feed_contributors', obj, default=[]),
links=self.__get_dynamic_attr('feed_links', obj, default=[]),
extra_attrs=self.__get_dynamic_attr('feed_extra_attrs', obj),
hide_generator=self.__get_dynamic_attr('hide_generator', obj, default=False)
)
items = self.__get_dynamic_attr('items', obj)
if items is None:
raise LookupError('Feed has no items field')
for item in items:
feed.add_item(
atom_id=self.__get_dynamic_attr('item_id', item),
title=self.__get_dynamic_attr('item_title', item),
updated=self.__get_dynamic_attr('item_updated', item),
content=self.__get_dynamic_attr('item_content', item),
published=self.__get_dynamic_attr('item_published', item),
rights=self.__get_dynamic_attr('item_rights', item),
source=self.__get_dynamic_attr('item_source', item),
summary=self.__get_dynamic_attr('item_summary', item),
authors=self.__get_dynamic_attr('item_authors', item, default=[]),
categories=self.__get_dynamic_attr('item_categories', item, default=[]),
contributors=self.__get_dynamic_attr('item_contributors', item, default=[]),
links=self.__get_dynamic_attr('item_links', item, default=[]),
extra_attrs=self.__get_dynamic_attr('item_extra_attrs', None, default={}),
)
if self.VALIDATE:
feed.validate()
return feed
|
AttributeError
|
dataset/ETHPy150Open llazzaro/django-scheduler/schedule/feeds/atom.py/Feed.get_feed
|
6,426
|
def json_splitter(buffer):
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
try:
obj, index = json_decoder.raw_decode(buffer)
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
return obj, rest
except __HOLE__:
return None
|
ValueError
|
dataset/ETHPy150Open docker/compose/compose/utils.py/json_splitter
|
6,427
|
def get_key(self, network_id):
try:
return self[network_id]
except __HOLE__:
raise TunnelKeyNotFound(network_id=network_id)
|
KeyError
|
dataset/ETHPy150Open osrg/ryu/ryu/controller/tunnels.py/TunnelKeys.get_key
|
6,428
|
def delete_key(self, network_id):
try:
tunnel_key = self[network_id]
self.send_event(EventTunnelKeyDel(network_id, tunnel_key))
del self[network_id]
except __HOLE__:
raise ryu_exc.NetworkNotFound(network_id=network_id)
|
KeyError
|
dataset/ETHPy150Open osrg/ryu/ryu/controller/tunnels.py/TunnelKeys.delete_key
|
6,429
|
def get_remote_dpid(self, dpid, port_no):
try:
return self.dpids[dpid][port_no]
except __HOLE__:
raise ryu_exc.PortNotFound(dpid=dpid, port=port_no)
|
KeyError
|
dataset/ETHPy150Open osrg/ryu/ryu/controller/tunnels.py/DPIDs.get_remote_dpid
|
6,430
|
def delete_port(self, dpid, port_no):
try:
remote_dpid = self.dpids[dpid][port_no]
self.send_event(EventTunnelPort(dpid, port_no, remote_dpid, False))
del self.dpids[dpid][port_no]
except __HOLE__:
raise ryu_exc.PortNotFound(dpid=dpid, port=port_no)
|
KeyError
|
dataset/ETHPy150Open osrg/ryu/ryu/controller/tunnels.py/DPIDs.delete_port
|
6,431
|
def get_port(self, dpid, remote_dpid):
try:
dp = self.dpids[dpid]
except __HOLE__:
raise ryu_exc.PortNotFound(dpid=dpid, port=None, network_id=None)
res = [port_no for (port_no, remote_dpid_) in dp.items()
if remote_dpid_ == remote_dpid]
assert len(res) <= 1
if len(res) == 0:
raise ryu_exc.PortNotFound(dpid=dpid, port=None, network_id=None)
return res[0]
|
KeyError
|
dataset/ETHPy150Open osrg/ryu/ryu/controller/tunnels.py/DPIDs.get_port
|
6,432
|
def len(obj):
try:
return _len(obj)
except __HOLE__:
try:
# note: this is an internal undocumented API,
# don't rely on it in your own programs
return obj.__length_hint__()
except AttributeError:
raise TypeError
|
TypeError
|
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iterlen.py/len
|
6,433
|
def test_queue_task_done(self):
# Test to make sure a queue task completed successfully.
q = self.type2test()
try:
q.task_done()
except __HOLE__:
pass
else:
self.fail("Did not detect task count going negative")
|
ValueError
|
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_queue.py/BaseQueueTestMixin.test_queue_task_done
|
6,434
|
def test_queue_join(self):
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
q = self.type2test()
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except __HOLE__:
pass
else:
self.fail("Did not detect task count going negative")
|
ValueError
|
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_queue.py/BaseQueueTestMixin.test_queue_join
|
6,435
|
def server_error(request):
"""Own view in order to pass RequestContext and send an error message.
"""
exc_type, exc_info, tb = sys.exc_info()
response = "%s\n" % exc_type.__name__
response += "%s\n" % exc_info
response += "TRACEBACK:\n"
for tb in traceback.format_tb(tb):
response += "%s\n" % tb
if request.user:
response += "User: %s\n" % request.user.username
response += "\nREQUEST:\n%s" % request
try:
from_email = settings.ADMINS[0][1]
to_emails = [a[1] for a in settings.ADMINS]
except __HOLE__:
pass
else:
mail = EmailMessage(
subject="Error LFS", body=response, from_email=from_email, to=to_emails)
mail.send(fail_silently=True)
t = loader.get_template('500.html')
return HttpResponseServerError(t.render(RequestContext(request)))
|
IndexError
|
dataset/ETHPy150Open diefenbach/django-lfs/lfs/core/views.py/server_error
|
6,436
|
def generate_getconn(engine, user, password, host, port, dbname, dirname = None):
kwargs = {}
if engine == 'mysql':
# If using mysql, choose among MySQLdb or pymysql,
# Trying to load MySQLdb, and if it fails, trying
# to load and register pymysql
try:
import MySQLdb
assert MySQLdb is not None # It can never be: just avoid pyflakes warnings
except __HOLE__:
import pymysql_sa
pymysql_sa.make_default_mysql_dialect()
# In the case of MySQL, we need to activate this flag
kwargs['client_flag'] = 2
elif engine == 'sqlite':
# By default, sqlite uses a timeout of 5 seconds. Given the
# concurrency levels that WebLab-Deusto might achieve with
# multiple users in a queue, this might not be enough. We
# increase it to a minute and a half to avoid problems with
# multiple concurrent users
kwargs['timeout'] = 90
if dbname == ':memory:':
kwargs['check_same_thread'] = False
# Then load the sqlalchemy dialect. In order to do the
# equivalent to:
#
# from sqlalchemy.dialects.mysql import base
# dbi = base.dialect.dbapi()
#
# We import the module itself (sqlalchemy.dialects.mysql)
import sqlalchemy.dialects as dialects
__import__('sqlalchemy.dialects.%s' % engine)
# And once imported, we take the base.dialect.dbapi
dbi = getattr(dialects, engine).base.dialect.dbapi()
if engine == 'sqlite':
def getconn_sqlite():
return dbi.connect(database = get_sqlite_dbname(dbname, dirname), **kwargs)
getconn = getconn_sqlite
else:
def getconn_else():
kwargs.update(dict(user = user, passwd = password, host = host, db = dbname))
if port is not None:
kwargs['port'] = port
return dbi.connect(**kwargs)
getconn = getconn_else
return getconn
|
ImportError
|
dataset/ETHPy150Open weblabdeusto/weblabdeusto/server/src/voodoo/dbutil.py/generate_getconn
|
6,437
|
def py2js(v):
"""Note that None values are rendered as ``null`` (not ``undefined``.
"""
# assert _for_user_profile is not None
# logger.debug("py2js(%r)",v)
for cv in CONVERTERS:
v = cv(v)
# if isinstance(v,LanguageInfo):
# return v.django_code
if isinstance(v, Value):
return v.as_ext()
# v = v.as_ext()
# if not isinstance(v, basestring):
# raise Exception("20120121b %r is of type %s" % (v,type(v)))
# return v
if isinstance(v, Promise):
# v = force_text(v)
return json.dumps(force_text(v.encode('utf8')))
if isinstance(v, types.GeneratorType):
return "".join([py2js(x) for x in v])
if etree.iselement(v):
return json.dumps(etree.tostring(v))
# if type(v) is types.GeneratorType:
# raise Exception("Please don't call the generator function yourself")
# return "\n".join([ln for ln in v])
if callable(v):
# print 20120114, repr(v)
# raise Exception("Please call the function yourself")
return "\n".join([ln for ln in v()])
if isinstance(v, js_code):
return str(v.s) # v.s might be a unicode
if v is None:
# return 'undefined'
return 'null'
if isinstance(v, (list, tuple)): # (types.ListType, types.TupleType):
elems = [py2js(x) for x in v
if (not isinstance(x, VisibleComponent))
or x.get_view_permission(_for_user_profile)]
return "[ %s ]" % ", ".join(elems)
if isinstance(v, dict):
# 20160423: removed "sorted(v.items())" because it caused
# TypeError when the dictionary contained a mixture of unicode
# and future.types.newstr objects.
try:
items = [
i for i in sorted(v.items())
if (not isinstance(v, VisibleComponent))
or v.get_view_permission(_for_user_profile)]
except __HOLE__ as e:
raise TypeError("Failed to sort {0} : {1}".format(v, e))
return "{ %s }" % ", ".join(
["%s: %s" % (py2js(k), py2js(i)) for k, i in items])
if isinstance(v, bool): # types.BooleanType:
return str(v).lower()
if isinstance(v, Quantity):
return '"%s"' % v
if isinstance(v, (int, decimal.Decimal, fractions.Fraction)):
return str(v)
if isinstance(v, IncompleteDate):
return '"%s"' % v.strftime(settings.SITE.date_format_strftime)
if isinstance(v, datetime.datetime):
return '"%s"' % v.strftime(settings.SITE.datetime_format_strftime)
if isinstance(v, datetime.time):
return '"%s"' % v.strftime(settings.SITE.time_format_strftime)
if isinstance(v, datetime.date):
if v.year < 1900:
v = IncompleteDate(v)
return '"%s"' % v.strftime(settings.SITE.date_format_strftime)
return '"%s"' % v.strftime(settings.SITE.date_format_strftime)
if isinstance(v, float):
return repr(v)
# return json.encoder.encode_basestring(v)
# print repr(v)
# http://docs.djangoproject.com/en/dev/topics/serialization/
# if not isinstance(v, (str,unicode)):
# raise Exception("20120121 %r is of type %s" % (v,type(v)))
return json.dumps(v)
# try:
# return json.dumps(v)
# except TypeError as e:
# raise TypeError("%r : %s" % (v, e))
# return json.dumps(v,cls=DjangoJSONEncoder) # http://code.djangoproject.com/ticket/3324
|
TypeError
|
dataset/ETHPy150Open lsaffre/lino/lino/utils/jsgen.py/py2js
|
6,438
|
def clean_upload_data(data):
image = data['image']
image.seek(0)
try:
pil_image = PIL.Image.open(image)
except __HOLE__ as e:
if e.errno:
error_msg = force_unicode(e)
else:
error_msg = u"Invalid or unsupported image file"
raise forms.ValidationError({"image": [error_msg]})
else:
extension = get_image_extension(pil_image)
upload_to = data['upload_to'] or None
folder_path = get_upload_foldername(image.name, upload_to=upload_to)
(w, h) = (orig_w, orig_h) = pil_image.size
sizes = data.get('sizes')
if sizes:
(min_w, min_h) = get_min_size(sizes)
if (orig_w < min_w or orig_h < min_h):
raise forms.ValidationError({"image": [(
u"Image must be at least %(min_w)sx%(min_h)s "
u"(%(min_w)s pixels wide and %(min_h)s pixels high). "
u"The image you uploaded was %(orig_w)sx%(orig_h)s pixels.") % {
"min_w": min_w,
"min_h": min_h,
"orig_w": orig_w,
"orig_h": orig_h
}]})
if w <= 0:
raise forms.ValidationError({"image": [u"Invalid image: width is %d" % w]})
elif h <= 0:
raise forms.ValidationError({"image": [u"Invalid image: height is %d" % h]})
# File is good, get rid of the tmp file
orig_file_path = os.path.join(folder_path, 'original' + extension)
image.seek(0)
image_contents = image.read()
with open(os.path.join(settings.MEDIA_ROOT, orig_file_path), 'wb+') as f:
f.write(image_contents)
md5_hash = hashlib.md5()
md5_hash.update(image_contents)
data['md5'] = md5_hash.hexdigest()
data['image'] = open(os.path.join(settings.MEDIA_ROOT, orig_file_path), mode='rb')
return data
|
IOError
|
dataset/ETHPy150Open theatlantic/django-cropduster/cropduster/views/forms.py/clean_upload_data
|
6,439
|
def _existing_object(self, pk):
"""
Avoid potentially expensive list comprehension over self.queryset()
in the parent method.
"""
if not hasattr(self, '_object_dict'):
self._object_dict = {}
if not pk:
return None
try:
obj = self.get_queryset().get(pk=pk)
except __HOLE__:
return None
else:
self._object_dict[obj.pk] = obj
return super(ThumbFormSet, self)._existing_object(pk)
|
ObjectDoesNotExist
|
dataset/ETHPy150Open theatlantic/django-cropduster/cropduster/views/forms.py/ThumbFormSet._existing_object
|
6,440
|
def check_reload(self):
filenames = list(self.extra_files)
for file_callback in self.file_callbacks:
try:
filenames.extend(file_callback())
except:
print("Error calling paste.reloader callback %r:" % file_callback,
file=sys.stderr)
traceback.print_exc()
for module in sys.modules.values():
try:
filename = module.__file__
except (AttributeError, __HOLE__):
continue
if filename is not None:
filenames.append(filename)
for filename in filenames:
try:
stat = os.stat(filename)
if stat:
mtime = stat.st_mtime
else:
mtime = 0
except (OSError, IOError):
continue
if filename.endswith('.pyc') and os.path.exists(filename[:-1]):
mtime = max(os.stat(filename[:-1]).st_mtime, mtime)
elif filename.endswith('$py.class') and \
os.path.exists(filename[:-9] + '.py'):
mtime = max(os.stat(filename[:-9] + '.py').st_mtime, mtime)
if not self.module_mtimes.has_key(filename):
self.module_mtimes[filename] = mtime
elif self.module_mtimes[filename] < mtime:
print("%s changed; reloading..." % filename, file=sys.stderr)
return False
return True
|
ImportError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Paste-2.0.1/paste/reloader.py/Monitor.check_reload
|
6,441
|
def read(self, path):
logInfo = {}
logInfo["statistics"] = {}
logInfo["config"] = {}
logInfo["distribution"] = {}
logInfo["summary"] = {}
file_id = open(path, "r")
line = file_id.readline()
while(line):
config_pattern_matched = re.search(self.configPattern, line)
distribution_pattern_matched = re.search(self.distributionPattern, line)
latency_pattern_matched = re.search(self.latencyPattern, line)
stats_pattern_matched = re.search(self.statsPattern, line)
summary_pattern_matched = re.search(self.summary_pattern, line)
if config_pattern_matched:
try:
if not re.search(self.configDiffPattern, line):
logInfo["config"].update(self.readConfig(file_id))
except:
pass
elif distribution_pattern_matched:
try:
logInfo["distribution"].update(self.readDistribution(file_id))
except:
pass
elif latency_pattern_matched:
try:
logInfo["latency"] = self.readLatency(file_id)
except:
pass
elif stats_pattern_matched:
try:
logInfo["statistics"].update(self.readStats(file_id))
except:
pass
elif summary_pattern_matched:
try:
logInfo["summary"].update(self.readSummary(file_id, line))
except:
pass
try:
line = file_id.readline()
except __HOLE__:
break
return logInfo
|
IndexError
|
dataset/ETHPy150Open aerospike/aerospike-admin/lib/logreader.py/LogReader.read
|
6,442
|
def readStats(self, file_id):
statDic = {}
binPattern = '~([^~]+) Bin Statistics'
setPattern = '~([^~]+) Set Statistics'
servicePattern = 'Service Statistics'
nsPattern = '~([^~]+) Namespace Statistics'
xdrPattern = 'XDR Statistics'
dcPattern = '~([^~]+) DC Statistics'
sindexPattern = '~([^~]+) Sindex Statistics'
line = file_id.readline()
while(not re.search(self.section_separator, line) and not re.search(self.section_separator_with_date,line)):
if line.strip().__len__() != 0:
dic = {}
key = "key"
if re.search(binPattern, line):
if "bins" not in statDic:
statDic["bins"] = {}
dic = statDic["bins"]
key = re.search(binPattern, line).group(1)
elif re.search(setPattern, line):
if "sets" not in statDic:
statDic["sets"] = {}
dic = statDic["sets"]
key = re.search(setPattern, line).group(1)
elif re.search(servicePattern, line):
dic = statDic
key = "service"
elif re.search(nsPattern, line):
if "namespace" not in statDic:
statDic["namespace"] = {}
dic = statDic["namespace"]
key = re.search(nsPattern, line).group(1)
elif re.search(xdrPattern, line):
dic = statDic
key = "xdr"
elif re.search(dcPattern, line):
if "dc" not in statDic:
statDic["dc"] = {}
dic = statDic["dc"]
key = re.search(dcPattern, line).group(1)
elif re.search(sindexPattern, line):
if "sindex" not in statDic:
statDic["sindex"] = {}
dic = statDic["sindex"]
key = re.search(sindexPattern, line).group(1)
dic[key] = self.htableToDic(file_id)
try:
line = file_id.readline()
except __HOLE__:
break
return statDic
|
IndexError
|
dataset/ETHPy150Open aerospike/aerospike-admin/lib/logreader.py/LogReader.readStats
|
6,443
|
def readConfig(self, file_id):
configDic = {}
servicePattern = '(~+)Service Configuration(~+)'
netPattern = '(~+)Network Configuration(~+)'
nsPattern = '~([^~]+)Namespace Configuration(~+)'
xdrPattern = '(~+)XDR Configuration(~+)'
dcPattern = '~([^~]+)DC Configuration(~+)'
line = file_id.readline()
while(not re.search(self.section_separator, line) and not re.search(self.section_separator_with_date,line)):
if line.strip().__len__() != 0:
dic = {}
key = "key"
if re.search(servicePattern, line):
dic = configDic
key = "service"
elif re.search(netPattern, line):
dic = configDic
key = "network"
elif re.search(nsPattern, line):
if "namespace" not in configDic:
configDic["namespace"] = {}
dic = configDic["namespace"]
key = re.search(nsPattern, line).group(1).strip()
elif re.search(xdrPattern, line):
dic = configDic
key = "xdr"
elif re.search(dcPattern, line):
if "dc" not in configDic:
configDic["dc"] = {}
dic = configDic["dc"]
key = re.search(dcPattern, line).group(1).strip()
dic[key] = self.htableToDic(file_id)
try:
line = file_id.readline()
except __HOLE__:
break
return configDic
|
IndexError
|
dataset/ETHPy150Open aerospike/aerospike-admin/lib/logreader.py/LogReader.readConfig
|
6,444
|
def readLatency(self, file_id):
configDic = {}
pattern = '~([^~]+) Latency(~+)'
line = file_id.readline()
while(not re.search(self.section_separator, line) and not re.search(self.section_separator_with_date,line)):
if line.strip().__len__() != 0:
m1 = re.search(pattern, line)
if m1:
dic = configDic
key = m1.group(1).strip()
dic[key] = self.latencyTableToDic(file_id)
try:
line = file_id.readline()
except __HOLE__:
break
return configDic
|
IndexError
|
dataset/ETHPy150Open aerospike/aerospike-admin/lib/logreader.py/LogReader.readLatency
|
6,445
|
def readSummaryStr(self, file_id):
line = file_id.readline()
summaryStr = ""
while(not re.search(self.section_separator, line) and not re.search(self.section_separator_with_date,line)):
if line.strip().__len__() != 0:
summaryStr += line
try:
line = file_id.readline()
except __HOLE__:
break
return summaryStr
|
IndexError
|
dataset/ETHPy150Open aerospike/aerospike-admin/lib/logreader.py/LogReader.readSummaryStr
|
6,446
|
def readDistribution(self, file_id):
configDic = {}
ttlPattern = '~([^~]+) - TTL Distribution in Seconds(~+)'
evictPattern = '~([^~]+) - Eviction Distribution in Seconds(~+)'
objszPattern = '~([^~]+) - Object Size Distribution in Record Blocks(~+)'
objszBytesPattern = '([^~]+) - Object Size Distribution in Bytes'
line = file_id.readline()
bytewise_distribution = False
while(not re.search(self.section_separator, line) and not re.search(self.section_separator_with_date,line)):
if line.strip().__len__() != 0 :
m1 = re.search(ttlPattern, line)
m2 = re.search(evictPattern, line)
m3 = re.search(objszPattern, line)
m4 = re.search(objszBytesPattern, line)
dic = {}
key = "key"
if m1:
if "ttl" not in configDic:
configDic["ttl"] = {}
dic = configDic["ttl"]
key = m1.group(1).strip()
elif m2:
if "evict" not in configDic:
configDic["evict"] = {}
dic = configDic["evict"]
key = m2.group(1).strip()
elif m3:
if "objsz" not in configDic:
configDic["objsz"] = {}
dic = configDic["objsz"]
key = m3.group(1).strip()
elif m4:
if "objsz-b" not in configDic:
configDic["objsz-b"] = {}
dic = configDic["objsz-b"]
key = m4.group(1).strip()
bytewise_distribution = True
if bytewise_distribution:
columns, dic[key] = self.vtable_to_dic(file_id)
dic[key]['columns'] = columns
else:
dic[key] = self.dist_table_to_dic(file_id)
try:
line = file_id.readline()
except __HOLE__:
break
return configDic
|
IndexError
|
dataset/ETHPy150Open aerospike/aerospike-admin/lib/logreader.py/LogReader.readDistribution
|
6,447
|
def post_dispatch_input(self, etype, me):
'''This function is called by dispatch_input() when we want to dispatch
an input event. The event is dispatched to all listeners and if
grabbed, it's dispatched to grabbed widgets.
'''
# update available list
if etype == 'begin':
self.me_list.append(me)
elif etype == 'end':
if me in self.me_list:
self.me_list.remove(me)
# dispatch to listeners
if not me.grab_exclusive_class:
for listener in self.event_listeners:
listener.dispatch('on_motion', etype, me)
# dispatch grabbed touch
me.grab_state = True
for _wid in me.grab_list[:]:
# it's a weakref, call it!
wid = _wid()
if wid is None:
# object is gone, stop.
me.grab_list.remove(_wid)
continue
root_window = wid.get_root_window()
if wid != root_window and root_window is not None:
me.push()
w, h = root_window.system_size
if platform == 'ios' or root_window._density != 1:
w, h = root_window.size
kheight = root_window.keyboard_height
smode = root_window.softinput_mode
me.scale_for_screen(w, h, rotation=root_window.rotation,
smode=smode, kheight=kheight)
parent = wid.parent
# and do to_local until the widget
try:
if parent:
me.apply_transform_2d(parent.to_widget)
else:
me.apply_transform_2d(wid.to_widget)
me.apply_transform_2d(wid.to_parent)
except __HOLE__:
# when using an inner window, an app may have grabbed the touch
# but the app has been removed; the touch can't access
# one of its parents (i.e., self.parent will be None)
# and the bug happens.
me.pop()
continue
me.grab_current = wid
wid._context.push()
if etype == 'begin':
# don't dispatch the touch again in on_touch_down;
# a down event is nearly unique here.
# wid.dispatch('on_touch_down', touch)
pass
elif etype == 'update':
if wid._context.sandbox:
with wid._context.sandbox:
wid.dispatch('on_touch_move', me)
else:
wid.dispatch('on_touch_move', me)
elif etype == 'end':
if wid._context.sandbox:
with wid._context.sandbox:
wid.dispatch('on_touch_up', me)
else:
wid.dispatch('on_touch_up', me)
wid._context.pop()
me.grab_current = None
if wid != root_window and root_window is not None:
me.pop()
me.grab_state = False
|
AttributeError
|
dataset/ETHPy150Open kivy/kivy/kivy/base.py/EventLoopBase.post_dispatch_input
|
6,448
|
def extract_features_using_pefile(self, pe):
''' Process the PE File using the Python pefile module. '''
# Store all extracted features into feature lists
extracted_dense = {}
extracted_sparse = {}
# Now slog through the info and extract the features
feature_not_found_flag = -99
feature_default_value = 0
self._warnings = []
# Set all the dense features and sparse features to 'feature not found'
# value and then check later to see if it was found
for feature in self._dense_feature_list:
extracted_dense[feature] = feature_not_found_flag
for feature in self._sparse_feature_list:
extracted_sparse[feature] = feature_not_found_flag
# Check to make sure all the section names are standard
std_sections = ['.text', '.bss', '.rdata', '.data', '.rsrc', '.edata', '.idata', \
'.pdata', '.debug', '.reloc', '.stab', '.stabstr', '.tls', \
'.crt', '.gnu_deb', '.eh_fram', '.exptbl', '.rodata']
for i in range(200):
std_sections.append('/'+str(i))
std_section_names = 1
extracted_sparse['section_names'] = []
for section in pe.sections:
name = convertToAsciiNullTerm(section.Name).lower()
extracted_sparse['section_names'].append(name)
if (name not in std_sections):
std_section_names = 0
extracted_dense['std_section_names'] = std_section_names
extracted_dense['debug_size'] = pe.OPTIONAL_HEADER.DATA_DIRECTORY[6].Size
extracted_dense['major_version'] = pe.OPTIONAL_HEADER.MajorImageVersion
extracted_dense['minor_version'] = pe.OPTIONAL_HEADER.MinorImageVersion
extracted_dense['iat_rva'] = pe.OPTIONAL_HEADER.DATA_DIRECTORY[1].VirtualAddress
extracted_dense['export_size'] = pe.OPTIONAL_HEADER.DATA_DIRECTORY[0].Size
extracted_dense['check_sum'] = pe.OPTIONAL_HEADER.CheckSum
try:
extracted_dense['generated_check_sum'] = pe.generate_checksum()
except __HOLE__:
#self._logger.logMessage('warning', 'pe.generate_check_sum() threw an exception, setting to 0!')
extracted_dense['generated_check_sum'] = 0
if (len(pe.sections) > 0):
extracted_dense['virtual_address'] = pe.sections[0].VirtualAddress
extracted_dense['virtual_size'] = pe.sections[0].Misc_VirtualSize
extracted_dense['number_of_sections'] = pe.FILE_HEADER.NumberOfSections
extracted_dense['compile_date'] = pe.FILE_HEADER.TimeDateStamp
extracted_dense['number_of_rva_and_sizes'] = pe.OPTIONAL_HEADER.NumberOfRvaAndSizes
extracted_dense['total_size_pe'] = len(pe.__data__)
# Number of import and exports
if hasattr(pe,'DIRECTORY_ENTRY_IMPORT'):
extracted_dense['number_of_imports'] = len(pe.DIRECTORY_ENTRY_IMPORT)
num_imported_symbols = 0
for module in pe.DIRECTORY_ENTRY_IMPORT:
num_imported_symbols += len(module.imports)
extracted_dense['number_of_import_symbols'] = num_imported_symbols
if hasattr(pe, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
extracted_dense['number_of_bound_imports'] = len(pe.DIRECTORY_ENTRY_BOUND_IMPORT)
num_imported_symbols = 0
for module in pe.DIRECTORY_ENTRY_BOUND_IMPORT:
num_imported_symbols += len(module.entries)
extracted_dense['number_of_bound_import_symbols'] = num_imported_symbols
if hasattr(pe,'DIRECTORY_ENTRY_EXPORT'):
extracted_dense['number_of_export_symbols'] = len(pe.DIRECTORY_ENTRY_EXPORT.symbols)
symbol_set = set()
for symbol in pe.DIRECTORY_ENTRY_EXPORT.symbols:
symbol_info = 'unknown'
if (not symbol.name):
symbol_info = 'ordinal=' + str(symbol.ordinal)
else:
symbol_info = 'name=' + symbol.name
symbol_set.add(convertToUTF8('%s'%(symbol_info)).lower())
# Now convert set to list and add to features
extracted_sparse['ExportedSymbols'] = list(symbol_set)
# Specific Import info (Note this will be a sparse field woo hoo!)
if hasattr(pe, 'DIRECTORY_ENTRY_IMPORT'):
symbol_set = set()
for module in pe.DIRECTORY_ENTRY_IMPORT:
for symbol in module.imports:
symbol_info = 'unknown'
if symbol.import_by_ordinal is True:
symbol_info = 'ordinal=' + str(symbol.ordinal)
else:
symbol_info = 'name=' + symbol.name
#symbol_info['hint'] = symbol.hint
if symbol.bound:
symbol_info += ' bound=' + str(symbol.bound)
symbol_set.add(convertToUTF8('%s:%s'%(module.dll, symbol_info)).lower())
# Now convert set to list and add to features
extracted_sparse['imported_symbols'] = list(symbol_set)
# Do we have a second section
if (len(pe.sections) >= 2):
extracted_dense['virtual_size_2'] = pe.sections[1].Misc_VirtualSize
extracted_dense['size_image'] = pe.OPTIONAL_HEADER.SizeOfImage
extracted_dense['size_code'] = pe.OPTIONAL_HEADER.SizeOfCode
extracted_dense['size_initdata'] = pe.OPTIONAL_HEADER.SizeOfInitializedData
extracted_dense['size_uninit'] = pe.OPTIONAL_HEADER.SizeOfUninitializedData
extracted_dense['pe_majorlink'] = pe.OPTIONAL_HEADER.MajorLinkerVersion
extracted_dense['pe_minorlink'] = pe.OPTIONAL_HEADER.MinorLinkerVersion
extracted_dense['pe_driver'] = 1 if pe.is_driver() else 0
extracted_dense['pe_exe'] = 1 if pe.is_exe() else 0
extracted_dense['pe_dll'] = 1 if pe.is_dll() else 0
extracted_dense['pe_i386'] = 1
if pe.FILE_HEADER.Machine != 0x014c:
extracted_dense['pe_i386'] = 0
extracted_dense['pe_char'] = pe.FILE_HEADER.Characteristics
# Data directory features!!
datadirs = { 0: 'IMAGE_DIRECTORY_ENTRY_EXPORT', 1:'IMAGE_DIRECTORY_ENTRY_IMPORT', 2:'IMAGE_DIRECTORY_ENTRY_RESOURCE', 5:'IMAGE_DIRECTORY_ENTRY_BASERELOC', 12:'IMAGE_DIRECTORY_ENTRY_IAT'}
for idx, datadir in datadirs.items():
datadir = pefile.DIRECTORY_ENTRY[ idx ]
if len(pe.OPTIONAL_HEADER.DATA_DIRECTORY) <= idx:
continue
directory = pe.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
extracted_dense['datadir_%s_size' % datadir] = directory.Size
# Section features
section_flags = ['IMAGE_SCN_MEM_EXECUTE', 'IMAGE_SCN_CNT_CODE', 'IMAGE_SCN_MEM_WRITE', 'IMAGE_SCN_MEM_READ']
rawexecsize = 0
vaexecsize = 0
for sec in pe.sections:
if not sec:
continue
for char in section_flags:
# does the section have one of our attribs?
if hasattr(sec, char):
rawexecsize += sec.SizeOfRawData
vaexecsize += sec.Misc_VirtualSize
break
# Take out any weird characters in section names
secname = convertToAsciiNullTerm(sec.Name).lower()
secname = secname.replace('.','')
extracted_dense['sec_entropy_%s' % secname ] = sec.get_entropy()
extracted_dense['sec_rawptr_%s' % secname] = sec.PointerToRawData
extracted_dense['sec_rawsize_%s' % secname] = sec.SizeOfRawData
extracted_dense['sec_vasize_%s' % secname] = sec.Misc_VirtualSize
extracted_dense['sec_va_execsize'] = vaexecsize
extracted_dense['sec_raw_execsize'] = rawexecsize
# Register if there were any pe warnings
warnings = pe.get_warnings()
if (warnings):
extracted_dense['pe_warnings'] = 1
extracted_sparse['pe_warning_strings'] = warnings
else:
extracted_dense['pe_warnings'] = 0
# Issue a warning if the feature isn't found
for feature in self._dense_feature_list:
if (extracted_dense[feature] == feature_not_found_flag):
extracted_dense[feature] = feature_default_value
if (self._verbose):
self.log('info: Feature: %s not found! Setting to %d' % (feature, feature_default_value))
self._warnings.append('Feature: %s not found! Setting to %d' % (feature, feature_default_value))
# Issue a warning if the feature isn't found
for feature in self._sparse_feature_list:
if (extracted_sparse[feature] == feature_not_found_flag):
extracted_sparse[feature] = feature_default_value
if (self._verbose):
self.log('info: Feature: %s not found! Setting to %d' % (feature, feature_default_value))
self._warnings.append('Feature: %s not found! Setting to %d' % (feature, feature_default_value))
# Set the features for the class var
self._dense_features = extracted_dense
self._sparse_features = extracted_sparse
return self.get_dense_features() #, self.get_sparse_features()
# Helper functions
|
ValueError
|
dataset/ETHPy150Open ClickSecurity/data_hacking/pefile_classification/pe_features.py/PEFileFeatures.extract_features_using_pefile
|
6,449
|
def __init__(self, bot):
self.bot = bot
self.db = os.path.expanduser(
bot.config.get('human', '~/.irc3/human.db'))
self.delay = (2, 5)
try:
os.makedirs(os.path.dirname(self.db))
except __HOLE__:
pass
if not os.path.isfile(self.db): # pragma: no cover
self.initialize(15)
|
OSError
|
dataset/ETHPy150Open gawel/irc3/irc3/plugins/human.py/Human.__init__
|
6,450
|
def _parse_bytes(range_header):
"""Parses a full HTTP Range header.
Args:
range_header: The str value of the Range header.
Returns:
A tuple (units, parsed_ranges) where:
units: A str containing the units of the Range header, e.g. "bytes".
parsed_ranges: A list of (start, end) tuples in the form that
_parse_range_value returns.
"""
try:
parsed_ranges = []
units, ranges = range_header.split('=', 1)
for range_value in ranges.split(','):
range_value = range_value.strip()
if range_value:
parsed_ranges.append(_parse_range_value(range_value))
if not parsed_ranges:
return None
return units, parsed_ranges
except __HOLE__:
return None
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/ext/webapp/blobstore_handlers.py/_parse_bytes
|
6,451
|
@attr('numpy')
def test_biadjacency_matrix_weight(self):
try:
import numpy
except __HOLE__:
raise SkipTest('numpy not available.')
G=nx.path_graph(5)
G.add_edge(0,1,weight=2,other=4)
X=[1,3]
Y=[0,2,4]
M = bipartite.biadjacency_matrix(G,X,weight='weight')
assert_equal(M[0,0], 2)
M = bipartite.biadjacency_matrix(G, X, weight='other')
assert_equal(M[0,0], 4)
|
ImportError
|
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/algorithms/bipartite/tests/test_basic.py/TestBipartiteBasic.test_biadjacency_matrix_weight
|
6,452
|
@attr('numpy')
def test_biadjacency_matrix(self):
try:
import numpy
except __HOLE__:
raise SkipTest('numpy not available.')
tops = [2,5,10]
bots = [5,10,15]
for i in range(len(tops)):
G = nx.bipartite_random_graph(tops[i], bots[i], 0.2)
top = [n for n,d in G.nodes(data=True) if d['bipartite']==0]
M = bipartite.biadjacency_matrix(G, top)
assert_equal(M.shape[0],tops[i])
assert_equal(M.shape[1],bots[i])
|
ImportError
|
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/algorithms/bipartite/tests/test_basic.py/TestBipartiteBasic.test_biadjacency_matrix
|
6,453
|
@attr('numpy')
def test_biadjacency_matrix_order(self):
try:
import numpy
except __HOLE__:
raise SkipTest('numpy not available.')
G=nx.path_graph(5)
G.add_edge(0,1,weight=2)
X=[3,1]
Y=[4,2,0]
M = bipartite.biadjacency_matrix(G,X,Y,weight='weight')
assert_equal(M[1,2], 2)
|
ImportError
|
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/algorithms/bipartite/tests/test_basic.py/TestBipartiteBasic.test_biadjacency_matrix_order
|
6,454
|
def _get_docs_from_pyobject(obj, options, progress_estimator):
progress_estimator.complete += 1
log.progress(progress_estimator.progress(), repr(obj))
if not options.introspect:
log.error("Cannot get docs for Python objects without "
"introspecting them.")
introspect_doc = parse_doc = None
introspect_error = parse_error = None
try:
introspect_doc = introspect_docs(value=obj)
except __HOLE__, e:
log.error(e)
return (None, None)
if options.parse:
if introspect_doc.canonical_name is not None:
prev_introspect = options.introspect
options.introspect = False
try:
_, parse_docs = _get_docs_from_pyname(
str(introspect_doc.canonical_name), options,
progress_estimator, supress_warnings=True)
finally:
options.introspect = prev_introspect
# We need a name:
if introspect_doc.canonical_name in (None, UNKNOWN):
if hasattr(obj, '__name__'):
introspect_doc.canonical_name = DottedName(
DottedName.UNREACHABLE, obj.__name__)
else:
introspect_doc.canonical_name = DottedName(
DottedName.UNREACHABLE)
return (introspect_doc, parse_doc)
|
ImportError
|
dataset/ETHPy150Open ardekantur/pyglet/tools/epydoc/epydoc/docbuilder.py/_get_docs_from_pyobject
|
6,455
|
def _get_docs_from_pyname(name, options, progress_estimator,
supress_warnings=False):
progress_estimator.complete += 1
if options.must_introspect(name) or options.must_parse(name):
log.progress(progress_estimator.progress(), name)
introspect_doc = parse_doc = None
introspect_error = parse_error = None
if options.must_introspect(name):
try:
introspect_doc = introspect_docs(name=name)
except __HOLE__, e:
introspect_error = str(e)
if options.must_parse(name):
try:
parse_doc = parse_docs(name=name)
except ParseError, e:
parse_error = str(e)
except ImportError, e:
# If we get here, then there's probably no python source
# available; don't bother to generate a warning.
pass
# Report any errors we encountered.
if not supress_warnings:
_report_errors(name, introspect_doc, parse_doc,
introspect_error, parse_error)
# Return the docs we found.
return (introspect_doc, parse_doc)
|
ImportError
|
dataset/ETHPy150Open ardekantur/pyglet/tools/epydoc/epydoc/docbuilder.py/_get_docs_from_pyname
|
6,456
|
def _get_docs_from_pyscript(filename, options, progress_estimator):
# [xx] I should be careful about what names I allow as filenames,
# and maybe do some munging to prevent problems.
introspect_doc = parse_doc = None
introspect_error = parse_error = None
if options.introspect:
try:
introspect_doc = introspect_docs(filename=filename, is_script=True)
if introspect_doc.canonical_name is UNKNOWN:
introspect_doc.canonical_name = munge_script_name(filename)
except __HOLE__, e:
introspect_error = str(e)
if options.parse:
try:
parse_doc = parse_docs(filename=filename, is_script=True)
except ParseError, e:
parse_error = str(e)
except ImportError, e:
parse_error = str(e)
# Report any errors we encountered.
_report_errors(filename, introspect_doc, parse_doc,
introspect_error, parse_error)
# Return the docs we found.
return (introspect_doc, parse_doc)
|
ImportError
|
dataset/ETHPy150Open ardekantur/pyglet/tools/epydoc/epydoc/docbuilder.py/_get_docs_from_pyscript
|
6,457
|
def _get_docs_from_module_file(filename, options, progress_estimator,
parent_docs=(None,None)):
"""
Construct and return the API documentation for the python
module with the given filename.
@param parent_docs: The C{ModuleDoc} of the containing package.
If C{parent_docs} is not provided, then this method will
check if the given filename is contained in a package; and
if so, it will construct a stub C{ModuleDoc} for the
containing package(s). C{parent_docs} is a tuple, where
the first element is the parent from introspection, and
the second element is the parent from parsing.
"""
# Record our progress.
modulename = os.path.splitext(os.path.split(filename)[1])[0]
if modulename == '__init__':
modulename = os.path.split(os.path.split(filename)[0])[1]
if parent_docs[0]:
modulename = DottedName(parent_docs[0].canonical_name, modulename)
elif parent_docs[1]:
modulename = DottedName(parent_docs[1].canonical_name, modulename)
if options.must_introspect(modulename) or options.must_parse(modulename):
log.progress(progress_estimator.progress(),
'%s (%s)' % (modulename, filename))
progress_estimator.complete += 1
# Normalize the filename.
filename = os.path.normpath(os.path.abspath(filename))
# When possible, use the source version of the file.
try:
filename = py_src_filename(filename)
src_file_available = True
except ValueError:
src_file_available = False
# Get the introspected & parsed docs (as appropriate)
introspect_doc = parse_doc = None
introspect_error = parse_error = None
if options.must_introspect(modulename):
try:
introspect_doc = introspect_docs(
filename=filename, context=parent_docs[0])
if introspect_doc.canonical_name is UNKNOWN:
introspect_doc.canonical_name = modulename
except __HOLE__, e:
introspect_error = str(e)
if src_file_available and options.must_parse(modulename):
try:
parse_doc = parse_docs(
filename=filename, context=parent_docs[1])
except ParseError, e:
parse_error = str(e)
except ImportError, e:
parse_error = str(e)
# Report any errors we encountered.
_report_errors(filename, introspect_doc, parse_doc,
introspect_error, parse_error)
# Return the docs we found.
return (introspect_doc, parse_doc)
|
ImportError
|
dataset/ETHPy150Open ardekantur/pyglet/tools/epydoc/epydoc/docbuilder.py/_get_docs_from_module_file
|
6,458
|
def import_module(module):
"""
Allows custom providers, configurators and distros.
Import the provider, configurator, or distro module via a string.
ex. ``bootmachine.contrib.providers.rackspace_openstack_v2``
ex. ``bootmachine.contrib.configurators.salt``
ex. ``bootmachine.contrib.distros.arch_201208``
"""
try:
__import__(module)
return sys.modules[module]
except __HOLE__:
abort("Unable to import the module: {0}".format(module))
# import provider and configurator here so their fabric tasks are properly namespaced
|
ImportError
|
dataset/ETHPy150Open rizumu/bootmachine/bootmachine/core.py/import_module
|
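The helper above relies on `__import__('a.b.c')` returning the top-level package, which is why it indexes `sys.modules` afterwards. A hedged alternative sketch using `importlib`, which returns the leaf module directly (the `load_module` name and the `json` example are illustrative, not from the original):

import importlib

def load_module(dotted_path):
    # importlib.import_module returns the leaf module itself, so no
    # sys.modules lookup is needed afterwards.
    try:
        return importlib.import_module(dotted_path)
    except ImportError:
        raise SystemExit("Unable to import the module: {0}".format(dotted_path))

json_mod = load_module("json")
print(json_mod.dumps({"ok": True}))  # prints {"ok": true}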
6,459
|
@task
def reboot_server(name):
"""
Simply reboot a server by name.
The trick here is to change the env vars to that of the server
to be rebooted. Perform the reboot and change env vars back
to their original value.
Usage:
fab reboot_server:name
"""
__shared_setup()
try:
server = [s for s in env.bootmachine_servers if s.name == name][0]
except IndexError:
abort("The server '{0}' was not found.".format(name))
original_user = env.user
original_host_string = env.host_string
try:
env.port = 22
telnetlib.Telnet(server.public_ip, env.port)
env.user = "root"
except __HOLE__:
env.port = int(settings.SSH_PORT)
env.user = getpass.getuser()
telnetlib.Telnet(server.public_ip, env.port)
env.host_string = "{0}:{1}".format(server.public_ip, env.port)
env.keepalive = 30 # keep the ssh key active, see fabric issue #402
with fabric_settings(warn_only=True):
reboot()
env.user = original_user
env.host_string = original_host_string
|
IOError
|
dataset/ETHPy150Open rizumu/bootmachine/bootmachine/core.py/reboot_server
|
6,460
|
def __set_ssh_vars(valid_object):
"""
This method takes a valid_object, either the env or a server,
and based on the results of telnet, it sets port, user,
    host_string variables for ssh. It also sets a configured
variable if the SSH_PORT matches that in the settings. This
would only match if the server is properly configured.
"""
if valid_object == env:
public_ip = env.host
else:
public_ip = valid_object.public_ip
try:
port = 22
telnetlib.Telnet(public_ip, port)
except __HOLE__:
port = int(settings.SSH_PORT)
telnetlib.Telnet(public_ip, port)
valid_object.port = port
if valid_object.port == 22:
valid_object.configured = False
valid_object.user = "root"
else:
valid_object.configured = True
valid_object.user = getpass.getuser()
valid_object.host_string = "{0}:{1}".format(public_ip, port)
return valid_object
|
IOError
|
dataset/ETHPy150Open rizumu/bootmachine/bootmachine/core.py/__set_ssh_vars
|
6,461
|
def daemonize(self):
"""
Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# Exit first parent
sys.exit(0)
except __HOLE__, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment
os.chdir(self.home_dir)
os.setsid()
os.umask(self.umask)
# Do second fork
try:
pid = os.fork()
if pid > 0:
# Exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
if sys.platform != 'darwin': # This block breaks on OS X
# Redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
if self.stderr:
se = file(self.stderr, 'a+', 0)
os.dup2(se.fileno(), sys.stderr.fileno())
else:
se = sys.stderr
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
def sigtermhandler(signum, frame):
self.daemon_alive = False
signal.signal(signal.SIGTERM, sigtermhandler)
signal.signal(signal.SIGINT, sigtermhandler)
if self.verbose >= 1:
print "Started"
# Write pidfile
atexit.register(self.delpid) # Make sure pid file is removed if we quit
pid = str(os.getpid())
file(self.pidfile, 'w+').write("%s\n" % pid)
|
OSError
|
dataset/ETHPy150Open oleiade/Elevator/debian/elevator/usr/lib/python2.6/dist-packages/elevator/utils/daemon.py/Daemon.daemonize
|
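Daemon.daemonize follows the classic Stevens double-fork recipe cited in its docstring. Below is a minimal Python 3 sketch of just those steps, assuming a writable log path; the pidfile bookkeeping and signal hooks from the original are deliberately omitted:

import os
import sys

def daemonize_sketch(log_path="/tmp/daemon.log"):
    if os.fork() > 0:
        sys.exit(0)        # First parent exits; child is re-parented to init.
    os.chdir("/")
    os.setsid()            # New session: detach from the controlling terminal.
    os.umask(0)
    if os.fork() > 0:
        sys.exit(0)        # Session leader exits; daemon can never regain a tty.
    sys.stdout.flush()
    sys.stderr.flush()
    log = open(log_path, "a+")
    os.dup2(log.fileno(), sys.stdout.fileno())  # Redirect stdout/stderr
    os.dup2(log.fileno(), sys.stderr.fileno())  # to the log file.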
6,462
|
def start(self, *args, **kwargs):
"""
Start the daemon
"""
if self.verbose >= 1:
print "Starting..."
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except __HOLE__:
pid = None
except SystemExit:
pid = None
if pid:
message = "pidfile %s already exists. Is it already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run(*args, **kwargs)
|
IOError
|
dataset/ETHPy150Open oleiade/Elevator/debian/elevator/usr/lib/python2.6/dist-packages/elevator/utils/daemon.py/Daemon.start
|
6,463
|
def stop(self):
"""
Stop the daemon
"""
if self.verbose >= 1:
print "Stopping..."
# Get the pid from the pidfile
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except __HOLE__:
pid = None
except ValueError:
pid = None
if not pid:
message = "pidfile %s does not exist. Not running?\n"
sys.stderr.write(message % self.pidfile)
# Just to be sure. A ValueError might occur if the PID file is empty but does actually exist
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
return # Not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
if self.verbose >= 1:
print "Stopped"
|
IOError
|
dataset/ETHPy150Open oleiade/Elevator/debian/elevator/usr/lib/python2.6/dist-packages/elevator/utils/daemon.py/Daemon.stop
|
6,464
|
def _nanargmin(x, axis, **kwargs):
try:
return chunk.nanargmin(x, axis, **kwargs)
except __HOLE__:
return chunk.nanargmin(np.where(np.isnan(x), np.inf, x), axis, **kwargs)
|
ValueError
|
dataset/ETHPy150Open dask/dask/dask/array/reductions.py/_nanargmin
|
6,465
|
def _nanargmax(x, axis, **kwargs):
try:
return chunk.nanargmax(x, axis, **kwargs)
except __HOLE__:
return chunk.nanargmax(np.where(np.isnan(x), -np.inf, x), axis, **kwargs)
|
ValueError
|
dataset/ETHPy150Open dask/dask/dask/array/reductions.py/_nanargmax
|
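Both `_nanargmin` and `_nanargmax` catch the ValueError numpy raises for an all-NaN slice and retry after masking NaNs with an infinity of the right sign, so the masked values can never win. A small standalone demonstration of that trick:

import numpy as np

x = np.array([np.nan, 3.0, np.nan, 1.0])
# Replacing NaN with +inf makes argmin skip NaNs without disturbing
# the real values; _nanargmax does the same with -inf.
masked = np.where(np.isnan(x), np.inf, x)
print(np.argmin(masked))  # prints 3, the index of 1.0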
6,466
|
def callit(f, funct):
try:
funct(f)
except (__HOLE__, Exception), err:
f.write(str(err)+'\n')
|
SystemExit
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.util/src/openmdao/util/envirodump.py/callit
|
6,467
|
def decode(data):
"""
Decode data employing some charset detection and including unicode BOM
stripping.
"""
# Don't make more work than we have to.
if not isinstance(data, str):
return data
# Detect standard unicodes.
for bom, encoding in UNICODES:
if data.startswith(bom):
return unicode(data[len(bom):], encoding, "ignore")
# Try straight UTF-8
try:
return unicode(data, "utf-8")
except:
pass
# Test for latin_1, because it can be matched as UTF-16
# Somewhat of a hack, but it works and is about a thousand times faster
# than using chardet.
if all(ord(c) < 256 for c in data):
try:
return unicode(data, "latin_1")
except:
pass
# Test for various common encodings.
for encoding in COMMON_ENCODINGS:
try:
return unicode(data, encoding)
except __HOLE__:
pass
# Anything else gets filtered.
return unicode(textfilter.filter_ascii(data), errors="replace")
|
UnicodeDecodeError
|
dataset/ETHPy150Open mozilla/app-validator/appvalidator/unicodehelper.py/decode
|
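The decoder's first stage matches the data against byte-order marks before trying UTF-8, latin_1, and the COMMON_ENCODINGS list. Here is a hedged sketch of just the BOM stage; the `BOMS` table is my stand-in for the module's UNICODES constant, which is not shown in the snippet:

import codecs

BOMS = [
    (codecs.BOM_UTF8, "utf-8"),
    (codecs.BOM_UTF16_LE, "utf-16-le"),
    (codecs.BOM_UTF16_BE, "utf-16-be"),
]

def sniff_bom(data):
    for bom, encoding in BOMS:
        if data.startswith(bom):
            # Strip the BOM and decode with the matching codec.
            return data[len(bom):].decode(encoding, "ignore")
    return None  # No BOM; the caller falls through to other encodings.

print(sniff_bom(codecs.BOM_UTF8 + b"hello"))  # prints hello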
6,468
|
def UrlEncode(s):
try:
return urllib.parse.quote_plus(s)
except __HOLE__:
return urllib.quote_plus(s)
|
AttributeError
|
dataset/ETHPy150Open coronalabs/CoronaSDK-SublimeText/corona_docs.py/UrlEncode
|
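Catching AttributeError on `urllib.parse` is one way to bridge Python 2 and 3; the more common variant resolves the name once at import time. A brief sketch:

try:
    from urllib.parse import quote_plus  # Python 3
except ImportError:
    from urllib import quote_plus        # Python 2

def url_encode(s):
    return quote_plus(s)

print(url_encode("a b&c"))  # prints a+b%26c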
6,469
|
def mergeFunctionMetadata(f, g):
"""
Overwrite C{g}'s name and docstring with values from C{f}. Update
C{g}'s instance dictionary with C{f}'s.
To use this function safely you must use the return value. In Python 2.3,
L{mergeFunctionMetadata} will create a new function. In later versions of
Python, C{g} will be mutated and returned.
@return: A function that has C{g}'s behavior and metadata merged from
C{f}.
"""
try:
g.__name__ = f.__name__
except TypeError:
try:
merged = types.FunctionType(
g.func_code, g.func_globals,
f.__name__, inspect.getargspec(g)[-1],
g.func_closure)
except TypeError:
pass
else:
merged = g
try:
merged.__doc__ = f.__doc__
except (TypeError, AttributeError):
pass
try:
merged.__dict__.update(g.__dict__)
merged.__dict__.update(f.__dict__)
except (TypeError, __HOLE__):
pass
merged.__module__ = f.__module__
return merged
|
AttributeError
|
dataset/ETHPy150Open bokeh/bokeh/bokeh/util/deprecate.py/mergeFunctionMetadata
|
6,470
|
def read_list(self):
# Clear rules and move on if file isn't there
if not os.path.exists(self.list_file):
self.regex_list = []
return
try:
mtime = os.path.getmtime(self.list_file)
except __HOLE__:
log.err("Failed to get mtime of %s" % self.list_file)
return
if mtime <= self.rules_last_read:
return
# Begin read
new_regex_list = []
for line in open(self.list_file):
pattern = line.strip()
if line.startswith('#') or not pattern:
continue
try:
new_regex_list.append(re.compile(pattern))
except re.error:
log.err("Failed to parse '%s' in '%s'. Ignoring line" % (pattern, self.list_file))
self.regex_list = new_regex_list
self.rules_last_read = mtime
|
OSError
|
dataset/ETHPy150Open graphite-project/carbon/lib/carbon/regexlist.py/RegexList.read_list
|
6,471
|
def _saveModel(self):
delimiter = self._delimiterBox.currentSelected()
header = self._headerCheckBox.isChecked() # column labels
filename = self._filenameLineEdit.text()
index = False # row labels
encodingIndex = self._encodingComboBox.currentIndex()
encoding = self._encodingComboBox.itemText(encodingIndex)
encoding = _calculateEncodingKey(encoding.lower())
try:
dataFrame = self._model.dataFrame()
except AttributeError, err:
raise AttributeError('No data loaded to export.')
else:
try:
dataFrame.to_csv(filename, encoding=encoding, header=header, index=index, sep=delimiter)
except __HOLE__, err:
raise IOError('No filename given')
except UnicodeError, err:
raise UnicodeError('Could not encode all data. Choose a different encoding')
except Exception:
raise
|
IOError
|
dataset/ETHPy150Open datalyze-solutions/pandas-qt/pandasqt/views/CSVDialogs.py/CSVExportDialog._saveModel
|
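Stripped of the Qt widgets, the export path reduces to a single `DataFrame.to_csv` call with the dialog's choices. A minimal sketch with a placeholder filename:

import pandas as pd

df = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})
# Same knobs _saveModel forwards: header row on, row index off,
# explicit delimiter and encoding. "out.csv" is a placeholder path.
df.to_csv("out.csv", sep=";", header=True, index=False, encoding="utf-8")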
6,472
|
def test_nullbooleanfield_blank(self):
"""
Regression test for #13071: NullBooleanField should not throw
a validation error when given a value of None.
"""
nullboolean = NullBooleanModel(nbfield=None)
try:
nullboolean.full_clean()
except __HOLE__, e:
self.fail("NullBooleanField failed validation with value of None: %s" % e.messages)
|
ValidationError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/tests/regressiontests/model_fields/tests.py/BasicFieldTests.test_nullbooleanfield_blank
|
6,473
|
def sniff(self):
self.initialize_socket()
try:
while(True):
try:
data = self.sock.recv(1024)
request = unpack_lifx_message(data)
print("\nRECV:"),
print(request)
except timeout:
pass
except __HOLE__:
self.sock.close()
|
KeyboardInterrupt
|
dataset/ETHPy150Open mclarkk/lifxlan/examples/sniffer.py/Sniffer.sniff
|
6,474
|
def _parseLine(self, line):
try:
key, val = line.split(':', 1)
except __HOLE__:
# unpack list of wrong size
# -> invalid input data
raise LDIFLineWithoutSemicolonError, line
val = self.parseValue(val)
return key, val
|
ValueError
|
dataset/ETHPy150Open antong/ldaptor/ldaptor/protocols/ldap/ldifprotocol.py/LDIF._parseLine
|
6,475
|
def state_HEADER(self, line):
key, val = self._parseLine(line)
self.mode = WAIT_FOR_DN
if key != 'version':
self.logicalLineReceived(line)
else:
try:
version = int(val)
except __HOLE__:
raise LDIFVersionNotANumberError, val
self.version = version
if version > 1:
raise LDIFUnsupportedVersionError, version
|
ValueError
|
dataset/ETHPy150Open antong/ldaptor/ldaptor/protocols/ldap/ldifprotocol.py/LDIF.state_HEADER
|
6,476
|
def __init__(self, **kwargs):
super(Route53Service, self).__init__(**kwargs)
self.conn = boto.connect_route53()
try:
self.hostname = os.environ['EC2_PUBLIC_HOSTNAME']
except __HOLE__:
app.logger.warn("We cannot register a domain on non ec2 instances")
|
KeyError
|
dataset/ETHPy150Open Netflix/security_monkey/security_monkey/common/route53.py/Route53Service.__init__
|
6,477
|
def PatternResolution(
quad, cursor, BRPs, orderByTriple=True, fetchall=True,
fetchContexts=False, select_modifier=''):
"""
This function implements query pattern resolution against a list of
partition objects and 3 parameters specifying whether to sort the result
set (in order to group identical triples by the contexts in which they
appear), whether to fetch the entire result set or one at a time, and
whether to fetch the matching contexts only or the assertions. This
function uses BinaryRelationPartitionCoverage to whittle out the
partitions that don't need to be searched, generateHashIntersections /
generateWhereClause to generate the SQL query and the parameter fill-ins
and creates a single UNION query against the relevant partitions.
Note the use of UNION syntax requires that the literal properties
partition is first (since it uses the first select to determine the
column types for the resulting rows from the subsequent SELECT queries)
see: http://dev.mysql.com/doc/refman/5.0/en/union.html
"""
subject,predicate,object_,context = quad
targetBRPs = BinaryRelationPartitionCoverage(
(subject,predicate,object_,context),BRPs)
unionQueries = []
unionQueriesParams = []
for brp in targetBRPs:
first = targetBRPs.index(brp) == 0
if fetchContexts:
query = "SELECT DISTINCT %s FROM %s %s WHERE " % \
(','.join(brp.selectContextFields(first)),
brp,
brp._intersectionSQL)
else:
query = CROSS_BRP_QUERY_SQL % \
(select_modifier,
','.join(brp.selectFields(first)),
brp,
brp._intersectionSQL)
whereClause,whereParameters = brp.generateWhereClause(
(subject,predicate,object_,context))
unionQueries.append(query+whereClause)
unionQueriesParams.extend(whereParameters)
if fetchContexts:
orderBySuffix = ''
else:
orderBySuffix = orderByTriple and ' ORDER BY %s,%s,%s' % \
(SlotPrefixes[SUBJECT],
SlotPrefixes[PREDICATE],
SlotPrefixes[OBJECT]) or ''
if len(unionQueries) == 1:
query = unionQueries[0] + orderBySuffix
else:
query = ' union all '.join(['('+q+')'
for q in unionQueries]) + orderBySuffix
try:
if EXPLAIN_INFO:
cursor.execute("EXPLAIN "+query,tuple(unionQueriesParams))
print query
from pprint import pprint
pprint(cursor.fetchall())
cursor.execute(query,tuple(unionQueriesParams))
except __HOLE__,e:
print "## Query ##\n",query
print "## Parameters ##\n",unionQueriesParams
raise e
if fetchall:
qRT = cursor.fetchall()
else:
qRT = cursor.fetchone()
return qRT
|
ValueError
|
dataset/ETHPy150Open RDFLib/rdfextras/rdfextras/store/FOPLRelationalModel/BinaryRelationPartition.py/PatternResolution
|
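The query builder emits one SELECT per relevant partition, joins them with `union all`, and appends a single trailing ORDER BY that applies to the combined rows, with the first SELECT fixing the result column types as the docstring notes. A toy standalone illustration of that query shape; sqlite3 stands in for the MySQL backend the original targets:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE part_a (s TEXT, p TEXT, o TEXT);
    CREATE TABLE part_b (s TEXT, p TEXT, o TEXT);
    INSERT INTO part_a VALUES ('s1', 'p1', 'o2');
    INSERT INTO part_b VALUES ('s1', 'p1', 'o1');
""")
# One sub-select per "partition", a single trailing ORDER BY over the union.
query = ("SELECT s, p, o FROM part_a WHERE s = ?"
         " UNION ALL "
         "SELECT s, p, o FROM part_b WHERE s = ?"
         " ORDER BY s, p, o")
print(conn.execute(query, ("s1", "s1")).fetchall())
# prints [('s1', 'p1', 'o1'), ('s1', 'p1', 'o2')]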
6,478
|
def _on_raw(func_name):
"""
Like query_super, but makes the operation run on the raw string.
"""
def wrapped(self, *args, **kwargs):
args = list(args)
try:
string = args.pop(0)
if hasattr(string, '_raw_string'):
args.insert(0, string.raw())
else:
args.insert(0, string)
except __HOLE__:
pass
result = getattr(self._raw_string, func_name)(*args, **kwargs)
if isinstance(result, basestring):
return ANSIString(result, decoded=True)
return result
return wrapped
|
IndexError
|
dataset/ETHPy150Open evennia/evennia/evennia/utils/ansi.py/_on_raw
|
6,479
|
def _slice(self, slc):
"""
This function takes a slice() object.
Slices have to be handled specially. Not only are they able to specify
a start and end with [x:y], but many forget that they can also specify
an interval with [x:y:z]. As a result, not only do we have to track
the ANSI Escapes that have played before the start of the slice, we
must also replay any in these intervals, should they exist.
Thankfully, slicing the _char_indexes table gives us the actual
indexes that need slicing in the raw string. We can check between
those indexes to figure out what escape characters need to be
replayed.
"""
slice_indexes = self._char_indexes[slc]
# If it's the end of the string, we need to append final color codes.
if not slice_indexes:
return ANSIString('')
try:
string = self[slc.start]._raw_string
except IndexError:
return ANSIString('')
last_mark = slice_indexes[0]
# Check between the slice intervals for escape sequences.
i = None
for i in slice_indexes[1:]:
for index in range(last_mark, i):
if index in self._code_indexes:
string += self._raw_string[index]
last_mark = i
try:
string += self._raw_string[i]
except __HOLE__:
pass
if i is not None:
append_tail = self._get_interleving(self._char_indexes.index(i) + 1)
else:
append_tail = ''
return ANSIString(string + append_tail, decoded=True)
|
IndexError
|
dataset/ETHPy150Open evennia/evennia/evennia/utils/ansi.py/ANSIString._slice
|
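The slice logic leans on two parallel index tables -- raw-string positions of printable characters versus positions of escape-code bytes -- and replays any codes that precede or interleave the selected characters. A stripped-down sketch of that bookkeeping with hand-built tables (all names and the tables themselves are illustrative):

raw = "\x1b[31mred\x1b[0m"
code_indexes = [0, 1, 2, 3, 4, 8, 9, 10, 11]  # escape-sequence bytes
char_indexes = [5, 6, 7]                      # 'r', 'e', 'd'

def replay_slice(start, stop):
    picked = char_indexes[start:stop]
    if not picked:
        return ""
    out = ""
    # Walk the raw string up to the last picked character, keeping
    # every escape byte plus the characters the slice selected.
    for pos in range(picked[-1] + 1):
        if pos in code_indexes or pos in picked:
            out += raw[pos]
    return out

print(repr(replay_slice(1, 3)))  # prints '\x1b[31med' -- codes replayed before 'ed'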
6,480
|
def __getitem__(self, item):
"""
Gateway for slices and getting specific indexes in the ANSIString. If
this is a regexable ANSIString, it will get the data from the raw
string instead, bypassing ANSIString's intelligent escape skipping,
for reasons explained in the __new__ method's docstring.
"""
if isinstance(item, slice):
# Slices must be handled specially.
return self._slice(item)
try:
self._char_indexes[item]
except __HOLE__:
raise IndexError("ANSIString Index out of range")
# Get character codes after the index as well.
if self._char_indexes[-1] == self._char_indexes[item]:
append_tail = self._get_interleving(item + 1)
else:
append_tail = ''
item = self._char_indexes[item]
clean = self._raw_string[item]
result = ''
# Get the character they're after, and replay all escape sequences
# previous to it.
for index in range(0, item + 1):
if index in self._code_indexes:
result += self._raw_string[index]
return ANSIString(result + clean + append_tail, decoded=True)
|
IndexError
|
dataset/ETHPy150Open evennia/evennia/evennia/utils/ansi.py/ANSIString.__getitem__
|
6,481
|
def _get_interleving(self, index):
"""
Get the code characters from the given slice end to the next
character.
"""
try:
index = self._char_indexes[index - 1]
except __HOLE__:
return ''
s = ''
while True:
index += 1
if index in self._char_indexes:
break
elif index in self._code_indexes:
s += self._raw_string[index]
else:
break
return s
|
IndexError
|
dataset/ETHPy150Open evennia/evennia/evennia/utils/ansi.py/ANSIString._get_interleving
|
6,482
|
def _filler(self, char, amount):
"""
Generate a line of characters in a more efficient way than just adding
ANSIStrings.
"""
if not isinstance(char, ANSIString):
line = char * amount
return ANSIString(
char * amount, code_indexes=[], char_indexes=list(range(0, len(line))),
clean_string=char)
try:
start = char._code_indexes[0]
except __HOLE__:
start = None
end = char._char_indexes[0]
prefix = char._raw_string[start:end]
postfix = char._raw_string[end + 1:]
line = char._clean_string * amount
code_indexes = [i for i in range(0, len(prefix))]
length = len(prefix) + len(line)
code_indexes.extend([i for i in range(length, length + len(postfix))])
char_indexes = self._shifter(range(0, len(line)), len(prefix))
raw_string = prefix + line + postfix
return ANSIString(
raw_string, clean_string=line, char_indexes=char_indexes,
code_indexes=code_indexes)
|
IndexError
|
dataset/ETHPy150Open evennia/evennia/evennia/utils/ansi.py/ANSIString._filler
|
6,483
|
def to_internal_value(self, value):
try:
year, month = [int(el) for el in value.split('.')]
except __HOLE__:
        raise serializers.ValidationError('Value "{}" should be in format YYYY.MM'.format(value))
if not 0 < month < 13:
raise serializers.ValidationError('Month has to be from 1 to 12')
return year, month
|
ValueError
|
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/cost_tracking/serializers.py/YearMonthField.to_internal_value
|
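The same parse-and-range check works outside DRF; a plain-Python sketch of the field's rules:

def parse_year_month(value):
    """Parse 'YYYY.MM' into (year, month), mirroring YearMonthField."""
    try:
        year, month = [int(el) for el in value.split(".")]
    except ValueError:
        raise ValueError('Value "{0}" should be in format YYYY.MM'.format(value))
    if not 0 < month < 13:
        raise ValueError("Month has to be from 1 to 12")
    return year, month

print(parse_year_month("2024.07"))  # prints (2024, 7)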
6,484
|
def listen(self, timeout=10):
"""
Listen for incoming messages. Timeout is used to check if the server must be switched off.
:param timeout: Socket Timeout in seconds
"""
self._socket.settimeout(float(timeout))
while not self.stopped.isSet():
try:
data, client_address = self._socket.recvfrom(4096)
if len(client_address) > 2:
client_address = (client_address[0], client_address[1])
except socket.timeout:
continue
try:
serializer = Serializer()
message = serializer.deserialize(data, client_address)
if isinstance(message, int):
logger.error("receive_datagram - BAD REQUEST")
rst = Message()
rst.destination = client_address
rst.type = defines.Types["RST"]
rst.code = message
rst.mid = self._messageLayer._current_mid
self._messageLayer._current_mid += 1 % 65535
self.send_datagram(rst)
continue
logger.debug("receive_datagram - " + str(message))
if isinstance(message, Request):
transaction = self._messageLayer.receive_request(message)
if transaction.request.duplicated and transaction.completed:
logger.debug("message duplicated, transaction completed")
if transaction.response is not None:
self.send_datagram(transaction.response)
continue
elif transaction.request.duplicated and not transaction.completed:
logger.debug("message duplicated, transaction NOT completed")
self._send_ack(transaction)
continue
args = (transaction, )
t = threading.Thread(target=self.receive_request, args=args)
t.start()
# self.receive_datagram(data, client_address)
elif isinstance(message, Response):
logger.error("Received response from %s", message.source)
else: # is Message
transaction = self._messageLayer.receive_empty(message)
if transaction is not None:
with transaction:
self._blockLayer.receive_empty(message, transaction)
self._observeLayer.receive_empty(message, transaction)
except __HOLE__:
print "Exception with Executor"
self._socket.close()
|
RuntimeError
|
dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/server/coap.py/CoAP.listen
|
6,485
|
def add_resource(self, path, resource):
"""
Helper function to add resources to the resource directory during server initialization.
:param path: the path for the new created resource
:type resource: Resource
:param resource: the resource to be added
"""
assert isinstance(resource, Resource)
path = path.strip("/")
paths = path.split("/")
actual_path = ""
i = 0
for p in paths:
i += 1
actual_path += "/" + p
try:
res = self.root[actual_path]
except __HOLE__:
res = None
if res is None:
if len(paths) != i:
return False
resource.path = actual_path
self.root[actual_path] = resource
return True
|
KeyError
|
dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/server/coap.py/CoAP.add_resource
|
6,486
|
def _retransmit(self, transaction, message, future_time, retransmit_count):
"""
Thread function to retransmit the message in the future
:param transaction: the transaction that owns the message that needs retransmission
:param message: the message that needs the retransmission task
:param future_time: the amount of time to wait before a new attempt
:param retransmit_count: the number of retransmissions
"""
with transaction:
while retransmit_count < defines.MAX_RETRANSMIT and (not message.acknowledged and not message.rejected) \
and not self.stopped.isSet():
transaction.retransmit_stop.wait(timeout=future_time)
if not message.acknowledged and not message.rejected and not self.stopped.isSet():
retransmit_count += 1
future_time *= 2
self.send_datagram(message)
if message.acknowledged or message.rejected:
message.timeouted = False
else:
logger.warning("Give up on message {message}".format(message=message.line_print))
message.timeouted = True
if message.observe is not None:
self._observeLayer.remove_subscriber(message)
try:
self.to_be_stopped.remove(transaction.retransmit_stop)
except __HOLE__:
pass
transaction.retransmit_stop = None
transaction.retransmit_thread = None
|
ValueError
|
dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/server/coap.py/CoAP._retransmit
|
6,487
|
def updateVersion(self, versionNumber):
""" updateVersion(versionNumber: int) -> None
Update the property page of the version
"""
self.versionNumber = versionNumber
self.versionNotes.updateVersion(versionNumber)
self.versionThumbs.updateVersion(versionNumber)
self.versionMashups.updateVersion(versionNumber)
if self.controller:
vistrail = self.controller.vistrail
if self.use_custom_colors:
custom_color = vistrail.get_action_annotation(versionNumber,
custom_color_key)
if custom_color is not None:
try:
custom_color = parse_custom_color(custom_color.value)
custom_color = QtGui.QColor(*custom_color)
except __HOLE__, e:
debug.warning("Version %r has invalid color "
"annotation (%s)" % (versionNumber, e))
custom_color = None
self.customColor.setColor(custom_color)
if vistrail.actionMap.has_key(versionNumber):
# Follow upgrades forward to find tag
tag = self.controller.get_tag(versionNumber)
if getattr(get_vistrails_configuration(), 'hideUpgrades', True):
base_ver = vistrail.get_base_upgrade_version(versionNumber)
else:
base_ver = versionNumber
action = vistrail.actionMap[base_ver]
self.tagEdit.setText(tag)
self.userEdit.setText(action.user)
self.dateEdit.setText(action.date)
if base_ver != versionNumber:
version_text = '%s (%s)' % (versionNumber, base_ver)
else:
version_text = '%s' % base_ver
self.idEdit.setText(version_text)
self.tagEdit.setEnabled(True)
return
else:
self.tagEdit.setEnabled(False)
self.tagReset.setEnabled(False)
self.tagEdit.setText('')
self.userEdit.setText('')
self.dateEdit.setText('')
self.idEdit.setText('')
|
ValueError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/gui/version_prop.py/QVersionProp.updateVersion
|
6,488
|
def pop(self, key, default=__marker):
"""D.pop(k[,d]) -> v, remove specified key and return the value.
If key is not found, d is returned if given, otherwise KeyError is
raised.
"""
# Using the MutableMapping function directly fails due to the private
# marker.
# Using ordinary dict.pop would expose the internal structures.
# So let's reinvent the wheel.
try:
value = self[key]
except __HOLE__:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
|
KeyError
|
dataset/ETHPy150Open sigmavirus24/betamax/betamax/headers.py/HTTPHeaderDict.pop
|
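The private `__marker` object is a sentinel that distinguishes "no default supplied" from "default is None", the same device dict.pop uses internally. A compact sketch of the pattern:

_MARKER = object()  # Unique sentinel no caller-supplied default can equal.

class Bag(object):
    def __init__(self):
        self._data = {}

    def pop(self, key, default=_MARKER):
        try:
            value = self._data[key]
        except KeyError:
            if default is _MARKER:
                raise       # No default given: let KeyError propagate.
            return default  # An explicit default, even None, is returned.
        del self._data[key]
        return value

print(Bag().pop("missing", None))  # prints None instead of raising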
6,489
|
def discard(self, key):
try:
del self[key]
except __HOLE__:
pass
|
KeyError
|
dataset/ETHPy150Open sigmavirus24/betamax/betamax/headers.py/HTTPHeaderDict.discard
|
6,490
|
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
try:
vals = self._container[key.lower()]
except __HOLE__:
return []
else:
if isinstance(vals, tuple):
return [vals[1]]
else:
return vals[1:]
# Backwards compatibility for httplib
|
KeyError
|
dataset/ETHPy150Open sigmavirus24/betamax/betamax/headers.py/HTTPHeaderDict.getlist
|
6,491
|
def build_split_list(self, expr):
'''
Take the max_char function and break up an expression list based on
the character limits of individual items
'''
if isinstance(expr, str):
expr = expr.split(',')
expr.sort()
new_list = []
running_total = 0
position = 0
for range in expr:
running_total += len(range) + 1
if running_total > self.max_char:
running_total = 0
position += 1
try:
new_list[position].append(range)
except (AttributeError, __HOLE__):
new_list.append([range,])
return new_list
|
IndexError
|
dataset/ETHPy150Open linkedin/sysops-api/seco/range/__init__.py/Range.build_split_list
|
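Restated without the try/except append trick, the loop packs sorted items into sub-lists whose joined length stays under the character limit; note that, as in the original, the counter resets to zero when a chunk overflows rather than restarting from the overflowing item's length. A standalone sketch:

def split_by_char_limit(items, max_char):
    chunks, running = [[]], 0
    for item in sorted(items):
        running += len(item) + 1  # +1 for the joining comma
        if running > max_char:
            running = 0           # Reset, exactly as build_split_list does.
            chunks.append([])
        chunks[-1].append(item)
    return chunks

print(split_by_char_limit(["host1", "host2", "host10"], max_char=12))
# prints [['host1'], ['host10', 'host2']]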
6,492
|
def conjugate_row(row, K):
"""
Returns the conjugate of a row element-wise
Examples
========
>>> from sympy.matrices.densetools import conjugate_row
>>> from sympy import ZZ
>>> a = [ZZ(3), ZZ(2), ZZ(6)]
>>> conjugate_row(a, ZZ)
[3, 2, 6]
"""
result = []
for r in row:
try:
result.append(r.conjugate())
except __HOLE__:
result.append(r)
return result
|
AttributeError
|
dataset/ETHPy150Open sympy/sympy/sympy/matrices/densetools.py/conjugate_row
|
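The AttributeError fallback is plain duck typing -- conjugate the element if it knows how, otherwise pass it through. An equivalent getattr-based sketch:

def conjugate_row_sketch(row):
    # Elements with a .conjugate() method (ints, complex numbers) are
    # conjugated; anything without one is returned unchanged.
    return [getattr(r, "conjugate", lambda: r)() for r in row]

print(conjugate_row_sketch([3, 2 + 1j]))  # prints [3, (2-1j)]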
6,493
|
def short(self, url):
params = {'url': url}
response = self._post(self.api_url, data=params)
if response.ok:
try:
data = response.json()
except __HOLE__:
raise ShorteningErrorException('There was an error shortening'
' this url - {0}'.format(
response.content))
return data['meta']['rdd_url']
raise ShorteningErrorException('There was an error shortening this '
'url - {0}'.format(response.content))
|
ValueError
|
dataset/ETHPy150Open ellisonleao/pyshorteners/pyshorteners/shorteners/readability.py/Readability.short
|
6,494
|
def expand(self, url):
url_id = url.split('/')[-1]
api_url = '{0}{1}'.format(self.api_url, url_id)
response = self._get(api_url)
if response.ok:
try:
data = response.json()
except __HOLE__ as e:
raise ExpandingErrorException('There was an error expanding'
' this url - {0}'.format(e))
return data['meta']['full_url']
raise ExpandingErrorException('There was an error expanding'
' this url - {0}'.format(
response.content))
|
ValueError
|
dataset/ETHPy150Open ellisonleao/pyshorteners/pyshorteners/shorteners/readability.py/Readability.expand
|
6,495
|
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase._pre_setup, values may be
# overridden several times; cumulate changes.
value = self.options[name]
except __HOLE__:
value = list(getattr(settings, name, []))
for action, items in operations.items():
            # items may be a single value or an iterable.
if isinstance(items, six.string_types):
items = [items]
if action == 'append':
value = value + [item for item in items if item not in value]
elif action == 'prepend':
value = [item for item in items if item not in value] + value
elif action == 'remove':
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super(modify_settings, self).enable()
|
KeyError
|
dataset/ETHPy150Open jazzband/django-pipeline/tests/utils.py/modify_settings.enable
|
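The merge rules are independent of Django settings; a standalone sketch of the append/prepend/remove semantics with de-duplication:

def apply_ops(value, operations):
    for action, items in operations.items():
        if isinstance(items, str):
            items = [items]  # A single value acts like a one-item list.
        if action == "append":
            value = value + [i for i in items if i not in value]
        elif action == "prepend":
            value = [i for i in items if i not in value] + value
        elif action == "remove":
            value = [i for i in value if i not in items]
        else:
            raise ValueError("Unsupported action: %s" % action)
    return value

print(apply_ops(["a", "b"], {"append": ["b", "c"], "remove": "a"}))
# prints ['b', 'c'] -- 'b' is not duplicated, 'a' is removed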
6,496
|
def run(self):
self._push_rpc = pushrpc.PushRPC(self._push_event_callback, self._args)
# These modules should work 100% of the time -
# they don't need special hardware, or root
self._proxies = {
'hue': hue.Hue(self._args.hue_scan_interval_secs,
self._device_event_callback),
'wemo': wemo.Wemo(self._args.hue_scan_interval_secs,
self._device_event_callback),
'sonos': sonos.Sonos(self._args.hue_scan_interval_secs,
self._device_event_callback),
}
try:
from pi import network
self._proxies['network'] = network.NetworkMonitor(
self._device_event_callback,
self._args.network_scan_interval_secs,
self._args.network_scan_timeout_secs)
except:
logging.debug('Exception was:', exc_info=sys.exc_info())
logging.error('Failed to initialize network module - did you '
'run as root?')
# This module needs a 433Mhz transmitter, wiringPi etc, so might not work
try:
from pi import rfswitch
self._proxies['rfswitch'] = rfswitch.RFSwitch(self._args.rfswtich_pin)
except:
logging.debug('Exception was:', exc_info=sys.exc_info())
logging.error('Failed to initialize rfswitch module - have you '
'installed rcswitch?')
# This module needs a zwave usb stick
try:
from pi import zwave
self._proxies['zwave'] = zwave.ZWave(
self._args.zwave_device, self._device_event_callback)
except:
logging.debug('Exception was:', exc_info=sys.exc_info())
logging.error('Failed to initialize zwave module - have you '
'installed libopenzwave?')
# Just sit in a loop sleeping for now
try:
events.run()
except __HOLE__:
logging.info('Shutting down')
# Now try and shut everything down gracefully
self._push_rpc.stop()
for proxy in self._proxies.itervalues():
proxy.stop()
for proxy in self._proxies.itervalues():
proxy.join()
|
KeyboardInterrupt
|
dataset/ETHPy150Open tomwilkie/awesomation/src/pi/control.py/Control.run
|
6,497
|
def read_ints(self, shape, order='C', full_record=False):
"""
Returns integers as a :mod:`numpy` array of `shape`.
shape: tuple(int)
Dimensions of returned array.
order: string
If 'C', the data is in row-major order.
If 'Fortran', the data is in column-major order.
full_record: bool
If True, then read surrounding recordmarks.
Only meaningful if `unformatted`.
"""
reshape = False
count = 1
try:
for size in shape:
count *= size
reshape = True
except __HOLE__:
count = shape
if full_record and self.unformatted:
reclen = self.read_recordmark()
if reclen != self.reclen_ints(count):
raise RuntimeError('unexpected recordlength %d' % reclen)
sep = '' if self.binary else ' '
dtype = numpy.int64 if self.integer_8 else numpy.int32
data = numpy.fromfile(self.file, dtype=dtype, count=count, sep=sep)
if self.need_byteswap:
data.byteswap(True)
if full_record and self.unformatted:
reclen2 = self.read_recordmark()
if reclen2 != reclen:
raise RuntimeError('mismatched recordlength %d vs. %d'
% (reclen2, reclen))
return data.reshape(shape, order=order) if reshape else data
|
TypeError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.util/src/openmdao/util/stream.py/Stream.read_ints
|
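In unformatted Fortran files each record is framed by leading and trailing length markers, which is what the recordmark checks above enforce. A minimal sketch of reading one framed record from an in-memory stream, assuming 4-byte little-endian markers:

import io
import struct
import numpy as np

payload = np.arange(5, dtype=np.int32)
marker = struct.pack("<i", payload.nbytes)
stream = io.BytesIO(marker + payload.tobytes() + marker)

(n_bytes,) = struct.unpack("<i", stream.read(4))   # leading recordmark
data = np.frombuffer(stream.read(n_bytes), dtype=np.int32)
(tail,) = struct.unpack("<i", stream.read(4))      # trailing recordmark
assert tail == n_bytes, "mismatched recordlength"
print(data)  # prints [0 1 2 3 4]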
6,498
|
def read_floats(self, shape, order='C', full_record=False):
"""
Returns floats as a :mod:`numpy` array of `shape`.
shape: tuple(int)
Dimensions of returned array.
order: string
If 'C', the data is in row-major order.
If 'Fortran', the data is in column-major order.
full_record: bool
If True, then read surrounding recordmarks.
Only meaningful if `unformatted`.
"""
reshape = False
count = 1
try:
for size in shape:
count *= size
reshape = True
except __HOLE__:
count = shape
if full_record and self.unformatted:
reclen = self.read_recordmark()
if reclen != self.reclen_floats(count):
raise RuntimeError('unexpected recordlength %d' % reclen)
sep = '' if self.binary else ' '
dtype = numpy.float32 if self.single_precision else numpy.float64
data = numpy.fromfile(self.file, dtype=dtype, count=count, sep=sep)
if self.need_byteswap:
data.byteswap(True)
if full_record and self.unformatted:
reclen2 = self.read_recordmark()
if reclen2 != reclen:
raise RuntimeError('mismatched recordlength %d vs. %d'
% (reclen2, reclen))
return data.reshape(shape, order=order) if reshape else data
|
TypeError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.util/src/openmdao/util/stream.py/Stream.read_floats
|
6,499
|
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the uploaded
data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
# Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type.encode('ascii'))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
except (__HOLE__, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, six.text_type):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2**31-4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
|
ValueError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/http/multipartparser.py/MultiPartParser.__init__
|
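The constructor's gatekeeping -- a multipart Content-Type, a valid boundary, a non-negative Content-Length -- can be sketched without Django. The version below uses email.message for header parsing, since the cgi.parse_header used by the original was removed in Python 3.13:

from email.message import Message

def extract_boundary(content_type):
    if not content_type.startswith("multipart/"):
        raise ValueError("Invalid Content-Type: %s" % content_type)
    msg = Message()
    msg["Content-Type"] = content_type
    boundary = msg.get_param("boundary")  # parsed and unquoted
    if not boundary:
        raise ValueError("Invalid boundary in multipart: %r" % boundary)
    return boundary

print(extract_boundary('multipart/form-data; boundary="abc123"'))  # prints abc123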