| Unnamed: 0 (int64, 0-10k) | function (string, 79-138k chars) | label (stringclasses, 20 values) | info (string, 42-261 chars) |
|---|---|---|---|
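Each row below pairs a Python function whose exception handler has been masked with `__HOLE__` against the exception class (the `label` column) that belongs in that slot; `info` records the source file within the ETHPy150Open corpus. A minimal sketch of reassembling a runnable snippet from a row, assuming rows are available as plain dicts (the loading step itself is not shown in this excerpt):

```python
# Minimal sketch: substitute the masked exception class back into the
# function body. The row dict here is a hand-built stand-in for a real
# dataset row.
def fill_hole(row):
    return row["function"].replace("__HOLE__", row["label"])

row = {
    "function": "try:\n    import json\nexcept __HOLE__:\n    json = None",
    "label": "ImportError",
    "info": "dataset/ETHPy150Open example",
}
print(fill_hole(row))
```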
6,300
|
def setup_threading():
if sys.version_info >= (2, 7):
return
# XXX: On Python 2.5 GAE's threading.local doesn't work correctly
# with subclassing.
try:
from django.utils._threading_local import local
import threading
threading.local = local
except __HOLE__:
pass
|
ImportError
|
dataset/ETHPy150Open django-nonrel/djangoappengine/djangoappengine/boot.py/setup_threading
|
6,301
|
def setup_project(dev_appserver_version):
from djangoappengine.utils import have_appserver, on_production_server
if have_appserver:
# This fixes a pwd import bug for os.path.expanduser().
env_ext['HOME'] = PROJECT_DIR
# The dev_appserver creates a sandbox which restricts access to
# certain modules and builtins in order to emulate the production
# environment. Here we get the subprocess module back into the
# dev_appserver sandbox. This module is just too important for
# development. Also we add the compiler/parser module back and
# enable https connections (seem to be broken on Windows because
# the _ssl module is disallowed).
if not have_appserver and dev_appserver_version == 1:
try:
from google.appengine.tools import dev_appserver
except __HOLE__:
from google.appengine.tools import old_dev_appserver as dev_appserver
try:
# Backup os.environ. It gets overwritten by the
# dev_appserver, but it's needed by the subprocess module.
env = dev_appserver.DEFAULT_ENV
dev_appserver.DEFAULT_ENV = os.environ.copy()
dev_appserver.DEFAULT_ENV.update(env)
# Backup the buffer() builtin. The subprocess module in Python 2.5
# on Linux and OS X needs it, but the dev_appserver
# removes it.
dev_appserver.buffer = buffer
except AttributeError:
logging.warn("Could not patch the default environment. "
"The subprocess module will not work correctly.")
try:
# Allow importing compiler/parser, _ssl (for https),
# _io for Python 2.7 io support on OS X
dev_appserver.HardenedModulesHook._WHITE_LIST_C_MODULES.extend(
('parser', '_ssl', '_io'))
except AttributeError:
logging.warn("Could not patch modules whitelist. the compiler "
"and parser modules will not work and SSL support "
"is disabled.")
elif not on_production_server and dev_appserver_version == 1:
try:
try:
from google.appengine.tools import dev_appserver
except ImportError:
from google.appengine.tools import old_dev_appserver as dev_appserver
# Restore the real subprocess module.
from google.appengine.api.mail_stub import subprocess
sys.modules['subprocess'] = subprocess
# Re-inject the buffer() builtin into the subprocess module.
subprocess.buffer = dev_appserver.buffer
except Exception, e:
logging.warn("Could not add the subprocess module to the "
"sandbox: %s" % e)
os.environ.update(env_ext)
extra_paths = [PROJECT_DIR, os.path.join(os.path.dirname(__file__), 'lib')]
zip_packages_dir = os.path.join(PROJECT_DIR, 'zip-packages')
# We support zipped packages in the common and project folders.
if os.path.isdir(zip_packages_dir):
for zip_package in os.listdir(zip_packages_dir):
extra_paths.append(os.path.join(zip_packages_dir, zip_package))
# App Engine causes main.py to be reloaded if an exception gets
# raised on the first request of a main.py instance, so don't call
# setup_project() multiple times. We ensure this indirectly by
# checking whether we've already modified sys.path.
if len(sys.path) < len(extra_paths) or \
sys.path[:len(extra_paths)] != extra_paths:
for path in extra_paths:
while path in sys.path:
sys.path.remove(path)
sys.path = extra_paths + sys.path
|
ImportError
|
dataset/ETHPy150Open django-nonrel/djangoappengine/djangoappengine/boot.py/setup_project
|
6,302
|
def _get_templated_url(self, template, id, method=None):
try:
id_unicode = unicode(id, "UTF-8")
except __HOLE__:
id_unicode = id
id_utf8 = id_unicode.encode("UTF-8")
md5_of_url = hashlib.md5(id_utf8).hexdigest()
url = template % md5_of_url
return(url)
|
TypeError
|
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/providers/delicious.py/Delicious._get_templated_url
|
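The helper above normalizes `id` to UTF-8, hashes it, and splices the digest into a `%s` template. A standalone sketch of the same transformation, with an illustrative template and identifier:

```python
# Illustrative re-creation of the hash-and-template step above.
import hashlib

template = "http://example.org/lookup/%s"       # hypothetical template
id_utf8 = u"10.1234/example".encode("UTF-8")    # already-normalized id
print(template % hashlib.md5(id_utf8).hexdigest())
```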
6,303
|
def find_function(funcname, filename):
cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
try:
fp = open(filename)
except __HOLE__:
return None
# consumer of this info expects the first line to be 1
lineno = 1
answer = None
while 1:
line = fp.readline()
if line == '':
break
if cre.match(line):
answer = funcname, filename, lineno
break
lineno = lineno + 1
fp.close()
return answer
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
|
IOError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/find_function
|
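`find_function` returns a `(funcname, filename, lineno)` triple with 1-based line numbers, or `None` when the file cannot be opened. A hedged self-test, assuming the function above is in scope (the module written to disk is a throwaway):

```python
# Hedged usage sketch: write a tiny module, then locate a def by name.
with open("demo_mod.py", "w") as f:
    f.write("x = 1\n\ndef target(a, b):\n    return a + b\n")

print(find_function("target", "demo_mod.py"))
# -> ('target', 'demo_mod.py', 3)
```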
6,304
|
def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None):
bdb.Bdb.__init__(self, skip=skip)
cmd.Cmd.__init__(self, completekey, stdin, stdout)
if stdout:
self.use_rawinput = 0
self.prompt = '(Pdb) '
self.aliases = {}
self.mainpyfile = ''
self._wait_for_mainpyfile = 0
# Try to load readline if it exists
try:
import readline
except ImportError:
pass
# Read $HOME/.pdbrc and ./.pdbrc
self.rcLines = []
if 'HOME' in os.environ:
envHome = os.environ['HOME']
try:
rcFile = open(os.path.join(envHome, ".pdbrc"))
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
try:
rcFile = open(".pdbrc")
except __HOLE__:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
self.commands = {} # associates a command list to breakpoint numbers
self.commands_doprompt = {} # for each bp num, tells if the prompt
# must be disp. after execing the cmd list
self.commands_silent = {} # for each bp num, tells if the stack trace
# must be disp. after execing the cmd list
self.commands_defining = False # True while in the process of defining
# a command list
self.commands_bnum = None # The breakpoint number for which we are
# defining a list
|
IOError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/Pdb.__init__
|
6,305
|
def handle_command_def(self,line):
"""Handles one command line during command list definition."""
cmd, arg, line = self.parseline(line)
if not cmd:
return
if cmd == 'silent':
self.commands_silent[self.commands_bnum] = True
return # continue to handle other cmd def in the cmd list
elif cmd == 'end':
self.cmdqueue = []
return 1 # end of cmd list
cmdlist = self.commands[self.commands_bnum]
if arg:
cmdlist.append(cmd+' '+arg)
else:
cmdlist.append(cmd)
# Determine if we must stop
try:
func = getattr(self, 'do_' + cmd)
except __HOLE__:
func = self.default
# one of the resuming commands
if func.func_name in self.commands_resuming:
self.commands_doprompt[self.commands_bnum] = False
self.cmdqueue = []
return 1
return
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
|
AttributeError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/Pdb.handle_command_def
|
6,306
|
def do_break(self, arg, temporary = 0):
# break [ ([filename:]lineno | function) [, "condition"] ]
if not arg:
if self.breaks: # There's at least one
print >>self.stdout, "Num Type Disp Enb Where"
for bp in bdb.Breakpoint.bpbynumber:
if bp:
bp.bpprint(self.stdout)
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
print >>self.stdout, '*** ', repr(filename),
print >>self.stdout, 'not found from sys.path'
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError, msg:
print >>self.stdout, '*** Bad lineno:', arg
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except __HOLE__:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe_locals)
except:
func = arg
try:
if hasattr(func, 'im_func'):
func = func.im_func
code = func.func_code
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
print >>self.stdout, '*** The specified object',
print >>self.stdout, repr(arg),
print >>self.stdout, 'is not a function'
print >>self.stdout, 'or was not found along sys.path.'
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err: print >>self.stdout, '***', err
else:
bp = self.get_breaks(filename, line)[-1]
print >>self.stdout, "Breakpoint %d at %s:%d" % (bp.number,
bp.file,
bp.line)
# To be overridden in derived debuggers
|
ValueError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/Pdb.do_break
|
6,307
|
def do_enable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except __HOLE__:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.enable()
|
ValueError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/Pdb.do_enable
|
6,308
|
def do_disable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except __HOLE__:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.disable()
|
ValueError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/Pdb.do_disable
|
6,309
|
def do_condition(self, arg):
# arg is breakpoint number and condition
args = arg.split(' ', 1)
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
cond = args[1]
except:
cond = None
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except __HOLE__:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.cond = cond
if not cond:
print >>self.stdout, 'Breakpoint', bpnum,
print >>self.stdout, 'is now unconditional.'
|
IndexError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/Pdb.do_condition
|
6,310
|
def do_ignore(self,arg):
"""arg is bp number followed by ignore count."""
args = arg.split()
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
count = int(args[1].strip())
except:
count = 0
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except __HOLE__:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.ignore = count
if count > 0:
reply = 'Will ignore next '
if count > 1:
reply = reply + '%d crossings' % count
else:
reply = reply + '1 crossing'
print >>self.stdout, reply + ' of breakpoint %d.' % bpnum
else:
print >>self.stdout, 'Will stop next time breakpoint',
print >>self.stdout, bpnum, 'is reached.'
|
IndexError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/Pdb.do_ignore
|
6,311
|
def do_clear(self, arg):
"""Three possibilities, tried in this order:
clear -> clear all breaks, ask for confirmation
clear file:lineno -> clear all breaks at file:lineno
clear bpno bpno ... -> clear breakpoints by number"""
if not arg:
try:
reply = raw_input('Clear all breaks? ')
except EOFError:
reply = 'no'
reply = reply.strip().lower()
if reply in ('y', 'yes'):
self.clear_all_breaks()
return
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except __HOLE__:
err = "Invalid line number (%s)" % arg
else:
err = self.clear_break(filename, lineno)
if err: print >>self.stdout, '***', err
return
numberlist = arg.split()
for i in numberlist:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
err = self.clear_bpbynumber(i)
if err:
print >>self.stdout, '***', err
else:
print >>self.stdout, 'Deleted breakpoint', i
|
ValueError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/Pdb.do_clear
|
6,312
|
def do_jump(self, arg):
if self.curindex + 1 != len(self.stack):
print >>self.stdout, "*** You can only jump within the bottom frame"
return
try:
arg = int(arg)
except ValueError:
print >>self.stdout, "*** The 'jump' command requires a line number."
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except __HOLE__, e:
print >>self.stdout, '*** Jump failed:', e
|
ValueError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/Pdb.do_jump
|
6,313
|
def do_list(self, arg):
self.lastcmd = 'list'
last = None
if arg:
try:
x = eval(arg, {}, {})
if type(x) == type(()):
first, last = x
first = int(first)
last = int(last)
if last < first:
# Assume it's a count
last = first + last
else:
first = max(1, int(x) - 5)
except:
print >>self.stdout, '*** Error in argument:', repr(arg)
return
elif self.lineno is None:
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno,
self.curframe.f_globals)
if not line:
print >>self.stdout, '[EOF]'
break
else:
s = repr(lineno).rjust(3)
if len(s) < 4: s = s + ' '
if lineno in breaklist: s = s + 'B'
else: s = s + ' '
if lineno == self.curframe.f_lineno:
s = s + '->'
print >>self.stdout, s + '\t' + line,
self.lineno = lineno
except __HOLE__:
pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/Pdb.do_list
|
6,314
|
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except __HOLE__:
pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/Pdb.print_stack_trace
|
6,315
|
def main():
if not sys.argv[1:] or sys.argv[1] in ("--help", "-h"):
print "usage: pdb.py scriptfile [arg] ..."
sys.exit(2)
mainpyfile = sys.argv[1] # Get script filename
if not os.path.exists(mainpyfile):
print 'Error:', mainpyfile, 'does not exist'
sys.exit(1)
del sys.argv[0] # Hide "pdb.py" from argument list
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command
# which allows explicit specification of command line arguments.
pdb = Pdb()
while True:
try:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print "The program finished and will be restarted"
except Restart:
print "Restarting", mainpyfile, "with arguments:"
print "\t" + " ".join(sys.argv[1:])
except __HOLE__:
# In most cases SystemExit does not warrant a post-mortem session.
print "The program exited via sys.exit(). Exit status: ",
print sys.exc_info()[1]
except:
traceback.print_exc()
print "Uncaught exception. Entering post mortem debugging"
print "Running 'cont' or 'step' will restart the program"
t = sys.exc_info()[2]
pdb.interaction(None, t)
print "Post mortem debugger finished. The " + mainpyfile + \
" will be restarted"
# When invoked as main program, invoke the debugger on a script
|
SystemExit
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/pdb.py/main
|
6,316
|
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except __HOLE__:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/contrib/sessions/backends/base.py/SessionBase._get_session
|
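The `try`/`except AttributeError` around `self._session_cache` is the classic lazy-attribute idiom: the first access falls through to the loader, later accesses hit the cache. A minimal standalone sketch of the same pattern:

```python
# Standalone sketch of the lazy-cache idiom used by _get_session().
class Lazy(object):
    def load(self):
        return {"expensive": "value"}   # stand-in for real loading

    @property
    def data(self):
        try:
            return self._cache
        except AttributeError:          # first access: cache miss
            self._cache = self.load()
            return self._cache

print(Lazy().data)
```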
6,317
|
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except __HOLE__:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/contrib/sessions/backends/base.py/SessionBase.set_expiry
|
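The docstring above enumerates four accepted `value` types. A hedged sketch of the resulting behaviors; it assumes a configured Django project, and the `db` backend is an arbitrary choice here:

```python
# Hedged sketch of set_expiry() semantics per the docstring; requires
# Django settings to be configured before SessionStore is usable.
from datetime import timedelta
from django.contrib.sessions.backends.db import SessionStore

session = SessionStore()
session.set_expiry(3600)               # expire after 1 hour of inactivity
session.set_expiry(0)                  # expire on browser close
session.set_expiry(timedelta(days=7))  # expire at a fixed future time
session.set_expiry(None)               # fall back to the global policy
```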
6,318
|
def catcher():
for i in range(1000):
try:
someFunction()
except __HOLE__:
pass
|
ValueError
|
dataset/ETHPy150Open kayhayen/Nuitka/tests/benchmarks/micro/TryFinallyStopOver.py/catcher
|
6,319
|
def extract_rate_limit(self, response):
"""Extract rate limit info from response/headers.
get it just from the response, so it is relevant to the type of query we are doing"""
try:
self.rate_limit_remaining = int(response.headers['x-rate-limit-remaining'])
self.rate_limit_limit = int(response.headers['x-rate-limit-limit'])
self.rate_limit_reset = epoch(int(response.headers['x-rate-limit-reset'])).datetime
self.twitter_date = parse(response.headers['date']).datetime
logging.debug(
'Twitter rate limit info:: rate-limit: %s, remaining: %s' % (self.rate_limit_limit, self.rate_limit_remaining))
# logging.debug(
# 'Twitter rate limit info:: rate-limit: %s, remaining: %s, '\
# 'reset: %s, current-time: %s' % (self.rate_limit_limit,
# self.rate_limit_remaining, self.rate_limit_reset, self.twitter_date))
except __HOLE__:
pass
|
KeyError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/twitter_paging.py/TwitterPager.extract_rate_limit
|
6,320
|
def paginated_search(self, page=1, page_handler=None,
max_pages=None, **kwargs):
"""Issue search with AppClient up to max_pages.
For kwargs requirements, see docs for birdy AppClient."""
if max_pages is None:
max_pages = self.default_max_pages
response = self.query(**kwargs)
if page_handler:
has_next_page = page_handler(response)
if page < max_pages and has_next_page:
try:
kwargs.update({ k:v for k,v in urlparse.parse_qsl(
response.data.search_metadata.next_results[1:]) })
if int(kwargs['max_id']) > int(kwargs.get('since_id',0)):
# logging.debug('Paginating query: %s' % str(kwargs))
self.paginated_search(page=page+1,
page_handler=page_handler,
max_pages=max_pages, **kwargs)
except __HOLE__:
try:
kwargs['max_id'] = str(response.data[-1]["id"])
# logging.debug('Paginating query: %s' % str(kwargs))
self.paginated_search(page=page+1,
page_handler=page_handler,
max_pages=max_pages, **kwargs)
except (IndexError, TypeError):
logging.debug('error paging, so stop')
else:
# logging.debug('reached max pages or told no next page, so stop')
pass
return response
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/twitter_paging.py/TwitterPager.paginated_search
|
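The pager above hands each response to `page_handler` and recurses while the handler asks for more pages. A hedged sketch of such a handler; the `.statuses` attribute and the commented-out query are assumptions based on birdy's search responses, not confirmed by this excerpt:

```python
# Hedged sketch: a page_handler that prints tweet ids and always asks
# for the next page. The pager/query details below are hypothetical.
def page_handler(response):
    for tweet in response.data.statuses:   # assumed search-response shape
        print(tweet["id"])
    return True  # True -> fetch the next page (up to max_pages)

# pager.paginated_search(q="#python", count=100,
#                        page_handler=page_handler, max_pages=5)
```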
6,321
|
def test_session_timeout(self):
requested_url = '/project/instances/'
request = self.factory.get(requested_url)
try:
timeout = settings.SESSION_TIMEOUT
except __HOLE__:
timeout = 1800
request.session['last_activity'] = int(time.time()) - (timeout + 10)
mw = middleware.HorizonMiddleware()
resp = mw.process_request(request)
self.assertEqual(302, resp.status_code)
self.assertEqual(requested_url, resp.get('Location'))
|
AttributeError
|
dataset/ETHPy150Open CiscoSystems/avos/horizon/test/tests/middleware.py/MiddlewareTests.test_session_timeout
|
6,322
|
def is_storage_local(storage):
"""
Check to see if a file storage is local.
"""
try:
storage.path('test')
except __HOLE__:
return False
return True
|
NotImplementedError
|
dataset/ETHPy150Open SmileyChris/easy-thumbnails/easy_thumbnails/utils.py/is_storage_local
|
6,323
|
def get_modified_time(storage, name):
"""
Get modified time from storage, ensuring the result is a timezone-aware
datetime.
"""
try:
modified_time = storage.modified_time(name)
except OSError:
return 0
except __HOLE__:
return None
if modified_time and timezone.is_naive(modified_time):
if getattr(settings, 'USE_TZ', False):
default_timezone = timezone.get_default_timezone()
return timezone.make_aware(modified_time, default_timezone)
return modified_time
|
NotImplementedError
|
dataset/ETHPy150Open SmileyChris/easy-thumbnails/easy_thumbnails/utils.py/get_modified_time
|
6,324
|
def __init__(self, folder, callback, file_pattern=["*.log"], includeSubFolder=False, excludeFileListFile=None, tail_lines=0,
sizehint=1048576):
"""Arguments:
(str) @folder:
the folder to watch
(callable) @callback:
a function which is called every time one of the file being
watched is updated;
this is called with "filename" and "lines" arguments.
(list) @file_pattern:
only watch files with these file_pattern
(list) @includeSubFolder:
only watch the given folder, or watch all sub-folders under the given folder
(str)excludeFileListFile:
in this list file, it contains the excluded log files. each line is for each excluded log file(absolute file path)
The argument is very useful if we monitor one big folder for long time, it can reduce log scan cost
(int) @tail_lines:
read last N lines from files being watched before starting
(int) @sizehint: passed to file.readlines(), represents an
approximation of the maximum number of bytes to read from
a file on every iteration (as opposed to loading the entire
file in memory until EOF is reached). Defaults to 1MB.
"""
self.folder = os.path.realpath(folder)
self.file_pattern = file_pattern
self.includeSubFolder=includeSubFolder
self.excludeFileListFile=excludeFileListFile
self._files_map = {}
self._callback = callback
self._sizehint = sizehint
assert os.path.isdir(self.folder), self.folder
assert callable(callback), repr(callback)
self.update_files()
for id, file in self._files_map.items():
file.seek(os.path.getsize(file.name)) # EOF
if tail_lines:
try:
lines = self.tail(file.name, tail_lines)
except __HOLE__ as err:
if err.errno != errno.ENOENT:
raise
else:
if lines:
self._callback(file.name, lines)
|
IOError
|
dataset/ETHPy150Open harryliu/edwin/edwinAgent/site_packages/logwatch_glob.py/LogWatcher.__init__
|
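A hedged construction sketch for the watcher above; the folder path is illustrative (it must exist, since `__init__` asserts it) and the callback signature follows the docstring:

```python
# Hedged usage sketch: print every new line appended to *.log files.
def on_new_lines(filename, lines):
    for line in lines:
        print("%s: %s" % (filename, line.rstrip()))

watcher = LogWatcher(
    "/var/log/myapp",            # hypothetical folder to watch
    on_new_lines,
    file_pattern=["*.log"],
    includeSubFolder=True,
    tail_lines=10,               # replay the last 10 lines on startup
)
```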
6,325
|
def __get__(self, instance, owner=None):
try:
value = FieldProperty.__get__(self, instance, owner)
except __HOLE__:
value = None
if not value:
return None
if instance is None:
return value
return self._upload_type(value)
|
AttributeError
|
dataset/ETHPy150Open amol-/depot/depot/fields/ming.py/UploadedFileProperty.__get__
|
6,326
|
@staticmethod
def _close_nodes(nodepaths, get_node):
for nodepath in nodepaths:
try:
node = get_node(nodepath)
except __HOLE__:
pass
else:
if not node._v_isopen or node._v__deleting:
continue
try:
# Avoid descendent nodes to also iterate over
# their descendents, which are already to be
# closed by this loop.
if hasattr(node, '_f_get_child'):
node._g_close()
else:
node._f_close()
del node
except ClosedNodeError:
#import traceback
#type_, value, tb = sys.exc_info()
#exception_dump = ''.join(
# traceback.format_exception(type_, value, tb))
#warnings.warn(
# "A '%s' exception occurred trying to close a node "
# "that was supposed to be open.\n"
# "%s" % (type_.__name__, exception_dump))
pass
|
KeyError
|
dataset/ETHPy150Open PyTables/PyTables/tables/file.py/NodeManager._close_nodes
|
6,327
|
def __str__(self):
"""Return a short string representation of the object tree.
Examples
--------
::
>>> f = tables.open_file('data/test.h5')
>>> print(f)
data/test.h5 (File) 'Table Benchmark'
Last modif.: 'Mon Sep 20 12:40:47 2004'
Object Tree:
/ (Group) 'Table Benchmark'
/tuple0 (Table(100,)) 'This is the table title'
/group0 (Group) ''
/group0/tuple1 (Table(100,)) 'This is the table title'
/group0/group1 (Group) ''
/group0/group1/tuple2 (Table(100,)) 'This is the table title'
/group0/group1/group2 (Group) ''
"""
if not self.isopen:
return "<closed File>"
# Print all the nodes (Group and Leaf objects) on object tree
try:
date = time.asctime(time.localtime(os.stat(self.filename)[8]))
except __HOLE__:
# in-memory file
date = ""
astring = self.filename + ' (File) ' + repr(self.title) + '\n'
# astring += 'root_uep :=' + repr(self.root_uep) + '; '
# astring += 'format_version := ' + self.format_version + '\n'
# astring += 'filters :=' + repr(self.filters) + '\n'
astring += 'Last modif.: ' + repr(date) + '\n'
astring += 'Object Tree: \n'
for group in self.walk_groups("/"):
astring += str(group) + '\n'
for kind in self._node_kinds[1:]:
for node in self.list_nodes(group, kind):
astring += str(node) + '\n'
return astring
|
OSError
|
dataset/ETHPy150Open PyTables/PyTables/tables/file.py/File.__str__
|
6,328
|
def dispatch(self, json_data, **kwargs):
'''
Verifies that the passed json encoded string
is in the correct form according to the json-rpc spec
and calls the appropriate method
Checks:
1. that the string encodes into a javascript Object (dictionary)
2. that 'method' and 'params' are present
3. 'method' must be a javascript String type
4. 'params' must be a javascript Array type
Returns:
the JSON encoded response
'''
try:
# attempt to do a json decode on the data
jsondict = json.loads(json_data)
except ValueError:
return self._encode_result('', None,
{'message': 'JSON decoding error',
'code': JSONRPC_PARSE_ERROR})
if not isinstance(jsondict, dict):
# verify the json data was a javascript Object which gets decoded
# into a python dictionary
return self._encode_result('', None,
{'message': 'Cannot decode to a javascript Object',
'code': JSONRPC_BAD_CALL_ERROR})
if not 'method' in jsondict or not 'params' in jsondict:
# verify the dictionary contains the correct keys
# for a proper jsonrpc call
return self._encode_result(jsondict.get('id', ''), None,
{'message': "JSONRPC requests must have the "+ \
"attributes 'method' and 'params'",
'code': JSONRPC_BAD_CALL_ERROR})
if not isinstance(jsondict['method'], StringTypes):
return self._encode_result(jsondict.get('id', ''), None,
{'message': 'method must be a javascript String',
'code': JSONRPC_BAD_CALL_ERROR})
if not isinstance(jsondict['params'], list):
return self._encode_result(jsondict.get('id', ''), None,
{'message': 'params must be a javascript Array',
'code': JSONRPC_BAD_CALL_ERROR})
if jsondict['method'] in self.methods:
try:
try:
result = self.methods[jsondict.get('method')] \
(*jsondict.get('params'), **kwargs)
except __HOLE__:
# Catch unexpected keyword argument error
result = self.methods[jsondict.get('method')] \
(*jsondict.get('params'))
except Exception, e:
# this catches any error from the called method raising
# an exception to the wrong number of params being sent
# to the method.
return self._encode_result(jsondict.get('id', ''), None,
{'message': repr(e),
'code': JSONRPC_SERVICE_ERROR})
return self._encode_result(jsondict.get('id', ''), result, None)
else:
return self._encode_result(jsondict.get('id', ''), None,
{'message': 'method "' + jsondict['method'] + \
'" is not supported',
'code': JSONRPC_PROCEDURE_NOT_FOUND_ERROR})
|
TypeError
|
dataset/ETHPy150Open fp7-ofelia/ocf/expedient/src/python/expedient/common/rpc4django/jsonrpcdispatcher.py/JSONRPCDispatcher.dispatch
|
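`dispatch` enforces the four checks listed in its docstring before routing the call. A sketch of payload shapes it accepts and rejects; the dispatcher instance and its registered `echo` method are hypothetical:

```python
# Payload shapes against the dispatch() checks above.
import json

good       = json.dumps({"id": 1, "method": "echo", "params": ["hi"]})
bad_params = json.dumps({"id": 2, "method": "echo", "params": "hi"})  # not an Array
bad_shape  = json.dumps([1, 2, 3])                                    # not an Object
missing    = json.dumps({"id": 3, "method": "echo"})                  # no 'params'

# dispatcher.dispatch(good)       -> result response
# dispatcher.dispatch(bad_params) -> JSONRPC_BAD_CALL_ERROR
# dispatcher.dispatch(bad_shape)  -> JSONRPC_BAD_CALL_ERROR
# dispatcher.dispatch(missing)    -> JSONRPC_BAD_CALL_ERROR
```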
6,329
|
def get_image_field_class():
try:
from sorl.thumbnail import ImageField
except __HOLE__:
from django.db.models import ImageField
return ImageField
|
ImportError
|
dataset/ETHPy150Open hovel/pybbm/pybb/compat.py/get_image_field_class
|
6,330
|
def get_image_field_full_name():
try:
from sorl.thumbnail import ImageField
name = 'sorl.thumbnail.fields.ImageField'
except __HOLE__:
from django.db.models import ImageField
name = 'django.db.models.fields.files.ImageField'
return name
|
ImportError
|
dataset/ETHPy150Open hovel/pybbm/pybb/compat.py/get_image_field_full_name
|
6,331
|
def get_atomic_func():
try:
from django.db.transaction import atomic as atomic_func
except __HOLE__:
from django.db.transaction import commit_on_success as atomic_func
return atomic_func
|
ImportError
|
dataset/ETHPy150Open hovel/pybbm/pybb/compat.py/get_atomic_func
|
6,332
|
def get_paginator_class():
try:
from pure_pagination import Paginator
pure_pagination = True
except __HOLE__:
# the simplest emulation of django-pure-pagination behavior
from django.core.paginator import Paginator, Page
class PageRepr(int):
def querystring(self):
return 'page=%s' % self
Page.pages = lambda self: [PageRepr(i) for i in range(1, self.paginator.num_pages + 1)]
pure_pagination = False
return Paginator, pure_pagination
|
ImportError
|
dataset/ETHPy150Open hovel/pybbm/pybb/compat.py/get_paginator_class
|
6,333
|
def get_previous_transaction(self):
siblings = StockTransaction.get_ordered_transactions_for_stock(
self.case_id, self.section_id, self.product_id
).filter(report__date__lte=self.report.date).exclude(pk=self.pk)
try:
return siblings[0]
except __HOLE__:
return None
|
IndexError
|
dataset/ETHPy150Open dimagi/commcare-hq/corehq/ex-submodules/casexml/apps/stock/models.py/StockTransaction.get_previous_transaction
|
6,334
|
@classmethod
def latest(cls, case_id, section_id, product_id):
relevant = cls.get_ordered_transactions_for_stock(case_id, section_id, product_id)
try:
return relevant.select_related()[0]
except __HOLE__:
return None
|
IndexError
|
dataset/ETHPy150Open dimagi/commcare-hq/corehq/ex-submodules/casexml/apps/stock/models.py/StockTransaction.latest
|
6,335
|
def update_check(settings):
"""
Check whether the dependencies are sufficient to run Eden
@ToDo: Load deployment_settings so that we can configure the update_check
- need to rework so that 000_config.py is parsed 1st
@param settings: the deployment_settings
"""
# Get Web2py environment into our globals.
#globals().update(**environment)
request = current.request
# Fatal errors
errors = []
# Non-fatal warnings
warnings = []
# -------------------------------------------------------------------------
# Check Python libraries
# Get mandatory global dependencies
app_path = request.folder
gr_path = os.path.join(app_path, "requirements.txt")
or_path = os.path.join(app_path, "optional_requirements.txt")
global_dep = parse_requirements({}, gr_path)
optional_dep = parse_requirements({}, or_path)
templates = settings.get_template()
location = settings.get_template_location()
if not isinstance(templates, (tuple, list)):
templates = (templates,)
template_dep = {}
template_optional_dep = {}
for template in templates:
tr_path = os.path.join(app_path, location, "templates", template, "requirements.txt")
tor_path = os.path.join(app_path, location, "templates", template, "optional_requirements.txt")
parse_requirements(template_dep, tr_path)
parse_requirements(template_optional_dep, tor_path)
# Remove optional dependencies which are already accounted for in template dependencies
unique = set(optional_dep.keys()).difference(set(template_dep.keys()))
for dependency in optional_dep.keys():
if dependency not in unique:
del optional_dep[dependency]
# Override optional dependency messages from template
unique = set(optional_dep.keys()).difference(set(template_optional_dep.keys()))
for dependency in optional_dep.keys():
if dependency not in unique:
del optional_dep[dependency]
errors, warnings = s3_check_python_lib(global_dep, template_dep, template_optional_dep, optional_dep)
# @ToDo: Move these to Template
# for now this is done in s3db.climate_first_run()
if settings.has_module("climate"):
if settings.get_database_type() != "postgres":
errors.append("Climate unresolved dependency: PostgreSQL required")
try:
import rpy2
except ImportError:
errors.append("Climate unresolved dependency: RPy2 required")
try:
from Scientific.IO import NetCDF
except __HOLE__:
warnings.append("Climate unresolved dependency: NetCDF required if you want to import readings")
try:
from scipy import stats
except ImportError:
warnings.append("Climate unresolved dependency: SciPy required if you want to generate graphs on the map")
# -------------------------------------------------------------------------
# Check Web2Py version
#
# Currently, the minimum usable Web2py is determined by whether the
# Scheduler is available
web2py_minimum_version = "Version 2.4.7-stable+timestamp.2013.05.27.11.49.44"
# Offset of datetime in return value of parse_version.
datetime_index = 4
web2py_version_ok = True
try:
from gluon.fileutils import parse_version
except ImportError:
web2py_version_ok = False
if web2py_version_ok:
try:
web2py_minimum_parsed = parse_version(web2py_minimum_version)
web2py_minimum_datetime = web2py_minimum_parsed[datetime_index]
version_info = open("VERSION", "r")
web2py_installed_version = version_info.read().split()[-1].strip()
version_info.close()
if isinstance(web2py_installed_version, str):
# Post 2.4.2, global_settings.web2py_version is unparsed
web2py_installed_parsed = parse_version(web2py_installed_version)
web2py_installed_datetime = web2py_installed_parsed[datetime_index]
else:
# 2.4.2 & earlier style
web2py_installed_datetime = web2py_installed_version[datetime_index]
web2py_version_ok = web2py_installed_datetime >= web2py_minimum_datetime
except:
# Will get AttributeError if Web2py's parse_version is too old for
# its current version format, which changed in 2.3.2.
web2py_version_ok = False
if not web2py_version_ok:
warnings.append(
"The installed version of Web2py is too old to support the current version of Sahana Eden."
"\nPlease upgrade Web2py to at least version: %s" % \
web2py_minimum_version)
# -------------------------------------------------------------------------
# Create required directories if needed
databases_dir = os.path.join(app_path, "databases")
try:
os.stat(databases_dir)
except OSError:
# not found, create it
os.mkdir(databases_dir)
# -------------------------------------------------------------------------
# Copy in Templates
# - 000_config.py (machine-specific settings)
# - rest are run in-place
#
template_folder = os.path.join(app_path, "modules", "templates")
template_files = {
# source : destination
"000_config.py" : os.path.join("models", "000_config.py"),
}
copied_from_template = []
for t in template_files:
src_path = os.path.join(template_folder, t)
dst_path = os.path.join(app_path, template_files[t])
try:
os.stat(dst_path)
except OSError:
# Not found, copy from template
if t == "000_config.py":
input = open(src_path)
output = open(dst_path, "w")
for line in input:
if "akeytochange" in line:
# Generate a random hmac_key to secure the passwords in case
# the database is compromised
import uuid
hmac_key = uuid.uuid4()
line = 'settings.auth.hmac_key = "%s"' % hmac_key
output.write(line)
output.close()
input.close()
else:
import shutil
shutil.copy(src_path, dst_path)
copied_from_template.append(template_files[t])
# @ToDo: WebSetup
# http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/WebSetup
#if not os.path.exists("%s/applications/websetup" % os.getcwd()):
# # @ToDo: Check Permissions
# # Copy files into this folder (@ToDo: Pythonise)
# cp -r private/websetup "%s/applications" % os.getcwd()
# Launch WebSetup
#redirect(URL(a="websetup", c="default", f="index",
# vars=dict(appname=request.application,
# firstTime="True")))
else:
# Found the file in the destination
# Check if it has been edited
import re
edited_pattern = r"FINISHED_EDITING_\w*\s*=\s*(True|False)"
edited_matcher = re.compile(edited_pattern).match
has_edited = False
with open(dst_path) as f:
for line in f:
edited_result = edited_matcher(line)
if edited_result:
has_edited = True
edited = edited_result.group(1)
break
if has_edited and (edited != "True"):
errors.append("Please edit %s before starting the system." % t)
# Check if it's up to date (i.e. a critical update requirement)
version_pattern = r"VERSION =\s*([0-9]+)"
version_matcher = re.compile(version_pattern).match
has_version = False
with open(dst_path) as f:
for line in f:
version_result = version_matcher(line)
if version_result:
has_version = True
version = version_result.group(1)
break
if not has_version:
error = "Your %s is using settings from the old templates system. Please switch to the new templates system: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Templates" % t
errors.append(error)
elif int(version) != VERSION:
error = "Your %s is using settings from template version %s. Please update with new settings from template version %s before starting the system." % \
(t, version, VERSION)
errors.append(error)
if copied_from_template:
errors.append(
"The following files were copied from templates and should be edited: %s" %
", ".join(copied_from_template))
return {"error_messages": errors, "warning_messages": warnings}
# -------------------------------------------------------------------------
|
ImportError
|
dataset/ETHPy150Open sahana/eden/modules/s3_update_check.py/update_check
|
6,336
|
def parse_requirements(output, filepath):
"""
"""
try:
with open(filepath) as filehandle:
dependencies = filehandle.read().splitlines()
msg = ""
for dependency in dependencies:
if dependency[0] == "#":
# either a normal comment or custom message
if dependency[:9] == "# Warning" or dependency[:8] == "# Error:":
msg = dependency.split(":", 1)[1]
else:
import re
# Check if the module name is different from the package name
if "#" in dependency:
dep = dependency.split("#", 1)[1]
output[dep] = msg
else:
pattern = re.compile(r'([A-Za-z0-9_-]+)')
try:
dep = pattern.match(dependency).group(1)
output[dep] = msg
except __HOLE__:
# Invalid dependency syntax
pass
msg = ""
except IOError:
# No override for Template
pass
return output
# -------------------------------------------------------------------------
|
AttributeError
|
dataset/ETHPy150Open sahana/eden/modules/s3_update_check.py/parse_requirements
|
6,337
|
def s3_check_python_lib(global_mandatory, template_mandatory, template_optional, global_optional):
"""
checks for optional as well as mandatory python libraries
"""
errors = []
warnings = []
for dependency, err in global_mandatory.iteritems():
try:
if "from" in dependency:
exec dependency
else:
exec "import %s" % dependency
except ImportError:
if err:
errors.append(err)
else:
errors.append("S3 unresolved dependency: %s required for Sahana to run" % dependency)
for dependency, err in template_mandatory.iteritems():
try:
if "from" in dependency:
exec dependency
else:
exec "import %s" % dependency
except ImportError:
if err:
errors.append(err)
else:
errors.append("Unresolved template dependency: %s required" % dependency)
for dependency, warn in template_optional.iteritems():
try:
if "from" in dependency:
exec dependency
else:
exec "import %s" % dependency
except ImportError:
if warn:
warnings.append(warn)
else:
warnings.append("Unresolved optional dependency: %s required" % dependency)
for dependency, warn in global_optional.iteritems():
try:
if "from" in dependency:
exec dependency
else:
exec "import %s" % dependency
except __HOLE__:
if warn:
warnings.append(warn)
else:
warnings.append("Unresolved optional dependency: %s required" % dependency)
return errors, warnings
# END =========================================================================
|
ImportError
|
dataset/ETHPy150Open sahana/eden/modules/s3_update_check.py/s3_check_python_lib
|
6,338
|
def get_info(self, path):
dirname, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
ftype = 'file'
if os.path.isdir(path):
ftype = 'dir'
elif os.path.islink(path):
ftype = 'link'
elif ext.lower() == '.fits':
ftype = 'fits'
bnch = Bunch.Bunch(self.na_dict)
try:
filestat = os.stat(path)
bnch.update(dict(path=path, name=filename, type=ftype,
st_mode=filestat.st_mode,
st_mode_oct=oct(filestat.st_mode),
st_size=filestat.st_size,
st_size_str=str(filestat.st_size),
st_mtime=filestat.st_mtime,
st_mtime_str=time.ctime(filestat.st_mtime)))
except __HOLE__ as e:
# TODO: identify some kind of error with this path
bnch.update(dict(path=path, name=filename, type=ftype,
st_mode=0, st_size=0,
st_mtime=0))
return bnch
|
OSError
|
dataset/ETHPy150Open ejeschke/ginga/ginga/misc/plugins/FBrowser.py/FBrowser.get_info
|
6,339
|
def __dict__(self):
try:
return self._current_object.__dict__
except __HOLE__:
raise AttributeError('__dict__')
|
RuntimeError
|
dataset/ETHPy150Open dcramer/django-indexer/indexer/utils.py/Proxy.__dict__
|
6,340
|
def __repr__(self):
try:
obj = self._current_object
except __HOLE__:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
|
RuntimeError
|
dataset/ETHPy150Open dcramer/django-indexer/indexer/utils.py/Proxy.__repr__
|
6,341
|
def __nonzero__(self):
try:
return bool(self._current_object)
except __HOLE__:
return False
|
RuntimeError
|
dataset/ETHPy150Open dcramer/django-indexer/indexer/utils.py/Proxy.__nonzero__
|
6,342
|
def __unicode__(self):
try:
return unicode(self._current_object)
except __HOLE__:
return repr(self)
|
RuntimeError
|
dataset/ETHPy150Open dcramer/django-indexer/indexer/utils.py/Proxy.__unicode__
|
6,343
|
def __dir__(self):
try:
return dir(self._current_object)
except __HOLE__:
return []
|
RuntimeError
|
dataset/ETHPy150Open dcramer/django-indexer/indexer/utils.py/Proxy.__dir__
|
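Rows 6,339 through 6,343 all special-case `RuntimeError` from `_current_object`, which is the standard unbound-proxy idiom. A compact standalone sketch of the pattern:

```python
# Standalone sketch of the unbound-proxy idiom from the rows above.
class Proxy(object):
    def __init__(self, resolver):
        self._resolver = resolver

    @property
    def _current_object(self):
        obj = self._resolver()
        if obj is None:
            raise RuntimeError("proxy is unbound")
        return obj

    def __repr__(self):
        try:
            return repr(self._current_object)
        except RuntimeError:
            return "<%s unbound>" % self.__class__.__name__

print(Proxy(lambda: None))      # <Proxy unbound>
print(Proxy(lambda: [1, 2]))    # [1, 2]
```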
6,344
|
def _replaceEnv(self, match):
""" Internally used method to replace found matches of _RE_ENV regular
expression with corresponding environment variable.
"""
var = match.group('var')
try:
return os.environ[var]
except __HOLE__:
raise EnvironmentVariableNotFound('Can not find environment '
'variable: {0}'.format(var))
|
KeyError
|
dataset/ETHPy150Open rapyuta/rce/rce-core/rce/monitor/common.py/ArgumentMixin._replaceEnv
|
6,345
|
def top(sort_key='cpu'):
# TODO: add sort key middleware
_key = itemgetter(sort_key)
entries = [format_dict(pd) for pd in get_process_dicts()]
# handle an apparent bug in psutil where the first call of the
# process does not return any cpu percentages. sorting by memory
# percentages instead.
try:
sort_total = sum([_key(x) for x in entries if _key(x) is not None])
if not sort_total:
sort_key = 'mem'
_key = itemgetter(sort_key)
except __HOLE__:
pass
entries.sort(key=_key, reverse=True)
return {'entries': entries}
|
TypeError
|
dataset/ETHPy150Open mahmoud/clastic/clastic/contrib/webtop/top.py/top
|
6,346
|
def canonize_duedate(duedate):
if duedate == None:
return '000000'
else:
try:
return duedate.strftime('%y%m%d')
except __HOLE__:
raise DueDateFormatException("Invalid type for canonize_duedate")
|
AttributeError
|
dataset/ETHPy150Open kapsiry/sikteeri/membership/reference_numbers.py/canonize_duedate
|
6,347
|
@register.filter
def quantity_ordered(product, order):
"""
e.g. {% if product|quantity_ordered:plata.order > 0 %} ... {% endif %}
"""
try:
return order.items.values('quantity').get(product=product)['quantity']
except __HOLE__:
return 0
|
ObjectDoesNotExist
|
dataset/ETHPy150Open matthiask/plata/plata/shop/templatetags/plata_tags.py/quantity_ordered
|
6,348
|
def request(self, req):
if self.cached:
# Try once to use the cached connection; if it fails to send the
# request then discard and try again.
try:
return self.requestOnce(self.cached, req)
except http_error.RequestError, err:
err.wrapped.clear()
self.cached.close()
self.cached = None
# If a problem occurs before or during the sending of the request, then
# throw a wrapper exception so that the caller knows it is safe to
# retry. Once the request is sent retries must be done more carefully
# as side effects may have occurred.
try:
conn = self.openConnection()
except (__HOLE__, SystemExit):
raise
except:
wrapped = util.SavedException()
raise http_error.RequestError(wrapped)
# Note that requestOnce may also throw RequestError, see above.
ret = self.requestOnce(conn, req)
if not ret.will_close:
self.cached = conn
return ret
|
KeyboardInterrupt
|
dataset/ETHPy150Open sassoftware/conary/conary/lib/http/connection.py/Connection.request
|
6,349
|
def startSSL(self, sock):
"""If needed, start SSL on the proxy or endpoint connection."""
if not self.doSSL:
return sock
if self.caCerts:
# If cert checking is requested use m2crypto
if SSL:
return startSSLWithChecker(sock, self.caCerts, self.commonName)
else:
warnings.warn("m2crypto is not installed; server certificates "
"will not be validated!")
try:
# Python >= 2.6
import ssl
return ssl.SSLSocket(sock)
except __HOLE__:
# Python < 2.6
sslSock = socket.ssl(sock, None, None)
return httplib.FakeSocket(sock, sslSock)
|
ImportError
|
dataset/ETHPy150Open sassoftware/conary/conary/lib/http/connection.py/Connection.startSSL
|
6,350
|
def requestOnce(self, conn, req):
if self.proxy and self.proxy.userpass[0] and not self.doTunnel:
req.headers['Proxy-Authorization'] = ('Basic ' +
base64.b64encode(":".join(self.proxy.userpass)))
try:
req.sendRequest(conn, isProxied=(self.proxy is not None))
except (KeyboardInterrupt, __HOLE__):
raise
except:
wrapped = util.SavedException()
raise http_error.RequestError(wrapped)
# Wait for a response.
lastTimeout = time.time()
while True:
if req.abortCheck and req.abortCheck():
raise http_error.AbortError()
# Wait 5 seconds for a response.
try:
active = select.select([conn.sock], [], [], 5)[0]
except select.error, err:
if err.args[0] == errno.EINTR:
# Interrupted system call -- we caught a signal but it was
# handled safely.
continue
raise
if active:
break
# Still no response from the server. Send blank lines to keep the
# connection alive, in case the server is behind a load balancer or
# firewall with short connection timeouts.
now = time.time()
if now - lastTimeout >= 15:
conn.send('\r\n')
lastTimeout = now
return conn.getresponse()
|
SystemExit
|
dataset/ETHPy150Open sassoftware/conary/conary/lib/http/connection.py/Connection.requestOnce
|
6,351
|
def get_mimetype(path, file_contents=None):
mimetypes.init([MIMEMAP])
mimetype, _ = mimetypes.guess_type(path)
if mimetype is None:
try:
import magic
if file_contents is not None:
mimetype = magic.from_buffer(file_contents, mime=True)
else:
mimetype = magic.from_file(path, mime=True)
except __HOLE__:
return mimetype
return mimetype
|
ImportError
|
dataset/ETHPy150Open CenterForOpenScience/osf.io/website/util/mimetype.py/get_mimetype
|
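The helper above tries extension-based guessing first and falls back to `python-magic` content sniffing when that library is installed. A hedged usage sketch, assuming the function and its `MIMEMAP` constant are in scope; paths and bytes are illustrative:

```python
# Hedged usage sketch for get_mimetype().
print(get_mimetype("report.pdf"))   # extension hit: 'application/pdf'
print(get_mimetype("blob.unknown",
                   file_contents=b"%PDF-1.4"))  # magic fallback, if installed
```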
6,352
|
def import_class(module_name):
"""
Import a module with a reasonable traceback and return that module as the
variable.
eg. dumps = import_class('simplejson.dumps')
if dumps is None:
dumps = import_class('json.dumps')
Reference: http://lucumr.pocoo.org/2011/9/21/python-import-blackbox/
Written by: Armin Ronacher
"""
module, klass = module_name.rsplit('.', 1)
try:
__import__(module)
except __HOLE__:
exc_type, exc_value, tb_root = sys.exc_info()
logger.warning(exc_type)
logger.warning(exc_value)
tb = tb_root
while tb is not None:
if tb.tb_frame.f_globals.get('__name__') == module:
raise exc_type, exc_value, tb_root
tb = tb.tb_next
return None
return getattr(sys.modules[module], klass)
|
ImportError
|
dataset/ETHPy150Open winhamwr/neckbeard/neckbeard/cloud_provisioners/base.py/import_class
|
6,353
|
def save(self):
"""Find the run with this build, or create a new one."""
try:
this_run = model.Run.objects.get(
series=self.run,
build=self.cleaned_data["build"],
)
except __HOLE__:
this_run = self.run.clone_for_series(
build=self.cleaned_data["build"],
user=self.user,
)
this_run.activate()
# now we need to return this new run as the one to be executed.
return super(EnvironmentBuildSelectionForm, self).save(), this_run.id
|
ObjectDoesNotExist
|
dataset/ETHPy150Open mozilla/moztrap/moztrap/view/runtests/forms.py/EnvironmentBuildSelectionForm.save
|
6,354
|
def readme():
try:
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
readme = f.read()
except __HOLE__:
readme = ''  # keep the name bound so the substitution below still runs
pattern = re.compile(r'''
(?P<colon> : \n{2,})?
\s* \.\. [ ] code-block:: \s+ [^\n]+ \n
[ \t]* \n
(?P<block>
(?: (?: (?: \t | [ ]{3}) [^\n]* | [ \t]* ) \n)+
)
''', re.VERBOSE)
return pattern.sub(
lambda m: (':' + m.group('colon') if m.group('colon') else ' ::') +
'\n\n' +
'\n'.join(' ' + l for l in m.group('block').splitlines()) +
'\n\n',
readme, 0
)
|
IOError
|
dataset/ETHPy150Open crosspop/asuka/setup.py/readme
|
6,355
|
def __getattr__(self, name):
try:
return self[name]
except __HOLE__:
raise AttributeError(name)
|
KeyError
|
dataset/ETHPy150Open tensorprob/tensorprob/tensorprob/optimization_result.py/OptimizationResult.__getattr__
|
6,356
|
def update_dividend(self, symbol, data):
if len(data) == 0:
return
try:
self.divstore[symbol] = data
except __HOLE__:
del self.divstore[symbol]
self.divstore[symbol] = data
|
ValueError
|
dataset/ETHPy150Open yinhm/datafeed/datafeed/datastore.py/Manager.update_dividend
|
6,357
|
def _update(self, symbol, quotes):
"""Archive daily ohlcs, override if datasets exists.
Arguments:
symbol: Stock instrument.
quotes: numpy quotes data.
"""
i = 0
pre_ts = 0
indexes = []
for q in quotes:
# 2 hours interval should be safe for seperate daily quotes
if pre_ts and (q['time'] - pre_ts) > 7200:
indexes.append(i)
pre_ts = q['time']
i += 1
indexes.append(i)
pre_index = 0
for i in indexes:
sliced_qs = quotes[pre_index:i]
date = datetime.datetime.fromtimestamp(sliced_qs[0]['time']).date()
try:
ds = self._require_dataset(symbol, date, sliced_qs.shape)
except __HOLE__, e:
if e.message.startswith('Shapes do not match'):
self._drop_dataset(symbol, date)
ds = self._require_dataset(symbol, date, sliced_qs.shape)
else:
raise e
ds[:] = sliced_qs
pre_index = i
|
TypeError
|
dataset/ETHPy150Open yinhm/datafeed/datafeed/datastore.py/OHLC._update
|
6,358
|
def _update_multi(self, symbol, quotes):
"""Archive multiday ohlcs, override if datasets exists.
Arguments:
symbol: Stock instrument.
quotes: numpy quotes data.
"""
i = 0
pre_day = None
indexes = []
indexes.append([0, len(quotes)])
for row in quotes:
day = datetime.datetime.fromtimestamp(row['time']).day
if pre_day and pre_day != day:
# found next day boundary
indexes[-1][1] = i
indexes.append([i, len(quotes)])
i += 1
pre_day = day
for i0, i1 in indexes:
t0, t1 = quotes[i0]['time'], quotes[i1-1]['time']
dt = datetime.datetime.fromtimestamp(t0)
dsi0, dsi1 = self.timestamp_to_index(dt, t0), self.timestamp_to_index(dt, t1)
sliced = quotes[i0:i1]
ds = self._require_dataset(symbol, dt.date(), sliced.shape)
if dsi0 != 0:
dsi1 = dsi1 + 1
logging.debug("ds[%d:%d] = quotes[%d:%d]" % (dsi0, dsi1, i0, i1))
try:
ds[dsi0:dsi1] = sliced
except __HOLE__:
logging.debug("data may have holes")
for row in sliced:
r_dsi = self.timestamp_to_index(dt, row['time'])
# logging.debug("r_dsi: %d" % r_dsi)
ds[r_dsi] = row
|
TypeError
|
dataset/ETHPy150Open yinhm/datafeed/datafeed/datastore.py/OHLC._update_multi
|
6,359
|
def get(self, symbol, length):
year = datetime.datetime.today().isocalendar()[0]
try:
data = self._get_year_data(symbol, year)
except KeyError:
self.handle[symbol] # test symbol existence
data = []
while True:
if len(data) >= length:
break
year = year - 1
try:
ydata = self._get_year_data(symbol, year)
except __HOLE__:
# wrong length
return data
if len(ydata) == 0:
break
if len(data) == 0:
data = ydata
else:
data = np.append(ydata, data)
return data[-length:]
|
KeyError
|
dataset/ETHPy150Open yinhm/datafeed/datafeed/datastore.py/Day.get
|
6,360
|
def update(self, symbol, data):
"""append daily history data to daily archive.
Arguments
=========
- `symbol`: symbol.
- `npydata`: data of npy file.
"""
prev_year = None
ds = None
newdata = None
for row in data:
day = datetime.datetime.fromtimestamp(row['time'])
isoyear = day.isocalendar()[0]
if prev_year != isoyear:
if prev_year:
# ds will be changed, save prev ds first
ds[:] = newdata
ds = self._require_dataset(symbol, isoyear)
newdata = ds[:]
index = self._index_of_day(day)
try:
newdata[index] = row
except __HOLE__, e:
logging.error("IndexError on: %s, %s, %s" % (symbol, isoyear, day))
prev_year = isoyear
if ds is not None and newdata is not None:  # "!=" is elementwise on numpy arrays
ds[:] = newdata
self.flush()
return True
|
IndexError
|
dataset/ETHPy150Open yinhm/datafeed/datafeed/datastore.py/Day.update
|
6,361
|
def _require_dataset(self, symbol):
try:
return self._dataset(symbol)
except __HOLE__:
return self.handle.create_dataset(symbol,
(self.shape_x, ),
self.DTYPE)
|
KeyError
|
dataset/ETHPy150Open yinhm/datafeed/datafeed/datastore.py/Minute._require_dataset
|
6,362
|
def _rewrite(self, tostore):
if self.__len__() > 0:
for key in self.keys():
try:
tostore.update(key, self.__getitem__(key))
except __HOLE__:
logging.error("Inconsistent data for %s, ignoring." % key)
self.__delitem__(key)
tostore.flush()
|
AssertionError
|
dataset/ETHPy150Open yinhm/datafeed/datafeed/datastore.py/MinuteSnapshotCache._rewrite
|
6,363
|
def get_pickle():
try:
import cPickle as pickle
except __HOLE__:
import pickle # noqa
return pickle
|
ImportError
|
dataset/ETHPy150Open codysoyland/django-phased/phased/utils.py/get_pickle
|
6,364
|
def restore_csrf_token(request, storage=None):
"""
Given the request and a the context used during the second render phase,
this wil check if there is a CSRF cookie and restores if needed, to
counteract the way the CSRF framework invalidates the CSRF token after
each request/response cycle.
"""
if storage is None:
storage = {}
try:
request.META["CSRF_COOKIE"] = request.COOKIES[settings.CSRF_COOKIE_NAME]
except __HOLE__:
csrf_token = storage.get('csrf_token', None)
if csrf_token:
request.META["CSRF_COOKIE"] = csrf_token
return storage
|
KeyError
|
dataset/ETHPy150Open codysoyland/django-phased/phased/utils.py/restore_csrf_token
|
6,365
|
def polymorphic_union(table_map, typecolname,
aliasname='p_union', cast_nulls=True):
"""Create a ``UNION`` statement used by a polymorphic mapper.
See :ref:`concrete_inheritance` for an example of how
this is used.
:param table_map: mapping of polymorphic identities to
:class:`.Table` objects.
:param typecolname: string name of a "discriminator" column, which will be
derived from the query, producing the polymorphic identity for
each row. If ``None``, no polymorphic discriminator is generated.
:param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()`
construct generated.
:param cast_nulls: if True, non-existent columns, which are represented
as labeled NULLs, will be passed into CAST. This is a legacy behavior
that is problematic on some backends such as Oracle - in which case it
can be set to False.
"""
colnames = util.OrderedSet()
colnamemaps = {}
types = {}
for key in table_map:
table = table_map[key]
# mysql doesn't like selecting from a select;
# make it an alias of the select
if isinstance(table, sql.Select):
table = table.alias()
table_map[key] = table
m = {}
for c in table.c:
colnames.add(c.key)
m[c.key] = c
types[c.key] = c.type
colnamemaps[table] = m
def col(name, table):
try:
return colnamemaps[table][name]
except __HOLE__:
if cast_nulls:
return sql.cast(sql.null(), types[name]).label(name)
else:
return sql.type_coerce(sql.null(), types[name]).label(name)
result = []
for type, table in table_map.items():
if typecolname is not None:
result.append(
sql.select([col(name, table) for name in colnames] +
[sql.literal_column(
sql_util._quote_ddl_expr(type)).
label(typecolname)],
from_obj=[table]))
else:
result.append(sql.select([col(name, table) for name in colnames],
from_obj=[table]))
return sql.union_all(*result).alias(aliasname)
|
KeyError
|
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/orm/util.py/polymorphic_union
|
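A hedged usage sketch for `polymorphic_union`, loosely following the concrete-inheritance pattern the docstring points to; the tables and names are illustrative:

```python
# Illustrative polymorphic_union() call over two concrete tables.
from sqlalchemy import MetaData, Table, Column, Integer, String
from sqlalchemy.orm import polymorphic_union

metadata = MetaData()
engineers = Table("engineers", metadata,
                  Column("id", Integer, primary_key=True),
                  Column("name", String(50)),
                  Column("engineer_info", String(50)))
managers = Table("managers", metadata,
                 Column("id", Integer, primary_key=True),
                 Column("name", String(50)),
                 Column("manager_data", String(50)))

pjoin = polymorphic_union(
    {"engineer": engineers, "manager": managers},
    "type", "pjoin")
# Columns absent from one table come back as labeled NULLs (wrapped in
# CAST unless cast_nulls=False).
```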
6,366
|
def identity_key(*args, **kwargs):
"""Generate "identity key" tuples, as are used as keys in the
:attr:`.Session.identity_map` dictionary.
This function has several call styles:
* ``identity_key(class, ident)``
This form receives a mapped class and a primary key scalar or
tuple as an argument.
E.g.::
>>> identity_key(MyClass, (1, 2))
(<class '__main__.MyClass'>, (1, 2))
:param class: mapped class (must be a positional argument)
:param ident: primary key, may be a scalar or tuple argument.
* ``identity_key(instance=instance)``
This form will produce the identity key for a given instance. The
instance need not be persistent, only that its primary key attributes
are populated (else the key will contain ``None`` for those missing
values).
E.g.::
>>> instance = MyClass(1, 2)
>>> identity_key(instance=instance)
(<class '__main__.MyClass'>, (1, 2))
    In this form, the given instance is ultimately run through
:meth:`.Mapper.identity_key_from_instance`, which will have the
effect of performing a database check for the corresponding row
if the object is expired.
:param instance: object instance (must be given as a keyword arg)
* ``identity_key(class, row=row)``
This form is similar to the class/tuple form, except is passed a
database result row as a :class:`.RowProxy` object.
E.g.::
>>> row = engine.execute("select * from table where a=1 and b=2").\
first()
>>> identity_key(MyClass, row=row)
(<class '__main__.MyClass'>, (1, 2))
:param class: mapped class (must be a positional argument)
:param row: :class:`.RowProxy` row returned by a :class:`.ResultProxy`
(must be given as a keyword arg)
"""
if args:
if len(args) == 1:
class_ = args[0]
try:
row = kwargs.pop("row")
except __HOLE__:
ident = kwargs.pop("ident")
elif len(args) == 2:
class_, ident = args
elif len(args) == 3:
class_, ident = args
else:
raise sa_exc.ArgumentError(
"expected up to three positional arguments, "
"got %s" % len(args))
if kwargs:
raise sa_exc.ArgumentError("unknown keyword arguments: %s"
% ", ".join(kwargs))
mapper = class_mapper(class_)
if "ident" in locals():
return mapper.identity_key_from_primary_key(util.to_list(ident))
return mapper.identity_key_from_row(row)
instance = kwargs.pop("instance")
if kwargs:
raise sa_exc.ArgumentError("unknown keyword arguments: %s"
% ", ".join(kwargs.keys))
mapper = object_mapper(instance)
return mapper.identity_key_from_instance(instance)
|
KeyError
|
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/orm/util.py/identity_key
|
6,367
|
def __getattr__(self, key):
try:
_aliased_insp = self.__dict__['_aliased_insp']
except KeyError:
raise AttributeError()
else:
for base in _aliased_insp._target.__mro__:
try:
attr = object.__getattribute__(base, key)
except __HOLE__:
continue
else:
break
else:
raise AttributeError(key)
if isinstance(attr, PropComparator):
ret = attr.adapt_to_entity(_aliased_insp)
setattr(self, key, ret)
return ret
elif hasattr(attr, 'func_code'):
is_method = getattr(_aliased_insp._target, key, None)
if is_method and is_method.__self__ is not None:
return util.types.MethodType(attr.__func__, self, self)
else:
return None
elif hasattr(attr, '__get__'):
ret = attr.__get__(None, self)
if isinstance(ret, PropComparator):
return ret.adapt_to_entity(_aliased_insp)
else:
return ret
else:
return attr
|
AttributeError
|
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/orm/util.py/AliasedClass.__getattr__
|
6,368
|
def ReadTag(buf, pos):
"""Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple."""
try:
start = pos
while ORD_MAP_AND_0X80[buf[pos]]:
pos += 1
pos += 1
return (buf[start:pos], pos)
except __HOLE__:
raise ValueError("Invalid tag")
# This function is HOT.
|
IndexError
|
dataset/ETHPy150Open google/grr/grr/lib/rdfvalues/structs.py/ReadTag
|
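ReadTag walks bytes while the 0x80 continuation bit is set, i.e. protobuf varint framing (ORD_MAP_AND_0X80 is evidently a precomputed byte & 0x80 lookup table). A self-contained varint decoder showing the same framing:

def read_varint(buf, pos):
    # Accumulate 7-bit groups; a byte without the 0x80 bit ends the varint.
    result, shift = 0, 0
    while True:
        b = buf[pos]            # assumes a Python 3 bytes object
        pos += 1
        result |= (b & 0x7F) << shift
        if not b & 0x80:
            return result, pos
        shift += 7

assert read_varint(b"\xac\x02", 0) == (300, 2)   # 0xAC 0x02 encodes 300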
6,369
|
def Validate(self, value, **_):
"""Validates a python format representation of the value."""
# We only accept a base string, unicode object or RDFString here.
if not (value.__class__ is str or value.__class__ is unicode or
value.__class__ is rdfvalue.RDFString):
raise type_info.TypeValueError("%s not a valid string" % value)
if value.__class__ is unicode:
return value
# A String means a unicode String. We must be dealing with unicode strings
# here and the input must be encodable as a unicode object.
try:
try:
return value.__unicode__()
except __HOLE__:
return unicode(value, "utf8")
except UnicodeError:
raise type_info.TypeValueError("Not a valid unicode string")
|
AttributeError
|
dataset/ETHPy150Open google/grr/grr/lib/rdfvalues/structs.py/ProtoString.Validate
|
6,370
|
def Validate(self, value, **_):
try:
return int(value)
except __HOLE__:
raise type_info.TypeValueError("Invalid value %s for Integer" % value)
|
ValueError
|
dataset/ETHPy150Open google/grr/grr/lib/rdfvalues/structs.py/ProtoUnsignedInteger.Validate
|
6,371
|
def Append(self, rdf_value=utils.NotAValue, wire_format=None, **kwargs):
"""Append the value to our internal list."""
if rdf_value is utils.NotAValue:
if wire_format is None:
rdf_value = self.type_descriptor.type(**kwargs)
self.dirty = True
else:
rdf_value = None
else:
# Coerce the value to the required type.
try:
rdf_value = self.type_descriptor.Validate(rdf_value, **kwargs)
except (__HOLE__, ValueError) as e:
raise type_info.TypeValueError(
"Assignment value must be %s, but %s can not "
"be coerced. Error: %s" % (self.type_descriptor.proto_type_name,
type(rdf_value), e))
self.wrapped_list.append((rdf_value, wire_format))
return rdf_value
|
TypeError
|
dataset/ETHPy150Open google/grr/grr/lib/rdfvalues/structs.py/RepeatedFieldHelper.Append
|
6,372
|
def _CopyRawData(self):
new_raw_data = {}
# We need to copy all entries in _data. Those entries are tuples of
# - an object (if it has already been deserialized)
# - the serialized object (if it has been serialized)
# - the type_info.
# To copy this, it's easiest to just copy the serialized object if it
# exists. We have to make sure though that the object is not a protobuf.
# If it is, someone else might have changed the subobject and the
# serialization is not accurate anymore. This is indicated by the dirty
# flag. Type_infos can be just copied by reference.
for name, (obj, serialized, t_info) in self._data.iteritems():
if serialized is None:
obj = copy.copy(obj)
else:
try:
if t_info.IsDirty(obj):
obj, serialized = copy.copy(obj), None
else:
obj = None
except __HOLE__:
obj = None
new_raw_data[name] = (obj, serialized, t_info)
return new_raw_data
|
AttributeError
|
dataset/ETHPy150Open google/grr/grr/lib/rdfvalues/structs.py/RDFStruct._CopyRawData
|
6,373
|
def UnionCast(self):
union_field = getattr(self, self.union_field)
cast_field_name = str(union_field).lower()
try:
return getattr(self, cast_field_name)
except __HOLE__:
raise AttributeError("union_field not initialized.")
|
AttributeError
|
dataset/ETHPy150Open google/grr/grr/lib/rdfvalues/structs.py/RDFProtoStruct.UnionCast
|
6,374
|
def _guess_mimetype(self, file):
"""Guess the mimetype of an uploaded file.
Uploaded files don't necessarily have valid mimetypes provided,
so attempt to guess them when they're blank.
This only works if `file` is in the path. If it's not, or guessing
fails, we fall back to a mimetype of application/octet-stream.
"""
if not is_exe_in_path('file'):
return self.DEFAULT_MIMETYPE
# The browser didn't know what this was, so we'll need to do
# some guess work. If we have 'file' available, use that to
# figure it out.
p = subprocess.Popen(['file', '--mime-type', '-b', '-'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
# Write the content from the file until file has enough data to
# make a determination.
for chunk in file.chunks():
try:
p.stdin.write(chunk)
except __HOLE__:
# file closed, so we hopefully have an answer.
break
p.stdin.close()
ret = p.wait()
if ret == 0:
mimetype = p.stdout.read().strip()
else:
mimetype = None
# Reset the read position so we can properly save this.
file.seek(0)
return mimetype or self.DEFAULT_MIMETYPE
|
IOError
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/attachments/forms.py/UploadFileForm._guess_mimetype
|
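The flags passed to file(1) above are real ones: --mime-type prints only the MIME type, -b omits the filename, and '-' reads from stdin. A standalone sketch of the same shell-out, assuming the file binary is on PATH:

import subprocess

def guess_mimetype(data, default='application/octet-stream'):
    # Pipe raw bytes to file(1) and read back the MIME type.
    try:
        p = subprocess.Popen(['file', '--mime-type', '-b', '-'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    except OSError:
        return default            # binary not installed
    out, _ = p.communicate(data)
    return out.strip().decode('ascii', 'replace') if p.returncode == 0 else default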
6,375
|
@interactive
def _require(*names):
"""Helper for @require decorator."""
from IPython.parallel.error import UnmetDependency
user_ns = globals()
for name in names:
if name in user_ns:
continue
try:
exec('import %s'%name, user_ns)
except __HOLE__:
raise UnmetDependency(name)
return True
|
ImportError
|
dataset/ETHPy150Open ipython/ipython-py3k/IPython/parallel/controller/dependency.py/_require
|
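The exec('import %s' % name) above can be replaced with importlib, which avoids building code strings; a sketch that reports the first missing dependency (the real decorator also injects the module into the engine namespace, which this skips):

import importlib

def first_missing(*names):
    # Return the first module name that cannot be imported, else None.
    for name in names:
        try:
            importlib.import_module(name)
        except ImportError:
            return name
    return None

assert first_missing('os', 'sys') is None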
6,376
|
def test_hyper_param_accuracy():
mp.dps = 15
As = [n+1e-10 for n in range(-5,-1)]
Bs = [n+1e-10 for n in range(-12,-5)]
assert hyper(As,Bs,10).ae(-381757055858.652671927)
assert legenp(0.5, 100, 0.25).ae(-2.4124576567211311755e+144)
assert (hyp1f1(1000,1,-100)*10**24).ae(5.2589445437370169113)
assert (hyp2f1(10, -900, 10.5, 0.99)*10**24).ae(1.9185370579660768203)
assert (hyp2f1(1000,1.5,-3.5,-1.5)*10**385).ae(-2.7367529051334000764)
assert hyp2f1(-5, 10, 3, 0.5, zeroprec=500) == 0
assert (hyp1f1(-10000, 1000, 100)*10**424).ae(-3.1046080515824859974)
assert (hyp2f1(1000,1.5,-3.5,-0.75,maxterms=100000)*10**231).ae(-4.0534790813913998643)
assert legenp(2, 3, 0.25) == 0
try:
hypercomb(lambda a: [([],[],[],[],[a],[-a],0.5)], [3])
assert 0
except __HOLE__:
pass
assert hypercomb(lambda a: [([],[],[],[],[a],[-a],0.5)], [3], infprec=200) == inf
assert meijerg([[],[]],[[0,0,0,0],[]],0.1).ae(1.5680822343832351418)
assert (besselk(400,400)*10**94).ae(1.4387057277018550583)
mp.dps = 5
(hyp1f1(-5000.5, 1500, 100)*10**185).ae(8.5185229673381935522)
(hyp1f1(-5000, 1500, 100)*10**185).ae(9.1501213424563944311)
mp.dps = 15
(hyp1f1(-5000.5, 1500, 100)*10**185).ae(8.5185229673381935522)
(hyp1f1(-5000, 1500, 100)*10**185).ae(9.1501213424563944311)
assert hyp0f1(fadd(-20,'1e-100',exact=True), 0.25).ae(1.85014429040102783e+49)
assert hyp0f1((-20*10**100+1, 10**100), 0.25).ae(1.85014429040102783e+49)
|
ValueError
|
dataset/ETHPy150Open fredrik-johansson/mpmath/mpmath/tests/test_functions2.py/test_hyper_param_accuracy
|
6,377
|
def test_issue_239():
mp.prec = 150
x = ldexp(2476979795053773,-52)
assert betainc(206, 385, 0, 0.55, 1).ae('0.99999999999999999999996570910644857895771110649954')
mp.dps = 15
try:
u = hyp2f1(-5,5,0.5,0.5)
raise AssertionError("hyp2f1(-5,5,0.5,0.5) (failed zero detection)")
except (mp.NoConvergence, __HOLE__):
pass
|
ValueError
|
dataset/ETHPy150Open fredrik-johansson/mpmath/mpmath/tests/test_functions2.py/test_issue_239
|
6,378
|
def build_deploy_docs(docs_path):
try:
from fabric.main import load_fabfile
except __HOLE__:
warn("Couldn't build fabfile.rst, fabric not installed")
return
project_template_path = path_for_import("mezzanine.project_template")
commands = load_fabfile(os.path.join(project_template_path, "fabfile"))[1]
lines = []
for name in sorted(commands.keys()):
doc = commands[name].__doc__.strip().split("\n")[0]
lines.append(" * ``fab %s`` - %s" % (name, doc))
with open(os.path.join(docs_path, "fabfile.rst"), "w") as f:
f.write("\n".join(lines))
# Python complains if this is inside build_changelog which uses exec.
|
ImportError
|
dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/utils/docs.py/build_deploy_docs
|
6,379
|
def build_changelog(docs_path, package_name="mezzanine"):
"""
Converts Mercurial commits into a changelog in RST format.
"""
project_path = os.path.join(docs_path, "..")
version_file = os.path.join(package_name, "__init__.py")
version_var = "__version__"
changelog_filename = "CHANGELOG"
changelog_file = os.path.join(project_path, changelog_filename)
versions = OrderedDict()
repo = None
ignore = ("AUTHORS", "formatting", "typo", "pep8", "pep 8",
"whitespace", "README", "trans", "print debug",
"debugging", "tabs", "style", "sites", "ignore",
"tweak", "cleanup", "minor", "for changeset",
".com``", "oops", "syntax")
hotfixes = {
"40cbc47b8d8a": "1.0.9",
"a25749986abc": "1.0.10",
}
# Load the repo.
try:
from mercurial import ui, hg, error
from mercurial.commands import tag
except ImportError:
pass
else:
try:
ui = ui.ui()
repo = hg.repository(ui, project_path)
except error.RepoError:
return
if repo is None:
return
# Go through each changeset and assign it to the versions dict.
changesets = [repo.changectx(changeset) for changeset in repo.changelog]
for cs in sorted(changesets, reverse=True, key=_changeset_date):
# Check if the file with the version number is in this changeset
# and if it is, pull it out and assign it as a variable.
files = cs.files()
new_version = False
# Commit message cleanup hacks.
description = cs.description().decode("utf-8")
description = description.rstrip(".").replace("\n", ". ")
while " " in description:
description = description.replace(" ", " ")
description = description.replace(". . ", ". ").replace("...", ",")
while ".." in description:
description = description.replace("..", ".")
description = description.replace(":.", ":").replace("n'. t", "n't")
words = description.split()
# Format var names in commit.
for i, word in enumerate(words):
if (set("._") & set(word[:-1]) and set(letters) & set(word) and
"`" not in word and not word[0].isdigit()):
last = ""
if word[-1] in ",.":
last, word = word[-1], word[:-1]
words[i] = "``%s``%s" % (word, last)
description = " ".join(words)
if version_file in files:
for line in cs[version_file].data().split("\n"):
if line.startswith(version_var):
exec(line)
if locals()[version_var] == "0.1.0":
locals()[version_var] = "1.0.0"
break
versions[locals()[version_var]] = {
"changes": [],
"date": _changeset_date(cs).strftime("%b %d, %Y")
}
new_version = len(files) == 1
# Tag new versions.
hotfix = hotfixes.get(cs.hex()[:12])
if hotfix or new_version:
if hotfix:
version_tag = hotfix
else:
try:
version_tag = locals()[version_var]
except KeyError:
version_tag = None
if version_tag and version_tag not in cs.tags():
try:
tag(ui, repo, version_tag, rev=cs.hex())
print("Tagging version %s" % version_tag)
except:
pass
# Ignore changesets that are merges, bumped the version, closed
# a branch, regenerated the changelog itself, contain an ignore
# word, or contain too few words to be meaningful.
merge = len(cs.parents()) > 1
branch_closed = len(files) == 0
changelog_update = changelog_filename in files
ignored = [w for w in ignore if w.lower() in description.lower()]
too_few_words = len(description.split()) <= 3
if (merge or new_version or branch_closed or changelog_update or
ignored or too_few_words):
continue
# Ensure we have a current version and if so, add this changeset's
# description to it.
version = None
try:
version = locals()[version_var]
except __HOLE__:
if not hotfix:
continue
user = cs.user().decode("utf-8").split("<")[0].strip()
entry = "%s - %s" % (description, user)
if hotfix or entry not in versions[version]["changes"]:
if hotfix:
versions[hotfix] = {
"changes": [entry],
"date": _changeset_date(cs).strftime("%b %d, %Y"),
}
else:
versions[version]["changes"].insert(0, entry)
# Write out the changelog.
with open(changelog_file, "w") as f:
for version, version_info in versions.items():
header = "Version %s (%s)" % (version, version_info["date"])
f.write("%s\n" % header)
f.write("%s\n" % ("-" * len(header)))
f.write("\n")
if version_info["changes"]:
for change in version_info["changes"]:
f.write(" * %s\n" % change)
else:
f.write(" * No changes listed.\n")
f.write("\n")
|
KeyError
|
dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/utils/docs.py/build_changelog
|
6,380
|
def build_modelgraph(docs_path, package_name="mezzanine"):
"""
Creates a diagram of all the models for mezzanine and the given
package name, generates a smaller version and add it to the
docs directory for use in model-graph.rst
"""
to_path = os.path.join(docs_path, "img", "graph.png")
build_path = os.path.join(docs_path, "build", "_images")
resized_path = os.path.join(os.path.dirname(to_path), "graph-small.png")
settings = import_dotted_path(package_name +
".project_template.project_name.settings")
apps = [a.rsplit(".")[1] for a in settings.INSTALLED_APPS
if a.startswith("mezzanine.") or a.startswith(package_name + ".")]
try:
from django_extensions.management.commands import graph_models
except ImportError:
warn("Couldn't build model_graph, django_extensions not installed")
else:
options = {"inheritance": True, "outputfile": "graph.png",
"layout": "dot"}
try:
graph_models.Command().execute(*apps, **options)
except Exception as e:
warn("Couldn't build model_graph, graph_models failed on: %s" % e)
else:
try:
move("graph.png", to_path)
except __HOLE__ as e:
warn("Couldn't build model_graph, move failed on: %s" % e)
# docs/img/graph.png should exist in the repo - move it to the build path.
try:
if not os.path.exists(build_path):
os.makedirs(build_path)
copyfile(to_path, os.path.join(build_path, "graph.png"))
except OSError as e:
warn("Couldn't build model_graph, copy to build failed on: %s" % e)
try:
from PIL import Image
image = Image.open(to_path)
image.width = 800
image.height = image.size[1] * 800 // image.size[0]
image.save(resized_path, "PNG", quality=100)
except Exception as e:
warn("Couldn't build model_graph, resize failed on: %s" % e)
return
# Copy the dashboard screenshot to the build dir too. This doesn't
# really belong anywhere, so we do it here since this is the only
# spot we deal with doc images.
d = "dashboard.png"
copyfile(os.path.join(docs_path, "img", d), os.path.join(build_path, d))
|
OSError
|
dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/utils/docs.py/build_modelgraph
|
6,381
|
def _parse(self, data):
""" Parse the output from the 'mntr' 4letter word command """
h = StringIO(data)
result = {}
for line in h.readlines():
try:
key, value = self._parse_line(line)
result[key] = value
except __HOLE__:
pass # ignore broken lines
return result
|
ValueError
|
dataset/ETHPy150Open francelabs/datafari/debian7/zookeeper/src/contrib/monitoring/ganglia/zookeeper_ganglia.py/ZooKeeperServer._parse
|
6,382
|
def _parse_line(self, line):
try:
key, value = map(str.strip, line.split('\t'))
except ValueError:
raise ValueError('Found invalid line: %s' % line)
if not key:
raise ValueError('The key is mandatory and should not be empty')
try:
value = int(value)
except (__HOLE__, ValueError):
pass
return key, value
|
TypeError
|
dataset/ETHPy150Open francelabs/datafari/debian7/zookeeper/src/contrib/monitoring/ganglia/zookeeper_ganglia.py/ZooKeeperServer._parse_line
|
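ZooKeeper's 'mntr' four-letter command emits one key<TAB>value pair per line. A standalone sketch of the same parse as the two methods above, with made-up sample output:

def parse_mntr(text):
    # key<TAB>value per line; coerce to int where possible, skip broken lines.
    result = {}
    for line in text.splitlines():
        try:
            key, value = map(str.strip, line.split('\t'))
        except ValueError:
            continue
        try:
            value = int(value)
        except ValueError:
            pass
        result[key] = value
    return result

print(parse_mntr("zk_version\t3.4.6\nzk_avg_latency\t0"))
# {'zk_version': '3.4.6', 'zk_avg_latency': 0}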
6,383
|
def do_check(nowip, condition):
result = False
import OpenSSL
import socket
import re
context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.settimeout(1)
connection = OpenSSL.SSL.Connection(context, socket)
if condition["sni"]:
connection.set_tlsext_host_name(condition["host"])
try:
connection.connect(
(nowip, condition["port"]))
except KeyboardInterrupt:
exit()
except:
print(nowip + " Error")
return
connection.setblocking(True)
try:
connection.do_handshake()
except __HOLE__:
exit()
except OpenSSL.SSL.WantReadError:
print(nowip + " Timeout")
return
except:
print(nowip + " Error")
return
cert = connection.get_peer_certificate()
data = []
for no in range(0, cert.get_extension_count()):
if cert.get_extension(no).get_short_name() != b"subjectAltName":
continue
data = re.sub(
r"\\[\s\S]", "#",
re.sub(
r"\\x[0-9a-zA-Z]{2}", "#",
(str(cert.get_extension(no).get_data())
.replace(r"b\"", "").replace("\"", "")
.replace(r"b'", "")
.replace(r"'", "").replace("\\\\", "\\")))).split("#")
for item in data:
if item != "" and item != "0":
if item.find(condition["common_name_has"]) != -1:
print(nowip + " True, DNS Name=" + item)
result = True
else:
print(nowip + " False, DNS Name=" + item)
if len(data) == 0:
certname = OpenSSL.crypto.X509Name(cert.get_subject())
if certname.commonName.find(condition["common_name_has"]) != -1:
print(nowip + " True, CN=" + certname.commonName)
result = True
else:
print(nowip + " False, CN=" + certname.commonName)
return result
|
KeyboardInterrupt
|
dataset/ETHPy150Open futursolo/host-witness/witness.py/do_check
|
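The regex gymnastics above decode the raw ASN.1 bytes of subjectAltName by hand; pyOpenSSL can render the extension readably, which is usually enough (a sketch, given a cert object obtained from a handshake like the one above):

for no in range(cert.get_extension_count()):
    ext = cert.get_extension(no)
    if ext.get_short_name() == b"subjectAltName":
        print(str(ext))   # e.g. "DNS:example.com, DNS:*.example.com"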
6,384
|
@fresh_login_required
@nocache
def reminder_history(pk_hash):
reminder = Reminder.objects.get_or_404(pk_hash=pk_hash,
owner=current_user._get_current_object())
history = ReminderHistory.objects.filter(reminder=reminder)
# Paginate
try:
history = Paginator(history, page=int(request.args.get('page', 1)))
except (IndexError, __HOLE__):
return redirect(url_for('user.reminders.history', pk_hash=reminder.pk_hash))
context = {
'title': 'Reminder History',
'description': 'View Reminder Sent History',
'reminder': reminder,
'history': history
}
return render_template('user/reminders/history/index.html', **context)
|
ValueError
|
dataset/ETHPy150Open shaunduncan/breezeminder/breezeminder/views/reminder.py/reminder_history
|
6,385
|
def learn_sentence(self, sentence):
"""
Learn based on the input sentence.
:param sentence: Space separated sentence to apply to Markov model
:return: No relevant return data
"""
# Split the sentence into words/parts
parts = re.findall(r"\w[\w']*", sentence.lower())
if len(parts) == 0:
return
# This is a speed optimized method to increment the relation count between --terminate-- and the last part
try:
self.raw_scores[parts[-1]]["--terminate--"] += 1
except KeyError:
try:
self.raw_scores[parts[-1]]["--terminate--"] = 1
except KeyError:
self.raw_scores[parts[-1]] = {"--terminate--": 1}
# Iterate through all the parts, and increment the relation counts for all adjacent parts
last = "--terminate--"
for x in xrange(len(parts)):
current = parts[x]
try:
self.raw_scores[last][current] += 1
except __HOLE__:
try:
self.raw_scores[last][current] = 1
except KeyError:
self.raw_scores[last] = {current: 1}
last = current
# If any of the parts of this sentence don't end up in the database, something is broken in the code
for part in parts:
if part not in self.raw_scores:
raise
|
KeyError
|
dataset/ETHPy150Open bwall/markovobfuscate/markovobfuscate/obfuscation.py/MarkovKeyState.learn_sentence
|
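The nested KeyError ladders above collapse with collections.defaultdict and Counter; an equivalent sketch:

import re
from collections import defaultdict, Counter

def learn(sentence, scores=None):
    # Same bigram counts as learn_sentence, without the try/except ladders.
    scores = scores if scores is not None else defaultdict(Counter)
    parts = re.findall(r"\w[\w']*", sentence.lower())
    if not parts:
        return scores
    scores[parts[-1]]["--terminate--"] += 1
    last = "--terminate--"
    for current in parts:
        scores[last][current] += 1
        last = current
    return scores

print(learn("the cat sat")["the"])   # Counter({'cat': 1})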
6,386
|
def load_plugins():
"""Load any plugin modules
We load plugin modules based on directories provided to us by the environment, as well as a default in our own folder.
Returns a list of module objects
"""
# This function is a little wacky, doesn't seem like we SHOULD have to do all this just to get the behavior we want.
# The idea will be to check out the directory contents and pick up any files that seem to match what python knows how to
# import.
# To properly load the module, we'll need to identify what type it is by the file extension
suffix_map = {}
for suffix in imp.get_suffixes():
suffix_map[suffix[0]] = suffix
plugin_directories = [DEFAULT_PLUGIN_PATH]
if 'TESTIFY_PLUGIN_PATH' in os.environ:
plugin_directories += os.environ['TESTIFY_PLUGIN_PATH'].split(':')
plugin_modules = []
for plugin_path in plugin_directories:
for file_name in os.listdir(plugin_path):
# For any file that we know how to load, try to import it
if any(file_name.endswith('.py') and not file_name.startswith('.') for suffix in suffix_map.keys()):
full_file_path = os.path.join(plugin_path, file_name)
mod_name, suffix = os.path.splitext(file_name)
# Need some unlikely-to-clash unique-ish module name
mod_name = '_testify_plugin__' + mod_name
try:
plugin_modules.append(
imp.load_source(mod_name, full_file_path),
)
except TypeError:
continue
except __HOLE__ as e:
print("Failed to import plugin %s: %r" % (full_file_path, e), file=sys.stderr)
return plugin_modules
|
ImportError
|
dataset/ETHPy150Open Yelp/Testify/testify/test_program.py/load_plugins
|
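imp.load_source, used above, is deprecated; on Python 3.5+ the importlib equivalent is (a sketch, not Testify's code):

import importlib.util

def load_source(mod_name, path):
    # importlib replacement for the deprecated imp.load_source.
    spec = importlib.util.spec_from_file_location(mod_name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module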
6,387
|
def api_url_patterns():
for version, resources in API_LIST:
api = CommCareHqApi(api_name='v%d.%d' % version)
for R in resources:
api.register(R())
yield (r'^', include(api.urls))
yield url(r'^v0.1/xform_es/$', XFormES.as_domain_specific_view())
# HACK: fix circular import here, to fix later
try:
from pact.api import PactAPI
except __HOLE__:
pass # maybe pact isn't installed
for view_class in DomainAPI.__subclasses__():
yield url(r'^custom/%s/v%s/$' % (view_class.api_name(), view_class.api_version()), view_class.as_view(), name="%s_%s" % (view_class.api_name(), view_class.api_version()))
yield url(r'^case/attachment/(?P<case_id>[\w\-]+)/(?P<attachment_id>.*)$', CaseAttachmentAPI.as_view(), name="api_case_attachment")
yield url(r'^form/attachment/(?P<form_id>[\w\-]+)/(?P<attachment_id>.*)$', FormAttachmentAPI.as_view(), name="api_form_attachment")
|
ImportError
|
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/api/urls.py/api_url_patterns
|
6,388
|
def output_poll(self):
try:
name = self.token['name']
except __HOLE__:
return self.renderer.poll_raw(poll_txt=self.token['raw'])
else:
return self.renderer.poll(name=name)
|
KeyError
|
dataset/ETHPy150Open nitely/Spirit/spirit/core/utils/markdown/markdown.py/Markdown.output_poll
|
6,389
|
def create(server_):
'''
Create a single BareMetal server from a data dict.
'''
try:
# Check for required profile parameters before sending any API calls.
if server_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'scaleway',
server_['profile'],
vm_=server_) is False:
return False
except __HOLE__:
pass
# Since using "provider: <provider-engine>" is deprecated, alias provider
# to use driver: "driver: <provider-engine>"
if 'provider' in server_:
server_['driver'] = server_.pop('provider')
salt.utils.cloud.fire_event(
'event',
'starting create',
'salt/cloud/{0}/creating'.format(server_['name']),
{
'name': server_['name'],
'profile': server_['profile'],
'provider': server_['driver'],
},
transport=__opts__['transport']
)
log.info('Creating a BareMetal server {0}'.format(server_['name']))
access_key = config.get_cloud_config_value(
'access_key', get_configured_provider(), __opts__, search_global=False
)
commercial_type = config.get_cloud_config_value(
'commercial_type', server_, __opts__, default='C1'
)
kwargs = {
'name': server_['name'],
'organization': access_key,
'image': get_image(server_),
'commercial_type': commercial_type,
}
salt.utils.cloud.fire_event(
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(server_['name']),
{'kwargs': kwargs},
transport=__opts__['transport']
)
try:
ret = create_node(kwargs)
except Exception as exc:
log.error(
'Error creating {0} on Scaleway\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: {1}'.format(
server_['name'],
str(exc)
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
def __query_node_data(server_name):
''' Called to check if the server has a public IP address.
'''
data = show_instance(server_name, 'action')
if data and data.get('public_ip'):
return data
return False
try:
data = salt.utils.cloud.wait_for_ip(
__query_node_data,
update_args=(server_['name'],),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', server_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', server_, __opts__, default=10),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(server_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
server_['ssh_host'] = data['public_ip']['address']
server_['ssh_password'] = config.get_cloud_config_value(
'ssh_password', server_, __opts__
)
ret = salt.utils.cloud.bootstrap(server_, __opts__)
ret.update(data)
log.info('Created BareMetal server \'{0[name]}\''.format(server_))
log.debug(
'\'{0[name]}\' BareMetal server creation details:\n{1}'.format(
server_, pprint.pformat(data)
)
)
salt.utils.cloud.fire_event(
'event',
'created instance',
'salt/cloud/{0}/created'.format(server_['name']),
{
'name': server_['name'],
'profile': server_['profile'],
'provider': server_['driver'],
},
transport=__opts__['transport']
)
return ret
|
AttributeError
|
dataset/ETHPy150Open saltstack/salt/salt/cloud/clouds/scaleway.py/create
|
6,390
|
def _get_node(name):
for attempt in reversed(list(range(10))):
try:
return list_nodes_full()[name]
except __HOLE__:
log.debug(
'Failed to get the data for node \'{0}\'. Remaining '
'attempts: {1}'.format(
name, attempt
)
)
# Just a little delay between attempts...
time.sleep(0.5)
return {}
|
KeyError
|
dataset/ETHPy150Open saltstack/salt/salt/cloud/clouds/scaleway.py/_get_node
|
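_get_node's reversed(range(10)) makes the loop variable double as an attempts-remaining count for the log message. A generic form of the same retry-with-delay pattern:

import time

def retry(fn, attempts=10, delay=0.5, default=None):
    # Call fn until it stops raising KeyError or attempts run out.
    for remaining in reversed(range(attempts)):
        try:
            return fn()
        except KeyError:
            if remaining:        # unlike the original, skip the final sleep
                time.sleep(delay)
    return default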
6,391
|
def stop(self):
try:
self.logger.removeHandler(self.handlers[self.logger_name])
except __HOLE__:
pass
|
KeyError
|
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/packages/neo4j/util.py/Watcher.stop
|
6,392
|
def _convert_ip_address(self, ip, field_name):
try:
return netaddr.IPAddress(ip)
except (netaddr.AddrFormatError, __HOLE__):
msg = (_('%(field_name)s: Invalid IP address (value=%(ip)s)')
% {'field_name': field_name, 'ip': ip})
raise forms.ValidationError(msg)
|
ValueError
|
dataset/ETHPy150Open CiscoSystems/avos/openstack_dashboard/dashboards/project/networks/workflows.py/CreateSubnetDetailAction._convert_ip_address
|
6,393
|
def _convert_ip_network(self, network, field_name):
try:
return netaddr.IPNetwork(network)
except (netaddr.AddrFormatError, __HOLE__):
msg = (_('%(field_name)s: Invalid IP address (value=%(network)s)')
% {'field_name': field_name, 'network': network})
raise forms.ValidationError(msg)
|
ValueError
|
dataset/ETHPy150Open CiscoSystems/avos/openstack_dashboard/dashboards/project/networks/workflows.py/CreateSubnetDetailAction._convert_ip_network
|
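Both validators above lean on netaddr raising on malformed input; a quick illustration of that error surface (netaddr.AddrFormatError is the library's own exception):

import netaddr

print(netaddr.IPAddress('10.0.0.1'))      # 10.0.0.1
print(netaddr.IPNetwork('10.0.0.0/24'))   # 10.0.0.0/24
try:
    netaddr.IPAddress('not-an-ip')
except (netaddr.AddrFormatError, ValueError) as e:
    print("rejected:", e)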
6,394
|
def configure(self, candidates=None):
ctx = self.ctx
if candidates is None:
if sys.platform == "win32":
candidates = ["msvc", "gxx"]
else:
candidates = ["gxx", "cxx"]
def _detect_cxx():
detected = None
sys.path.insert(0, os.path.dirname(yaku.tools.__file__))
try:
for cxx_type in candidates:
sys.stderr.write("Looking for %s (c++ compiler) ... " % cxx_type)
try:
mod = __import__(cxx_type)
if mod.detect(ctx):
sys.stderr.write("yes\n")
detected = cxx_type
break
except __HOLE__:
raise
except:
pass
sys.stderr.write("no!\n")
return detected
finally:
sys.path.pop(0)
cxx_type = _detect_cxx()
if cxx_type is None:
raise ValueError("No CXX compiler found!")
cxx = ctx.load_tool(cxx_type)
cxx.setup(ctx)
if sys.platform != "win32":
ar = ctx.load_tool("ar")
ar.setup(ctx)
self.configured = True
|
ImportError
|
dataset/ETHPy150Open cournape/Bento/bento/private/_yaku/yaku/tools/cxxtasks.py/CXXBuilder.configure
|
6,395
|
def setUp(self):
super(CollectionTestCase, self).setUp()
if self.service.splunk_version[0] >= 5 and 'modular_input_kinds' not in collections:
collections.append('modular_input_kinds') # Not supported before Splunk 5.0
else:
logging.info("Skipping modular_input_kinds; not supported by Splunk %s" % \
'.'.join(str(x) for x in self.service.splunk_version))
for saved_search in self.service.saved_searches:
if saved_search.name.startswith('delete-me'):
try:
for job in saved_search.history():
job.cancel()
self.service.saved_searches.delete(saved_search.name)
except __HOLE__:
pass
|
KeyError
|
dataset/ETHPy150Open splunk/splunk-sdk-python/tests/test_collection.py/CollectionTestCase.setUp
|
6,396
|
def loop(self, sleep_time=None, callback=None):
self.trigger_init()
try:
self._notifier.loop(callback)
except __HOLE__:
self._notifier.stop()
raise
|
KeyboardInterrupt
|
dataset/ETHPy150Open jeffh/sniffer/sniffer/scanner/pyinotify_scanner.py/PyINotifyScanner.loop
|
6,397
|
def run(self):
""" Runs the event loop
This method starts the main asyncio event loop and stays in the loop until a SIGINT or
SIGTERM is received (see `_sigint_handler`)
"""
try:
self.logger.debug("Starting event loop and calling `get_ready`")
if self.is_safe():
self.logger.debug("System safe, calling get_ready")
self._loop.call_soon(self.get_ready)
else:
self.logger.warning("Not safe, calling wait_until_safe")
self._loop.call_soon(self.wait_until_safe)
self._loop.run_forever()
except __HOLE__:
self.logger.warning("Interrupted")
self.power_down()
finally:
self.logger.debug("Event loop stopped")
if self._loop.is_running():
self.logger.debug("Stopping event loop")
self._loop.stop()
if not self._loop.is_closed():
self.logger.debug("Closing event loop")
self._loop.close()
|
KeyboardInterrupt
|
dataset/ETHPy150Open panoptes/POCS/panoptes/state/event.py/PanEventManager.run
|
6,398
|
def _test_not_equal(self, a, b):
try:
self._assert_func(a, b)
passed = True
except __HOLE__:
pass
else:
raise AssertionError("a and b are found equal but are not")
|
AssertionError
|
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/testing/tests/test_utils.py/_GenericTest._test_not_equal
|
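The try/except/else shape above is a hand-rolled assertRaises; the unittest helper states the same intent directly:

import unittest

class TestNotEqual(unittest.TestCase):
    def test_mismatch_raises(self):
        # assertRaises replaces the manual try/except/else dance.
        with self.assertRaises(AssertionError):
            self.assertEqual(1, 2)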
6,399
|
@stable(as_of="0.4.0")
def index(self, subsequence, start=None, end=None):
"""Find position where subsequence first occurs in the sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Subsequence to search for in this sequence.
start : int, optional
The position at which to start searching (inclusive).
end : int, optional
The position at which to stop searching (exclusive).
Returns
-------
int
Position where `subsequence` first occurs in this sequence.
Raises
------
ValueError
If `subsequence` is not present in this sequence.
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than this sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACACGACGTT-')
>>> s.index('ACG')
2
"""
try:
return self._string.index(
self._munge_to_bytestring(subsequence, "index"), start, end)
except __HOLE__:
raise ValueError(
"%r is not present in %r." % (subsequence, self))
|
ValueError
|
dataset/ETHPy150Open biocore/scikit-bio/skbio/sequence/_sequence.py/Sequence.index
|