| Unnamed: 0 (int64, 0–10k) | function (string, 79–138k chars) | label (20 classes) | info (string, 42–261 chars) |
|---|---|---|---|
8,700
|
def __init__(self, **kwargs):
"""Initializes the Jinja2 Environment object while loading extensions.
Does the following:
1. Establishes default_extensions (currently just a Time feature)
2. Reads extensions set in the cookiecutter.json _extensions key.
3. Attempts to load the extensions. Provides useful error if fails.
"""
context = kwargs.pop('context', {})
default_extensions = [
'jinja2_time.TimeExtension',
]
extensions = default_extensions + self._read_extensions(context)
try:
super(ExtensionLoaderMixin, self).__init__(
extensions=extensions,
**kwargs
)
except __HOLE__ as err:
raise UnknownExtension('Unable to load extension: {}'.format(err))
|
ImportError
|
dataset/ETHPy150Open audreyr/cookiecutter/cookiecutter/environment.py/ExtensionLoaderMixin.__init__
|
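The label resolves the hole: Jinja2 imports each extension's dotted path when the Environment is constructed, so a bad path fails with ImportError, which the mixin re-wraps. A minimal sketch of that pattern, assuming jinja2 is installed; `UnknownExtension` and `make_env` here are illustrative stand-ins:

```python
# Minimal sketch, assuming jinja2 is installed; names are stand-ins.
from jinja2 import Environment

class UnknownExtension(Exception):
    """Stand-in for cookiecutter's exception of the same name."""

def make_env(extensions):
    try:
        return Environment(extensions=extensions)
    except ImportError as err:  # a bad dotted path fails at import time
        raise UnknownExtension('Unable to load extension: {}'.format(err))

# make_env(['no.such.Extension'])  # raises UnknownExtension
```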
8,701
|
def _read_extensions(self, context):
"""Return a list of extensions as str to be passed on to the jinja2
env. If context does not contain the relevant info, return an empty
list instead.
"""
try:
extensions = context['cookiecutter']['_extensions']
except __HOLE__:
return []
else:
return [str(ext) for ext in extensions]
|
KeyError
|
dataset/ETHPy150Open audreyr/cookiecutter/cookiecutter/environment.py/ExtensionLoaderMixin._read_extensions
|
8,702
|
def test_inheritance(self):
# Make sure the inheritance hierarchy matches the documentation
exc_set = set(x for x in dir(exceptions) if not x.startswith('_'))
inheritance_tree = open(os.path.join(os.path.split(__file__)[0],
'exception_hierarchy.txt'))
try:
superclass_name = inheritance_tree.readline().rstrip()
try:
last_exc = getattr(__builtin__, superclass_name)
except AttributeError:
self.fail("base class %s not a built-in" % superclass_name)
self.assertIn(superclass_name, exc_set)
exc_set.discard(superclass_name)
superclasses = [] # Loop will insert base exception
last_depth = 0
for exc_line in inheritance_tree:
exc_line = exc_line.rstrip()
depth = exc_line.rindex('-')
exc_name = exc_line[depth+2:] # Slice past space
if '(' in exc_name:
paren_index = exc_name.index('(')
platform_name = exc_name[paren_index+1:-1]
exc_name = exc_name[:paren_index-1] # Slice off space
if platform_system() != platform_name:
exc_set.discard(exc_name)
continue
if '[' in exc_name:
left_bracket = exc_name.index('[')
exc_name = exc_name[:left_bracket-1] # cover space
try:
exc = getattr(__builtin__, exc_name)
except AttributeError:
self.fail("%s not a built-in exception" % exc_name)
if last_depth < depth:
superclasses.append((last_depth, last_exc))
elif last_depth > depth:
while superclasses[-1][0] >= depth:
superclasses.pop()
self.assertTrue(issubclass(exc, superclasses[-1][1]),
"%s is not a subclass of %s" % (exc.__name__,
superclasses[-1][1].__name__))
try: # Some exceptions require arguments; just skip them
self.verify_instance_interface(exc())
except __HOLE__:
pass
self.assertIn(exc_name, exc_set)
exc_set.discard(exc_name)
last_exc = exc
last_depth = depth
finally:
inheritance_tree.close()
self.assertEqual(len(exc_set), 0, "%s not accounted for" % exc_set)
|
TypeError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_pep352.py/ExceptionClassTests.test_inheritance
|
8,703
|
def raise_fails(self, object_):
"""Make sure that raising 'object_' triggers a TypeError."""
try:
raise object_
except __HOLE__:
return # What is expected.
self.fail("TypeError expected for raising %s" % type(object_))
|
TypeError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_pep352.py/UsageTests.raise_fails
|
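The behavior under test is easy to reproduce: in Python 3, raising an object that is not a BaseException instance or subclass raises TypeError at the raise site.

```python
# Raising a non-exception is itself a TypeError (Python 3 shown).
try:
    raise 42
except TypeError as e:
    print(e)  # "exceptions must derive from BaseException"
```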
8,704
|
def catch_fails(self, object_):
"""Catching 'object_' should raise a TypeError."""
try:
try:
raise StandardError
except object_:
pass
except __HOLE__:
pass
except StandardError:
self.fail("TypeError expected when catching %s" % type(object_))
try:
try:
raise StandardError
except (object_,):
pass
except TypeError:
return
except StandardError:
self.fail("TypeError expected when catching %s as specified in a "
"tuple" % type(object_))
|
TypeError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_pep352.py/UsageTests.catch_fails
|
8,705
|
def test_catch_string(self):
# Catching a string should trigger a DeprecationWarning.
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.filterwarnings("error")
str_exc = "spam"
with self.assertRaises(DeprecationWarning):
try:
raise StandardError
except str_exc:
pass
# Make sure that even if the string exception is listed in a tuple
# that a warning is raised.
with self.assertRaises(DeprecationWarning):
try:
raise StandardError
except (__HOLE__, str_exc):
pass
|
AssertionError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_pep352.py/UsageTests.test_catch_string
|
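This test targets Python 2's deprecated string exceptions. For context, Python 3 removed them entirely, so a string in an except clause now fails with TypeError when matching is attempted:

```python
# Python 3: a non-exception in an except clause raises TypeError.
try:
    try:
        raise ValueError
    except "spam":
        pass
except TypeError as e:
    print(e)  # catching classes that do not inherit from BaseException...
```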
8,706
|
def remove_role(self, user, assignee, role):
"""
Removes the role from the assignee. The 'user' argument is used for
logging purposes.
"""
try:
p_as_user = Participant.objects.get(workflowactivity=self,
user=user, disabled=False)
p_as_assignee = Participant.objects.get(workflowactivity=self,
user=assignee)
if role in p_as_assignee.roles.all():
p_as_assignee.roles.remove(role)
name = assignee.get_full_name() if assignee.get_full_name() else assignee.username
note = _('Role "%s" removed from %s')%(role.__unicode__(), name)
current_state = self.current_state().state if self.current_state() else None
deadline = self.current_state().deadline if self.current_state() else None
wh = WorkflowHistory(
workflowactivity=self,
state=current_state,
log_type=WorkflowHistory.ROLE,
participant=p_as_user,
note=note,
deadline=deadline
)
wh.save()
role_removed.send(sender=wh)
return wh
else:
# The role isn't associated with the assignee anyway so there is
# nothing to do
return None
except __HOLE__:
# If we can't find the assignee as a participant then there is
# nothing to do
return None
|
ObjectDoesNotExist
|
dataset/ETHPy150Open ntoll/workflow/workflow/models.py/WorkflowActivity.remove_role
|
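This row and the three that follow (8,707–8,709) lean on the same Django idiom: `QuerySet.get()` raises the model's `DoesNotExist`, a subclass of `django.core.exceptions.ObjectDoesNotExist`, when no row matches. A minimal sketch of the guard, assuming Django is installed; `get_or_none` is an illustrative name:

```python
# Minimal sketch, assuming Django is installed; helper name is illustrative.
from django.core.exceptions import ObjectDoesNotExist

def get_or_none(queryset, **filters):
    try:
        return queryset.get(**filters)
    except ObjectDoesNotExist:  # also catches Model.DoesNotExist subclasses
        return None
```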
8,707
|
def clear_roles(self, user, assignee):
"""
Clears all the roles from assignee. The 'user' argument is used for
logging purposes.
"""
try:
p_as_user = Participant.objects.get(workflowactivity=self,
user=user, disabled=False)
p_as_assignee = Participant.objects.get(workflowactivity=self,
user=assignee)
p_as_assignee.roles.clear()
name = assignee.get_full_name() if assignee.get_full_name() else assignee.username
note = _('All roles removed from %s')%name
current_state = self.current_state().state if self.current_state() else None
deadline = self.current_state().deadline if self.current_state() else None
wh = WorkflowHistory(
workflowactivity=self,
state=current_state,
log_type=WorkflowHistory.ROLE,
participant=p_as_user,
note=note,
deadline=deadline
)
wh.save()
role_removed.send(sender=wh)
return wh
except __HOLE__:
# If we can't find the assignee then there is nothing to do
pass
|
ObjectDoesNotExist
|
dataset/ETHPy150Open ntoll/workflow/workflow/models.py/WorkflowActivity.clear_roles
|
8,708
|
def disable_participant(self, user, user_to_disable, note):
"""
Mark the user_to_disable as disabled. Must include a note explaining
reasons for this action. Also the 'user' arg is used for logging who
carried this out
"""
if not note:
raise UnableToDisableParticipant, __('Must supply a reason for'\
' disabling a participant. None given.')
try:
p_as_user = Participant.objects.get(workflowactivity=self,
user=user, disabled=False)
p_to_disable = Participant.objects.get(workflowactivity=self,
user=user_to_disable)
if not p_to_disable.disabled:
p_to_disable.disabled = True
p_to_disable.save()
name = user_to_disable.get_full_name() if user_to_disable.get_full_name() else user_to_disable.username
note = _('Participant %s disabled with the reason: %s')%(name, note)
current_state = self.current_state().state if self.current_state() else None
deadline = self.current_state().deadline if self.current_state() else None
wh = WorkflowHistory(
workflowactivity=self,
state=current_state,
log_type=WorkflowHistory.ROLE,
participant=p_as_user,
note=note,
deadline=deadline
)
wh.save()
return wh
else:
# They're already disabled
return None
except __HOLE__:
# If we can't find the assignee then there is nothing to do
return None
|
ObjectDoesNotExist
|
dataset/ETHPy150Open ntoll/workflow/workflow/models.py/WorkflowActivity.disable_participant
|
8,709
|
def enable_participant(self, user, user_to_enable, note):
"""
Mark the user_to_enable as enabled. Must include a note explaining
reasons for this action. Also the 'user' arg is used for logging who
carried this out
"""
if not note:
raise UnableToEnableParticipant, __('Must supply a reason for'\
' enabling a disabled participant. None given.')
try:
p_as_user = Participant.objects.get(workflowactivity=self,
user=user, disabled=False)
p_to_enable = Participant.objects.get(workflowactivity=self,
user=user_to_enable)
if p_to_enable.disabled:
p_to_enable.disabled = False
p_to_enable.save()
name = user_to_enable.get_full_name() if user_to_enable.get_full_name() else user_to_enable.username
note = _('Participant %s enabled with the reason: %s')%(name,
note)
current_state = self.current_state().state if self.current_state() else None
deadline = self.current_state().deadline if self.current_state() else None
wh = WorkflowHistory(
workflowactivity=self,
state=current_state,
log_type=WorkflowHistory.ROLE,
participant=p_as_user,
note=note,
deadline=deadline
)
wh.save()
return wh
else:
# The participant is already enabled
return None
except __HOLE__:
# If we can't find the participant then there is nothing to do
return None
|
ObjectDoesNotExist
|
dataset/ETHPy150Open ntoll/workflow/workflow/models.py/WorkflowActivity.enable_participant
|
8,710
|
@property
def overrides(self):
warnings.warn("`Settings.overrides` attribute is deprecated and won't "
"be supported in Scrapy 0.26, use "
"`Settings.set(name, value, priority='cmdline')` instead",
category=ScrapyDeprecationWarning, stacklevel=2)
try:
o = self._overrides
except __HOLE__:
self._overrides = o = _DictProxy(self, 'cmdline')
return o
|
AttributeError
|
dataset/ETHPy150Open wcong/ants/ants/settings/__init__.py/Settings.overrides
|
8,711
|
@property
def defaults(self):
warnings.warn("`Settings.defaults` attribute is deprecated and won't "
"be supported in Scrapy 0.26, use "
"`Settings.set(name, value, priority='default')` instead",
category=ScrapyDeprecationWarning, stacklevel=2)
try:
o = self._defaults
except __HOLE__:
self._defaults = o = _DictProxy(self, 'default')
return o
|
AttributeError
|
dataset/ETHPy150Open wcong/ants/ants/settings/__init__.py/Settings.defaults
|
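This property (and `overrides` in the previous row) uses try/except AttributeError as a lazy-initialization cache. The pattern in isolation:

```python
class Lazy(object):
    @property
    def cache(self):
        try:
            return self._cache   # fast path once initialized
        except AttributeError:
            self._cache = {}     # computed exactly once, on first access
            return self._cache

obj = Lazy()
assert obj.cache is obj.cache
```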
8,712
|
@celery.task
def stat_log(key, value=1):
try:
sc = StatsCount.objects.create(key=key, value=value)
except __HOLE__:
return None
return sc
|
ValueError
|
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/stats/tasks.py/stat_log
|
8,713
|
@bothmethod
def get_plot(self_or_cls, obj):
"""
Given a HoloViews Viewable return a corresponding plot instance.
"""
if not isinstance(obj, Plot) and not displayable(obj):
obj = collate(obj)
# Initialize DynamicMaps with first data item
dmaps = obj.traverse(lambda x: x, specs=[DynamicMap])
for dmap in dmaps:
if dmap.sampled:
# Skip initialization until plotting code
continue
if dmap.call_mode == 'key':
dmap[dmap._initial_key()]
else:
try:
next(dmap)
except __HOLE__: # Exhausted DynamicMap
raise SkipRendering("DynamicMap generator exhausted.")
if not isinstance(obj, Plot):
obj = Layout.from_values(obj) if isinstance(obj, AdjointLayout) else obj
plot_opts = self_or_cls.plot_options(obj, self_or_cls.size)
plot = self_or_cls.plotting_class(obj)(obj, **plot_opts)
plot.update(0)
else:
plot = obj
return plot
|
StopIteration
|
dataset/ETHPy150Open ioam/holoviews/holoviews/plotting/renderer.py/Renderer.get_plot
|
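The hole is the standard exhausted-iterator signal: `next()` on a spent iterator raises StopIteration, which `get_plot` converts into `SkipRendering`.

```python
gen = iter([0])
next(gen)               # consumes the only item
try:
    next(gen)
except StopIteration:   # the condition mapped to SkipRendering above
    print('exhausted')
```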
8,714
|
@classmethod
def plotting_class(cls, obj):
"""
Given an object or Element class, return the suitable plotting
class needed to render it with the current renderer.
"""
if isinstance(obj, AdjointLayout) or obj is AdjointLayout:
obj = Layout
if isinstance(obj, type):
element_type = obj
else:
element_type = obj.type if isinstance(obj, HoloMap) else type(obj)
try:
plotclass = Store.registry[cls.backend][element_type]
except __HOLE__:
raise Exception("No corresponding plot type found for %r" % type(obj))
return plotclass
|
KeyError
|
dataset/ETHPy150Open ioam/holoviews/holoviews/plotting/renderer.py/Renderer.plotting_class
|
8,715
|
def get_authenticated_user(self, callback):
"""Gets the OAuth authorized user and access token on callback.
This method should be called from the handler for your registered
OAuth Callback URL to complete the registration process. We call
callback with the authenticated user, which in addition to standard
attributes like 'name' includes the 'access_key' attribute, which
contains the OAuth access you can use to make authorized requests
to this service on behalf of the user.
:param callback:
:returns:
"""
request_key = self.request.args.get('oauth_token')
oauth_verifier = self.request.args.get('oauth_verifier', None)
request_cookie = self.request.cookies.get('_oauth_request_token')
if request_cookie:
parts = request_cookie.split('|')
if len(parts) == 2:
try:
cookie_key = base64.b64decode(parts[0])
cookie_secret = base64.b64decode(parts[1])
except __HOLE__, e:
# TypeError: Incorrect padding
logging.exception(e)
request_cookie = None
else:
request_cookie = None
if not request_cookie:
return callback(None)
self.session_store.delete_cookie('_oauth_request_token')
if cookie_key != request_key:
return callback(None)
token = dict(key=cookie_key, secret=cookie_secret)
if oauth_verifier:
token['verifier'] = oauth_verifier
try:
url = self._oauth_access_token_url(token)
response = urlfetch.fetch(url, deadline=10)
except urlfetch.DownloadError, e:
logging.exception(e)
response = None
return self._on_access_token(callback, response)
|
TypeError
|
dataset/ETHPy150Open moraes/tipfy/tipfy/auth/oauth.py/OAuthMixin.get_authenticated_user
|
8,716
|
def _get_normalized_parameters(parameters, query):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in parameters.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((_to_utf8(key), _to_utf8(value)))
else:
try:
value = list(value)
except __HOLE__, e:
assert 'is not iterable' in str(e)
items.append((_to_utf8(key), _to_utf8(value)))
else:
items.extend((_to_utf8(key), _to_utf8(item)) for item in value)
url_items = _split_url_string(query).items()
url_items = [(_to_utf8(k), _to_utf8(v))
for k, v in url_items if k != 'oauth_signature']
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
# Encode signature parameters per Oauth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
|
TypeError
|
dataset/ETHPy150Open moraes/tipfy/tipfy/auth/oauth.py/_get_normalized_parameters
|
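The `assert` in this row leans on CPython's error text for non-iterables; `list()` over a non-iterable raises a TypeError whose message contains "is not iterable":

```python
try:
    list(42)
except TypeError as e:
    assert 'is not iterable' in str(e)  # "'int' object is not iterable"
```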
8,717
|
def __str__(self):
try:
label = self.fields_mapping[self.field_ident].label
except __HOLE__:
return u''
else:
return _('{0} to {1}').format(label, self.name)
|
KeyError
|
dataset/ETHPy150Open fusionbox/django-widgy/widgy/contrib/form_builder/models.py/FieldMappingValue.__str__
|
8,718
|
def update_mapping(self, mapping, form):
try:
form_field_name = self.fields_mapping[self.field_ident].get_formfield_name()
mapping[self.name] = form.cleaned_data[form_field_name]
except __HOLE__:
pass
|
KeyError
|
dataset/ETHPy150Open fusionbox/django-widgy/widgy/contrib/form_builder/models.py/FieldMappingValue.update_mapping
|
8,719
|
def get_to_emails(self, form):
try:
to = [f for f in self.parent_form.depth_first_order()
if hasattr(f, 'ident') and f.ident == self.to_ident][0]
except __HOLE__:
# no matching fields found, or to_ident is blank
return []
else:
return [form.cleaned_data[to.get_formfield_name()]]
|
IndexError
|
dataset/ETHPy150Open fusionbox/django-widgy/widgy/contrib/form_builder/models.py/EmailUserHandler.get_to_emails
|
8,720
|
def _get_renderer_settings(self, chunk_index=None, **kwargs):
"""Calculate the render settings for the display of a diff.
This will calculate settings based on user preferences and URL
parameters. It does not calculate the state of any DiffSets or
FileDiffs.
"""
highlighting = get_enable_highlighting(self.request.user)
try:
lines_of_context = self.request.GET.get('lines-of-context', '')
lines_of_context = [int(i) for i in lines_of_context.split(',', 1)]
except (__HOLE__, ValueError):
lines_of_context = None
if chunk_index is not None:
try:
chunk_index = int(chunk_index)
except (TypeError, ValueError):
chunk_index = None
if lines_of_context:
collapse_all = True
elif chunk_index is not None:
# If we're currently expanding part of a chunk, we want to render
# the entire chunk without any lines collapsed. In the case of
# showing a range of lines, we're going to get all chunks and then
# only show the range. This is so that we won't have separate
# cached entries for each range.
collapse_all = False
else:
collapse_all = get_collapse_diff(self.request)
show_deleted = (self.request.GET.get('show-deleted') == '1')
return {
'chunk_index': chunk_index,
'collapse_all': collapse_all,
'highlighting': highlighting,
'lines_of_context': lines_of_context,
'show_deleted': show_deleted,
}
|
TypeError
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/diffviewer/views.py/DiffFragmentView._get_renderer_settings
|
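The two-exception tuple mirrors how `int()` fails: TypeError for a value of the wrong type (such as None) and ValueError for an unparseable string.

```python
for bad in (None, 'abc'):
    try:
        int(bad)
    except (TypeError, ValueError) as e:
        print(type(e).__name__)  # TypeError for None, ValueError for 'abc'
```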
8,721
|
def _get_requested_diff_file(self, diffset, filediff, interdiffset):
"""Fetches information on the requested diff.
This will look up information on the diff that's to be rendered
and return it, if found. It may also augment it with additional
data.
The file will not contain chunk information. That must be specifically
populated later.
"""
files = get_diff_files(diffset, filediff, interdiffset,
request=self.request)
if files:
assert len(files) == 1
file = files[0]
if 'index' in self.request.GET:
try:
file['index'] = int(self.request.GET.get('index'))
except __HOLE__:
pass
return file
return None
|
ValueError
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/diffviewer/views.py/DiffFragmentView._get_requested_diff_file
|
8,722
|
def is_iterable(x):
"A implementation independent way of checking for iterables"
try:
iter(x)
except __HOLE__:
return False
else:
return True
|
TypeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/utils/itercompat.py/is_iterable
|
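With the labeled TypeError filled in, this helper is self-contained and easy to check:

```python
def is_iterable(x):
    """iter() raises TypeError for objects without an iteration protocol."""
    try:
        iter(x)
    except TypeError:
        return False
    return True

assert is_iterable('abc') and is_iterable([]) and not is_iterable(42)
```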
8,723
|
@contextmanager
def bounce_lock(name):
"""Acquire a bounce lockfile for the name given. The name should generally
be the service namespace being bounced.
This is a contextmanager. Please use it via 'with bounce_lock(name):'.
:param name: The lock name to acquire"""
lockfile = '/var/lock/%s.lock' % name
fd = open(lockfile, 'w')
remove = False
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
remove = True
yield
except __HOLE__:
raise LockHeldException("Service %s is already being bounced!" % name)
finally:
fd.close()
if remove:
os.remove(lockfile)
|
IOError
|
dataset/ETHPy150Open Yelp/paasta/paasta_tools/bounce_lib.py/bounce_lock
|
8,724
|
def NextKnot(self):
"""The knot after the once currently indexed."""
try:
return self.knots[self.index + 1]
except __HOLE__:
return INFINITY
|
IndexError
|
dataset/ETHPy150Open google/rfmt/formatter/support.py/Solution.NextKnot
|
8,725
|
def run(self, files):
reprounzip.common.record_usage(upload_files=len(files))
input_files = {n: f.path
for n, f in iteritems(self.get_config().inputs_outputs)
if f.read_runs}
# No argument: list all the input files and exit
if not files:
print("Input files:")
for input_name in sorted(input_files):
assigned = self.input_files.get(input_name)
if assigned is None:
assigned = "(original)"
elif assigned is False:
assigned = "(not created)"
elif assigned is True:
assigned = "(generated)"
else:
assert isinstance(assigned, (bytes, unicode_))
print(" %s: %s" % (input_name, assigned))
return
self.prepare_upload(files)
try:
# Upload files
for filespec in files:
filespec_split = filespec.rsplit(':', 1)
if len(filespec_split) != 2:
logging.critical("Invalid file specification: %r",
filespec)
sys.exit(1)
local_path, input_name = filespec_split
try:
input_path = input_files[input_name]
except __HOLE__:
logging.critical("Invalid input file: %r", input_name)
sys.exit(1)
temp = None
if not local_path:
# Restore original file from pack
logging.debug("Restoring input file %s", input_path)
fd, temp = Path.tempfile(prefix='reprozip_input_')
os.close(fd)
local_path = self.extract_original_input(input_name,
input_path,
temp)
if local_path is None:
temp.remove()
logging.warning("No original packed, can't restore "
"input file %s", input_name)
continue
else:
local_path = Path(local_path)
logging.debug("Uploading file %s to %s",
local_path, input_path)
if not local_path.exists():
logging.critical("Local file %s doesn't exist",
local_path)
sys.exit(1)
self.upload_file(local_path, input_path)
if temp is not None:
temp.remove()
self.input_files.pop(input_name, None)
else:
self.input_files[input_name] = local_path.absolute().path
finally:
self.finalize()
|
KeyError
|
dataset/ETHPy150Open ViDA-NYU/reprozip/reprounzip/reprounzip/unpackers/common/misc.py/FileUploader.run
|
8,726
|
def extract_original_input(self, input_name, input_path, temp):
tar = tarfile.open(str(self.target / self.data_tgz), 'r:*')
try:
member = tar.getmember(str(join_root(PosixPath('DATA'),
input_path)))
except __HOLE__:
return None
member = copy.copy(member)
member.name = str(temp.components[-1])
tar.extract(member, str(temp.parent))
tar.close()
return temp
|
KeyError
|
dataset/ETHPy150Open ViDA-NYU/reprozip/reprounzip/reprounzip/unpackers/common/misc.py/FileUploader.extract_original_input
|
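`tarfile.TarFile.getmember()` raises KeyError for a missing member, which this method converts into a None return. Reproduced against an empty in-memory archive:

```python
import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w'):
    pass                            # write an empty archive
buf.seek(0)
with tarfile.open(fileobj=buf, mode='r') as tar:
    try:
        tar.getmember('DATA/missing')
    except KeyError:                # the case mapped to None above
        print('not packed')
```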
8,727
|
def run(self, files, all_):
reprounzip.common.record_usage(download_files=len(files))
output_files = {n: f.path
for n, f in iteritems(self.get_config().inputs_outputs)
if f.write_runs}
# No argument: list all the output files and exit
if not (all_ or files):
print("Output files:")
for output_name in output_files:
print(" %s" % output_name)
return
# Parse the name[:path] syntax
resolved_files = []
all_files = set(output_files)
for filespec in files:
filespec_split = filespec.split(':', 1)
if len(filespec_split) == 1:
output_name = local_path = filespec
elif len(filespec_split) == 2:
output_name, local_path = filespec_split
else:
logging.critical("Invalid file specification: %r",
filespec)
sys.exit(1)
local_path = Path(local_path) if local_path else None
all_files.discard(output_name)
resolved_files.append((output_name, local_path))
# If all_ is set, add all the files that weren't explicitely named
if all_:
for output_name in all_files:
resolved_files.append((output_name, Path(output_name)))
self.prepare_download(resolved_files)
success = True
try:
# Download files
for output_name, local_path in resolved_files:
try:
remote_path = output_files[output_name]
except __HOLE__:
logging.critical("Invalid output file: %r", output_name)
sys.exit(1)
logging.debug("Downloading file %s", remote_path)
if local_path is None:
ret = self.download_and_print(remote_path)
else:
ret = self.download(remote_path, local_path)
if ret is None:
ret = True
warnings.warn("download() returned None instead of "
"True/False, assuming True",
category=DeprecationWarning)
if not ret:
success = False
if not success:
sys.exit(1)
finally:
self.finalize()
|
KeyError
|
dataset/ETHPy150Open ViDA-NYU/reprozip/reprounzip/reprounzip/unpackers/common/misc.py/FileDownloader.run
|
8,728
|
def get_runs(runs, selected_runs, cmdline):
"""Selects which run(s) to execute based on parts of the command-line.
Will return an iterable of run numbers. Might also fail loudly or exit
after printing the original command-line.
"""
name_map = dict((r['id'], i) for i, r in enumerate(runs) if 'id' in r)
run_list = []
def parse_run(s):
try:
r = int(s)
except __HOLE__:
logging.critical("Error: Unknown run %s", s)
raise UsageError
if r < 0 or r >= len(runs):
logging.critical("Error: Expected 0 <= run <= %d, got %d",
len(runs) - 1, r)
sys.exit(1)
return r
if selected_runs is None:
run_list = list(irange(len(runs)))
else:
for run_item in selected_runs.split(','):
run_item = run_item.strip()
if run_item in name_map:
run_list.append(name_map[run_item])
continue
sep = run_item.find('-')
if sep == -1:
run_list.append(parse_run(run_item))
else:
if sep > 0:
first = parse_run(run_item[:sep])
else:
first = 0
if sep + 1 < len(run_item):
last = parse_run(run_item[sep + 1:])
else:
last = len(runs) - 1
if last < first:
logging.critical("Error: Last run number should be "
"greater than the first")
sys.exit(1)
run_list.extend(irange(first, last + 1))
# --cmdline without arguments: display the original command-line
if cmdline == []:
print("Original command-lines:")
for run in run_list:
print(' '.join(shell_escape(arg)
for arg in runs[run]['argv']))
sys.exit(0)
return run_list
|
ValueError
|
dataset/ETHPy150Open ViDA-NYU/reprozip/reprounzip/reprounzip/unpackers/common/misc.py/get_runs
|
8,729
|
def interruptible_call(*args, **kwargs):
assert signal.getsignal(signal.SIGINT) == signal.default_int_handler
proc = [None]
def _sigint_handler(signum, frame):
if proc[0] is not None:
try:
proc[0].send_signal(signum)
except __HOLE__:
pass
signal.signal(signal.SIGINT, _sigint_handler)
try:
proc[0] = subprocess.Popen(*args, **kwargs)
return proc[0].wait()
finally:
signal.signal(signal.SIGINT, signal.default_int_handler)
|
OSError
|
dataset/ETHPy150Open ViDA-NYU/reprozip/reprounzip/reprounzip/unpackers/common/misc.py/interruptible_call
|
8,730
|
def main():
"""Run all relevant aspects of ok.py."""
args = parse_input()
log.setLevel(logging.DEBUG if args.debug else logging.ERROR)
# Checking user's Python bit version
bit_v = (8 * struct.calcsize("P"))
log.debug("Python bit version: {}".format(bit_v))
log.debug(args)
if args.version:
print("okpy=={}".format(client.__version__))
exit(0)
elif args.update:
print("Current version: {}".format(client.__version__))
did_update = software_update.check_version(
args.server, client.__version__, client.FILE_NAME, timeout=10)
exit(not did_update) # exit with error if ok failed to update
assign = None
try:
if args.authenticate:
auth.authenticate(True)
# Instantiating assignment
assign = assignment.load_assignment(args.config, args)
if args.tests:
print('Available tests:')
for name in assign.test_map:
print(' ' + name)
exit(0)
msgs = messages.Messages()
for name, proto in assign.protocol_map.items():
log.info('Execute {}.run()'.format(name))
proto.run(msgs)
msgs['timestamp'] = str(datetime.now())
except ex.LoadingException as e:
log.warning('Assignment could not load', exc_info=True)
print('Error loading assignment: ' + str(e))
except ex.AuthenticationException as e:
log.warning('Authentication exception occurred', exc_info=True)
print('Authentication error: {0}'.format(e))
except ex.OkException as e:
log.warning('General OK exception occurred', exc_info=True)
print('Error: ' + str(e))
except __HOLE__:
log.info('KeyboardInterrupt received.')
finally:
if not args.no_update:
try:
software_update.check_version(args.server, client.__version__,
client.FILE_NAME)
except KeyboardInterrupt:
pass
if assign:
assign.dump_tests()
|
KeyboardInterrupt
|
dataset/ETHPy150Open Cal-CS-61A-Staff/ok-client/client/cli/ok.py/main
|
8,731
|
def get_files_types(self):
"""
Return the files inside the APK with their associated types (by using python-magic)
    :rtype: a dictionary
"""
try:
import magic
except ImportError:
# no lib magic !
for i in self.get_files():
buffer = self.zip.read(i)
self.files_crc32[i] = crc32(buffer)
self.files[i] = "Unknown"
return self.files
if self.files != {}:
return self.files
builtin_magic = 0
try:
getattr(magic, "MagicException")
except __HOLE__:
builtin_magic = 1
if builtin_magic:
ms = magic.open(magic.MAGIC_NONE)
ms.load()
for i in self.get_files():
buffer = self.zip.read(i)
self.files[i] = ms.buffer(buffer)
if self.files[i] is None:
self.files[i] = "Unknown"
else:
self.files[i] = self._patch_magic(buffer, self.files[i])
self.files_crc32[i] = crc32(buffer)
else:
m = magic.Magic(magic_file=self.magic_file)
for i in self.get_files():
buffer = self.zip.read(i)
self.files[i] = m.from_buffer(buffer)
if self.files[i] is None:
self.files[i] = "Unknown"
else:
self.files[i] = self._patch_magic(buffer, self.files[i])
self.files_crc32[i] = crc32(buffer)
return self.files
|
AttributeError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/APK.get_files_types
|
8,732
|
def get_files_information(self):
"""
Return the files inside the APK with their associated types and crc32
:rtype: string, string, int
"""
if self.files == {}:
self.get_files_types()
for i in self.get_files():
try:
yield i, self.files[i], self.files_crc32[i]
except __HOLE__:
yield i, "", ""
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/APK.get_files_information
|
8,733
|
def get_file(self, filename):
"""
Return the raw data of the specified filename
:rtype: string
"""
try:
return self.zip.read(filename)
except __HOLE__:
raise FileNotPresent(filename)
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/APK.get_file
|
8,734
|
def get_details_permissions(self):
"""
Return permissions with details
:rtype: list of string
"""
l = {}
for i in self.permissions:
perm = i
pos = i.rfind(".")
if pos != -1:
perm = i[pos + 1:]
try:
l[i] = DVM_PERMISSIONS["MANIFEST_PERMISSION"][perm]
except __HOLE__:
l[i] = ["normal", "Unknown permission from android reference",
"Unknown permission from android reference"]
return l
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/APK.get_details_permissions
|
8,735
|
def get_requested_aosp_permissions_details(self):
"""
Returns requested aosp permissions with details.
:rtype: dictionary
"""
l = {}
for i in self.permissions:
try:
l[i] = self.permission_module["AOSP_PERMISSIONS"][i]
except __HOLE__:
# if we have not found permission do nothing
continue
return l
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/APK.get_requested_aosp_permissions_details
|
8,736
|
def get_android_manifest_axml(self):
"""
Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file
:rtype: :class:`AXMLPrinter`
"""
try:
return self.axml["AndroidManifest.xml"]
except __HOLE__:
return None
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/APK.get_android_manifest_axml
|
8,737
|
def get_android_manifest_xml(self):
"""
Return the xml object which corresponds to the AndroidManifest.xml file
:rtype: object
"""
try:
return self.xml["AndroidManifest.xml"]
except __HOLE__:
return None
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/APK.get_android_manifest_xml
|
8,738
|
def get_android_resources(self):
"""
Return the :class:`ARSCParser` object which corresponds to the resources.arsc file
:rtype: :class:`ARSCParser`
"""
try:
return self.arsc["resources.arsc"]
except __HOLE__:
self.arsc["resources.arsc"] = ARSCParser(self.zip.read(
"resources.arsc"))
return self.arsc["resources.arsc"]
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/APK.get_android_resources
|
8,739
|
def show(self):
self.get_files_types()
print "FILES: "
for i in self.get_files():
try:
print "\t", i, self.files[i], "%x" % self.files_crc32[i]
except __HOLE__:
print "\t", i, "%x" % self.files_crc32[i]
print "DECLARED PERMISSIONS:"
declared_permissions = self.get_declared_permissions()
for i in declared_permissions:
print "\t", i
print "REQUESTED PERMISSIONS:"
requested_permissions = self.get_requested_permissions()
for i in requested_permissions:
print "\t", i
print "MAIN ACTIVITY: ", self.get_main_activity()
print "ACTIVITIES: "
activities = self.get_activities()
for i in activities:
filters = self.get_intent_filters("activity", i)
print "\t", i, filters or ""
print "SERVICES: "
services = self.get_services()
for i in services:
filters = self.get_intent_filters("service", i)
print "\t", i, filters or ""
print "RECEIVERS: "
receivers = self.get_receivers()
for i in receivers:
filters = self.get_intent_filters("receiver", i)
print "\t", i, filters or ""
print "PROVIDERS: ", self.get_providers()
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/APK.show
|
8,740
|
def getPrefixByUri(self, uri):
try:
return self.m_uriprefix[uri]
except __HOLE__:
return -1
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/AXMLParser.getPrefixByUri
|
8,741
|
def getPrefix(self):
try:
return self.sb.getString(self.m_uriprefix[self.m_namespaceUri])
except __HOLE__:
return u''
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/AXMLParser.getPrefix
|
8,742
|
def get_resource_dimen(self, ate):
try:
return [
ate.get_value(), "%s%s" % (
complexToFloat(ate.key.get_data()),
DIMENSION_UNITS[ate.key.get_data() & COMPLEX_UNIT_MASK])
]
except __HOLE__:
androconf.debug("Out of range dimension unit index for %s: %s" % (
complexToFloat(ate.key.get_data()),
ate.key.get_data() & COMPLEX_UNIT_MASK))
return [ate.get_value(), ate.key.get_data()]
# FIXME
|
IndexError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_resource_dimen
|
8,743
|
def get_public_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["public"]:
buff += '<public type="%s" name="%s" id="0x%08x" />\n' % (
i[0], i[1], i[2])
except __HOLE__:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_public_resources
|
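Rows 8,743 through 8,754 repeat one idiom: an EAFP guard around a nested dict lookup. For contrast, the equivalent look-before-you-leap form chains `dict.get`; `lookup` is an illustrative name:

```python
def lookup(values, package, locale='\x00\x00', ttype='public'):
    # LBYL equivalent of the try/except KeyError guards in these rows.
    return values.get(package, {}).get(locale, {}).get(ttype, [])
```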
8,744
|
def get_string_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["string"]:
buff += '<string name="%s">%s</string>\n' % (i[0], i[1])
except __HOLE__:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_string_resources
|
8,745
|
def get_strings_resources(self):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += "<packages>\n"
for package_name in self.get_packages_names():
buff += "<package name=\"%s\">\n" % package_name
for locale in self.get_locales(package_name):
buff += "<locale value=%s>\n" % repr(locale)
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["string"]:
buff += '<string name="%s">%s</string>\n' % (i[0], i[1])
except __HOLE__:
pass
buff += '</resources>\n'
buff += '</locale>\n'
buff += "</package>\n"
buff += "</packages>\n"
return buff.encode('utf-8')
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_strings_resources
|
8,746
|
def get_id_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["id"]:
if len(i) == 1:
buff += '<item type="id" name="%s"/>\n' % (i[0])
else:
buff += '<item type="id" name="%s">%s</item>\n' % (i[0],
i[1])
except __HOLE__:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_id_resources
|
8,747
|
def get_bool_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["bool"]:
buff += '<bool name="%s">%s</bool>\n' % (i[0], i[1])
except __HOLE__:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_bool_resources
|
8,748
|
def get_integer_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["integer"]:
buff += '<integer name="%s">%s</integer>\n' % (i[0], i[1])
except __HOLE__:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_integer_resources
|
8,749
|
def get_color_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["color"]:
buff += '<color name="%s">%s</color>\n' % (i[0], i[1])
except __HOLE__:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_color_resources
|
8,750
|
def get_dimen_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["dimen"]:
buff += '<dimen name="%s">%s</dimen>\n' % (i[0], i[1])
except __HOLE__:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_dimen_resources
|
8,751
|
def get_id(self, package_name, rid, locale='\x00\x00'):
self._analyse()
try:
for i in self.values[package_name][locale]["public"]:
if i[2] == rid:
return i
except __HOLE__:
return None
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_id
|
8,752
|
def get_res_configs(self, rid, config=None):
self._analyse()
if not rid:
raise ValueError("'rid' should be set")
try:
res_options = self.resource_values[rid]
if len(res_options) > 1 and config:
return [(
config,
res_options[config])]
else:
return res_options.items()
except __HOLE__:
return []
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_res_configs
|
8,753
|
def get_string(self, package_name, name, locale='\x00\x00'):
self._analyse()
try:
for i in self.values[package_name][locale]["string"]:
if i[0] == name:
return i
except __HOLE__:
return None
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_string
|
8,754
|
def get_res_id_by_key(self, package_name, resource_type, key):
try:
return self.resource_keys[package_name][resource_type][key]
except __HOLE__:
return None
|
KeyError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/ARSCParser.get_res_id_by_key
|
8,755
|
def get_arsc_info(arscobj):
buff = ""
for package in arscobj.get_packages_names():
buff += package + ":\n"
for locale in arscobj.get_locales(package):
buff += "\t" + repr(locale) + ":\n"
for ttype in arscobj.get_types(package, locale):
buff += "\t\t" + ttype + ":\n"
try:
tmp_buff = getattr(arscobj, "get_" + ttype + "_resources")(
package, locale).decode("utf-8", 'replace').split("\n")
for i in tmp_buff:
buff += "\t\t\t" + i + "\n"
except __HOLE__:
pass
return buff
|
AttributeError
|
dataset/ETHPy150Open androguard/androguard/androguard/core/bytecodes/apk.py/get_arsc_info
|
8,756
|
def verify_bucket_writable(bucket_name):
"""Verify the application can write to the specified bucket.
Args:
bucket_name: The bucket to verify.
Raises:
BackupValidationException: If the bucket is not writable.
"""
path = '/gs/%s' % bucket_name
try:
file_names = files.gs.listdir(path,
{'prefix': TEST_WRITE_FILENAME_PREFIX,
'max_keys': MAX_KEYS_LIST_SIZE})
except (files.InvalidParameterError, files.PermissionDeniedError):
raise BackupValidationException('Bucket "%s" not accessible' % bucket_name)
except files.InvalidFileNameError:
raise BackupValidationException('Bucket "%s" does not exist' % bucket_name)
file_name = '%s/%s.tmp' % (path, TEST_WRITE_FILENAME_PREFIX)
file_name_try = 0
while True:
if file_name_try >= MAX_TEST_FILENAME_TRIES:
return
if file_name not in file_names:
break
gen = random.randint(0, 9999)
file_name = '%s/%s_%s.tmp' % (path, TEST_WRITE_FILENAME_PREFIX, gen)
file_name_try += 1
try:
test_file = files.open(files.gs.create(file_name), 'a', exclusive_lock=True)
try:
test_file.write('test')
finally:
test_file.close(finalize=True)
except files.PermissionDeniedError:
raise BackupValidationException('Bucket "%s" is not writable' % bucket_name)
try:
files.delete(file_name)
except (files.InvalidArgumentError, files.InvalidFileNameError, __HOLE__):
logging.warn('Failed to delete test file %s', file_name)
|
IOError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/ext/datastore_admin/backup_handler.py/verify_bucket_writable
|
8,757
|
def get_context_data(self, **kwargs):
kwargs = super(StaticContextMixin, self).get_context_data(**kwargs)
try:
kwargs.update(self.get_static_context())
except (TypeError, __HOLE__):
raise ImproperlyConfigured(
'{0}.static_context must be a dictionary or container '
'of two-tuples.'.format(self.__class__.__name__))
else:
return kwargs
|
ValueError
|
dataset/ETHPy150Open brack3t/django-braces/braces/views/_other.py/StaticContextMixin.get_context_data
|
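The `(TypeError, ValueError)` pair matches `dict.update`'s failure modes exactly: TypeError for a non-mapping, non-iterable argument, and ValueError for an iterable whose elements are not two-item pairs.

```python
ctx = {}
try:
    ctx.update(3)             # not a mapping or iterable -> TypeError
except TypeError:
    pass
try:
    ctx.update([('a',)])      # element has length 1, not 2 -> ValueError
except ValueError:
    pass
```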
8,758
|
def admin_media_prefix():
"""
Returns the string contained in the setting ADMIN_MEDIA_PREFIX.
"""
try:
from django.conf import settings
except __HOLE__:
return ''
return settings.ADMIN_MEDIA_PREFIX
|
ImportError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/contrib/admin/templatetags/adminmedia.py/admin_media_prefix
|
8,759
|
def test_egg5(self):
"""Loading an app from an egg that has an import error in its models module raises that error"""
egg_name = '%s/brokenapp.egg' % self.egg_dir
sys.path.append(egg_name)
self.assertRaises(ImportError, load_app, 'broken_app')
try:
load_app('broken_app')
except __HOLE__ as e:
# Make sure the message is indicating the actual
# problem in the broken app.
self.assertTrue("modelz" in e.args[0])
|
ImportError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/tests/regressiontests/app_loading/tests.py/EggLoadingTest.test_egg5
|
8,760
|
def _object_lister(self, cursor):
try:
for row in cursor:
yield self._object_from_row(row, cursor.description)
except __HOLE__:
cursor.close()
raise StopIteration
|
StopIteration
|
dataset/ETHPy150Open darcyliu/storyboard/boto/sdb/db/manager/pgmanager.py/PGManager._object_lister
|
8,761
|
def get_object(self, request, content_type_id, object_id):
ct = get_object_or_404(ContentType, pk=content_type_id)
try:
obj = ct.get_object_for_this_type(pk=object_id)
except __HOLE__:
raise Http404('No %s matches the given query.' % ct.model_class()._meta.object_name)
return obj
|
ObjectDoesNotExist
|
dataset/ETHPy150Open justquick/django-activity-stream/actstream/feeds.py/ObjectActivityMixin.get_object
|
8,762
|
def acquire_data( hdf_files = None, var_names=None, concatenate_size = None, bounds = None):
import h5py, numpy
if hdf_files is None:
raise NotFound('No open_hdf_files provided to extract data from.')
if var_names is None:
raise NotFound('Variable names where not provided.')
if concatenate_size is None:
raise NotFound('The concatenation size was not provided')
open_files = []
try:
for hdf_file in hdf_files:
#-------------------------------------------------------------------------------------------------------
# make a file object
#-------------------------------------------------------------------------------------------------------
try:
file = h5py.File(hdf_file,'r')
except __HOLE__ as ioe:
log.exception('Unable to open file: "%s"', hdf_file)
# Try again?
try:
file = h5py.File(hdf_file,'r')
except:
log.exception('Still Unable to open file: "%s" !', hdf_file)
# If we are only opening one file - we must fail - but otherwise for now, just let it go!
if len(hdf_files) == 1:
raise ioe
open_files.append(file)
gen = _acquire_hdf_data(open_hdf_files=open_files, var_names=var_names, concatenate_size=concatenate_size, bounds=bounds)
# run the generator yielding to the caller
for item in gen:
yield item
finally:
# always clean up!
for file in open_files:
file.close()
|
IOError
|
dataset/ETHPy150Open ooici/pyon/prototype/hdf/hdf_array_iterator.py/acquire_data
|
8,763
|
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = optparse.OptionParser(version="%prog version: $Id: contigs2random_sample.py 2871 2010-03-03 10:20:44Z nicki $",
usage=globals()["__doc__"])
parser.add_option("-m", "--species-map", dest="species_map", type="string",
help="text file specifying the mapping between contig and genome")
parser.add_option("-g", "--genome-dir", dest="genome_dir", type="string",
help="specify directory where genome / genomes are stored")
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
# read in contig lengths into dictionary
E.info("reading contigs file")
c_contigs = 0
contigs_lengths = {}
for fasta in FastaIterator.iterate(options.stdin):
c_contigs += 1
# titles of fasta records must be single strings with no special
# characters
contigs_lengths[fasta.title.split(" ")[0]] = len(fasta.sequence)
E.info("read %i contigs" % c_contigs)
# read in mapping between spcies and contigs
species_map = {}
for line in open(options.species_map).readlines():
data = line[:-1].split("\t")
contig, species = data[0], data[1]
species_map[contig] = species
# read genomes into memory
# NB this may need optimisin if using large
# genomes or many genomes
E.info("reading genomes from %s" % options.genome_dir)
# The directory must ONLY contain genome files!!
genomes_sequences = {}
c_genomes = 0
for genome_file in glob.glob(os.path.join(options.genome_dir, "*")):
c_genomes += 1
for fasta in FastaIterator.iterate(IOTools.openFile(genome_file)):
genomes_sequences[fasta.title] = fasta.sequence
E.info("read %i genomes from %s" % (c_genomes, options.genome_dir))
# iterate over the contigs and sample from the respective genome
E.info("iterating over contigs")
c_contigs_output = 0
for contig, length in contigs_lengths.iteritems():
if contig not in species_map:
E.warn("contig %s not in species map file" % contig)
else:
c_contigs_output += 1
genome = species_map[contig]
genome_length = len(genomes_sequences[genome])
# get the start position from which to sample
start = random.randint(1, genome_length)
try:
end = start + length - 1
except __HOLE__:
print "end of sampled contig extends beyond length of genome"
sampled_seq = genomes_sequences[genome][start:end]
options.stdout.write(
">%s_random\n%s\n" % (contig + "_%s" % species_map[contig], sampled_seq))
E.info("written %i contigs" % c_contigs_output)
# write footer and output benchmark information.
E.Stop()
|
ValueError
|
dataset/ETHPy150Open CGATOxford/cgat/scripts/contigs2random_sample.py/main
|
8,764
|
def get_values(instance, go_into={}, exclude=(), extra=(), skip_none=False):
"""
Transforms a django model instance into an object that can be used for
serialization.
@param instance(django.db.models.Model) - the model in question
@param go_into(dict) - relations with other models that need expanding
@param exclude(tuple) - fields that will be ignored
@param extra(tuple) - additional functions/properties which are not fields
@param skip_none(bool) - skip None field
Usage:
get_values(MyModel.objects.get(pk=187),
{'user': {'go_into': ('clan',),
'exclude': ('crest_blob',),
'extra': ('get_crest_path',)}},
('image'))
"""
from django.db.models.manager import Manager
from django.db.models import Model
SIMPLE_TYPES = (int, long, str, list, dict, tuple, bool, float, bool,
unicode, NoneType)
if not isinstance(instance, Model):
raise TypeError("Argument is not a Model")
value = {
'pk': instance.pk,
}
# check for simple string instead of tuples
# and dicts; this is shorthand syntax
if isinstance(go_into, str):
go_into = {go_into: {}}
if isinstance(exclude, str):
exclude = (exclude,)
if isinstance(extra, str):
extra = (extra,)
# process the extra properties/function/whatever
for field in extra:
property = getattr(instance, field)
if callable(property):
property = property()
if skip_none and property is None:
continue
elif isinstance(property, SIMPLE_TYPES):
value[field] = property
else:
value[field] = repr(property)
field_options = instance._meta.get_all_field_names()
for field in field_options:
try:
property = getattr(instance, field)
except:
continue
if skip_none and property is None:
continue
if field in exclude or field[0] == '_' or isinstance(property, Manager):
# if it's in the exclude tuple, ignore it
# if it's a "private" field, ignore it
# if it's an instance of manager (this means a more complicated
# relationship), ignore it
continue
elif go_into.has_key(field):
# if it's in the go_into dict, make a recursive call for that field
try:
field_go_into = go_into[field].get('go_into', {})
except AttributeError:
field_go_into = {}
try:
field_exclude = go_into[field].get('exclude', ())
except AttributeError:
field_exclude = ()
try:
field_extra = go_into[field].get('extra', ())
except __HOLE__:
field_extra = ()
value[field] = get_values(property,
field_go_into,
field_exclude,
field_extra, skip_none=skip_none)
else:
if isinstance(property, Model):
# if it's a model, we need it's PK #
value[field] = property.pk
elif isinstance(property, (datetime.date,
datetime.time,
datetime.datetime)):
value[field] = property
else:
# else, we just put the value #
if callable(property):
property = property()
if isinstance(property, SIMPLE_TYPES):
value[field] = property
else:
value[field] = repr(property)
return value
|
AttributeError
|
dataset/ETHPy150Open aparo/pyes/pyes/djangoutils.py/get_values
|
8,765
|
def get_language(language_code, reporter=None):
"""Return module with language localizations.
`language_code` is a "BCP 47" language tag.
If there is no matching module, warn and fall back to English.
"""
    # TODO: use a dummy module returning empty strings?, configurable?
for tag in normalize_language_tag(language_code):
tag = tag.replace('-','_') # '-' not valid in module names
if tag in _languages:
return _languages[tag]
try:
module = __import__(tag, globals(), locals(), level=0)
except __HOLE__:
try:
module = __import__(tag, globals(), locals(), level=1)
except ImportError:
continue
_languages[tag] = module
return module
if reporter is not None:
reporter.warning(
'language "%s" not supported: ' % language_code +
'Docutils-generated text will be in English.')
module = __import__('en', globals(), locals(), level=1)
_languages[tag] = module # warn only one time!
return module
|
ImportError
|
dataset/ETHPy150Open timonwong/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/docutils/languages/__init__.py/get_language
|
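The absolute-then-relative `__import__` fallback above predates importlib; a rough modern equivalent, with an illustrative helper name:

```python
import importlib

def load_language(tag, package):
    """Try an absolute import, then a package-relative one (a rough
    importlib equivalent of the two __import__ calls above)."""
    try:
        return importlib.import_module(tag)
    except ImportError:
        try:
            return importlib.import_module('.' + tag, package=package)
        except ImportError:
            return None
```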
8,766
|
def setUp(self):
super(BaseTestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 30)
try:
test_timeout = int(test_timeout)
except __HOLE__:
# If timeout value is invalid, fail hard.
print("OS_TEST_TIMEOUT set to invalid value"
" defaulting to no timeout")
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
if os.environ.get('OS_STDOUT_CAPTURE') in options.TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in options.TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.log_fixture = self.useFixture(
fixtures.FakeLogger('pbr'))
# Older git does not have config --local, so create a temporary home
# directory to permit using git config --global without stepping on
# developer configuration.
self.useFixture(fixtures.TempHomeDir())
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.FakeLogger())
# TODO(lifeless) we should remove PBR_VERSION from the environment.
# rather than setting it, because thats not representative - we need to
# test non-preversioned codepaths too!
self.useFixture(fixtures.EnvironmentVariable('PBR_VERSION', '0.0'))
self.temp_dir = self.useFixture(fixtures.TempDir()).path
self.package_dir = os.path.join(self.temp_dir, 'testpackage')
shutil.copytree(os.path.join(os.path.dirname(__file__), 'testpackage'),
self.package_dir)
self.addCleanup(os.chdir, os.getcwd())
os.chdir(self.package_dir)
self.addCleanup(self._discard_testpackage)
# Tests can opt into non-PBR_VERSION by setting preversioned=False as
# an attribute.
if not getattr(self, 'preversioned', True):
self.useFixture(fixtures.EnvironmentVariable('PBR_VERSION'))
setup_cfg_path = os.path.join(self.package_dir, 'setup.cfg')
with open(setup_cfg_path, 'rt') as cfg:
content = cfg.read()
content = content.replace(u'version = 0.1.dev', u'')
with open(setup_cfg_path, 'wt') as cfg:
cfg.write(content)
|
ValueError
|
dataset/ETHPy150Open blue-yonder/pyscaffold/pyscaffold/contrib/pbr/pbr/tests/base.py/BaseTestCase.setUp
|
8,767
|
def to_internal_value(self, data):
# If data is a string, try to base64-decode it.
if isinstance(data, str):
if 'data:' in data and ';base64,' in data:
header, data = data.split(';base64,')
try:
decoded_data = base64.b64decode(data)
except __HOLE__:
self.fail('invalid_image')
file_name = 'uploaded_image.' + self.guess_file_extension(decoded_data)
data = ContentFile(decoded_data, name=file_name)
return super().to_internal_value(data)
|
TypeError
|
dataset/ETHPy150Open jsmesami/naovoce/src/gallery/api/serializers.py/Base64ImageField.to_internal_value
|
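One caveat worth noting for this row: on Python 3, `base64.b64decode` usually fails with `binascii.Error` (a ValueError subclass) rather than TypeError, so catching only TypeError can let bad input propagate.

```python
import base64
import binascii

try:
    base64.b64decode('not base64!!')
except binascii.Error as e:   # ValueError subclass on Python 3
    print(type(e).__name__, e)
```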
8,768
|
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(
optional=CREATE_UPDATE_OPTIONAL_FIELDS,
allow_unknown=True
)
def create(self, request, *args, **kwargs):
"""Creates a reply to a review.
The new reply will start off as private. Only the author of the
reply (the user who is logged in and issuing this API call) will
be able to see and interact with the reply.
Initial data for the reply can be provided by passing data for
any number of the fields. If nothing is provided, the reply will
start off as blank.
If the user submitting this reply already has a pending draft reply
on this review, then this will update the existing draft and
return :http:`303`. Otherwise, this will create a new draft and
return :http:`201`. Either way, this request will return without
a payload and with a ``Location`` header pointing to the location of
the new draft reply.
Extra data can be stored on the reply for later lookup by passing
``extra_data.key_name=value``. The ``key_name`` and ``value`` can
be any valid strings. Passing a blank ``value`` will remove the key.
The ``extra_data.`` prefix is required.
"""
try:
review_request = \
resources.review_request.get_object(request, *args, **kwargs)
review = resources.review.get_object(request, *args, **kwargs)
except __HOLE__:
return DOES_NOT_EXIST
reply, is_new = Review.objects.get_or_create(
review_request=review_request,
user=request.user,
public=False,
base_reply_to=review)
if is_new:
status_code = 201 # Created
else:
# This already exists. Go ahead and update, but we're going to
# redirect the user to the right place.
status_code = 303 # See Other
result = self._update_reply(request, reply, *args, **kwargs)
if not isinstance(result, tuple) or result[0] != 200:
return result
else:
return status_code, result[1], {
'Location': self.get_href(reply, request, *args, **kwargs),
}
|
ObjectDoesNotExist
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/review_reply.py/ReviewReplyResource.create
|
8,769
|
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(
optional=CREATE_UPDATE_OPTIONAL_FIELDS,
allow_unknown=True
)
def update(self, request, *args, **kwargs):
"""Updates a reply.
This updates the fields of a draft reply. Published replies cannot
be updated.
Only the owner of a reply can make changes. One or more fields can
be updated at once.
The only special field is ``public``, which, if set to true, will
publish the reply. The reply will then be made publicly visible. Once
public, the reply cannot be modified or made private again.
Extra data can be stored on the reply for later lookup by passing
``extra_data.key_name=value``. The ``key_name`` and ``value`` can
be any valid strings. Passing a blank ``value`` will remove the key.
The ``extra_data.`` prefix is required.
"""
try:
resources.review_request.get_object(request, *args, **kwargs)
resources.review.get_object(request, *args, **kwargs)
reply = self.get_object(request, *args, **kwargs)
except __HOLE__:
return DOES_NOT_EXIST
return self._update_reply(request, reply, *args, **kwargs)
|
ObjectDoesNotExist
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/review_reply.py/ReviewReplyResource.update
|
8,770
|
def _get_user_components(self, key):
"""Look for given user component"""
try:
return self.user_components[key]
except (TypeError, __HOLE__):
return None
|
KeyError
|
dataset/ETHPy150Open dclambert/Python-ELM/random_layer.py/RandomLayer._get_user_components
|
8,771
|
def reset_upload(self, archive, times=3):
try:
return UploadState.reset(archive)
except __HOLE__ as e:
if e.errno == errno.EACCES and times > 0:
args = [self.reset_upload, archive, times-1]
return task.deferLater(reactor, 2, *args)
raise
|
OSError
|
dataset/ETHPy150Open longaccess/longaccess-client/lacli/server/__init__.py/LaServerCommand.reset_upload
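The method above reschedules itself through Twisted's task.deferLater when the reset hits EACCES. A synchronous sketch of the same retry-on-OSError idea, with a hypothetical do_reset callable standing in for UploadState.reset:

import errno
import time

def reset_with_retries(do_reset, archive, times=3, delay=2):
    """Retry do_reset(archive) on EACCES, sleeping delay seconds between tries."""
    for attempt in range(times + 1):
        try:
            return do_reset(archive)
        except OSError as e:
            if e.errno != errno.EACCES or attempt == times:
                raise
            time.sleep(delay)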
|
8,772
|
def b85decode(b):
_b85dec = [None] * 256
for i, c in enumerate(iterbytes(_b85alphabet)):
_b85dec[c] = i
padding = (-len(b)) % 5
b = b + b'~' * padding
out = []
packI = struct.Struct('!I').pack
for i in range(0, len(b), 5):
chunk = b[i:i + 5]
acc = 0
try:
for c in iterbytes(chunk):
acc = acc * 85 + _b85dec[c]
except __HOLE__:
for j, c in enumerate(iterbytes(chunk)):
if _b85dec[c] is None:
raise ValueError(
'bad base85 character at position %d' % (i + j)
)
raise
try:
out.append(packI(acc))
except struct.error:
raise ValueError('base85 overflow in hunk starting at byte %d'
% i)
result = b''.join(out)
if padding:
result = result[:-padding]
return result
|
TypeError
|
dataset/ETHPy150Open Esri/raster-functions/scripts/get-pip.py/b85decode
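The hand-rolled b85decode above exists because get-pip must run on interpreters that predate base64.b85decode (added in Python 3.4). Where the stdlib version is available the two should agree, which allows a quick round-trip check:

import base64

payload = b'hello world'
encoded = base64.b85encode(payload)  # same alphabet as the vendored decoder
assert base64.b85decode(encoded) == payload
assert b85decode(encoded) == payload  # the function defined above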
|
8,773
|
def bootstrap(tmpdir=None):
# Import pip so we can use it to install pip and maybe setuptools too
import pip
from pip.commands.install import InstallCommand
# Wrapper to provide default certificate with the lowest priority
class CertInstallCommand(InstallCommand):
def parse_args(self, args):
# If cert isn't specified in config or environment, we provide our
# own certificate through defaults.
            # This allows the user to specify a custom cert anywhere they like:
# config, environment variable or argv.
if not self.parser.get_default_values().cert:
self.parser.defaults["cert"] = cert_path # calculated below
return super(CertInstallCommand, self).parse_args(args)
pip.commands_dict["install"] = CertInstallCommand
# We always want to install pip
packages = ["pip"]
# Check if the user has requested us not to install setuptools
if "--no-setuptools" in sys.argv or os.environ.get("PIP_NO_SETUPTOOLS"):
args = [x for x in sys.argv[1:] if x != "--no-setuptools"]
else:
args = sys.argv[1:]
# We want to see if setuptools is available before attempting to
# install it
try:
import setuptools # noqa
except __HOLE__:
packages += ["setuptools"]
delete_tmpdir = False
try:
# Create a temporary directory to act as a working directory if we were
# not given one.
if tmpdir is None:
tmpdir = tempfile.mkdtemp()
delete_tmpdir = True
# We need to extract the SSL certificates from requests so that they
# can be passed to --cert
cert_path = os.path.join(tmpdir, "cacert.pem")
with open(cert_path, "wb") as cert:
cert.write(pkgutil.get_data("pip._vendor.requests", "cacert.pem"))
# Execute the included pip and use it to install the latest pip and
# setuptools from PyPI
sys.exit(pip.main(["install", "--upgrade"] + packages + args))
finally:
# Remove our temporary directory
if delete_tmpdir and tmpdir:
shutil.rmtree(tmpdir, ignore_errors=True)
|
ImportError
|
dataset/ETHPy150Open Esri/raster-functions/scripts/get-pip.py/bootstrap
|
8,774
|
def tearDown(self):
try:
os.remove('SLSQP.out')
except OSError:
pass
try:
os.remove('SNOPT_print.out')
os.remove('SNOPT_summary.out')
except __HOLE__:
pass
|
OSError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO/openmdao/drivers/test/test_driver_param_indices.py/TestParamIndicesPyoptsparse.tearDown
|
8,775
|
def _calculate_word_prob_all():
links = Link.objects.all().order_by('-created_on')[:max_links_in_corpus]
try:
corpus = file(sample_corpus_location, 'r')
corpus_created = os.path.getmtime(sample_corpus_location)
diff = datetime.now() - datetime.fromtimestamp(corpus_created)
if diff.days > calculate_corpus_after:
raise IOError
all_corpus = pickle.load(corpus)
corpus.close()
except __HOLE__:
all_corpus = _calculate_word_prob(links)
corpus = file(sample_corpus_location, 'w')
pickle.dump(all_corpus, corpus)
corpus.close()
return all_corpus
|
IOError
|
dataset/ETHPy150Open agiliq/django-socialnews/socialnews/news/cron.py/_calculate_word_prob_all
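The function above is a small cache-aside pattern: load the pickle if it is fresh, deliberately raise IOError to force a rebuild when it is stale, and regenerate on any IOError (a missing file included). A generic sketch, with the Python 2 file() builtin swapped for open():

import os
import pickle
from datetime import datetime

def cached(path, max_age_days, rebuild):
    """Return the pickled value at path, rebuilding it when stale or absent."""
    try:
        age = datetime.now() - datetime.fromtimestamp(os.path.getmtime(path))
        if age.days > max_age_days:
            raise IOError('cache is stale')  # same trick as above
        with open(path, 'rb') as f:
            return pickle.load(f)
    except (IOError, OSError):
        value = rebuild()
        with open(path, 'wb') as f:
            pickle.dump(value, f)
        return value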
|
8,776
|
def echo_json_response(response, pretty, limit=None):
'''Wrapper to echo JSON with optional 'pretty' printing. If pretty is not
provided explicity and stdout is a terminal (and not redirected or piped),
the default will be to indent and sort keys'''
indent = None
sort_keys = False
if pretty or (pretty is None and sys.stdout.isatty()):
indent = 2
sort_keys = True
try:
if hasattr(response, 'json_encode'):
response.json_encode(click.get_text_stream('stdout'), limit=limit,
indent=indent, sort_keys=sort_keys)
else:
res = response.get_raw()
res = json.dumps(json.loads(res), indent=2, sort_keys=True)
click.echo(res)
except __HOLE__ as ioe:
# hide scary looking broken pipe stack traces
raise click.ClickException(str(ioe))
|
IOError
|
dataset/ETHPy150Open planetlabs/planet-client-python/planet/scripts/__init__.py/echo_json_response
|
8,777
|
def read(value, split=False):
'''Get the value of an option interpreting as a file implicitly or
explicitly and falling back to the value if not explicitly specified.
If the value is '@name', then a file must exist with name and the returned
value will be the contents of that file. If the value is '@-' or '-', then
stdin will be read and returned as the value. Finally, if a file exists
with the provided value, that file will be read. Otherwise, the value
will be returned.
'''
v = str(value)
retval = value
if v[0] == '@' or v == '-':
fname = '-' if v == '-' else v[1:]
try:
with click.open_file(fname) as fp:
if not fp.isatty():
retval = fp.read()
else:
retval = None
except __HOLE__ as ioe:
# if explicit and problems, raise
if v[0] == '@':
raise click.ClickException(str(ioe))
elif path.exists(v) and path.isfile(v):
with click.open_file(v) as fp:
retval = fp.read()
if retval and split and type(retval) != tuple:
retval = _split(retval.strip())
return retval
|
IOError
|
dataset/ETHPy150Open planetlabs/planet-client-python/planet/scripts/__init__.py/read
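read() therefore implements a small convention: '@name' forces a file read (and fails loudly), '-' or '@-' reads stdin, a bare value that happens to name a file is read implicitly, and anything else passes through unchanged. Illustrative calls (file names hypothetical):

# read('@aoi.json')   -> contents of aoi.json, or ClickException if unreadable
# read('-')           -> whatever was piped on stdin
# read('config.json') -> file contents if such a file exists
# read('{"a": 1}')    -> the string itself, unchanged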
|
8,778
|
@scene_type
@workspace
@click.argument("destination")
@limit_option(default=-1)
@click.option("--dryrun", is_flag=True, help='Do not actually download')
@click.option("--products", multiple=True,
type=click.Choice(ORTHO_PRODUCTS + ['all']),
              help='Specify products to download, default is visual')
@cli.command('sync')
def sync(destination, workspace, scene_type, limit, dryrun, products):
'''Synchronize a directory to a specified AOI or workspace'''
aoi = None
filters = {'workspace': workspace}
if 'all' in products:
products = ORTHO_PRODUCTS
else:
products = products or ('visual',)
sync_tool = _SyncTool(client(), destination, aoi,
scene_type, products, **filters)
try:
to_fetch = sync_tool.init(limit)
except __HOLE__ as ve:
raise click.ClickException(str(ve))
click.echo('total scene products to fetch: %s' % to_fetch)
if limit > -1:
click.echo('limiting to %s' % limit)
if dryrun:
click.echo('would download:')
for scene in sync_tool.get_scenes_to_sync():
click.echo(scene['id'])
return
def progress_callback(name, remaining):
click.echo('downloaded %s, remaining %s' %
(name, remaining))
start_time = time.time()
summary = sync_tool.sync(progress_callback)
if summary.transferred:
summarize_throughput(summary.transferred, start_time)
|
ValueError
|
dataset/ETHPy150Open planetlabs/planet-client-python/planet/scripts/__init__.py/sync
|
8,779
|
@cli.command('set-workspace')
@click.argument("workspace", default="@-", required=False)
@click.option('--id', help='If provided, update the workspace with this id')
@click.option('--aoi', help='The geometry to use')
@click.option('--name', help='Workspace name')
@click.option('--create', is_flag=True, help='Specify workspace creation')
@click.option('--where', nargs=3, multiple=True,
help=('Provide additional search criteria. See '
'https://www.planet.com/docs/v0/scenes/#metadata for '
'search metadata fields.'))
def set_workspace(id, aoi, name, create, workspace, where):
'''Create or modify a workspace'''
workspace = read(workspace)
try:
workspace = json.loads(workspace) if workspace else None
except __HOLE__:
raise click.ClickException('workspace must be JSON')
cl = client()
if workspace is None and id:
workspace = cl.get_workspace(id).get()
# what workspace id are we working with
if not id:
id = workspace.get('id', None)
if create:
id = None
aoi = read_aoi(aoi)
if aoi:
geom = api.utils.geometry_from_json(aoi)
if geom is None:
raise click.ClickException('unable to find geometry in aoi')
workspace['filters'] = {
'geometry': {
'intersects': geom
}
}
if name:
workspace['name'] = name
if where:
if 'filters' not in workspace:
workspace['filters'] = {}
filters = workspace['filters']
for k, c, v in where:
if k not in filters:
filters[k] = {}
group = filters.get(k)
if v == '-' and c in group:
group.pop(c)
else:
group[c] = v
if not group:
filters.pop(k)
if not workspace:
raise click.ClickException('nothing to do')
echo_json_response(call_and_wrap(cl.set_workspace,
workspace, id), pretty)
|
ValueError
|
dataset/ETHPy150Open planetlabs/planet-client-python/planet/scripts/__init__.py/set_workspace
|
8,780
|
def __init__(self):
try:
import raven
self.enabled = True
dsn = os.environ['SPLASH_SENTRY_DSN']
if dsn.startswith('https'):
dsn = dsn.replace('https://', 'twisted+https://')
self.client = raven.Client(dsn)
except (ImportError, __HOLE__):
self.enabled = False
|
KeyError
|
dataset/ETHPy150Open scrapinghub/splash/splash/sentry.py/SentryLogger.__init__
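Catching (ImportError, KeyError) in one clause lets a single flag disable the feature whether the raven package or the SPLASH_SENTRY_DSN variable is missing. A stripped-down sketch of the same optional-dependency guard (SENTRY_DSN is an illustrative variable name):

import os

class OptionalSentry(object):
    def __init__(self):
        try:
            import raven  # optional dependency
            self.client = raven.Client(os.environ['SENTRY_DSN'])
            self.enabled = True
        except (ImportError, KeyError):
            # raven not installed, or SENTRY_DSN not set
            self.enabled = False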
|
8,781
|
def process_options(user_args):
try:
db_plugin = ServiceLocator.get_component("db_plugin")
valid_groups = db_plugin.GetAllGroups()
valid_types = db_plugin.GetAllTypes() + ['all', 'quiet']
arg = parse_options(user_args, valid_groups, valid_types)
except KeyboardInterrupt as e: #Exception as e:
usage("Invalid OWTF option(s) " + e)
# Default settings:
profiles = {}
plugin_group = arg.PluginGroup
if arg.CustomProfile: # Custom profiles specified
# Quick pseudo-validation check
for profile in arg.CustomProfile.split(','):
chunks = profile.split(':')
if len(chunks) != 2 or not os.path.exists(chunks[1]):
usage("Invalid Profile")
else: # profile "ok" :)
profiles[chunks[0]] = chunks[1]
if arg.OnlyPlugins:
arg.OnlyPlugins, plugin_groups = get_plugins_from_arg(arg.OnlyPlugins)
try:
# Set Plugin Group according to plugin list specified
plugin_group = plugin_groups[0]
except __HOLE__:
usage("Please use either OWASP/OWTF codes or Plugin names")
logging.info(
"Defaulting Plugin Group to '" +
plugin_group + "' based on list of plugins supplied")
if arg.ExceptPlugins:
arg.ExceptPlugins, plugin_groups = get_plugins_from_arg(arg.ExceptPlugins)
if arg.TOR_mode:
arg.TOR_mode = arg.TOR_mode.split(":")
if(arg.TOR_mode[0] == "help"):
from framework.http.proxy.tor_manager import TOR_manager
TOR_manager.msg_configure_tor()
exit(0)
if len(arg.TOR_mode) == 1:
if arg.TOR_mode[0] != "help":
usage("Invalid argument for TOR-mode")
elif len(arg.TOR_mode) != 5:
usage("Invalid argument for TOR-mode")
else:
# Enables OutboundProxy.
if arg.TOR_mode[0] == '':
outbound_proxy_ip = "127.0.0.1"
else:
outbound_proxy_ip = arg.TOR_mode[0]
if arg.TOR_mode[1] == '':
outbound_proxy_port = "9050" # default TOR port
else:
outbound_proxy_port = arg.TOR_mode[1]
arg.OutboundProxy = "socks://" + outbound_proxy_ip + \
":" + outbound_proxy_port
if arg.Botnet_mode: # Checking arguments
arg.Botnet_mode = arg.Botnet_mode.split(":")
if arg.Botnet_mode[0] == "miner" and len(arg.Botnet_mode) != 1:
usage("Invalid argument for Botnet mode\n Mode must be miner or list")
if arg.Botnet_mode[0] == "list":
if len(arg.Botnet_mode) != 2:
usage("Invalid argument for Botnet mode\n Mode must be miner or list")
if not os.path.isfile(os.path.expanduser(arg.Botnet_mode[1])):
usage("Error Proxy List not found! Please check the path.")
if arg.OutboundProxy:
arg.OutboundProxy = arg.OutboundProxy.split('://')
if len(arg.OutboundProxy) == 2:
arg.OutboundProxy = arg.OutboundProxy + \
arg.OutboundProxy.pop().split(':')
if arg.OutboundProxy[0] not in ["socks", "http"]:
usage("Invalid argument for Outbound Proxy")
else:
arg.OutboundProxy = arg.OutboundProxy.pop().split(':')
# OutboundProxy should be type://ip:port
if (len(arg.OutboundProxy) not in [2, 3]):
usage("Invalid argument for Outbound Proxy")
else: # Check if the port is an int.
try:
int(arg.OutboundProxy[-1])
except ValueError:
usage("Invalid port provided for Outbound Proxy")
if arg.InboundProxy:
arg.InboundProxy = arg.InboundProxy.split(':')
# InboundProxy should be (ip:)port:
if len(arg.InboundProxy) not in [1, 2]:
usage("Invalid argument for Inbound Proxy")
else:
try:
int(arg.InboundProxy[-1])
except ValueError:
usage("Invalid port for Inbound Proxy")
plugin_types_for_group = db_plugin.GetTypesForGroup(plugin_group)
if arg.PluginType == 'all':
arg.PluginType = plugin_types_for_group
elif arg.PluginType == 'quiet':
arg.PluginType = ['passive', 'semi_passive']
scope = arg.Targets or [] # Arguments at the end are the URL target(s)
num_targets = len(scope)
if plugin_group != 'auxiliary' and num_targets == 0 and not arg.list_plugins:
#usage("") OMG, #TODO: Fix this
pass
elif num_targets == 1: # Check if this is a file
if os.path.isfile(scope[0]):
logging.info("Scope file: trying to load targets from it ..")
new_scope = []
for target in open(scope[0]).read().split("\n"):
CleanTarget = target.strip()
if not CleanTarget:
continue # Skip blank lines
new_scope.append(CleanTarget)
if len(new_scope) == 0: # Bad file
usage("Please provide a scope file (1 target x line)")
scope = new_scope
for target in scope:
if target[0] == "-":
usage("Invalid Target: " + target)
args = ''
if plugin_group == 'auxiliary':
# For auxiliary plugins, the scope are the parameters.
args = scope
# auxiliary plugins do not have targets, they have metasploit-like
# parameters.
scope = ['auxiliary']
return {
'list_plugins': arg.list_plugins,
'Force_Overwrite': arg.ForceOverwrite,
'Interactive': arg.Interactive == 'yes',
'Simulation': arg.Simulation,
'Scope': scope,
'argv': sys.argv,
'PluginType': arg.PluginType,
'OnlyPlugins': arg.OnlyPlugins,
'ExceptPlugins': arg.ExceptPlugins,
'InboundProxy': arg.InboundProxy,
'OutboundProxy': arg.OutboundProxy,
'OutboundProxyAuth': arg.OutboundProxyAuth,
'Profiles': profiles,
'PluginGroup': plugin_group,
'RPort': arg.RPort,
'PortWaves' : arg.PortWaves,
'ProxyMode': arg.ProxyMode,
'TOR_mode' : arg.TOR_mode,
'Botnet_mode' : arg.Botnet_mode,
'nowebui': arg.nowebui,
'Args': args}
|
IndexError
|
dataset/ETHPy150Open owtf/owtf/owtf.py/process_options
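The outbound-proxy handling above splits 'type://ip:port' (or bare 'ip:port') by hand and validates the port with int(). A compact sketch of that validation as a standalone helper, keeping the same socks/http restriction:

def parse_proxy(spec):
    """Parse 'type://host:port' or 'host:port' into (type, host, port)."""
    proxy_type, _, rest = spec.rpartition('://')
    host, _, port = rest.rpartition(':')
    if proxy_type and proxy_type not in ('socks', 'http'):
        raise ValueError('invalid proxy type: %r' % proxy_type)
    try:
        return proxy_type or None, host, int(port)
    except ValueError:
        raise ValueError('invalid port: %r' % port)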
|
8,782
|
def run_owtf(core, args):
try:
if core.start(args):
# Only if Start is for real (i.e. not just listing plugins, etc)
core.finish() # Not Interrupted or Crashed.
except __HOLE__:
# NOTE: The user chose to interact: interactivity check redundant here:
logging.warning("OWTF was aborted by the user:")
logging.info("Please check report/plugin output files for partial results")
# Interrupted. Must save the DB to disk, finish report, etc.
core.finish()
except SystemExit:
pass # Report already saved, framework tries to exit.
finally: # Needed to rename the temp storage dirs to avoid confusion.
core.clean_temp_storage_dirs()
|
KeyboardInterrupt
|
dataset/ETHPy150Open owtf/owtf/owtf.py/run_owtf
|
8,783
|
def get(self, name=REG_UNNAMED):
        # We accept integers or strings as register names.
name = str(name)
assert len(str(name)) == 1, "Register names must be 1 char long."
# Did we request a special register?
if name == REG_BLACK_HOLE:
return
elif name == REG_FILE_NAME:
try:
return [self.view.file_name()]
except __HOLE__:
return ''
elif name in REG_SYS_CLIPBOARD_ALL:
return [sublime.get_clipboard()]
elif ((name not in (REG_UNNAMED, REG_SMALL_DELETE)) and
(name in REG_SPECIAL)):
return
# Special case lumped among these --user always wants the sys
# clipboard.
elif ((name == REG_UNNAMED) and
(self.settings.view['vintageous_use_sys_clipboard'] is True)):
return [sublime.get_clipboard()]
# If the expression register holds a value and we're requesting the
# unnamed register, return the expression register and clear it
        # afterwards.
elif name == REG_UNNAMED and _REGISTER_DATA.get(REG_EXPRESSION, ''):
value = _REGISTER_DATA[REG_EXPRESSION]
_REGISTER_DATA[REG_EXPRESSION] = ''
return value
# We requested an [a-z0-9"] register.
if name.isdigit():
if name == '0':
return _REGISTER_DATA[name]
return _REGISTER_DATA['1-9'][int(name) - 1]
try:
# In Vim, "A and "a seem to be synonyms, so accept either.
return _REGISTER_DATA[name.lower()]
except KeyError:
pass
|
AttributeError
|
dataset/ETHPy150Open guillermooo/Vintageous/vi/registers.py/Registers.get
|
8,784
|
def __setitem__(self, key, value):
try:
if key.isupper():
self.append_to(key, value)
else:
self.set(key, value)
except __HOLE__:
self.set(key, value)
|
AttributeError
|
dataset/ETHPy150Open guillermooo/Vintageous/vi/registers.py/Registers.__setitem__
|
8,785
|
def wait(self, timeout=None):
"""
Wait for events.
"""
try:
if timeout:
gevent.sleep(timeout)
else:
while True:
gevent.sleep(1000)
except (__HOLE__, SystemExit, Exception):
pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open iancmcc/ouimeaux/ouimeaux/environment.py/Environment.wait
|
8,786
|
def get_switch(self, name):
"""
Get a switch by name.
"""
try:
return self._switches[name]
except __HOLE__:
raise UnknownDevice(name)
|
KeyError
|
dataset/ETHPy150Open iancmcc/ouimeaux/ouimeaux/environment.py/Environment.get_switch
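get_switch, get_motion, get_bridge and get_maker all repeat one pattern: translate a KeyError from an internal registry dict into a domain-specific UnknownDevice. The repetition could be factored into a single helper; a sketch, with a stand-in for ouimeaux's exception:

class UnknownDevice(Exception):
    """Stand-in for ouimeaux's UnknownDevice."""

def _get_device(registry, name):
    """Look up name in a device registry, raising UnknownDevice if absent."""
    try:
        return registry[name]
    except KeyError:
        raise UnknownDevice(name)

# get_switch would then reduce to: return _get_device(self._switches, name)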
|
8,787
|
def get_motion(self, name):
"""
Get a motion by name.
"""
try:
return self._motions[name]
except __HOLE__:
raise UnknownDevice(name)
|
KeyError
|
dataset/ETHPy150Open iancmcc/ouimeaux/ouimeaux/environment.py/Environment.get_motion
|
8,788
|
def get_bridge(self, name):
"""
Get a bridge by name.
"""
try:
return self._bridges[name]
except __HOLE__:
raise UnknownDevice(name)
|
KeyError
|
dataset/ETHPy150Open iancmcc/ouimeaux/ouimeaux/environment.py/Environment.get_bridge
|
8,789
|
def get_maker(self, name):
"""
Get a maker by name.
"""
try:
return self._makers[name]
except __HOLE__:
raise UnknownDevice(name)
|
KeyError
|
dataset/ETHPy150Open iancmcc/ouimeaux/ouimeaux/environment.py/Environment.get_maker
|
8,790
|
def make_raw_request_message(method, path, headers, version=HttpVersion11,
should_close=False, compression=False):
raw_headers = [(k.encode('utf-8'), v.encode('utf-8'))
for k, v in headers.items()]
try:
message = RawRequestMessage(method=method, path=path, headers=headers,
raw_headers=raw_headers,
version=version, should_close=should_close,
compression=compression)
except __HOLE__: # aiohttp < 0.21.x
message = RawRequestMessage(method=method, path=path, headers=headers,
version=version, should_close=should_close,
compression=compression)
return message
|
TypeError
|
dataset/ETHPy150Open aio-libs/sockjs/tests/test_base.py/make_raw_request_message
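Retrying a constructor with fewer keyword arguments on TypeError is a common shim for libraries whose signatures changed between releases (aiohttp gained raw_headers in 0.21). The pattern in general form, with a hypothetical factory func:

def call_with_fallback(func, preferred_kwargs, fallback_kwargs):
    """Try func with new-style kwargs, falling back for older releases."""
    try:
        return func(**preferred_kwargs)
    except TypeError:
        # older releases reject the extra keyword arguments
        return func(**fallback_kwargs)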
|
8,791
|
def _GetEnviron(self, name):
"""Helper method ensures environment configured as expected.
Args:
name: Name of environment variable to get.
Returns:
Environment variable associated with name.
Raises:
ConfigurationError if required environment variable is not found.
"""
try:
return os.environ[name]
except __HOLE__:
raise ConfigurationError('%s is not set in environment.' % name)
|
KeyError
|
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/api/blobstore/blobstore_stub.py/BlobstoreServiceStub._GetEnviron
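Translating os.environ's KeyError into a ConfigurationError gives callers an actionable message instead of a bare lookup failure. The same pattern outside App Engine, with a stand-in exception class:

import os

class ConfigurationError(Exception):
    """Stand-in for the blobstore stub's ConfigurationError."""

def require_env(name):
    try:
        return os.environ[name]
    except KeyError:
        raise ConfigurationError('%s is not set in environment.' % name)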
|
8,792
|
def setup_apiv2():
"""
Setup apiv2 when using PyQt4 and Python2.
"""
# setup PyQt api to version 2
if sys.version_info[0] == 2:
logging.getLogger(__name__).debug(
'setting up SIP API to version 2')
import sip
try:
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
except __HOLE__:
logging.getLogger(__name__).critical(
"failed to set up sip api to version 2 for PyQt4")
raise ImportError('PyQt4')
|
ValueError
|
dataset/ETHPy150Open dragondjf/PFramer/qframer/qt/__init__.py/setup_apiv2
|
8,793
|
def autodetect():
"""
Auto-detects and use the first available QT_API by importing them in the
following order:
1) PyQt5
2) PyQt4
3) PySide
"""
logging.getLogger(__name__).debug('auto-detecting QT_API')
try:
logging.getLogger(__name__).debug('trying PyQt5')
import PyQt5
os.environ[QT_API] = PYQT5_API
from PyQt5 import QtCore
QtCore.QCoreApplication.setLibraryPaths([os.sep.join([sys.prefix,
'Lib', 'site-packages', 'PyQt5', 'plugins'])])
logging.getLogger(__name__).debug('imported PyQt5')
except ImportError:
try:
logging.getLogger(__name__).debug('trying PyQt4')
setup_apiv2()
import PyQt4
os.environ[QT_API] = PYQT4_API
logging.getLogger(__name__).debug('imported PyQt4')
except ImportError:
try:
logging.getLogger(__name__).debug('trying PySide')
import PySide
os.environ[QT_API] = PYSIDE_API
logging.getLogger(__name__).debug('imported PySide')
except __HOLE__:
raise PythonQtError('No Qt bindings could be found')
|
ImportError
|
dataset/ETHPy150Open dragondjf/PFramer/qframer/qt/__init__.py/autodetect
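The nested try/except blocks above form a preference-ordered import chain: try PyQt5, then PyQt4, then PySide, and raise only after every candidate fails. A flatter sketch of the same idea using importlib (module names illustrative; the per-binding setup above is omitted):

import importlib

def first_importable(candidates):
    """Return the first importable module name, or raise ImportError."""
    for name in candidates:
        try:
            importlib.import_module(name)
            return name
        except ImportError:
            continue
    raise ImportError('none of %r could be imported' % (candidates,))

# first_importable(['PyQt5', 'PyQt4', 'PySide'])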
|
8,794
|
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
Walks the list of names and turns them into PathInfo tuples. Note that
a single name in 'names' can generate multiple PathInfos (m2m for
example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Returns a list of PathInfo tuples. In addition returns the final field
(the last used join field), and target (which is a field guaranteed to
contain the same value as the final field). Finally, the method returns
those names that weren't found (which are likely transforms and the
final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
field = None
try:
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif pos == 0:
for rel in opts.related_objects:
if (name == rel.related_model._meta.model_name and
rel.related_name == rel.related_model._meta.default_related_name):
related_name = rel.related_name
field = opts.get_field(related_name)
warnings.warn(
"Query lookup '%s' is deprecated in favor of "
"Meta.default_related_name '%s'."
% (name, related_name),
RemovedInDjango20Warning, 2
)
break
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except __HOLE__:
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
field_names = list(get_field_names_from_opts(opts))
available = sorted(field_names + list(self.annotation_select))
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(available)))
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model is not opts.model:
# The field lives on a base class of the current model.
# Skip the chain of proxy to the concrete proxied model
proxied_model = opts.concrete_model
for int_model in opts.get_base_chain(model):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.remote_field.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
cur_names_with_path[1].append(
PathInfo(final_field.model._meta, opts, targets, final_field, False, True)
)
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info()
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name))
break
return path, final_field, targets, names[pos + 1:]
|
AttributeError
|
dataset/ETHPy150Open django/django/django/db/models/sql/query.py/Query.names_to_path
|
8,795
|
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except __HOLE__:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
|
AttributeError
|
dataset/ETHPy150Open django/django/django/db/models/sql/query.py/Query.get_loaded_field_names
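Using the AttributeError of a missing attribute as the cache-miss signal is a classic memoization idiom: no sentinel value is needed, and the hit path is a plain attribute read. A minimal sketch of the same caching trick:

class Report(object):
    def summary(self):
        try:
            return self._summary_cache
        except AttributeError:
            # first call: compute once, keep it on the instance
            self._summary_cache = self._build_summary()
            return self._summary_cache

    def _build_summary(self):
        return {'rows': 0}  # placeholder for the costly computation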
|
8,796
|
def target_function(self, running, data):
module_verbosity = boolify(self.verbosity)
name = threading.current_thread().name
print_status(name, 'process is starting...', verbose=module_verbosity)
while running.is_set():
try:
line = data.next().split(":")
user = line[0].strip()
password = line[1].strip()
except __HOLE__:
break
else:
retries = 0
while retries < 3:
try:
tn = telnetlib.Telnet(self.target, self.port)
tn.expect(["Login: ", "login: "], 5)
tn.write(user + "\r\n")
tn.expect(["Password: ", "password"], 5)
tn.write(password + "\r\n")
tn.write("\r\n")
(i, obj, res) = tn.expect(["Incorrect", "incorrect"], 5)
tn.close()
if i != -1:
print_error("Target: {}:{} {}: Authentication Failed - Username: '{}' Password: '{}'".format(self.target, self.port, name, user, password), verbose=module_verbosity)
else:
if any(map(lambda x: x in res, ["#", "$", ">"])) or len(res) > 500: # big banner e.g. mikrotik
if boolify(self.stop_on_success):
running.clear()
print_success("Target: {}:{} {}: Authentication Succeed - Username: '{}' Password: '{}'".format(self.target, self.port, name, user, password), verbose=module_verbosity)
self.credentials.append((self.target, self.port, user, password))
tn.close()
break
except EOFError:
print_error(name, "Connection problem. Retrying...", verbose=module_verbosity)
retries += 1
if retries > 2:
print_error("Too much connection problems. Quiting...", verbose=module_verbosity)
return
continue
print_status(name, 'process is terminated.', verbose=module_verbosity)
|
StopIteration
|
dataset/ETHPy150Open reverse-shell/routersploit/routersploit/modules/creds/telnet_default.py/Exploit.target_function
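The except StopIteration above is how each worker thread notices that the shared wordlist iterator has run dry. A minimal sketch of that work-distribution scheme, with a lock added because next() on a shared iterator is not guaranteed thread-safe (handle is a hypothetical per-item callback):

import threading

def worker(items, lock, handle):
    """Pull items from a shared iterator until it is exhausted."""
    while True:
        try:
            with lock:
                item = next(items)
        except StopIteration:
            break
        handle(item)

# lock = threading.Lock(); items = iter(open('wordlist.txt'))
# start several threading.Thread(target=worker, args=(items, lock, handle))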
|
8,797
|
def __run_experiment__(self):
print "starting mcmc chain"
self.mcmc_chain.run()
# compute quantiles after burn_in if possible
try:
print "trying to precompute std quantiles"
burned_in = self.mcmc_chain.samples[self.mcmc_chain.mcmc_params.burnin:, :]
self.quantiles = self.mcmc_chain.mcmc_sampler.distribution.emp_quantiles(\
burned_in, self.ref_quantiles)
except __HOLE__:
print "computing quantiles is not possible, skipping"
|
NotImplementedError
|
dataset/ETHPy150Open karlnapf/kameleon-mcmc/kameleon_mcmc/experiments/SingleChainExperiment.py/SingleChainExperiment.__run_experiment__
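Treating NotImplementedError as "this distribution cannot do that" lets the experiment degrade gracefully instead of aborting the chain. The same capability probe in isolation:

def try_quantiles(distribution, samples, quantiles):
    """Return empirical quantiles, or None when unsupported."""
    try:
        return distribution.emp_quantiles(samples, quantiles)
    except NotImplementedError:
        return None  # caller simply skips this step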
|
8,798
|
def start_table(self, header_row):
try:
table = self._tables[header_row[0]]
except (KeyError, __HOLE__):
return None
if not self._table_is_allowed(table):
return None
table.set_header(header_row)
return table
|
IndexError
|
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/parsing/model.py/_TestData.start_table
|
8,799
|
def __init__(self, content, comment=None, linenumber=None):
self.assign = list(self._get_assigned_vars(content))
try:
self.keyword = content[len(self.assign)]
except __HOLE__:
self.keyword = None
self.args = content[len(self.assign)+1:]
self.comment = Comment(comment)
self.linenumber = linenumber
|
IndexError
|
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/parsing/model.py/Step.__init__
|