| Unnamed: 0 (int64, 0–10k) | function (string, lengths 79–138k) | label (string, 20 classes) | info (string, lengths 42–261) |
|---|---|---|---|
1,000
|
def run_test(self):
result = None
timeout_duration = 10 # Default test case timeout
event_queue = Queue() # Events from DUT to host
dut_event_queue = Queue() # Events from host to DUT {k;v}
def callback__notify_prn(key, value, timestamp):
"""! Handles __norify_prn. Prints all lines in separate log line """
for line in value.splitlines():
self.logger.prn_inf(line)
callbacks = {
"__notify_prn" : callback__notify_prn
}
# If True, the host test is allowed to consume all remaining events after the test is finished
callbacks_consume = True
# Flag set when the __exit event has occurred
callbacks__exit = False
# Handle to dynamically loaded host test object
self.test_supervisor = None
# Version: greentea-client version from DUT
self.client_version = None
self.logger.prn_inf("starting host test process...")
def start_conn_process():
# Create device info here as it may change after restart.
config = {
"digest" : "serial",
"port" : self.mbed.port,
"baudrate" : self.mbed.serial_baud,
"program_cycle_s" : self.options.program_cycle_s,
"reset_type" : self.options.forced_reset_type
}
# DUT-host communication process
args = (event_queue, dut_event_queue, self.prn_lock, config)
p = Process(target=conn_process, args=args)
p.daemon = True
p.start()
return p
p = start_conn_process()
start_time = time()
try:
consume_preamble_events = True
while (time() - start_time) < timeout_duration:
# Handle default events like timeout, host_test_name, ...
if not event_queue.empty():
try:
(key, value, timestamp) = event_queue.get(timeout=1)
except QueueEmpty:
continue
if consume_preamble_events:
if key == '__timeout':
# Override default timeout for this event queue
start_time = time()
timeout_duration = int(value) # New timeout
self.logger.prn_inf("setting timeout to: %d sec"% int(value))
elif key == '__version':
self.client_version = value
self.logger.prn_inf("DUT greentea-client version: " + self.client_version)
elif key == '__host_test_name':
# Load dynamically requested host test
self.test_supervisor = get_host_test(value)
if self.test_supervisor:
# Pass communication queues and setup() host test
self.test_supervisor.setup_communication(event_queue, dut_event_queue)
try:
# After setup() user should already register all callbacks
self.test_supervisor.setup()
except (TypeError, __HOLE__):
# setup() can throw in normal circumstances TypeError and ValueError
self.logger.prn_err("host test setup() failed, reason:")
self.logger.prn_inf("==== Traceback start ====")
for line in traceback.format_exc().splitlines():
print line
self.logger.prn_inf("==== Traceback end ====")
result = self.RESULT_ERROR
break
self.logger.prn_inf("host test setup() call...")
if self.test_supervisor.get_callbacks():
callbacks.update(self.test_supervisor.get_callbacks())
self.logger.prn_inf("CALLBACKs updated")
else:
self.logger.prn_wrn("no CALLBACKs specified by host test")
self.logger.prn_inf("host test detected: %s"% value)
else:
self.logger.prn_err("host test not detected: %s"% value)
consume_preamble_events = False
elif key == '__sync':
# This is DUT-Host Test handshake event
self.logger.prn_inf("sync KV found, uuid=%s, timestamp=%f"% (str(value), timestamp))
elif key == '__notify_conn_lost':
# This event is sent by conn_process, DUT connection was lost
self.logger.prn_err(value)
self.logger.prn_wrn("stopped to consume events due to %s event"% key)
callbacks_consume = False
result = self.RESULT_IO_SERIAL
break
elif key.startswith('__'):
# Consume other system level events
pass
else:
self.logger.prn_err("orphan event in preamble phase: {{%s;%s}}, timestamp=%f"% (key, str(value), timestamp))
else:
if key == '__notify_complete':
# This event is sent by Host Test, test result is in value
# or if value is None, value will be retrieved from HostTest.result() method
self.logger.prn_inf("%s(%s)"% (key, str(value)))
result = value
break
elif key == '__reset_dut':
# Disconnecting and re-connecting comm process will reset DUT
dut_event_queue.put(('__host_test_finished', True, time()))
p.join()
# self.mbed.update_device_info() - left commented out; it would be required for a hard reset.
p = start_conn_process()
elif key == '__notify_conn_lost':
# This event is sent by conn_process, DUT connection was lost
self.logger.prn_err(value)
self.logger.prn_wrn("stopped to consume events due to %s event"% key)
callbacks_consume = False
result = self.RESULT_IO_SERIAL
break
elif key == '__exit':
# This event is sent by DUT, test suite exited
self.logger.prn_inf("%s(%s)"% (key, str(value)))
callbacks__exit = True
break
elif key in callbacks:
# Handle callback
callbacks[key](key, value, timestamp)
else:
self.logger.prn_err("orphan event in main phase: {{%s;%s}}, timestamp=%f"% (key, str(value), timestamp))
except Exception:
self.logger.prn_err("something went wrong in event main loop!")
self.logger.prn_inf("==== Traceback start ====")
for line in traceback.format_exc().splitlines():
print line
self.logger.prn_inf("==== Traceback end ====")
result = self.RESULT_ERROR
time_duration = time() - start_time
self.logger.prn_inf("test suite run finished after %.2f sec..."% time_duration)
# Force conn_proxy process to return
dut_event_queue.put(('__host_test_finished', True, time()))
p.join()
self.logger.prn_inf("CONN exited with code: %s"% str(p.exitcode))
# Callbacks...
self.logger.prn_inf("No events in queue" if event_queue.empty() else "Some events in queue")
# If host test was used we will:
# 1. Consume all existing events in queue if consume=True
# 2. Check result from host test and call teardown()
if callbacks_consume:
# We are consuming all remaining events if requested
while not event_queue.empty():
try:
(key, value, timestamp) = event_queue.get(timeout=1)
except QueueEmpty:
break
if key == '__notify_complete':
# This event is sent by Host Test, test result is in value
# or if value is None, value will be retrieved from HostTest.result() method
self.logger.prn_inf("%s(%s)"% (key, str(value)))
result = value
elif key.startswith('__'):
# Consume other system level events
pass
elif key in callbacks:
callbacks[key](key, value, timestamp)
else:
self.logger.prn_wrn(">>> orphan event: {{%s;%s}}, timestamp=%f"% (key, str(value), timestamp))
self.logger.prn_inf("stopped consuming events")
if result is not None: # We must compare here against None!
# Here for example we've received some error code like IOERR_COPY
self.logger.prn_inf("host test result() call skipped, received: %s"% str(result))
else:
if self.test_supervisor:
result = self.test_supervisor.result()
self.logger.prn_inf("host test result(): %s"% str(result))
if not callbacks__exit:
self.logger.prn_wrn("missing __exit event from DUT")
#if not callbacks__exit and not result:
if not callbacks__exit and result is None:
self.logger.prn_err("missing __exit event from DUT and no result from host test, timeout...")
result = self.RESULT_TIMEOUT
self.logger.prn_inf("calling blocking teardown()")
if self.test_supervisor:
self.test_supervisor.teardown()
self.logger.prn_inf("teardown() finished")
return result
|
ValueError
|
dataset/ETHPy150Open ARMmbed/htrun/mbed_host_tests/host_tests_runner/host_test_default.py/DefaultTestSelector.run_test
|
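The preamble and main loops above consume (key, value, timestamp) tuples that conn_process produces by parsing the DUT's {{key;value}} lines (the same format the logs above print). As a rough, hypothetical sketch of that parsing step, not the actual htrun implementation:

import re
from time import time

KV_PATTERN = re.compile(r"\{\{([^;}]+);([^}]*)\}\}")  # assumed pattern

def parse_kv_line(line):
    """Turn a DUT line like '{{__timeout;10}}' into an event tuple."""
    match = KV_PATTERN.search(line)
    if match:
        key, value = match.groups()
        return (key, value, time())
    return None

# parse_kv_line("{{__timeout;10}}") -> ('__timeout', '10', <timestamp>)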
1,001
|
def execute(self):
"""! Test runner for host test.
@details This function will start executing test and forward test result via serial port
to test suite. This function is sensitive to work-flow flags such as --skip-flashing,
--skip-reset etc.
First function will flash device with binary, initialize serial port for communication,
reset target. On serial port handshake with test case will be performed. It is when host
test reads property data from serial port (sent over serial port).
At the end of the procedure proper host test (defined in set properties) will be executed
and test execution timeout will be measured.
"""
result = self.RESULT_UNDEF
try:
# Copy image to device
if self.options.skip_flashing:
self.logger.prn_inf("copy image onto target... SKIPPED!")
else:
self.logger.prn_inf("copy image onto target...")
result = self.mbed.copy_image()
if not result:
result = self.RESULT_IOERR_COPY
return self.get_test_result_int(result)
# Execute test if flashing was successful or skipped
test_result = self.run_test()
if test_result == True:
result = self.RESULT_SUCCESS
elif test_result == False:
result = self.RESULT_FAILURE
elif test_result is None:
result = self.RESULT_ERROR
else:
result = test_result
# This will be captured by Greentea
self.logger.prn_inf("{{result;%s}}"% result)
return self.get_test_result_int(result)
except __HOLE__:
return(-3) # Keyboard interrupt
|
KeyboardInterrupt
|
dataset/ETHPy150Open ARMmbed/htrun/mbed_host_tests/host_tests_runner/host_test_default.py/DefaultTestSelector.execute
|
1,002
|
def basic_auth(realm, checkpassword, debug=False):
"""A CherryPy tool which hooks at before_handler to perform
HTTP Basic Access Authentication, as specified in :rfc:`2617`.
If the request has an 'authorization' header with a 'Basic' scheme, this
tool attempts to authenticate the credentials supplied in that header. If
the request has no 'authorization' header, or if it does but the scheme is
not 'Basic', or if authentication fails, the tool sends a 401 response with
a 'WWW-Authenticate' Basic header.
realm
A string containing the authentication realm.
checkpassword
A callable which checks the authentication credentials.
Its signature is checkpassword(realm, username, password), where
username and password are the values obtained from the request's
'authorization' header. If authentication succeeds, checkpassword
returns True, else it returns False.
"""
if '"' in realm:
raise ValueError('Realm cannot contain the " (quote) character.')
request = cherrypy.serving.request
auth_header = request.headers.get('authorization')
if auth_header is not None:
try:
scheme, params = auth_header.split(' ', 1)
if scheme.lower() == 'basic':
username, password = base64_decode(params).split(':', 1)
if checkpassword(realm, username, password):
if debug:
cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC')
request.login = username
return # successful authentication
except (__HOLE__, binascii.Error): # split() error, base64.decodestring() error
raise cherrypy.HTTPError(400, 'Bad Request')
# Respond with 401 status and a WWW-Authenticate header
cherrypy.serving.response.headers['www-authenticate'] = 'Basic realm="%s"' % realm
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/cherrypy/cherrypy/lib/auth_basic.py/basic_auth
|
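For context, a minimal sketch of how this tool is usually enabled through CherryPy's standard tools config; the credential check below is a toy placeholder:

import cherrypy

def checkpassword(realm, username, password):
    # toy check; a real application would look up and verify hashed credentials
    return username == 'user' and password == 'secret'

class Root(object):
    @cherrypy.expose
    def index(self):
        return "Hello, %s!" % cherrypy.request.login  # set by the tool above

conf = {'/': {'tools.auth_basic.on': True,
              'tools.auth_basic.realm': 'localhost',
              'tools.auth_basic.checkpassword': checkpassword}}
cherrypy.quickstart(Root(), '/', conf)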
1,003
|
def errstr (exc):
try:
code = exc.args[0] if exc.args else exc.errno
return '[Errno %s] %s' % (errno.errorcode.get(code,str(code)),str(exc))
except KeyError:
return '[Errno unknown (key)] %s' % str(exc)
except __HOLE__:
return '[Errno unknown (attr)] %s' % str(exc)
|
AttributeError
|
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/util/errstr.py/errstr
|
1,004
|
def prepare_search_groups(events, source_duration, chapter_times, max_ts_duration, max_ts_distance):
last_unlinked = None
for idx, event in enumerate(events):
if event.is_comment:
try:
event.link_event(events[idx+1])
except IndexError:
event.link_event(last_unlinked)
continue
if (event.start + event.duration / 2.0) > source_duration:
logging.info('Event time outside of audio range, ignoring: %s' % unicode(event))
event.link_event(last_unlinked)
continue
elif event.end == event.start:
logging.debug('{0}: skipped because zero duration'.format(format_time(event.start)))
try:
event.link_event(events[idx + 1])
except IndexError:
event.link_event(last_unlinked)
continue
# link lines with start and end times identical to some other event
# assuming scripts are sorted by start time so we don't search the entire collection
same_start = lambda x: event.start == x.start
processed = next((x for x in takewhile(same_start, reversed(events[:idx])) if not x.linked and x.end == event.end),None)
if processed:
event.link_event(processed)
else:
last_unlinked = event
events = (e for e in events if not e.linked)
search_groups = merge_short_lines_into_groups(events, chapter_times, max_ts_duration, max_ts_distance)
# link groups contained inside other groups to the larger group
passed_groups = []
for idx, group in enumerate(search_groups):
try:
other = next(x for x in reversed(search_groups[:idx])
if x[0].start <= group[0].start
and x[-1].end >= group[-1].end)
for event in group:
event.link_event(other[0])
except __HOLE__:
passed_groups.append(group)
return passed_groups
|
StopIteration
|
dataset/ETHPy150Open tp7/Sushi/sushi.py/prepare_search_groups
|
1,005
|
def hydrate(self, bundle):
"""Hydrate handles the conversion of fqdn to a label or domain."""
if 'fqdn' in bundle.data:
try:
label_domain = ensure_label_domain(bundle.data['fqdn'])
bundle.data['label'], bundle.data['domain'] = label_domain
except __HOLE__, e:
errors = {}
errors['fqdn'] = e.messages
bundle.errors['error_messages'] = json.dumps(errors)
# We should use an Error(Dict|List) to maintain consistency
# with the errors that are thrown by full_clean.
else:
errors = {}
errors['fqdn'] = ("Couldn't determine a label and "
"domain for this record.")
bundle.errors['error_messages'] = json.dumps(errors)
if 'ttl' in bundle.data and bundle.data['ttl'] == 'None':
bundle.data['ttl'] = None
return bundle
|
ValidationError
|
dataset/ETHPy150Open mozilla/inventory/mozdns/api/v1/api.py/CommonDNSResource.hydrate
|
1,006
|
@transaction.commit_on_success
def obj_create(self, bundle, request=None, **kwargs):
"""
A generic version of creating a dns object. The strategy is simple: get
bundle.data to the point where we call Class(**bundle.data) which
creates an object. We then clean it and then save it. Finally we save
any views that were in the bundle.
"""
# Note, error_response() raises an ImmediateHttpResponse exception.
# This should trigger transaction.commit_on_success to rollback what
# happens in this function
kv = self.extract_kv(bundle)
# KV pairs should be saved after the object has been created
if bundle.errors:
self.error_response(bundle.errors, request)
views = bundle.data.pop('views', [])
comment = bundle.data.pop('comment', '')
# views should be saved after the object has been created
if bundle.errors:
self.error_response(bundle.errors, request)
bundle = self.full_hydrate(bundle)
if bundle.errors:
self.error_response(bundle.errors, request)
# Create the Object
try:
self.apply_commit(bundle.obj, bundle.data)
except (__HOLE__, TypeError), e:
bundle.errors['error_messages'] = e.message
self.error_response(bundle.errors, request)
return self.save_commit(request, bundle, views, comment, kv)
|
ValueError
|
dataset/ETHPy150Open mozilla/inventory/mozdns/api/v1/api.py/CommonDNSResource.obj_create
|
1,007
|
@locked_function('inventory.record_lock')
def save_commit(self, request, bundle, views, comment, kv):
verb = "updated" if bundle.obj.pk else "created"
error = None
try:
# obj's save() method should be calling full_clean()
bundle.obj.save()
except __HOLE__, e:
error = json.dumps(e.message_dict)
except Exception, e:
error = "Please report this error: {0}".format(
traceback.format_exc()
)
if error:
bundle.errors['error_messages'] = error
self.error_response(bundle.errors, request)
reversion.set_comment(comment)
if request.user.is_authenticated():
reversion.set_user(request.user)
try:
self.update_views(bundle.obj, views)
# Now do the kv magic
if kv:
bundle.obj.update_attrs()
for k, v in kv:
setattr(bundle.obj.attrs, k, v)
except ValidationError, e:
bundle.data['error_messages'] = {'views': (
"Some views failed validation with the message: {0}. "
"The object was not" "{1}".format(e.messages[0], verb)
)}
self.error_response(bundle.errors, request)
return bundle
|
ValidationError
|
dataset/ETHPy150Open mozilla/inventory/mozdns/api/v1/api.py/CommonDNSResource.save_commit
|
1,008
|
def hydrate(self, bundle):
# Nameservers don't have a label
if 'fqdn' in bundle.data:
bundle.errors['domain'] = "Nameservers shouldn't have a fqdn"
elif 'label' in bundle.data:
bundle.errors['domain'] = "Nameservers shouldn't have a label"
else:
domain_name = bundle.data.get('domain', '')
try:
domain = Domain.objects.get(name=domain_name)
bundle.data['domain'] = domain
except __HOLE__:
error = "Couldn't find domain {0}".format(domain_name)
bundle.errors['domain'] = error
return bundle
|
ObjectDoesNotExist
|
dataset/ETHPy150Open mozilla/inventory/mozdns/api/v1/api.py/NameserverResource.hydrate
|
1,009
|
def goToDirectory(alias):
"""go to a saved directory"""
if not settings.platformCompatible():
return False
data = pickle.load(open(settings.getDataFile(), "rb"))
try:
data[alias]
except __HOLE__:
speech.fail("Sorry, it doesn't look like you have saved " + alias + " yet.")
speech.fail("Go to the directory you'd like to save and type 'hallie save as " + alias + "\'")
return
try:
(output, error) = subprocess.Popen(["osascript", "-e", CHANGE_DIR % (data[alias])], stdout=subprocess.PIPE).communicate()
except:
speech.fail("Something seems to have gone wrong. Please report this error to michaelmelchione@gmail.com.")
return
speech.success("Successfully navigating to " + data[alias])
|
KeyError
|
dataset/ETHPy150Open mikemelch/hallie/hallie/modules/user.py/goToDirectory
|
1,010
|
def handle_iters(self):
# Iterate over a copy: removing from the list being iterated would skip items.
for i in list(self.iterators):
try:
i.next()
except __HOLE__:
self.iterators.remove(i)
|
StopIteration
|
dataset/ETHPy150Open richo/groundstation/groundstation/station.py/Station.handle_iters
|
1,011
|
def create_channel(self, channel_name):
try:
os.mkdir(os.path.join(self.store.gref_path(), channel_name))
return True
except __HOLE__:
return False
|
OSError
|
dataset/ETHPy150Open richo/groundstation/groundstation/station.py/Station.create_channel
|
1,012
|
def write_graph(self, filename):
"""Writes the graph to the file with format based on file extension
"""
_, ext = os.path.splitext(filename)
try:
format = self.supported_formats[ext.lower()]
except __HOLE__:
raise ValueError("No format could be found for the extension '%s'. " \
"The supported formats are [%s]" %
(ext, ', '.join(self.supported_formats.keys())))
write_fn = getattr(nx, 'write_%s' % format)
write_fn(self, filename)
|
KeyError
|
dataset/ETHPy150Open croach/pydata2013/govtrack.py/Graph.write_graph
|
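A sketch of how the extension lookup plays out, assuming a supported_formats mapping like the one below (the actual dict lives on the class and may differ):

import networkx as nx

supported_formats = {'.gml': 'gml', '.graphml': 'graphml', '.gexf': 'gexf'}  # assumed

g = nx.Graph()
g.add_edge('a', 'b')
nx.write_graphml(g, 'out.graphml')  # what write_graph('out.graphml') resolves to
# write_graph('out.unknown') would instead raise the ValueError above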
1,013
|
def party_affiliation(name):
"""Returns the member's political party affiliation
Given a name with the following format:
TITLE FIRST_NAME LAST_NAME [PARTY_AFFILIATION-DISTRICT_OR_STATE]
this function parses out the party affiliation and returns it.
"""
parties = {'R': 'republican', 'D': 'democrat', 'I': 'independent'}
party_abbrev = re.search('\[([A-Z])-[A-Z]{2}[^\]]*\]', name).groups()[0]
try:
return parties[party_abbrev]
except __HOLE__:
return party_abbrev
|
KeyError
|
dataset/ETHPy150Open croach/pydata2013/govtrack.py/party_affiliation
|
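Worked examples against the name format in the docstring (names invented):

party_affiliation('Rep. Jane Doe [D-CA12]')  # -> 'democrat'
party_affiliation('Sen. John Roe [I-VT]')    # -> 'independent'
party_affiliation('Rep. Ann Poe [L-TX07]')   # -> 'L' (unknown abbreviation falls through the KeyError)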
1,014
|
@staticmethod
def parse(version):
"""Attempts to parse the given string as Semver, then falls back to Namedver."""
try:
return Semver.parse(version)
except __HOLE__:
return Namedver.parse(version)
|
ValueError
|
dataset/ETHPy150Open pantsbuild/pants/src/python/pants/task/scm_publish_mixin.py/Version.parse
|
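Read together with the Semver and Namedver parsers in the next rows, the fallback behaves roughly like this (assuming 'release-1' passes Namedver's name checks):

Version.parse('1.2.3')      # valid semver -> Semver(1, 2, 3)
Version.parse('release-1')  # Semver.parse raises ValueError -> Namedver.parse('release-1')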
1,015
|
@classmethod
def parse(cls, version):
# must not contain whitespace
if not cls._VALID_NAME.match(version):
raise ValueError("Named versions must match {}: '{}'".format(cls._VALID_NAME.pattern, version))
if cls._INVALID_NAME.match(version):
raise ValueError("Named version must contain at least one alphanumeric character")
# must not be valid semver
try:
Semver.parse(version)
except __HOLE__:
return Namedver(version)
else:
raise ValueError("Named versions must not be valid semantic versions: '{0}'".format(version))
|
ValueError
|
dataset/ETHPy150Open pantsbuild/pants/src/python/pants/task/scm_publish_mixin.py/Namedver.parse
|
1,016
|
@staticmethod
def parse(version):
components = version.split('.', 3)
if len(components) != 3:
raise ValueError
major, minor, patch = components
def to_i(component):
try:
return int(component)
except (TypeError, __HOLE__):
raise ValueError('Invalid revision component {} in {} - '
'must be an integer'.format(component, version))
return Semver(to_i(major), to_i(minor), to_i(patch))
|
ValueError
|
dataset/ETHPy150Open pantsbuild/pants/src/python/pants/task/scm_publish_mixin.py/Semver.parse
|
1,017
|
def AddProperty(self, name, _type, animatable=True, user=True):
'''
Add a property to this component
@param name: the name of the property
@param _type: the data type of the property:
'''
if self.ListProperties(pattern=name):
raise Exception("Can not add property '%s'. Already exists on object '%'" % (name, self))
try:
typeData = self.kPropertyTypes[_type]
except __HOLE__:
raise Exception("Invalid property type '%s'. Valid types are: '%s'" % (_type, ', '.join(self.kPropertyTypes.keys())))
typeData.extend([animatable, user, None])
self.component.PropertyCreate(name, *typeData)
|
KeyError
|
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/moBu/site-packages/PyMoBu-0.2/pymobu/components/__init__.py/PMBComponent.AddProperty
|
1,018
|
def SetInverseMatrix(self, matrix, worldSpace=False, _type='Transformation'):
'''
Set the inverse matrix
@param worldSpace: world space matrix (True/False) Default False
@param _type: matrix type (Transformation, Translation, Rotation, Scaling, Center, All)
'''
try:
self.component.SetMatrix(matrix, self.kInverseMatrixTypeDict[_type], worldSpace)
except __HOLE__:
raise Exception("Invalid vector type '%s'. Valid types are: %s" % (_type, ', '.join(self.kInverseMatrixTypeDict.keys())))
|
KeyError
|
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/moBu/site-packages/PyMoBu-0.2/pymobu/components/__init__.py/PMBModel.SetInverseMatrix
|
1,019
|
def SetMatrix(self, matrix, worldSpace=False, _type='Transformation'):
'''
Set the matrix
@param worldSpace: world space matrix (True/False) Default False
@param _type: matrix type (Transformation, Translation, Rotation, Scaling, Center, All)
'''
try:
self.component.SetMatrix(matrix, self.kMatrixTypeDict[_type], worldSpace)
except __HOLE__:
raise Exception("Invalid vector type '%s'. Valid types are: %s" % (_type, ', '.join(self.kMatrixTypeDict.keys())))
|
KeyError
|
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/moBu/site-packages/PyMoBu-0.2/pymobu/components/__init__.py/PMBModel.SetMatrix
|
1,020
|
def GetInverseMatrix(self, worldSpace=False, _type='Transformation'):
'''
Get the inverse matrix
@param worldSpace: world space matrix (True/False) Default False
@param _type: matrix type (Transformation, Translation, Rotation, Scaling, Center, All)
'''
matrix = FBMatrix()
try:
self.component.GetMatrix(matrix, self.kInverseMatrixTypeDict[_type], worldSpace)
except __HOLE__:
raise Exception("Invalid vector type '%s'. Valid types are: %s" % (_type, ', '.join(self.kInverseMatrixTypeDict.keys())))
return matrix
|
KeyError
|
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/moBu/site-packages/PyMoBu-0.2/pymobu/components/__init__.py/PMBModel.GetInverseMatrix
|
1,021
|
def GetMatrix(self, worldSpace=False, _type='Transformation'):
'''
Get the matrix
@param worldSpace: world space matrix (True/False) Default False
@param _type: matrix type (Transformation, Translation, Rotation, Scaling, Center, All)
'''
matrix = FBMatrix()
try:
self.component.GetMatrix(matrix, self.kMatrixTypeDict[_type], worldSpace)
except __HOLE__:
raise Exception("Invalid vector type '%s'. Valid types are: %s" % (_type, ', '.join(self.kMatrixTypeDict.keys())))
return matrix
|
KeyError
|
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/moBu/site-packages/PyMoBu-0.2/pymobu/components/__init__.py/PMBModel.GetMatrix
|
1,022
|
@classmethod
def from_queryset(cls, queryset, fields=None):
if fields is None:
fields = queryset.model._meta.writable_fields
else:
fields = [queryset.model._meta.get_field(f) for f in fields]
table = cls(*(f.name for f in fields))
for instance in queryset:
values = []
for f in fields:
try:
fn = getattr(instance, 'get_{}_display'.format(f.name))
except __HOLE__:
value = f.dumps(getattr(instance, f.attname))
else:
value = fn()
values.append(value)
table.add_row(*values)
return table
|
AttributeError
|
dataset/ETHPy150Open natano/tiget/tiget/table.py/Table.from_queryset
|
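The getattr probe above leans on Django's standard get_<field>_display() convention for fields declared with choices; a minimal sketch of that convention with a hypothetical model:

from django.db import models

class Ticket(models.Model):
    STATUS_CHOICES = (('o', 'open'), ('c', 'closed'))
    status = models.CharField(max_length=1, choices=STATUS_CHOICES)

# For a Ticket with status == 'o', Django generates
# ticket.get_status_display() -> 'open'. Fields without choices have no such
# method, which is exactly what the AttributeError branch above catches.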
1,023
|
def _fetch_changes(self, filenames, progress):
count = 0
total = len(filenames)
progress(_('Parsing RCS files'), count, total)
# We will commit changes to the database every few seconds to
# avoid having to scan all RCS files again in case the process
# is interrupted or an error occurs (a rescan isn't really bad
# but it costs time).
commit_time = 0
commit_interval = 10
self.metadb.begin_transaction()
for rcsfile in filenames:
try:
self._fetch_changes_from_rcsfile(rcsfile)
if time.time() - commit_time >= commit_interval:
try:
self.metadb.end_transaction()
except:
pass
self.metadb.begin_transaction()
commit_time = time.time()
count += 1
progress(_('Parsing RCS files'), count, total)
except __HOLE__:
# Re-raise the exception silently. An impatient user
# may interrupt the process and that should be handled
# gracefully.
raise
except:
# Print the file name where this error happened,
# regardless of whether the error is actually printed,
# just as a quick & dirty debugging aid.
# TODO: raise a FetchChangesError
print "(Error while processing %s)" % rcsfile
raise
finally:
try:
self.metadb.end_transaction()
except:
pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open ustuehler/git-cvs/cvsgit/cvs.py/CVS._fetch_changes
|
1,024
|
def get_admin_delete_url(self):
try:
# Django <=1.6
model_name = self._meta.module_name
except __HOLE__:
# Django >1.6
model_name = self._meta.model_name
return urlresolvers.reverse(
'admin:{0}_{1}_delete'.format(self._meta.app_label, model_name,),
args=(self.pk,))
|
AttributeError
|
dataset/ETHPy150Open divio/django-filer/filer/models/foldermodels.py/Folder.get_admin_delete_url
|
1,025
|
def test_invalid_class():
ppn = Perceptron(epochs=40, eta=0.01, random_seed=1)
try:
ppn.fit(X, y2) # -2, 1 class
assert(1 == 2)
except __HOLE__:
pass
|
ValueError
|
dataset/ETHPy150Open rasbt/mlxtend/mlxtend/classifier/tests/test_perceptron.py/test_invalid_class
|
1,026
|
def find_test_case(self):
splitted_test_target = self.test_target.split(".")
module_ref = None
module_name_len = 0
for i in range(len(splitted_test_target)):
try:
module_ref = importlib.import_module(".".join(splitted_test_target[:i + 1]))
module_name_len = i + 1
except __HOLE__ as e:
if splitted_test_target[i] in str(e):
break
raise
if module_ref is None:
raise ImportError("Test target <%s> is invalid.\nNo module named <%s>."% (self.test_target, splitted_test_target[0]))
test_target_len = len(splitted_test_target)
if module_name_len == test_target_len:
if hasattr(module_ref, "__path__"):
# test target is package
self.find_test_cases_in_package(module_ref)
else:
# test target is module
self.find_test_cases_in_module(module_ref)
elif module_name_len == test_target_len - 1:
# test target is class
self.test_class_filter_group.append_filter(TestClassNameFilter(splitted_test_target[-1]))
self.find_test_cases_in_module(module_ref)
elif module_name_len == test_target_len - 2:
# test target is method
self.test_class_filter_group.append_filter(TestClassNameFilter(splitted_test_target[-2]))
self.test_case_filter_group.append_filter(TestCaseNameFilter(splitted_test_target[-1]))
self.find_test_cases_in_module(module_ref)
else:
raise ImportError("Test target <%s> is probably invalid.\nModule <%s> exists but module <%s> doesn't."% (
self.test_target, ".".join(splitted_test_target[:module_name_len]), ".".join(splitted_test_target[:module_name_len + 1])))
|
ImportError
|
dataset/ETHPy150Open KarlGong/ptest/ptest/testfinder.py/TestFinder.find_test_case
|
1,027
|
def find_test_cases_in_module(self, module_ref):
for module_element in dir(module_ref):
test_class_ref = getattr(module_ref, module_element)
try:
if test_class_ref.__pd_type__ == PDecoratorType.TestClass \
and test_class_ref.__enabled__ \
and test_class_ref.__module__ == module_ref.__name__ \
and self.test_class_filter_group.filter(test_class_ref):
self.find_test_cases_in_class(test_class_ref)
except __HOLE__:
pass
|
AttributeError
|
dataset/ETHPy150Open KarlGong/ptest/ptest/testfinder.py/TestFinder.find_test_cases_in_module
|
1,028
|
def find_test_cases_in_class(self, test_class_ref):
for class_element in dir(test_class_ref):
test_case_ref = getattr(test_class_ref(), class_element)
try:
if test_case_ref.__pd_type__ == PDecoratorType.Test \
and test_case_ref.__enabled__ \
and self.test_case_filter_group.filter(test_case_ref):
self.found_test_case_count += 1
if not self.target_test_suite.add_test_case(test_case_ref):
self.repeated_test_case_count += 1
except __HOLE__:
pass
|
AttributeError
|
dataset/ETHPy150Open KarlGong/ptest/ptest/testfinder.py/TestFinder.find_test_cases_in_class
|
1,029
|
@cached_property
def image(self):
"""Return a Pil representation of this image """
if sys.version < '3':
imageio = StringIO.StringIO(self._image_data)
else:
imageio = StringIO.BytesIO(self._image_data)
try:
source_image = PILImage.open(imageio)
img = PILImage.new('RGBA', source_image.size, (0, 0, 0, 0))
if source_image.mode == 'L':
alpha = source_image.split()[0]
transparency = source_image.info.get('transparency')
mask = PILImage.eval(alpha, lambda a: 0 if a == transparency else 255)
img.paste(source_image, (0, 0), mask=mask)
else:
img.paste(source_image, (0, 0))
except __HOLE__, e:
raise PILUnavailableError(e.args[0].split()[1])
finally:
imageio.close()
self.original_width, self.original_height = img.size
# Crop the image searching for the smallest possible bounding box
# without losing any non-transparent pixel.
# This crop is only used if the crop flag is set in the config.
if self.config['crop']:
img = img.crop(img.split()[-1].getbbox())
return img
|
IOError
|
dataset/ETHPy150Open jorgebastida/glue/glue/core.py/Image.image
|
1,030
|
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except __HOLE__:
raise AttributeError("no such move, %r" % (name,))
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/utils/six.py/remove_move
|
1,031
|
def get_host_target(env):
debug('vc.py:get_host_target()')
host_platform = env.get('HOST_ARCH')
if not host_platform:
host_platform = platform.machine()
# TODO(2.5): the native Python platform.machine() function returns
# '' on all Python versions before 2.6, after which it also uses
# PROCESSOR_ARCHITECTURE.
if not host_platform:
host_platform = os.environ.get('PROCESSOR_ARCHITECTURE', '')
# Retain user requested TARGET_ARCH
req_target_platform = env.get('TARGET_ARCH')
debug('vc.py:get_host_target() req_target_platform:%s'%req_target_platform)
if req_target_platform:
# If user requested a specific platform then only try that one.
target_platform = req_target_platform
else:
target_platform = host_platform
try:
host = _ARCH_TO_CANONICAL[host_platform.lower()]
except __HOLE__, e:
msg = "Unrecognized host architecture %s"
raise ValueError(msg % repr(host_platform))
try:
target = _ARCH_TO_CANONICAL[target_platform.lower()]
except KeyError, e:
all_archs = str(_ARCH_TO_CANONICAL.keys())
raise ValueError("Unrecognized target architecture %s\n\tValid architectures: %s" % (target_platform, all_archs))
return (host, target,req_target_platform)
# If you update this, update SupportedVSList in Tool/MSCommon/vs.py, and the
# MSVC_VERSION documentation in Tool/msvc.xml.
|
KeyError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/MSCommon/vc.py/get_host_target
|
1,032
|
def msvc_version_to_maj_min(msvc_version):
msvc_version_numeric = ''.join([x for x in msvc_version if x in string_digits + '.'])
t = msvc_version_numeric.split(".")
if not len(t) == 2:
raise ValueError("Unrecognized version %s (%s)" % (msvc_version,msvc_version_numeric))
try:
maj = int(t[0])
min = int(t[1])
return maj, min
except __HOLE__, e:
raise ValueError("Unrecognized version %s (%s)" % (msvc_version,msvc_version_numeric))
|
ValueError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/MSCommon/vc.py/msvc_version_to_maj_min
|
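Worked examples of the digit-and-dot filtering above:

msvc_version_to_maj_min('9.0')      # -> (9, 0)
msvc_version_to_maj_min('10.0Exp')  # 'Exp' is stripped first -> (10, 0)
msvc_version_to_maj_min('banana')   # nothing numeric survives -> ValueError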
1,033
|
def find_vc_pdir(msvc_version):
"""Try to find the product directory for the given
version.
Note
----
If for some reason the requested version could not be found, an
exception which inherits from VisualCException will be raised."""
root = 'Software\\'
if common.is_win64():
root = root + 'Wow6432Node\\'
try:
hkeys = _VCVER_TO_PRODUCT_DIR[msvc_version]
except __HOLE__:
debug("Unknown version of MSVC: %s" % msvc_version)
raise UnsupportedVersion("Unknown version %s" % msvc_version)
for key in hkeys:
key = root + key
try:
comps = common.read_reg(key)
except WindowsError, e:
debug('find_vc_dir(): no VC registry key %s' % repr(key))
else:
debug('find_vc_dir(): found VC in registry: %s' % comps)
if os.path.exists(comps):
return comps
else:
debug('find_vc_dir(): reg says dir is %s, but it does not exist. (ignoring)'\
% comps)
raise MissingConfiguration("registry dir %s not found on the filesystem" % comps)
return None
|
KeyError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/MSCommon/vc.py/find_vc_pdir
|
1,034
|
def msvc_setup_env_once(env):
try:
has_run = env["MSVC_SETUP_RUN"]
except __HOLE__:
has_run = False
if not has_run:
msvc_setup_env(env)
env["MSVC_SETUP_RUN"] = True
|
KeyError
|
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/MSCommon/vc.py/msvc_setup_env_once
|
1,035
|
def view_judge(request, judge_id):
judge_id = int(judge_id)
try:
judge = Judge.objects.get(pk=judge_id)
except Judge.DoesNotExist:
return render_to_response('error.html',
{'error_type': "View Judge",
'error_name': str(judge_id),
'error_info':"No such judge"},
context_instance=RequestContext(request))
if request.method == 'POST':
form = JudgeForm(request.POST,instance=judge)
if form.is_valid():
try:
form.save()
except __HOLE__:
return render_to_response('error.html',
{'error_type': "Judge",
'error_name': "["+form.cleaned_data['name']+"]",
'error_info':"Judge information cannot be validated."},
context_instance=RequestContext(request))
return render_to_response('thanks.html',
{'data_type': "Judge",
'data_name': "["+form.cleaned_data['name']+"]",
'data_modification': "EDIT"},
context_instance=RequestContext(request))
else :
return render_to_response('error.html',
{'error_type': "Judge",
'error_name': "",
'error_info': form.errors},
context_instance=RequestContext(request))
else:
form = JudgeForm(instance=judge)
base_url = '/judge/'+str(judge_id)+'/'
scratch_url = base_url + 'scratches/view/'
delete_url = base_url + 'delete/'
links = [(scratch_url,'Scratches for '+str(judge.name),False),
(delete_url,'Delete', True)]
return render_to_response('data_entry.html',
{'form': form,
'links': links,
'title': "Viewing Judge: %s" %(judge.name)},
context_instance=RequestContext(request))
|
ValueError
|
dataset/ETHPy150Open jolynch/mit-tab/mittab/apps/tab/judge_views.py/view_judge
|
1,036
|
def enter_judge(request):
if request.method == 'POST':
form = JudgeForm(request.POST)
if form.is_valid():
try:
form.save()
except __HOLE__:
return render_to_response('error.html',
{'error_type': "Judge",
'error_name': "["+cd['name']+"]",
'error_info': "Judge Cannot Validate!"},
context_instance=RequestContext(request))
return render_to_response('thanks.html',
{'data_type': "Judge",
'data_name': "["+form.cleaned_data['name']+"]",
'data_modification': "CREATED",
'enter_again': True},
context_instance=RequestContext(request))
else:
form = JudgeForm(first_entry=True)
return render_to_response('data_entry.html',
{'form': form, 'title': "Create Judge"},
context_instance=RequestContext(request))
|
ValueError
|
dataset/ETHPy150Open jolynch/mit-tab/mittab/apps/tab/judge_views.py/enter_judge
|
1,037
|
def add_scratches(request, judge_id, number_scratches):
try:
judge_id,number_scratches = int(judge_id),int(number_scratches)
except __HOLE__:
return render_to_response('error.html',
{'error_type': "Scratch",'error_name': "Data Entry",
'error_info':"I require INTEGERS!"},
context_instance=RequestContext(request))
try:
judge = Judge.objects.get(pk=judge_id)
except Judge.DoesNotExist:
return render_to_response('error.html',
{'error_type': "Add Scratches for Judge",
'error_name': str(judge_id),
'error_info':"No such Judge"},
context_instance=RequestContext(request))
if request.method == 'POST':
forms = [ScratchForm(request.POST, prefix=str(i)) for i in range(1,number_scratches+1)]
all_good = True
for form in forms:
all_good = all_good and form.is_valid()
if all_good:
for form in forms:
form.save()
return render_to_response('thanks.html',
{'data_type': "Scratches for Judge",
'data_name': "["+str(judge_id)+"]",
'data_modification': "CREATED"},
context_instance=RequestContext(request))
else:
forms = [ScratchForm(prefix=str(i), initial={'judge':judge_id,'scratch_type':0}) for i in range(1,number_scratches+1)]
return render_to_response('data_entry_multiple.html',
{'forms': zip(forms,[None]*len(forms)),
'data_type':'Scratch',
'title':"Adding Scratch(es) for %s"%(judge.name)},
context_instance=RequestContext(request))
|
ValueError
|
dataset/ETHPy150Open jolynch/mit-tab/mittab/apps/tab/judge_views.py/add_scratches
|
1,038
|
def view_scratches(request, judge_id):
try:
judge_id = int(judge_id)
except __HOLE__:
return render_to_response('error.html',
{'error_type': "Scratch",'error_name': "Delete",
'error_info':"I require INTEGERS!"},
context_instance=RequestContext(request))
scratches = Scratch.objects.filter(judge=judge_id)
judge = Judge.objects.get(pk=judge_id)
number_scratches = len(scratches)
if request.method == 'POST':
forms = [ScratchForm(request.POST, prefix=str(i),instance=scratches[i-1]) for i in range(1,number_scratches+1)]
all_good = True
for form in forms:
all_good = all_good and form.is_valid()
if all_good:
for form in forms:
form.save()
return render_to_response('thanks.html',
{'data_type': "Scratches for judge",
'data_name': "["+str(judge_id)+"]",
'data_modification': "EDITED"},
context_instance=RequestContext(request))
else:
forms = [ScratchForm(prefix=str(i), instance=scratches[i-1]) for i in range(1,len(scratches)+1)]
delete_links = ["/judge/"+str(judge_id)+"/scratches/delete/"+str(scratches[i].id) for i in range(len(scratches))]
links = [('/judge/'+str(judge_id)+'/scratches/add/1/','Add Scratch',False)]
return render_to_response('data_entry_multiple.html',
{'forms': zip(forms,delete_links),
'data_type':'Scratch',
'links':links,
'title':"Viewing Scratch Information for %s"%(judge.name)},
context_instance=RequestContext(request))
|
ValueError
|
dataset/ETHPy150Open jolynch/mit-tab/mittab/apps/tab/judge_views.py/view_scratches
|
1,039
|
def _strerror(err):
try:
return os.strerror(err)
except (ValueError, OverflowError, __HOLE__):
if err in errorcode:
return errorcode[err]
return "Unknown error %s" %err
|
NameError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/asyncore.py/_strerror
|
1,040
|
def __repr__(self):
status = [self.__class__.__module__+"."+self.__class__.__name__]
if self.accepting and self.addr:
status.append('listening')
elif self.connected:
status.append('connected')
if self.addr is not None:
try:
status.append('%s:%d' % self.addr)
except __HOLE__:
status.append(repr(self.addr))
return '<%s at %#x>' % (' '.join(status), id(self))
|
TypeError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/asyncore.py/dispatcher.__repr__
|
1,041
|
def accept(self):
# XXX can return either an address pair or None
try:
conn, addr = self.socket.accept()
except __HOLE__:
return None
except socket.error as why:
if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
return None
else:
raise
else:
return conn, addr
|
TypeError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/asyncore.py/dispatcher.accept
|
1,042
|
def __getattr__(self, attr):
try:
retattr = getattr(self.socket, attr)
except __HOLE__:
raise AttributeError("%s instance has no attribute '%s'"
%(self.__class__.__name__, attr))
else:
msg = "%(me)s.%(attr)s is deprecated. Use %(me)s.socket.%(attr)s " \
"instead." % {'me': self.__class__.__name__, 'attr':attr}
warnings.warn(msg, DeprecationWarning, stacklevel=2)
return retattr
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
|
AttributeError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/asyncore.py/dispatcher.__getattr__
|
1,043
|
def close_all(map=None, ignore_all=False):
if map is None:
map = socket_map
for x in map.values():
try:
x.close()
except __HOLE__, x:
if x.args[0] == EBADF:
pass
elif not ignore_all:
raise
except _reraised_exceptions:
raise
except:
if not ignore_all:
raise
map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
|
OSError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/asyncore.py/close_all
|
1,044
|
def __init__(self, fd, map=None):
dispatcher.__init__(self, None, map)
self.connected = True
try:
fd = fd.fileno()
except __HOLE__:
pass
self.set_file(fd)
# set it to non-blocking mode
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
|
AttributeError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/asyncore.py/file_dispatcher.__init__
|
1,045
|
def clear_mappers():
"""Remove all mappers from all classes.
This function removes all instrumentation from classes and disposes
of their associated mappers. Once called, the classes are unmapped
and can be later re-mapped with new mappers.
:func:`.clear_mappers` is *not* for normal use, as there is literally no
valid usage for it outside of very specific testing scenarios. Normally,
mappers are permanent structural components of user-defined classes, and
are never discarded independently of their class. If a mapped class
itself is garbage collected, its mapper is automatically disposed of as
well. As such, :func:`.clear_mappers` is only for usage in test suites
that re-use the same classes with different mappings, which is itself an
extremely rare use case - the only such use case is in fact SQLAlchemy's
own test suite, and possibly the test suites of other ORM extension
libraries which intend to test various combinations of mapper construction
upon a fixed set of classes.
"""
mapperlib._CONFIGURE_MUTEX.acquire()
try:
while _mapper_registry:
try:
# can't even reliably call list(weakdict) in jython
mapper, b = _mapper_registry.popitem()
mapper.dispose()
except __HOLE__:
pass
finally:
mapperlib._CONFIGURE_MUTEX.release()
|
KeyError
|
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/orm/__init__.py/clear_mappers
|
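A minimal sketch of the test-suite scenario the docstring describes, using the era-appropriate classical mapper() API from the same sqlalchemy.orm module:

from sqlalchemy import Table, Column, Integer, String, MetaData
from sqlalchemy.orm import mapper, clear_mappers

metadata = MetaData()
users = Table('users', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(50)))

class User(object):
    pass

mapper(User, users)  # map the class for one test case
clear_mappers()      # dispose of all mappings between cases
mapper(User, users)  # re-map the same class with a different configuration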
1,046
|
def Open(self, urn, aff4_type=None, mode="r"):
"""Opens the named object.
DeletionPool will only open the object if it's not in the pool already.
Otherwise it will just return the cached version. Objects are cached
based on their urn and mode. I.e. same object opened with mode="r" and
mode="rw" will be actually opened two times and cached separately.
DeletionPool's Open() also doesn't follow symlinks.
Args:
urn: The urn to open.
aff4_type: If this parameter is set, we raise an IOError if
the object is not an instance of this type.
mode: The mode to open the file with.
Returns:
An AFF4Object instance.
Raises:
IOError: If the object is not of the required type.
"""
key = self._ObjectKey(urn, mode)
try:
obj = self._objects_cache[key]
except __HOLE__:
obj = FACTORY.Open(urn, mode=mode, follow_symlinks=False,
token=self._token)
self._objects_cache[key] = obj
if (aff4_type is not None and
not isinstance(obj, AFF4Object.classes[aff4_type])):
raise InstantiationError(
"Object %s is of type %s, but required_type is %s" % (
urn, obj.__class__.__name__, aff4_type))
return obj
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/DeletionPool.Open
|
1,047
|
def MultiOpen(self, urns, aff4_type=None, mode="r"):
"""Opens many urns efficiently, returning cached objects when possible."""
result = []
not_opened_urns = []
for urn in urns:
key = self._ObjectKey(urn, mode)
try:
result.append(self._objects_cache[key])
except __HOLE__:
not_opened_urns.append(urn)
if not_opened_urns:
objs = FACTORY.MultiOpen(not_opened_urns, follow_symlinks=False,
mode=mode, token=self._token)
for obj in objs:
result.append(obj)
key = self._ObjectKey(obj.urn, mode)
self._objects_cache[key] = obj
if aff4_type is not None:
type_checked_result = []
for obj in result:
if isinstance(obj, AFF4Object.classes[aff4_type]):
type_checked_result.append(obj)
return type_checked_result
else:
return result
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/DeletionPool.MultiOpen
|
1,048
|
def ListChildren(self, urn):
"""Lists children of a given urn. Resulting list is cached."""
result = self.MultiListChildren([urn])
try:
return result[urn]
except __HOLE__:
return []
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/DeletionPool.ListChildren
|
1,049
|
def MultiListChildren(self, urns):
"""Lists children of a bunch of given urns. Results are cached."""
result = {}
not_listed_urns = []
for urn in urns:
try:
result[urn] = self._children_lists_cache[urn]
except __HOLE__:
not_listed_urns.append(urn)
if not_listed_urns:
for urn, children in FACTORY.MultiListChildren(
not_listed_urns, token=self._token):
result[urn] = self._children_lists_cache[urn] = children
for urn in not_listed_urns:
self._children_lists_cache.setdefault(urn, [])
result.setdefault(urn, [])
return result
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/DeletionPool.MultiListChildren
|
1,050
|
def RecursiveMultiListChildren(self, urns):
"""Recursively lists given urns. Results are cached."""
result = {}
checked_urns = set()
not_cached_urns = []
urns_to_check = urns
while True:
found_children = []
for urn in urns_to_check:
try:
children = result[urn] = self._children_lists_cache[urn]
found_children.extend(children)
except __HOLE__:
not_cached_urns.append(urn)
checked_urns.update(urns_to_check)
urns_to_check = set(found_children) - checked_urns
if not urns_to_check:
break
for urn, children in FACTORY.RecursiveMultiListChildren(
not_cached_urns, token=self._token):
result[urn] = self._children_lists_cache[urn] = children
return result
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/DeletionPool.RecursiveMultiListChildren
|
1,051
|
@classmethod
def ParseAgeSpecification(cls, age):
"""Parses an aff4 age and returns a datastore age specification."""
try:
return (0, int(age))
except (ValueError, __HOLE__):
pass
if age == NEWEST_TIME:
return data_store.DB.NEWEST_TIMESTAMP
elif age == ALL_TIMES:
return data_store.DB.ALL_TIMESTAMPS
elif len(age) == 2:
start, end = age
return (int(start), int(end))
raise RuntimeError("Unknown age specification: %s" % age)
|
TypeError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/Factory.ParseAgeSpecification
|
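The accepted age shapes, illustrated (the integer timestamps are arbitrary microsecond values):

Factory.ParseAgeSpecification(1438696000000000)       # int -> (0, 1438696000000000)
Factory.ParseAgeSpecification((0, 1438696000000000))  # (start, end) -> (0, 1438696000000000)
Factory.ParseAgeSpecification(NEWEST_TIME)            # -> data_store.DB.NEWEST_TIMESTAMP
Factory.ParseAgeSpecification(ALL_TIMES)              # -> data_store.DB.ALL_TIMESTAMPS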
1,052
|
def GetAttributes(self, urns, ignore_cache=False, token=None,
age=NEWEST_TIME):
"""Retrieves all the attributes for all the urns."""
urns = set([utils.SmartUnicode(u) for u in urns])
if not ignore_cache:
for subject in list(urns):
key = self._MakeCacheInvariant(subject, token, age)
try:
yield subject, self.cache.Get(key)
urns.remove(subject)
except __HOLE__:
pass
# If there are any urns left we get them from the database.
if urns:
for subject, values in data_store.DB.MultiResolvePrefix(
urns, AFF4_PREFIXES, timestamp=self.ParseAgeSpecification(age),
token=token, limit=None):
# Ensure the values are sorted.
values.sort(key=lambda x: x[-1], reverse=True)
key = self._MakeCacheInvariant(subject, token, age)
self.cache.Put(key, values)
yield utils.SmartUnicode(subject), values
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/Factory.GetAttributes
|
1,053
|
def SetAttributes(self, urn, attributes, to_delete, add_child_index=True,
mutation_pool=None, sync=False, token=None):
"""Sets the attributes in the data store and update the cache."""
# Force a data_store lookup next.
try:
# Expire all entries in the cache for this urn (for all tokens, and
# timestamps)
self.cache.ExpirePrefix(utils.SmartStr(urn) + ":")
except __HOLE__:
pass
attributes[AFF4Object.SchemaCls.LAST] = [
rdfvalue.RDFDatetime().Now().SerializeToDataStore()]
to_delete.add(AFF4Object.SchemaCls.LAST)
if mutation_pool:
mutation_pool.MultiSet(urn, attributes, replace=False,
to_delete=to_delete)
else:
data_store.DB.MultiSet(urn, attributes, token=token,
replace=False, sync=sync, to_delete=to_delete)
if add_child_index:
self._UpdateChildIndex(urn, token, mutation_pool=mutation_pool)
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/Factory.SetAttributes
|
1,054
|
def _UpdateChildIndex(self, urn, token, mutation_pool=None):
"""Update the child indexes.
This function maintains the index for direct child relations. When we set
an AFF4 path, we always add an attribute like
index:dir/%(childname)s to its parent. This is written
asynchronously to its parent.
In order to query for all direct children of an AFF4 object, we then simply
get the attributes matching the regex index:dir/.+, which are the
direct children.
Args:
urn: The AFF4 object for which we update the index.
token: The token to use.
mutation_pool: An optional MutationPool object to write to. If not given,
the data_store is used directly.
"""
try:
# Create navigation aids by touching intermediate subject names.
while urn.Path() != "/":
basename = urn.Basename()
dirname = rdfvalue.RDFURN(urn.Dirname())
try:
self.intermediate_cache.Get(urn)
return
except __HOLE__:
attributes = {
# This updates the directory index.
"index:dir/%s" % utils.SmartStr(basename): [EMPTY_DATA],
}
# This is a performance optimization. On the root there is no point
# setting the last access time since it gets accessed all the time.
# TODO(user): Can we get rid of the index in the root node entirely?
# It's too big to query anyways...
if dirname != u"/":
attributes[AFF4Object.SchemaCls.LAST] = [
rdfvalue.RDFDatetime().Now().SerializeToDataStore()]
if mutation_pool:
mutation_pool.MultiSet(dirname, attributes, replace=True)
else:
data_store.DB.MultiSet(dirname, attributes,
token=token, replace=True, sync=False)
self.intermediate_cache.Put(urn, 1)
urn = dirname
except access_control.UnauthorizedAccess:
pass
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/Factory._UpdateChildIndex
|
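To make the index scheme concrete, a hedged illustration for one invented path:

# Writing aff4:/C.1000/fs/os touches each ancestor once on the way up:
#
#   subject aff4:/C.1000/fs  gets attribute  index:dir/os
#   subject aff4:/C.1000     gets attribute  index:dir/fs
#   subject aff4:/           gets attribute  index:dir/C.1000
#
# Listing the direct children of aff4:/C.1000 then reduces to fetching the
# attributes with prefix "index:dir/" on that single subject.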
1,055
|
def _DeleteChildFromIndex(self, urn, token):
try:
basename = urn.Basename()
dirname = rdfvalue.RDFURN(urn.Dirname())
try:
self.intermediate_cache.ExpireObject(urn.Path())
except __HOLE__:
pass
data_store.DB.DeleteAttributes(
dirname, ["index:dir/%s" % utils.SmartStr(basename)], token=token,
sync=False)
data_store.DB.MultiSet(dirname, {
AFF4Object.SchemaCls.LAST: [
rdfvalue.RDFDatetime().Now().SerializeToDataStore()],
}, token=token, replace=True, sync=False)
except access_control.UnauthorizedAccess:
pass
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/Factory._DeleteChildFromIndex
|
1,056
|
def MultiOpen(self, urns, mode="rw", ignore_cache=False, token=None,
aff4_type=None, age=NEWEST_TIME, follow_symlinks=True):
"""Opens a bunch of urns efficiently."""
if token is None:
token = data_store.default_token
if mode not in ["w", "r", "rw"]:
raise RuntimeError("Invalid mode %s" % mode)
symlinks = {}
for urn, values in self.GetAttributes(urns, token=token, age=age):
try:
obj = self.Open(urn, mode=mode, ignore_cache=ignore_cache, token=token,
local_cache={urn: values}, age=age,
follow_symlinks=False)
# We can't pass aff4_type to Open since it will raise on AFF4Symlinks.
# Setting it here, if needed, so that BadGetAttributeError checking
# works.
if aff4_type:
obj.aff4_type = aff4_type
if follow_symlinks and isinstance(obj, AFF4Symlink):
target = obj.Get(obj.Schema.SYMLINK_TARGET)
if target is not None:
symlinks[target] = obj.urn
elif aff4_type:
if isinstance(obj, AFF4Object.classes[aff4_type]):
yield obj
else:
yield obj
except __HOLE__:
pass
if symlinks:
for obj in self.MultiOpen(symlinks, mode=mode, ignore_cache=ignore_cache,
token=token, aff4_type=aff4_type, age=age):
obj.symlink_urn = symlinks[obj.urn]
yield obj
|
IOError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/Factory.MultiOpen
|
1,057
|
def Create(self, urn, aff4_type, mode="w", token=None, age=NEWEST_TIME,
ignore_cache=False, force_new_version=True,
object_exists=False, mutation_pool=None, transaction=None):
"""Creates the urn if it does not already exist, otherwise opens it.
If the urn exists and is of a different type, this will also promote it to
the specified type.
Args:
urn: The object to create.
aff4_type: The desired type for this object.
mode: The desired mode for this object.
token: The Security Token to use for opening this item.
age: The age policy used to build this object. Only makes sense when mode
has "r".
ignore_cache: Bypass the aff4 cache.
force_new_version: Forces the creation of a new object in the data_store.
object_exists: If we know the object already exists we can skip index
creation.
mutation_pool: An optional MutationPool object to write to. If not given,
the data_store is used directly.
transaction: For locked objects, a transaction is passed to the object.
Returns:
An AFF4 object of the desired type and mode.
Raises:
AttributeError: If the mode is invalid.
"""
if mode not in ["w", "r", "rw"]:
raise AttributeError("Invalid mode %s" % mode)
if token is None:
token = data_store.default_token
if urn is not None:
urn = rdfvalue.RDFURN(urn)
if "r" in mode:
# Check to see if an object already exists.
try:
existing = self.Open(
urn, mode=mode, token=token, age=age,
ignore_cache=ignore_cache, transaction=transaction)
result = existing.Upgrade(aff4_type)
# We can't pass aff4_type into the Open call since it will raise with a
# type mismatch. We set it like this so BadGetAttributeError checking
# works.
if aff4_type:
result.aff4_type = aff4_type
if force_new_version and existing.Get(result.Schema.TYPE) != aff4_type:
result.ForceNewVersion()
return result
except __HOLE__:
pass
# Object does not exist, just make it.
cls = AFF4Object.classes[str(aff4_type)]
result = cls(urn, mode=mode, token=token, age=age, aff4_type=aff4_type,
object_exists=object_exists, mutation_pool=mutation_pool,
transaction=transaction)
result.Initialize()
if force_new_version:
result.ForceNewVersion()
return result
|
IOError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/Factory.Create
|
1,058
|
def MultiDelete(self, urns, token=None):
"""Drop all the information about given objects.
DANGEROUS! This recursively deletes all objects contained within the
specified URN.
Args:
urns: Urns of objects to remove.
token: The Security Token to use for opening this item.
Raises:
RuntimeError: If one of the urns is too short. This is a safety check to
ensure the root is not removed.
"""
urns = [rdfvalue.RDFURN(urn) for urn in urns]
if token is None:
token = data_store.default_token
for urn in urns:
if urn.Path() == "/":
raise RuntimeError("Can't delete root URN. Please enter a valid URN")
deletion_pool = DeletionPool(token=token)
for urn in urns:
deletion_pool.MarkForDeletion(urn)
marked_root_urns = deletion_pool.root_urns_for_deletion
marked_urns = deletion_pool.urns_for_deletion
logging.debug(
u"Found %d objects to remove when removing %s",
len(marked_urns), urns)
logging.debug(
u"Removing %d root objects when removing %s: %s",
len(marked_root_urns), urns, marked_root_urns)
for root in marked_root_urns:
# Only the index of the parent object should be updated. Everything
# below the target object (along with indexes) is going to be
# deleted.
self._DeleteChildFromIndex(root, token)
for urn_to_delete in marked_urns:
try:
self.intermediate_cache.ExpireObject(urn_to_delete.Path())
except __HOLE__:
pass
data_store.DB.DeleteSubject(urn_to_delete, token=token, sync=False)
logging.debug(u"%s deleted from data store", urn_to_delete)
# Ensure this is removed from the cache as well.
self.Flush()
logging.debug("Removed %d objects", len(marked_urns))
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/Factory.MultiDelete
|
1,059
|
def __init__(self, predicate, attribute_type=rdfvalue.RDFString,
description="", name=None, _copy=False, default=None, index=None,
versioned=True, lock_protected=False,
creates_new_object_version=True):
"""Constructor.
Args:
predicate: The name of this attribute - must look like a URL
(e.g. aff4:contains). Will be used to store the attribute.
attribute_type: The RDFValue type of this attribute.
description: A one line description of what this attribute represents.
name: A human readable name for the attribute to be used in filters.
_copy: Used internally to create a copy of this object without
registering.
default: A default value will be returned if the attribute is not set on
an object. This can be a constant or a callback which receives the fd
itself as an arg.
index: The name of the index to use for this attribute. If None, the
attribute will not be indexed.
versioned: Should this attribute be versioned? Non-versioned attributes
always overwrite other versions of the same attribute.
lock_protected: If True, this attribute may only be set if the object was
opened via OpenWithLock().
creates_new_object_version: If this is set, a write to this attribute
will also write a new version of the parent attribute. This should be
False for attributes where lots of entries are collected like logs.
"""
self.name = name
self.predicate = predicate
self.attribute_type = attribute_type
self.description = description
self.default = default
self.index = index
self.versioned = versioned
self.lock_protected = lock_protected
self.creates_new_object_version = creates_new_object_version
# Field names can refer to a specific component of an attribute
self.field_names = []
if not _copy:
# Check the attribute registry for conflicts
try:
old_attribute = Attribute.PREDICATES[predicate]
if old_attribute.attribute_type != attribute_type:
msg = "Attribute %s defined with conflicting types (%s, %s)" % (
predicate, old_attribute.attribute_type.__class__.__name__,
attribute_type.__class__.__name__)
logging.error(msg)
raise RuntimeError(msg)
except __HOLE__:
pass
# Register
self.PREDICATES[predicate] = self
if name:
self.NAMES[name] = self
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/Attribute.__init__
|
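Attribute.__init__ reads a KeyError from the PREDICATES lookup as "first registration"; only an existing entry triggers the conflicting-type check. A minimal sketch of that registry idiom (the dict and function names are illustrative, not the GRR API):

PREDICATES = {}

def register(predicate, attribute_type):
    try:
        old_type = PREDICATES[predicate]
        if old_type is not attribute_type:
            raise RuntimeError(
                "Attribute %s defined with conflicting types" % predicate)
    except KeyError:
        pass   # nothing registered under this predicate yet
    PREDICATES[predicate] = attribute_type

register("aff4:size", int)
register("aff4:size", int)   # same type again is fine; a different one raises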
1,060
|
@classmethod
def GetAttributeByName(cls, name):
# Support attribute names with a . in them:
try:
if "." in name:
name, field = name.split(".", 1)
return cls.NAMES[name][field]
return cls.NAMES[name]
except __HOLE__:
raise AttributeError("Invalid attribute %s" % name)
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/Attribute.GetAttributeByName
|
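GetAttributeByName translates the dict's KeyError into the AttributeError its callers expect, so failures surface in domain vocabulary rather than implementation detail. The same translation in isolation (NAMES here is a hypothetical table):

NAMES = {"size": "aff4:size", "type": "aff4:type"}

def get_attribute_by_name(name):
    try:
        return NAMES[name]
    except KeyError:
        # Re-raise in the terms the caller reasons about.
        raise AttributeError("Invalid attribute %s" % name)

assert get_attribute_by_name("size") == "aff4:size"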
1,061
|
def GetRDFValueType(self):
"""Returns this attribute's RDFValue class."""
result = self.attribute_type
for field_name in self.field_names:
# Support the new semantic protobufs.
if issubclass(result, rdf_structs.RDFProtoStruct):
try:
result = result.type_infos.get(field_name).type
except __HOLE__:
raise AttributeError("Invalid attribute %s" % field_name)
else:
# TODO(user): Remove and deprecate.
# Support for the old RDFProto.
result = result.rdf_map.get(field_name, rdfvalue.RDFString)
return result
|
AttributeError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/Attribute.GetRDFValueType
|
1,062
|
def Validate(self):
try:
Attribute.GetAttributeByName(self._value)
except (AttributeError, __HOLE__):
raise type_info.TypeValueError(
"Value %s is not an AFF4 attribute name" % self._value)
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/AFF4Attribute.Validate
|
1,063
|
def __init__(self, urn, mode="r", parent=None, clone=None, token=None,
local_cache=None, age=NEWEST_TIME, follow_symlinks=True,
aff4_type=None, object_exists=False, mutation_pool=None,
transaction=None):
if urn is not None:
urn = rdfvalue.RDFURN(urn)
self.urn = urn
self.mode = mode
self.parent = parent
self.token = token
self.age_policy = age
self.follow_symlinks = follow_symlinks
self.lock = utils.PickleableLock()
self.mutation_pool = mutation_pool
self.transaction = transaction
if transaction and mutation_pool:
raise ValueError("Cannot use a locked object with a mutation pool!")
# If object was opened through a symlink, "symlink_urn" attribute will
# contain a symlink urn.
self.symlink_urn = None
# The object already exists in the data store - we do not need to update
# indexes.
self.object_exists = object_exists
# This flag will be set whenever an attribute is changed that has the
# creates_new_object_version flag set.
self._new_version = False
# Mark out attributes to delete when Flushing()
self._to_delete = set()
# If an explicit aff4 type is requested we store it here so we know to
# verify aff4 attributes exist in the schema at Get() time.
self.aff4_type = aff4_type
# We maintain two attribute caches - self.synced_attributes reflects the
# attributes which are synced with the data_store, while self.new_attributes
# are new attributes which still need to be flushed to the data_store. When
# this object is instantiated we populate self.synced_attributes with the
# data_store, while the finish method flushes new changes.
if clone is not None:
if isinstance(clone, dict):
# Just use these as the attributes, do not go to the data store. This is
# a quick way of creating an object with data which was already fetched.
self.new_attributes = {}
self.synced_attributes = clone
elif isinstance(clone, AFF4Object):
# We were given another object to clone - we do not need to access the
# data_store now.
self.new_attributes = clone.new_attributes.copy()
self.synced_attributes = clone.synced_attributes.copy()
else:
raise RuntimeError("Cannot clone from %s." % clone)
else:
self.new_attributes = {}
self.synced_attributes = {}
if "r" in mode:
if local_cache:
try:
for attribute, value, ts in local_cache[utils.SmartUnicode(urn)]:
self.DecodeValueFromAttribute(attribute, value, ts)
except __HOLE__:
pass
else:
# Populate the caches from the data store.
for urn, values in FACTORY.GetAttributes([urn], age=age,
token=self.token):
for attribute_name, value, ts in values:
self.DecodeValueFromAttribute(attribute_name, value, ts)
if clone is None:
self.Initialize()
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/AFF4Object.__init__
|
1,064
|
def DecodeValueFromAttribute(self, attribute_name, value, ts):
"""Given a serialized value, decode the attribute.
Only attributes which have been previously defined are permitted.
Args:
attribute_name: The string name of the attribute.
value: The serialized attribute value.
ts: The timestamp of this attribute.
"""
try:
# Get the Attribute object from our schema.
attribute = Attribute.PREDICATES[attribute_name]
cls = attribute.attribute_type
self._AddAttributeToCache(attribute, LazyDecoder(cls, value, ts),
self.synced_attributes)
except __HOLE__:
pass
# TODO(user): uncomment as soon as some messages-flood protection
# mechanisms are implemented in logging.debug().
# if not attribute_name.startswith("index:"):
# logging.debug("Attribute %s not defined, skipping.", attribute_name)
except (ValueError, rdfvalue.DecodeError):
logging.debug("%s: %s invalid encoding. Skipping.",
self.urn, attribute_name)
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/AFF4Object.DecodeValueFromAttribute
|
1,065
|
def Get(self, attribute, default=None):
"""Gets the attribute from this object."""
if attribute is None:
return default
# Allow the user to specify the attribute by name.
elif isinstance(attribute, str):
attribute = Attribute.GetAttributeByName(attribute)
# We can't read attributes from the data_store unless read mode was
# specified. It is ok to read new attributes though.
if "r" not in self.mode and (attribute not in self.new_attributes and
attribute not in self.synced_attributes):
raise IOError(
"Fetching %s from object not opened for reading." % attribute)
for result in self.GetValuesForAttribute(attribute, only_one=True):
try:
# The attribute may be a naked string or int - i.e. not an RDFValue at
# all.
result.attribute_instance = attribute
except __HOLE__:
pass
return result
return attribute.GetDefault(self, default)
|
AttributeError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/AFF4Object.Get
|
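The AttributeError pass in Get() tolerates "naked" results, plain ints or strings that cannot carry extra attributes. A self-contained sketch of that tolerance:

def annotate(result, attribute):
    try:
        result.attribute_instance = attribute   # plain ints/strings refuse this
    except AttributeError:
        pass                                    # naked value: leave it as-is
    return result

class Box(object):
    pass

annotate(Box(), "aff4:size")   # annotated in place
annotate(42, "aff4:size")      # int has no writable attributes; ignored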
1,066
|
def _GetChunkForWriting(self, chunk):
"""Opens a chunk for writing, creating a new one if it doesn't exist yet."""
try:
chunk = self.chunk_cache.Get(chunk)
chunk.dirty = True
return chunk
except __HOLE__:
pass
try:
chunk = self._ReadChunk(chunk)
chunk.dirty = True
return chunk
except KeyError:
pass
fd = StringIO.StringIO()
fd.chunk = chunk
fd.dirty = True
self.chunk_cache.Put(chunk, fd)
return fd
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/AFF4ImageBase._GetChunkForWriting
|
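_GetChunkForWriting is a three-tier lookup: chunk cache, then the data store, then a brand-new chunk. A hedged sketch with dicts standing in for the cache and the store:

cache = {}
store = {0: "chunk-zero"}

def get_chunk_for_writing(chunk):
    try:
        return cache[chunk]                 # tier 1: in-memory cache
    except KeyError:
        pass
    try:
        data = cache[chunk] = store[chunk]  # tier 2: backing store
        return data
    except KeyError:
        pass
    data = cache[chunk] = ""                # tier 3: create an empty chunk
    return data

assert get_chunk_for_writing(0) == "chunk-zero"
assert get_chunk_for_writing(7) == ""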
1,067
|
def _GetChunkForReading(self, chunk):
"""Returns the relevant chunk from the datastore and reads ahead."""
try:
return self.chunk_cache.Get(chunk)
except KeyError:
pass
# We don't have this chunk already cached. The most common read
# access pattern is contiguous reading so since we have to go to
# the data store already, we read ahead to reduce round trips.
missing_chunks = []
for chunk_number in range(chunk, chunk + 10):
if chunk_number not in self.chunk_cache:
missing_chunks.append(chunk_number)
self._ReadChunks(missing_chunks)
# This should work now - otherwise we just give up.
try:
return self.chunk_cache.Get(chunk)
except __HOLE__:
raise ChunkNotFoundError("Cannot open chunk %s" % chunk)
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/lib/aff4.py/AFF4ImageBase._GetChunkForReading
|
1,068
|
def load_performance_check_config():
config_file = os.path.join(PATH_TO_SYSTEM_PAASTA_CONFIG_DIR, 'performance-check.json')
try:
with open(config_file) as f:
return json.load(f)
except __HOLE__ as e:
print "No performance check config to use. Safely bailing."
print e.strerror
sys.exit(0)
|
IOError
|
dataset/ETHPy150Open Yelp/paasta/paasta_tools/cli/cmds/performance_check.py/load_performance_check_config
|
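load_performance_check_config treats a missing file as "nothing to check" and bails out cleanly instead of crashing. The same shape as a runnable sketch (the path is illustrative, and sys.exit(0) is replaced by returning None so the snippet runs inline):

import json

def load_optional_config(path):
    try:
        with open(path) as f:
            return json.load(f)
    except IOError as e:
        # A missing config is expected: report it and bail gracefully.
        print("No config at %s (%s). Safely bailing." % (path, e.strerror))
        return None

load_optional_config("/nonexistent/performance-check.json")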
1,069
|
def _namespaces(elem, encoding, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def encode(text):
return text.encode(encoding)
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].rsplit("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = encode("%s:%s" % (prefix, tag))
else:
qnames[qname] = encode(tag) # default element
else:
if default_namespace:
# FIXME: can this be handled in XML 1.0?
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = encode(qname)
except __HOLE__:
_raise_serialization_error(qname)
# populate qname and namespaces table
try:
iterate = elem.iter
except AttributeError:
iterate = elem.getiterator # cET compatibility
for elem in iterate():
tag = elem.tag
if isinstance(tag, QName):
if tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, basestring):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
|
TypeError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/xml/etree/ElementTree.py/_namespaces
|
1,070
|
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (__HOLE__, AttributeError):
_raise_serialization_error(text)
|
TypeError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/xml/etree/ElementTree.py/_encode
|
1,071
|
def _escape_cdata(text, encoding):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 characters, or so. Assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text.encode(encoding, "xmlcharrefreplace")
except (__HOLE__, AttributeError):
_raise_serialization_error(text)
|
TypeError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/xml/etree/ElementTree.py/_escape_cdata
|
1,072
|
def _escape_attrib(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, __HOLE__):
_raise_serialization_error(text)
|
AttributeError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/xml/etree/ElementTree.py/_escape_attrib
|
1,073
|
def _escape_attrib_html(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text.encode(encoding, "xmlcharrefreplace")
except (__HOLE__, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return An encoded string containing the XML data.
# @defreturn string
|
TypeError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/xml/etree/ElementTree.py/_escape_attrib_html
|
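All three escape helpers above end with encode(encoding, "xmlcharrefreplace"), which turns characters the target codec cannot represent into numeric character references instead of raising UnicodeEncodeError. A small demonstration:

text = u"caf\u00e9 <b>"
escaped = text.replace("&", "&amp;").replace("<", "&lt;")
print(escaped.encode("ascii", "xmlcharrefreplace"))
# -> caf&#233; &lt;b>   (the e-acute survives as a character reference)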
1,074
|
def __init__(self, source, events, parser, close_source=False):
self._file = source
self._close_file = close_source
self._events = []
self._index = 0
self._error = None
self.root = self._root = None
self._parser = parser
# wire up the parser for event reporting
parser = self._parser._parser
append = self._events.append
if events is None:
events = ["end"]
for event in events:
if event == "start":
try:
parser.ordered_attributes = 1
parser.specified_attributes = 1
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start_list):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
except __HOLE__:
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
elif event == "end":
def handler(tag, event=event, append=append,
end=self._parser._end):
append((event, end(tag)))
parser.EndElementHandler = handler
elif event == "start-ns":
def handler(prefix, uri, event=event, append=append):
try:
uri = (uri or "").encode("ascii")
except UnicodeError:
pass
append((event, (prefix or "", uri or "")))
parser.StartNamespaceDeclHandler = handler
elif event == "end-ns":
def handler(prefix, event=event, append=append):
append((event, None))
parser.EndNamespaceDeclHandler = handler
else:
raise ValueError("unknown event %r" % event)
|
AttributeError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/xml/etree/ElementTree.py/_IterParseIterator.__init__
|
1,075
|
def next(self):
while 1:
try:
item = self._events[self._index]
self._index += 1
return item
except __HOLE__:
pass
if self._error:
e = self._error
self._error = None
raise e
if self._parser is None:
self.root = self._root
if self._close_file:
self._file.close()
raise StopIteration
# load event buffer
del self._events[:]
self._index = 0
data = self._file.read(16384)
if data:
try:
self._parser.feed(data)
except SyntaxError as exc:
self._error = exc
else:
self._root = self._parser.close()
self._parser = None
|
IndexError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/xml/etree/ElementTree.py/_IterParseIterator.next
|
1,076
|
def __init__(self, html=0, target=None, encoding=None):
try:
from xml.parsers import expat
except __HOLE__:
try:
import pyexpat as expat
except ImportError:
raise ImportError(
"No module named expat; use SimpleXMLTreeBuilder instead"
)
parser = expat.ParserCreate(encoding, "}")
if target is None:
target = TreeBuilder()
# underscored names are provided for compatibility only
self.parser = self._parser = parser
self.target = self._target = target
self._error = expat.error
self._names = {} # name memo cache
# callbacks
parser.DefaultHandlerExpand = self._default
parser.StartElementHandler = self._start
parser.EndElementHandler = self._end
parser.CharacterDataHandler = self._data
# optional callbacks
parser.CommentHandler = self._comment
parser.ProcessingInstructionHandler = self._pi
# let expat do the buffering, if supported
try:
self._parser.buffer_text = 1
except AttributeError:
pass
# use new-style attribute handling, if supported
try:
self._parser.ordered_attributes = 1
self._parser.specified_attributes = 1
parser.StartElementHandler = self._start_list
except AttributeError:
pass
self._doctype = None
self.entity = {}
try:
self.version = "Expat %d.%d.%d" % expat.version_info
except AttributeError:
pass # unknown
|
ImportError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/xml/etree/ElementTree.py/XMLParser.__init__
|
1,077
|
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
try:
name = self._names[key]
except __HOLE__:
name = key
if "}" in name:
name = "{" + name
self._names[key] = name = self._fixtext(name)
return name
|
KeyError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/xml/etree/ElementTree.py/XMLParser._fixname
|
1,078
|
def _comment(self, data):
try:
comment = self.target.comment
except __HOLE__:
pass
else:
return comment(self._fixtext(data))
|
AttributeError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/xml/etree/ElementTree.py/XMLParser._comment
|
1,079
|
def _pi(self, target, data):
try:
pi = self.target.pi
except __HOLE__:
pass
else:
return pi(self._fixtext(target), self._fixtext(data))
|
AttributeError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/xml/etree/ElementTree.py/XMLParser._pi
|
1,080
|
def _default(self, text):
prefix = text[:1]
if prefix == "&":
# deal with undefined entities
try:
self.target.data(self.entity[text[1:-1]])
except __HOLE__:
from xml.parsers import expat
err = expat.error(
"undefined entity %s: line %d, column %d" %
(text, self._parser.ErrorLineNumber,
self._parser.ErrorColumnNumber)
)
err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
err.lineno = self._parser.ErrorLineNumber
err.offset = self._parser.ErrorColumnNumber
raise err
elif prefix == "<" and text[:9] == "<!DOCTYPE":
self._doctype = [] # inside a doctype declaration
elif self._doctype is not None:
# parse doctype contents
if prefix == ">":
self._doctype = None
return
text = text.strip()
if not text:
return
self._doctype.append(text)
n = len(self._doctype)
if n > 2:
type = self._doctype[1]
if type == "PUBLIC" and n == 4:
name, type, pubid, system = self._doctype
elif type == "SYSTEM" and n == 3:
name, type, system = self._doctype
pubid = None
else:
return
if pubid:
pubid = pubid[1:-1]
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
elif self.doctype is not self._XMLParser__doctype:
# warn about deprecated call
self._XMLParser__doctype(name, pubid, system[1:-1])
self.doctype(name, pubid, system[1:-1])
self._doctype = None
##
# (Deprecated) Handles a doctype declaration.
#
# @param name Doctype name.
# @param pubid Public identifier.
# @param system System identifier.
|
KeyError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/xml/etree/ElementTree.py/XMLParser._default
|
1,081
|
def run(self, lib, opts, args):
file_path = opts.output
file_format = self.config['default_format'].get(str)
file_mode = 'a' if opts.append else 'w'
format_options = self.config[file_format]['formatting'].get(dict)
export_format = ExportFormat.factory(
file_format, **{
'file_path': file_path,
'file_mode': file_mode
}
)
items = []
data_collector = library_data if opts.library else tag_data
included_keys = []
for keys in opts.included_keys:
included_keys.extend(keys.split(','))
key_filter = make_key_filter(included_keys)
for data_emitter in data_collector(lib, ui.decargs(args)):
try:
data, item = data_emitter()
except (mediafile.UnreadableFileError, __HOLE__) as ex:
self._log.error(u'cannot read file: {0}', ex)
continue
data = key_filter(data)
items += [data]
export_format.export(items, **format_options)
|
IOError
|
dataset/ETHPy150Open beetbox/beets/beetsplug/export.py/ExportPlugin.run
|
1,082
|
def file(self, address):
""" Return a readable file-like object for the specified address. """
# Do the import here 'cos I'm not sure how much this will actually
# be used.
from .._compat import urlopen, HTTPError
try:
f = urlopen('http://' + address)
except __HOLE__:
raise NoSuchResourceError('http://' + address)
return f
#### EOF ######################################################################
|
HTTPError
|
dataset/ETHPy150Open enthought/envisage/envisage/resource/http_resource_protocol.py/HTTPResourceProtocol.file
|
1,083
|
def cast_to_route_factory(in_arg):
from meta import MetaApplication
if isinstance(in_arg, BaseRoute):
return in_arg
elif isinstance(in_arg, Sequence):
try:
if in_arg[1] is MetaApplication:
raise ValueError(_meta_exc_msg)
if isinstance(in_arg[1], BaseApplication):
return SubApplication(*in_arg)
if callable(in_arg[1]):
return Route(*in_arg)
except __HOLE__:
pass
raise TypeError('Could not create route from %r' % (in_arg,))
|
TypeError
|
dataset/ETHPy150Open mahmoud/clastic/clastic/application.py/cast_to_route_factory
|
1,084
|
def __init__(self, routes=None, resources=None, middlewares=None,
render_factory=None, error_handler=None, **kwargs):
self.debug = kwargs.pop('debug', None)
self.slash_mode = kwargs.pop('slash_mode', S_REDIRECT)
if kwargs:
raise TypeError('unexpected keyword args: %r' % kwargs.keys())
self.resources = dict(resources or {})
resource_conflicts = [r for r in RESERVED_ARGS if r in self.resources]
if resource_conflicts:
raise NameError('resource names conflict with builtins: %r' %
resource_conflicts)
try:
self.middlewares = list(middlewares or [])
except __HOLE__:
# TODO: tmp message until 0.6 or so
raise TypeError('expected an iterable for middlewares (as of '
'Clastic 0.4, middlewares and render_factory '
'swapped argument position)')
check_middlewares(self.middlewares)
self.render_factory = render_factory
self.set_error_handler(error_handler)
routes = routes or []
self.routes = []
self._null_route = NullRoute()
self._null_route.bind(self)
for entry in routes:
self.add(entry)
|
TypeError
|
dataset/ETHPy150Open mahmoud/clastic/clastic/application.py/BaseApplication.__init__
|
1,085
|
def set_error_handler(self, error_handler=None):
if error_handler is None:
if self.debug:
deh_type = self.default_debug_error_handler_type
else:
deh_type = self.default_error_handler_type
error_handler = deh_type()
check_render_error(error_handler.render_error, self.resources)
wsgi_wrapper = getattr(error_handler, 'wsgi_wrapper', None)
if wsgi_wrapper is None:
pass # no wsgi_wrapper, no problem
elif not callable(wsgi_wrapper):
raise TypeError('expected error_handler.wsgi_wrapper to be'
' callable or None, not %r' % (wsgi_wrapper,))
else:
wrapped_wsgi = wsgi_wrapper(self._dispatch_wsgi)
try:
check_valid_wsgi(wrapped_wsgi)
except __HOLE__ as te:
raise TypeError('expected valid WSGI callable from error'
' handler (%r) WSGI wrapper (%r), instead'
' got issue: %r'
% (error_handler, wsgi_wrapper, te))
self._dispatch_wsgi = wrapped_wsgi
self.error_handler = error_handler
|
TypeError
|
dataset/ETHPy150Open mahmoud/clastic/clastic/application.py/BaseApplication.set_error_handler
|
1,086
|
def make_tag_decorator(known_tags):
"""
Create a decorator allowing tests to be tagged with the *known_tags*.
"""
def tag(*tags):
"""
Tag a test method with the given tags.
Can be used in conjunction with the --tags command-line argument
for runtests.py.
"""
for t in tags:
if t not in known_tags:
raise ValueError("unknown tag: %r" % (t,))
def decorate(func):
if (not callable(func) or isinstance(func, type)
or not func.__name__.startswith('test_')):
raise TypeError("@tag(...) should be used on test methods")
try:
s = func.tags
except __HOLE__:
s = func.tags = set()
s.update(tags)
return func
return decorate
return tag
|
AttributeError
|
dataset/ETHPy150Open numba/numba/numba/testing/main.py/make_tag_decorator
|
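make_tag_decorator lazily creates the .tags set on a test function: the first access raises AttributeError, which is the "not yet tagged" signal. The idiom on its own:

def test_example():
    pass

def add_tags(func, *tags):
    try:
        s = func.tags              # AttributeError on the first tagging
    except AttributeError:
        s = func.tags = set()      # create the set lazily
    s.update(tags)
    return func

add_tags(test_example, "slow")
add_tags(test_example, "cuda")
assert test_example.tags == {"slow", "cuda"}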
1,087
|
def _choose_tagged_tests(tests, tags):
"""
Select tests that are tagged with at least one of the given tags.
"""
selected = []
tags = set(tags)
for test in _flatten_suite(tests):
assert isinstance(test, unittest.TestCase)
func = getattr(test, test._testMethodName)
try:
# Look up the method's underlying function (Python 2)
func = func.im_func
except AttributeError:
pass
try:
if func.tags & tags:
selected.append(test)
except __HOLE__:
# Test method doesn't have any tags
pass
return unittest.TestSuite(selected)
|
AttributeError
|
dataset/ETHPy150Open numba/numba/numba/testing/main.py/_choose_tagged_tests
|
1,088
|
def _refleak_cleanup():
# Collect cyclic trash and read memory statistics immediately after.
func1 = sys.getallocatedblocks
try:
func2 = sys.gettotalrefcount
except __HOLE__:
func2 = lambda: 42
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
sys._clear_type_cache()
# This also clears the various internal CPython freelists.
gc.collect()
return func1(), func2()
|
AttributeError
|
dataset/ETHPy150Open numba/numba/numba/testing/main.py/_refleak_cleanup
|
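sys.gettotalrefcount exists only on debug builds of CPython, so _refleak_cleanup probes for it and swaps in a constant stub elsewhere. The probe in isolation:

import sys

try:
    get_refcount = sys.gettotalrefcount   # debug builds only
except AttributeError:
    get_refcount = lambda: 42             # harmless stub on release builds

print(get_refcount())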
1,089
|
def addSuccess(self, test):
try:
rc_deltas, alloc_deltas = self._huntLeaks(test)
except __HOLE__:
# Test failed when repeated
assert not self.wasSuccessful()
return
# These checkers return False on success, True on failure
def check_rc_deltas(deltas):
return any(deltas)
def check_alloc_deltas(deltas):
# At least 1/3rd of 0s
if 3 * deltas.count(0) < len(deltas):
return True
# Nothing else than 1s, 0s and -1s
if not set(deltas) <= set((1, 0, -1)):
return True
return False
failed = False
for deltas, item_name, checker in [
(rc_deltas, 'references', check_rc_deltas),
(alloc_deltas, 'memory blocks', check_alloc_deltas)]:
if checker(deltas):
msg = '%s leaked %s %s, sum=%s' % (
test, deltas, item_name, sum(deltas))
failed = True
try:
raise ReferenceLeakError(msg)
except Exception:
exc_info = sys.exc_info()
if self.showAll:
self.stream.write("%s = %r " % (item_name, deltas))
self.addFailure(test, exc_info)
if not failed:
super(RefleakTestResult, self).addSuccess(test)
|
AssertionError
|
dataset/ETHPy150Open numba/numba/numba/testing/main.py/RefleakTestResult.addSuccess
|
1,090
|
def _run_parallel_tests(self, result, pool, child_runner):
remaining_ids = set(t.id() for t in self._ptests)
it = pool.imap_unordered(child_runner, self._ptests)
while True:
try:
child_result = it.__next__(self.timeout)
except __HOLE__:
return
except TimeoutError as e:
# Diagnose the names of unfinished tests
msg = ("%s [unfinished tests: %s]"
% (str(e), ", ".join(map(repr, sorted(remaining_ids))))
)
e.args = (msg,) + e.args[1:]
raise e
result.add_results(child_result)
remaining_ids.discard(child_result.test_id)
if child_result.shouldStop:
result.shouldStop = True
return
|
StopIteration
|
dataset/ETHPy150Open numba/numba/numba/testing/main.py/ParallelTestRunner._run_parallel_tests
|
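The parallel runner drains pool.imap_unordered by hand so each next() call can carry a timeout; StopIteration is the normal "no more results" exit, not an error. The same drain loop over a plain iterator:

it = iter(["r1", "r2", "r3"])
results = []
while True:
    try:
        results.append(next(it))   # the real code passes a timeout here
    except StopIteration:
        break                      # iterator exhausted: normal exit

assert results == ["r1", "r2", "r3"]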
1,091
|
def _get_db_name(db_settings, suffix):
"This provides the default test db name that Django uses."
from django import VERSION as DJANGO_VERSION
name = None
try:
if DJANGO_VERSION > (1, 7):
name = db_settings['TEST']['NAME']
elif DJANGO_VERSION < (1, 7):
name = db_settings['TEST_NAME']
except __HOLE__:
pass
if not name:
if db_settings['ENGINE'] == 'django.db.backends.sqlite3':
return ':memory:'
else:
name = 'test_' + db_settings['NAME']
if suffix:
name = '%s_%s' % (name, suffix)
return name
|
KeyError
|
dataset/ETHPy150Open pytest-dev/pytest-django/pytest_django/db_reuse.py/_get_db_name
|
1,092
|
def _parse_int(num):
try:
return num and int(num)
except __HOLE__:
pass
|
ValueError
|
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/api/viewsets/briefcase_api.py/_parse_int
|
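_parse_int turns unparseable input into an implicit None while letting falsy values (None, '') short-circuit past int() unchanged. Its behaviour, spelled out:

def parse_int(num):
    try:
        return num and int(num)   # '' and None never reach int()
    except ValueError:
        pass                      # garbage input: fall through to None

assert parse_int("42") == 42
assert parse_int("4x2") is None
assert parse_int("") == ""        # falsy input is returned as-is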
1,093
|
def GetVersionObject():
"""Gets the version of the SDK by parsing the VERSION file.
Returns:
A Yaml object or None if the VERSION file does not exist.
"""
version_filename = os.path.join(os.path.dirname(google.appengine.__file__),
VERSION_FILE)
try:
version_fh = open(version_filename)
except __HOLE__:
logging.error('Could not find version file at %s', version_filename)
return None
try:
version = yaml.safe_load(version_fh)
finally:
version_fh.close()
return version
|
IOError
|
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/sdk_update_checker.py/GetVersionObject
|
1,094
|
def CheckForUpdates(self):
"""Queries the server for updates and nags the user if appropriate.
Queries the server for the latest SDK version at the same time reporting
the local SDK version. The server will respond with a yaml document
containing the fields:
'release': The name of the release (e.g. 1.2).
'timestamp': The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
'api_versions': A list of api_version strings (e.g. ['1', 'beta']).
We will nag the user with increasing severity if:
- There is a new release.
- There is a new release with a new api_version.
- There is a new release that does not support an api_version named in
a configuration in self.configs.
"""
version = self._ParseVersionFile()
if version is None:
logging.info('Skipping update check')
return
logging.info('Checking for updates to the SDK.')
responses = {}
try:
for runtime in self.runtimes:
responses[runtime] = yaml.safe_load(self.rpcserver.Send(
'/api/updatecheck',
timeout=UPDATE_CHECK_TIMEOUT,
release=version['release'],
timestamp=version['timestamp'],
api_versions=version['api_versions'],
runtime=runtime))
except (urllib2.URLError, socket.error, ssl.SSLError), e:
logging.info('Update check failed: %s', e)
return
try:
latest = sorted(responses.values(), reverse=True,
key=lambda release: _VersionList(release['release']))[0]
except ValueError:
logging.warn('Could not parse this release version')
if version['release'] == latest['release']:
logging.info('The SDK is up to date.')
return
try:
this_release = _VersionList(version['release'])
except __HOLE__:
logging.warn('Could not parse this release version (%r)',
version['release'])
else:
try:
advertised_release = _VersionList(latest['release'])
except ValueError:
logging.warn('Could not parse advertised release version (%r)',
latest['release'])
else:
if this_release > advertised_release:
logging.info('This SDK release is newer than the advertised release.')
return
for runtime, response in responses.items():
api_versions = _GetSupportedApiVersions(response, runtime)
obsolete_versions = sorted(
self.runtime_to_api_version[runtime] - set(api_versions))
if len(obsolete_versions) == 1:
self._Nag(
'The api version you are using (%s) is obsolete! You should\n'
'upgrade your SDK and test that your code works with the new\n'
'api version.' % obsolete_versions[0],
response, version, force=True)
elif obsolete_versions:
self._Nag(
'The api versions you are using (%s) are obsolete! You should\n'
'upgrade your SDK and test that your code works with the new\n'
'api version.' % obsolete_versions,
response, version, force=True)
deprecated_versions = sorted(
self.runtime_to_api_version[runtime].intersection(api_versions[:-1]))
if len(deprecated_versions) == 1:
self._Nag(
'The api version you are using (%s) is deprecated. You should\n'
'upgrade your SDK to try the new functionality.' %
deprecated_versions[0], response, version)
elif deprecated_versions:
self._Nag(
'The api versions you are using (%s) are deprecated. You should\n'
'upgrade your SDK to try the new functionality.' %
deprecated_versions, response, version)
self._Nag('There is a new release of the SDK available.',
latest, version)
|
ValueError
|
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/sdk_update_checker.py/SDKUpdateChecker.CheckForUpdates
|
1,095
|
def _ParseNagFile(self):
"""Parses the nag file.
Returns:
A NagFile if the file was present else None.
"""
nag_filename = SDKUpdateChecker.MakeNagFilename()
try:
fh = open(nag_filename)
except __HOLE__:
return None
try:
nag = NagFile.Load(fh)
finally:
fh.close()
return nag
|
IOError
|
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/sdk_update_checker.py/SDKUpdateChecker._ParseNagFile
|
1,096
|
def _WriteNagFile(self, nag):
"""Writes the NagFile to the user's nag file.
If the destination path does not exist, this method will log an error
and fail silently.
Args:
nag: The NagFile to write.
"""
nagfilename = SDKUpdateChecker.MakeNagFilename()
try:
fh = open(nagfilename, 'w')
try:
fh.write(nag.ToYAML())
finally:
fh.close()
except (OSError, __HOLE__), e:
logging.error('Could not write nag file to %s. Error: %s', nagfilename, e)
|
IOError
|
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/sdk_update_checker.py/SDKUpdateChecker._WriteNagFile
|
1,097
|
def test_pickling(self):
import pickle
mods = [pickle]
try:
import cPickle
mods.append(cPickle)
except __HOLE__:
pass
protocols = [0, 1, 2]
for mod in mods:
for protocol in protocols:
for ast in (compile(i, "?", "exec", 0x400) for i in exec_tests):
ast2 = mod.loads(mod.dumps(ast, protocol))
self.assertEquals(to_tuple(ast2), to_tuple(ast))
|
ImportError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_ast.py/AST_Tests.test_pickling
|
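test_pickling collects whichever pickle implementations import cleanly and runs the same assertions over each; on Python 3 the cPickle import simply fails and is skipped. The optional-import scaffold on its own:

mods = []
for name in ("pickle", "cPickle"):
    try:
        mods.append(__import__(name))   # cPickle exists on Python 2 only
    except ImportError:
        pass                            # absent implementation: skip it

print([m.__name__ for m in mods])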
1,098
|
def copy_template(template_name, copy_to, **options):
"""copies the specified template directory to the copy_to location"""
import django_extensions
style = color_style()
ERROR = getattr(style, 'ERROR', lambda x: x)
SUCCESS = getattr(style, 'SUCCESS', lambda x: x)
template_dir = os.path.join(django_extensions.__path__[0], 'conf', template_name)
# walks the template structure and copies it
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir) + 1:]
if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
os.mkdir(os.path.join(copy_to, relative_dir))
for i, subdir in enumerate(subdirs):
if subdir.startswith('.'):
del subdirs[i]
for f in files:
if f.endswith('.pyc') or f.startswith('.DS_Store'):
continue
path_old = os.path.join(d, f)
path_new = os.path.join(copy_to, relative_dir, f).rstrip(".tmpl")
if os.path.exists(path_new):
if options.get('verbosity', 1) > 1:
print(ERROR("%s already exists" % path_new))
continue
if options.get('verbosity', 1) > 1:
print(SUCCESS("%s" % path_new))
with open(path_old, 'r') as fp_orig:
with open(path_new, 'w') as fp_new:
fp_new.write(fp_orig.read())
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except __HOLE__:
sys.stderr.write("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new)
|
OSError
|
dataset/ETHPy150Open django-extensions/django-extensions/django_extensions/management/commands/create_jobs.py/copy_template
|
1,099
|
def get_info(process=None, interval=0, with_childs=False):
"""Return information about a process. (can be an pid or a Process object)
If process is None, will return the information about the current process.
"""
# XXX move get_info to circus.process?
from circus.process import (get_children, get_memory_info,
get_cpu_percent, get_memory_percent,
get_cpu_times, get_nice, get_cmdline,
get_create_time, get_username)
if process is None or isinstance(process, int):
if process is None:
pid = os.getpid()
else:
pid = process
if pid in _PROCS:
process = _PROCS[pid]
else:
_PROCS[pid] = process = Process(pid)
info = {}
try:
mem_info = get_memory_info(process)
info['mem_info1'] = bytes2human(mem_info[0])
info['mem_info2'] = bytes2human(mem_info[1])
except AccessDenied:
info['mem_info1'] = info['mem_info2'] = "N/A"
try:
info['cpu'] = get_cpu_percent(process, interval=interval)
except AccessDenied:
info['cpu'] = "N/A"
try:
info['mem'] = round(get_memory_percent(process), 1)
except AccessDenied:
info['mem'] = "N/A"
try:
cpu_times = get_cpu_times(process)
ctime = timedelta(seconds=sum(cpu_times))
ctime = "%s:%s.%s" % (ctime.seconds // 60 % 60,
str((ctime.seconds % 60)).zfill(2),
str(ctime.microseconds)[:2])
except AccessDenied:
ctime = "N/A"
info['ctime'] = ctime
try:
info['pid'] = process.pid
except AccessDenied:
info['pid'] = 'N/A'
try:
info['username'] = get_username(process)
except AccessDenied:
info['username'] = 'N/A'
try:
info['nice'] = get_nice(process)
except AccessDenied:
info['nice'] = 'N/A'
except NoSuchProcess:
info['nice'] = 'Zombie'
try:
raw_cmdline = get_cmdline(process)
cmdline = os.path.basename(
shlex.split(raw_cmdline[0], posix=not IS_WINDOWS)[0]
)
except (AccessDenied, IndexError):
cmdline = "N/A"
try:
info['create_time'] = get_create_time(process)
except AccessDenied:
info['create_time'] = 'N/A'
try:
info['age'] = time.time() - get_create_time(process)
except __HOLE__:
info['create_time'] = get_create_time(process)
except AccessDenied:
info['age'] = 'N/A'
info['cmdline'] = cmdline
info['children'] = []
if with_childs:
for child in get_children(process):
info['children'].append(get_info(child, interval=interval))
return info
|
TypeError
|
dataset/ETHPy150Open circus-tent/circus/circus/util.py/get_info
|
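get_info wraps every probe in its own try/except so a single AccessDenied degrades one field to "N/A" instead of losing the whole report. A generic best-effort collector in the same spirit (safe() and the probes are illustrative):

import os
import time

def safe(getter, default="N/A"):
    try:
        return getter()
    except Exception:   # the real code catches AccessDenied and friends
        return default

info = {
    "pid": safe(os.getpid),
    "age": safe(lambda: time.time() - os.stat("/proc/self").st_mtime),
}
print(info)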