| Unnamed: 0 (int64, 0-10k) | function (string, length 79-138k) | label (string, 20 classes) | info (string, length 42-261) |
|---|---|---|---|
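Each row below pairs a Python function in which one exception name has been masked as `__HOLE__` (the `function` column) with the masked exception class (the `label` column) and the source file's path in the ETH Py150 corpus (the `info` column). As a minimal sketch of working with rows of this shape, assuming the table has been exported to a file (the name `py150_exceptions.csv` is a placeholder, not a path from this dump):

```python
import pandas as pd

# Placeholder file name: substitute the actual export of this table.
df = pd.read_csv("py150_exceptions.csv")

print(df["label"].value_counts())  # distribution over the 20 exception classes
row = df.iloc[0]
print(row["function"])  # source code with one exception masked as __HOLE__
print(row["label"])     # the masked exception name, e.g. "AttributeError"
print(row["info"])      # original file path within the corpus
```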
1,600
|
def _randint(seed=None):
    """Return a randint generator. ``seed`` can be
    o None - return randomly seeded generator
    o int - return a generator seeded with the int
    o list - the values to be returned will be taken from the list
    in the order given; the provided list is not modified.
    Examples
    ========
    >>> from sympy.utilities.randtest import _randint
    >>> ri = _randint()
    >>> ri(1, 1000) # doctest: +SKIP
    999
    >>> ri = _randint(3)
    >>> ri(1, 1000) # doctest: +SKIP
    238
    >>> ri = _randint([0, 5, 1, 2, 4])
    >>> ri(1, 3), ri(1, 3)
    (1, 2)
    """
    if seed is None:
        return random.randint
    elif isinstance(seed, int):
        return random.Random(seed).randint
    elif is_sequence(seed):
        seed = list(seed)  # make a copy
        seed.reverse()

        def give(a, b, seq=seed):
            a, b = as_int(a), as_int(b)
            w = b - a
            if w < 0:
                raise ValueError('_randint got empty range')
            try:
                x = seq.pop()
            except __HOLE__:
                raise ValueError('_randint expects a list-like sequence')
            except IndexError:
                raise ValueError('_randint sequence was too short')
            if a <= x <= b:
                return x
            else:
                return give(a, b, seq)
        return give
    else:
        raise ValueError('_randint got an unexpected seed')
|
AttributeError
|
dataset/ETHPy150Open sympy/sympy/sympy/utilities/randtest.py/_randint
|
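Substituting the gold label back into the hole reconstructs the original source for any row; a tiny illustrative helper (the `fill_hole` name is hypothetical, not part of the dataset tooling):

```python
def fill_hole(function_src, label):
    """Replace the __HOLE__ mask with the gold exception name."""
    return function_src.replace("__HOLE__", label)

# For the row above, fill_hole(function, "AttributeError") restores
# the original "except AttributeError:" clause in _randint.
```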
1,601
|
def gb(args):
    """
    %prog gb gffile fastafile
    Convert GFF3 to Genbank format. Recipe taken from:
    <http://www.biostars.org/p/2492/>
    """
    from Bio.Alphabet import generic_dna
    try:
        from BCBio import GFF
    except __HOLE__:
        print >> sys.stderr, "You need to install dep first: $ easy_install bcbio-gff"
    p = OptionParser(gb.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    gff_file, fasta_file = args
    pf = op.splitext(gff_file)[0]
    out_file = pf + ".gb"
    fasta_input = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta", generic_dna))
    gff_iter = GFF.parse(gff_file, fasta_input)
    SeqIO.write(gff_iter, out_file, "genbank")
|
ImportError
|
dataset/ETHPy150Open tanghaibao/jcvi/formats/gff.py/gb
|
1,602
|
def gtf(args):
    """
    %prog gtf gffile
    Convert gff to gtf file. In gtf, only exon/CDS features are important. The
    first 8 columns are the same as gff, but in the attributes field, we need to
    specify "gene_id" and "transcript_id".
    """
    p = OptionParser(gtf.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    gffile, = args
    gff = Gff(gffile)
    transcript_info = AutoVivification()
    for g in gff:
        if g.type.endswith("RNA") or g.type.endswith("transcript"):
            if "ID" in g.attributes and "Parent" in g.attributes:
                transcript_id = g.get_attr("ID")
                gene_id = g.get_attr("Parent")
            elif "mRNA" in g.attributes and "Gene" in g.attributes:
                transcript_id = g.get_attr("mRNA")
                gene_id = g.get_attr("Gene")
            else:
                transcript_id = g.get_attr("ID")
                gene_id = transcript_id
            transcript_info[transcript_id]["gene_id"] = gene_id
            transcript_info[transcript_id]["gene_type"] = g.type
            continue
        if g.type not in valid_gff_to_gtf_type.keys():
            continue
        try:
            transcript_id = g.get_attr("Parent", first=False)
        except __HOLE__:
            transcript_id = g.get_attr("mRNA", first=False)
        g.type = valid_gff_to_gtf_type[g.type]
        for tid in transcript_id:
            gene_type = transcript_info[tid]["gene_type"]
            if not gene_type.endswith("RNA") and not gene_type.endswith("transcript"):
                continue
            gene_id = transcript_info[tid]["gene_id"]
            g.attributes = dict(gene_id=[gene_id], transcript_id=[tid])
            g.update_attributes(gtf=True, urlquote=False)
            print g
|
IndexError
|
dataset/ETHPy150Open tanghaibao/jcvi/formats/gff.py/gtf
|
1,603
|
def __repr__(self):
    try:
        ContentTypeText = ContentType._VALUES_TO_NAMES[self.contentType]
    except __HOLE__:
        #print "*** Unknow Content Type", self.contentType
        ContentTypeText = self.contentType
    return 'LineMessage (contentType=%s, sender=%s, receiver=%s, msg="%s")' % (
        ContentTypeText,
        self.sender,
        self.receiver,
        self.text
    )
|
KeyError
|
dataset/ETHPy150Open carpedm20/LINE/line/models.py/LineMessage.__repr__
|
1,604
|
def __init__(self, client, group=None, is_joined=True):
    """LineGroup init
    :param client: LineClient instance
    :param group: Group instace
    :param is_joined: is a user joined or invited to a group
    """
    self._client = client
    self._group = group
    self.id = group.id
    self.name = group.name
    self.is_joined = is_joined
    try:
        self.creator = LineContact(client, group.creator)
    except:
        self.creator = None
    self.members = [LineContact(client, member) for member in group.members]
    try:
        self.invitee = [LineContact(client, member) for member in group.invitee]
    except __HOLE__:
        self.invitee = []
|
TypeError
|
dataset/ETHPy150Open carpedm20/LINE/line/models.py/LineGroup.__init__
|
1,605
|
@patch('kombu.transport.virtual.emergency_dump_state')
@patch(PRINT_FQDN)
def test_restore_unacked_once_when_unrestored(self, print_,
                                              emergency_dump_state):
    q = self.channel.qos
    q._flush = Mock()

    class State(dict):
        restored = False

    q._delivered = State({1: 1})
    ru = q.restore_unacked = Mock()
    exc = None
    try:
        raise KeyError()
    except __HOLE__ as exc_:
        exc = exc_
    ru.return_value = [(exc, 1)]
    self.channel.do_restore = True
    q.restore_unacked_once()
    print_.assert_called()
    emergency_dump_state.assert_called()
|
KeyError
|
dataset/ETHPy150Open celery/kombu/kombu/tests/transport/virtual/test_base.py/test_Channel.test_restore_unacked_once_when_unrestored
|
1,606
|
def authenticate(self, site, site_url, assertion):
    """Verifies BrowserID assertion
    Returns:
        Object that represents a user with an email verified by
        the assertion. If a user with such email does not exists,
        but there are open locations that require login, the user
        object is created. In other cases, None is returned.
    Raises:
        AuthenticationError: verification failed.
    """
    try:
        result = self.verifier.verify(assertion=assertion,
                                      audience=site_url)
    except BrowserIDException as ex:
        return AuthenticationError(
            'Failed to contact Persona verification service')
    if not result:
        raise AuthenticationError(
            'BrowserID assertion verification failed.')
    user = site.users.find_item_by_email(result.email)
    if user is not None:
        return user
    try:
        # The site has open locations that require login, every
        # user needs to be allowed.
        #
        # TODO: user objects created in such way should probably
        # be marked and automatically deleted on logout or after
        # some time of inactivity.
        if site.locations.has_open_location_with_login():
            return site.users.create_item(result.email)
        else:
            return None
    except __HOLE__ as ex:
        raise AuthenticationError(', '.join(ex.messages))
    except LimitExceeded as ex:
        raise AuthenticationError(str(ex))
|
ValidationError
|
dataset/ETHPy150Open wrr/wwwhisper/wwwhisper_auth/backend.py/BrowserIDBackend.authenticate
|
1,607
|
def authenticate(self, site, site_url, token):
    """Token was a part of a login url that proves email ownership.
    Returns:
        Object that represents a user with the verified email
        encoded in the token. If a user with such email does not
        exists, but there are open locations that require login,
        the user object is created. In other cases, None is
        returned.
    Raises:
        AuthenticationError: token is invalid, expired or
        generated for a different site. Token is valid, but the
        user does not exist yet and can't be added because user
        limit is exceeded (this can happen only if site has open
        locations that require login).
    """
    verified_email = login_token.load_login_token(site, site_url, token)
    if verified_email is None:
        raise AuthenticationError('Token invalid or expired.')
    user = site.users.find_item_by_email(verified_email)
    if user is not None:
        return user
    try:
        # The site has open locations that require login, every
        # user needs to be allowed.
        #
        # TODO: user objects created in such way should probably
        # be marked and automatically deleted on logout or after
        # some time of inactivity.
        if site.locations.has_open_location_with_login():
            return site.users.create_item(verified_email)
        else:
            return None
    except __HOLE__ as ex:
        # Should not happen, because email in the signed token is
        # validated before the token is generated.
        raise AuthenticationError(', '.join(ex.messages))
    except LimitExceeded as ex:
        raise AuthenticationError(str(ex))
|
ValidationError
|
dataset/ETHPy150Open wrr/wwwhisper/wwwhisper_auth/backend.py/VerifiedEmailBackend.authenticate
|
1,608
|
def close(self):
    if 'less' in self.cmd:
        self.write("press q to quit")
    if self.proc:
        self.file.close()
        try:
            self.proc.wait()
        except __HOLE__:
            sys.proc.kill()
            sys.exit(1)
|
KeyboardInterrupt
|
dataset/ETHPy150Open jsmits/github-cli/src/github/utils.py/Pager.close
|
1,609
|
def clear(self):
    """
    Removes all modes from the editor. All modes are removed from list
    and deleted.
    """
    import sys
    while len(self._modes):
        key = sorted(list(self._modes.keys()))[0]
        mode = self.remove(key)
        refcount = sys.getrefcount(mode)
        if refcount > 2:
            try:
                import objgraph
            except __HOLE__:
                _logger().warning(
                    'potential memory leak detected on mode %r...\n'
                    'Install the objgraph package to know what objects are'
                    ' holding references the mode.' % mode)
            else:
                _logger().warning(
                    'potential memory leak detected on mode: %r.\n'
                    'see stderr for a backrefs dot graph...' % mode)
                objgraph.show_backrefs([mode], output=sys.stderr)
        del mode
|
ImportError
|
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/managers/modes.py/ModesManager.clear
|
1,610
|
def _instance_or_null(self, instance_class, json):
    if json is None:
        return NullObject(instance_class.__name__)
    if not isinstance(json, dict):
        return exceptions.UnprocessableResponseBody(
            "GitHub's API returned a body that could not be handled", json
        )
    try:
        return instance_class(json, self)
    except __HOLE__:  # instance_class is not a subclass of GitHubCore
        return instance_class(json)
|
TypeError
|
dataset/ETHPy150Open sigmavirus24/github3.py/github3/models.py/GitHubCore._instance_or_null
|
1,611
|
def type_match(self,value):
    is_match = 0
    try:
        if value.IsA('vtkObject'):
            is_match = 1
    except __HOLE__:
        pass
    return is_match
|
AttributeError
|
dataset/ETHPy150Open scipy/scipy/scipy/weave/vtk_spec.py/vtk_converter.type_match
|
1,612
|
def test_evaluations():
    # Create an Evaluation
    name = 'Test Evaluation %s' % str(uuid.uuid4())
    ev = Evaluation(name=name, description='Evaluation for testing',
                    contentSource=project['id'], status='CLOSED')
    ev = syn.store(ev)
    try:
        # -- Get the Evaluation by name
        evalNamed = syn.getEvaluationByName(name)
        assert ev['contentSource'] == evalNamed['contentSource']
        assert ev['createdOn'] == evalNamed['createdOn']
        assert ev['description'] == evalNamed['description']
        assert ev['etag'] == evalNamed['etag']
        assert ev['id'] == evalNamed['id']
        assert ev['name'] == evalNamed['name']
        assert ev['ownerId'] == evalNamed['ownerId']
        assert ev['status'] == evalNamed['status']
        # -- Get the Evaluation by project
        evalProj = syn.getEvaluationByContentSource(project)
        evalProj = next(evalProj)
        assert ev['contentSource'] == evalProj['contentSource']
        assert ev['createdOn'] == evalProj['createdOn']
        assert ev['description'] == evalProj['description']
        assert ev['etag'] == evalProj['etag']
        assert ev['id'] == evalProj['id']
        assert ev['name'] == evalProj['name']
        assert ev['ownerId'] == evalProj['ownerId']
        assert ev['status'] == evalProj['status']
        # Update the Evaluation
        ev['status'] = 'OPEN'
        ev = syn.store(ev, createOrUpdate=True)
        assert ev.status == 'OPEN'
        # # Add the current user as a participant
        myOwnerId = int(syn.getUserProfile()['ownerId'])
        syn._allowParticipation(ev, myOwnerId)
        # AUTHENTICATED_USERS = 273948
        # PUBLIC = 273949
        syn.setPermissions(ev, 273948, accessType=['READ'])
        syn.setPermissions(ev, 273949, accessType=['READ'])
        # test getPermissions
        permissions = syn.getPermissions(ev, 273949)
        assert ['READ'] == permissions
        permissions = syn.getPermissions(ev, syn.getUserProfile()['ownerId'])
        assert [p in permissions for p in ['READ', 'CREATE', 'DELETE', 'UPDATE', 'CHANGE_PERMISSIONS', 'READ_PRIVATE_SUBMISSION']]
        # Test getSubmissions with no Submissions (SYNR-453)
        submissions = syn.getSubmissions(ev)
        assert len(list(submissions)) == 0
        # -- Get a Submission attachment belonging to another user (SYNR-541) --
        # See if the configuration contains test authentication
        try:
            config = configparser.ConfigParser()
            config.read(client.CONFIG_FILE)
            other_user = {}
            other_user['username'] = config.get('test-authentication', 'username')
            other_user['password'] = config.get('test-authentication', 'password')
            print("Testing SYNR-541")
            # Login as the test user
            testSyn = client.Synapse(skip_checks=True)
            testSyn.login(email=other_user['username'], password=other_user['password'])
            testOwnerId = int(testSyn.getUserProfile()['ownerId'])
            # Make a project
            other_project = Project(name=str(uuid.uuid4()))
            other_project = testSyn.createEntity(other_project)
            # Give the test user permission to read and join the evaluation
            syn._allowParticipation(ev, testOwnerId)
            # Make a file to submit
            with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
                filename = f.name
                f.write(str(random.gauss(0,1)) + '\n')
            f = File(filename, parentId=other_project.id,
                     name='Submission 999',
                     description ="Haha! I'm inaccessible...")
            entity = testSyn.store(f)
            ## test submission by evaluation ID
            submission = testSyn.submit(ev.id, entity, submitterAlias="My Nickname")
            # Mess up the cached file so that syn._getWithEntityBundle must download again
            os.utime(filename, (0, 0))
            # Grab the Submission as the original user
            fetched = syn.getSubmission(submission['id'])
            assert os.path.exists(fetched['filePath'])
            # make sure the fetched file is the same as the original (PLFM-2666)
            assert filecmp.cmp(filename, fetched['filePath'])
        except configparser.Error:
            print('Skipping test for SYNR-541: No [test-authentication] in %s' % client.CONFIG_FILE)
        # Increase this to fully test paging by getEvaluationSubmissions
        # not to be less than 2
        num_of_submissions = 2
        # Create a bunch of Entities and submit them for scoring
        print("Creating Submissions")
        for i in range(num_of_submissions):
            with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
                filename = f.name
                f.write(str(random.gauss(0,1)) + '\n')
            f = File(filename, parentId=project.id, name='entry-%02d' % i,
                     description='An entry for testing evaluation')
            entity=syn.store(f)
            syn.submit(ev, entity, name='Submission %02d' % i, submitterAlias='My Team')
        # Score the submissions
        submissions = syn.getSubmissions(ev, limit=num_of_submissions-1)
        print("Scoring Submissions")
        for submission in submissions:
            assert re.match('Submission \d+', submission['name'])
            status = syn.getSubmissionStatus(submission)
            status.score = random.random()
            if submission['name'] == 'Submission 01':
                status.status = 'INVALID'
                status.report = 'Uh-oh, something went wrong!'
            else:
                status.status = 'SCORED'
                status.report = 'a fabulous effort!'
            syn.store(status)
        # Annotate the submissions
        print("Annotating Submissions")
        bogosity = {}
        submissions = syn.getSubmissions(ev)
        b = 123
        for submission, status in syn.getSubmissionBundles(ev):
            bogosity[submission.id] = b
            a = dict(foo='bar', bogosity=b)
            b += 123
            status['annotations'] = to_submission_status_annotations(a)
            set_privacy(status['annotations'], key='bogosity', is_private=False)
            syn.store(status)
        # Test that the annotations stuck
        for submission, status in syn.getSubmissionBundles(ev):
            a = from_submission_status_annotations(status.annotations)
            assert a['foo'] == 'bar'
            assert a['bogosity'] == bogosity[submission.id]
            for kvp in status.annotations['longAnnos']:
                if kvp['key'] == 'bogosity':
                    assert kvp['isPrivate'] == False
        # test query by submission annotations
        # These queries run against an eventually consistent index table which is
        # populated by an asynchronous worker. Thus, the queries may remain out
        # of sync for some unbounded, but assumed to be short time.
        attempts = 2
        while attempts > 0:
            try:
                print("Querying for submissions")
                results = syn.restGET("/evaluation/submission/query?query=SELECT+*+FROM+evaluation_%s" % ev.id)
                print(results)
                assert results[u'totalNumberOfResults'] == num_of_submissions+1
                results = syn.restGET("/evaluation/submission/query?query=SELECT+*+FROM+evaluation_%s where bogosity > 200" % ev.id)
                print(results)
                assert results[u'totalNumberOfResults'] == num_of_submissions
            except __HOLE__ as ex1:
                print("failed query: ", ex1)
                attempts -= 1
                if attempts > 0: print("retrying...")
                time.sleep(2)
            else:
                attempts = 0
        ## Test that we can retrieve submissions with a specific status
        invalid_submissions = list(syn.getSubmissions(ev, status='INVALID'))
        assert len(invalid_submissions) == 1, len(invalid_submissions)
        assert invalid_submissions[0]['name'] == 'Submission 01'
    finally:
        # Clean up
        syn.delete(ev)
        if 'testSyn' in locals():
            if 'other_project' in locals():
                # Clean up, since the current user can't access this project
                # This also removes references to the submitted object :)
                testSyn.delete(other_project)
            if 'team' in locals():
                ## remove team
                testSyn.delete(team)
    ## Just deleted it. Shouldn't be able to get it.
    assert_raises(SynapseHTTPError, syn.getEvaluation, ev)
|
AssertionError
|
dataset/ETHPy150Open Sage-Bionetworks/synapsePythonClient/tests/integration/test_evaluations.py/test_evaluations
|
1,613
|
def test_teams():
    name = "My Uniquely Named Team " + str(uuid.uuid4())
    team = syn.store(Team(name=name, description="A fake team for testing..."))
    schedule_for_cleanup(team)
    found_team = syn.getTeam(team.id)
    assert team == found_team
    p = syn.getUserProfile()
    found = None
    for m in syn.getTeamMembers(team):
        if m.member.ownerId == p.ownerId:
            found = m
            break
    assert found is not None, "Couldn't find user {} in team".format(p.username)
    ## needs to be retried 'cause appending to the search index is asynchronous
    tries = 10
    found_team = None
    while tries > 0:
        try:
            found_team = syn.getTeam(name)
            break
        except __HOLE__:
            tries -= 1
            if tries > 0: time.sleep(1)
    assert team == found_team
|
ValueError
|
dataset/ETHPy150Open Sage-Bionetworks/synapsePythonClient/tests/integration/test_evaluations.py/test_teams
|
1,614
|
def __init__(self, _exception):
    try:
        data = self._parser.match(_exception.message).groupdict()
    except __HOLE__:
        raise _exception
    self._instance = data['ins']
    self._caught = data['cg']
    self._exception = data['ex']
    self._primary_error_code = data['pec']
    self._secondary_error_code = data['sec']
    self._error_string = data['es']
|
AttributeError
|
dataset/ETHPy150Open tdevelioglu/python-f5/f5/exceptions.py/BigSudsExceptionParser.__init__
|
1,615
|
def make_path(self, parameter_name):
    """Parse a parameter name and build a full path to a message value.
    The path of a method is a tuple of 2-tuples describing the names and
    indexes within repeated fields from the root message (the message being
    constructed by the builder) to an arbitrarily nested message within it.
    Each 2-tuple node of a path (name, index) is:
      name: The name of the field that refers to the message instance.
      index: The index within a repeated field that refers to the message
        instance, None if not a repeated field.
    For example, consider:
      class VeryInner(messages.Message):
        ...
      class Inner(messages.Message):
        very_inner = messages.MessageField(VeryInner, 1, repeated=True)
      class Outer(messages.Message):
        inner = messages.MessageField(Inner, 1)
    If this builder is building an instance of Outer, that instance is
    referred to in the URL encoded parameters without a path.  Therefore
    its path is ().
    The child 'inner' is referred to by its path (('inner', None)).
    The first child of repeated field 'very_inner' on the Inner instance
    is referred to by (('inner', None), ('very_inner', 0)).
    Examples:
      # Correct reference to model where nation is a Message, district is
      # repeated Message and county is any not repeated field type.
      >>> make_path('nation.district-2.county')
      (('nation', None), ('district', 2), ('county', None))
      # Field is not part of model.
      >>> make_path('nation.made_up_field')
      None
      # nation field is not repeated and index provided.
      >>> make_path('nation-1')
      None
      # district field is repeated and no index provided.
      >>> make_path('nation.district')
      None
    Args:
      parameter_name: Name of query parameter as passed in from the request.
        in order to make a path, this parameter_name must point to a valid
        field within the message structure. Nodes of the path that refer to
        repeated fields must be indexed with a number, non repeated nodes must
        not have an index.
    Returns:
      Parsed version of the parameter_name as a tuple of tuples:
        attribute: Name of attribute associated with path.
        index: Postitive integer index when it is a repeated field, else None.
      Will return None if the parameter_name does not have the right prefix,
      does not point to a field within the message structure, does not have
      an index if it is a repeated field or has an index but is not a repeated
      field.
    """
    if parameter_name.startswith(self.__parameter_prefix):
        parameter_name = parameter_name[len(self.__parameter_prefix):]
    else:
        return None
    path = []
    name = []
    message_type = type(self.__messages[()])  # Get root message.
    for item in parameter_name.split('.'):
        # This will catch sub_message.real_message_field.not_real_field
        if not message_type:
            return None
        item_match = _FIELD_NAME_REGEX.match(item)
        if not item_match:
            return None
        attribute = item_match.group(1)
        index = item_match.group(2)
        if index:
            index = int(index)
        try:
            field = message_type.field_by_name(attribute)
        except __HOLE__:
            return None
        if field.repeated != (index is not None):
            return None
        if isinstance(field, messages.MessageField):
            message_type = field.type
        else:
            message_type = None
        # Path is valid so far. Append node and continue.
        path.append((attribute, index))
    return tuple(path)
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/protorpc/protorpc/protourlencode.py/URLEncodedRequestBuilder.make_path
|
1,616
|
def add_parameter(self, parameter, values):
    """Add a single parameter.
    Adds a single parameter and its value to the request message.
    Args:
      parameter: Query string parameter to map to request.
      values: List of values to assign to request message.
    Returns:
      True if parameter was valid and added to the message, else False.
    Raises:
      DecodeError if the parameter refers to a valid field, and the values
      parameter does not have one and only one value. Non-valid query
      parameters may have multiple values and should not cause an error.
    """
    path = self.make_path(parameter)
    if not path:
        return False
    # Must check that all indexes of all items in the path are correct before
    # instantiating any of them. For example, consider:
    #
    #   class Repeated(object):
    #     ...
    #
    #   class Inner(object):
    #
    #     repeated = messages.MessageField(Repeated, 1, repeated=True)
    #
    #   class Outer(object):
    #
    #     inner = messages.MessageField(Inner, 1)
    #
    #   instance = Outer()
    #   builder = URLEncodedRequestBuilder(instance)
    #   builder.add_parameter('inner.repeated')
    #
    #   assert not hasattr(instance, 'inner')
    #
    # The check is done relative to the instance of Outer pass in to the
    # constructor of the builder. This instance is not referred to at all
    # because all names are assumed to be relative to it.
    #
    # The 'repeated' part of the path is not correct because it is missing an
    # index. Because it is missing an index, it should not create an instance
    # of Repeated. In this case add_parameter will return False and have no
    # side effects.
    #
    # A correct path that would cause a new Inner instance to be inserted at
    # instance.inner and a new Repeated instance to be appended to the
    # instance.inner.repeated list would be 'inner.repeated-0'.
    if not self.__check_indexes(path):
        return False
    # Ok to build objects.
    parent_path = path[:-1]
    parent = self.__get_or_create_path(parent_path)
    name, index = path[-1]
    field = parent.field_by_name(name)
    if len(values) != 1:
        raise messages.DecodeError(
            'Found repeated values for field %s.' % field.name)
    value = values[0]
    if isinstance(field, messages.IntegerField):
        converted_value = int(value)
    elif isinstance(field, message_types.DateTimeField):
        try:
            converted_value = util.decode_datetime(value)
        except __HOLE__, e:
            raise messages.DecodeError(e)
    elif isinstance(field, messages.MessageField):
        # Just make sure it's instantiated. Assignment to field or
        # appending to list is done in __get_or_create_path.
        self.__get_or_create_path(path)
        return True
    elif isinstance(field, messages.StringField):
        converted_value = value.decode('utf-8')
    elif isinstance(field, messages.BooleanField):
        converted_value = value.lower() == 'true' and True or False
    else:
        try:
            converted_value = field.type(value)
        except TypeError:
            raise messages.DecodeError('Invalid enum value "%s"' % value)
    if field.repeated:
        value_list = getattr(parent, field.name, None)
        if value_list is None:
            setattr(parent, field.name, [converted_value])
        else:
            if index == len(value_list):
                value_list.append(converted_value)
            else:
                # Index should never be above len(value_list) because it was
                # verified during the index check above.
                value_list[index] = converted_value
    else:
        setattr(parent, field.name, converted_value)
    return True
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/protorpc/protorpc/protourlencode.py/URLEncodedRequestBuilder.add_parameter
|
1,617
|
def _check_file_syntax(filename, temp_dir, override_lang=None, enforce=True):
    """
    Checks that the code in FILENAME parses, attempting to autodetect
    the language if necessary.
    Raises IOError if the file cannot be read.
    Raises DXSyntaxError if there is a problem and "enforce" is True.
    """
    def check_python(filename):
        # Generate a semi-recognizable name to write the pyc to. Of
        # course it's possible that different files being scanned could
        # have the same basename, so this path won't be unique, but the
        # checks don't run concurrently so this shouldn't cause any
        # problems.
        pyc_path = os.path.join(temp_dir, os.path.basename(filename) + ".pyc")
        try:
            if USING_PYTHON2:
                filename = filename.encode(sys.getfilesystemencoding())
            py_compile.compile(filename, cfile=pyc_path, doraise=True)
        finally:
            try:
                os.unlink(pyc_path)
            except __HOLE__:
                pass

    def check_bash(filename):
        subprocess.check_output(["/bin/bash", "-n", filename], stderr=subprocess.STDOUT)

    if override_lang == 'python2.7':
        checker_fn = check_python
    elif override_lang == 'bash':
        checker_fn = check_bash
    elif filename.endswith('.py'):
        checker_fn = check_python
    elif filename.endswith('.sh'):
        checker_fn = check_bash
    else:
        # Ignore other kinds of files.
        return
    # Do a test read of the file to catch errors like the file not
    # existing or not being readable.
    open(filename)
    try:
        checker_fn(filename)
    except subprocess.CalledProcessError as e:
        print(filename + " has a syntax error! Interpreter output:", file=sys.stderr)
        for line in e.output.strip("\n").split("\n"):
            print("  " + line.rstrip("\n"), file=sys.stderr)
        if enforce:
            raise DXSyntaxError(filename + " has a syntax error")
    except py_compile.PyCompileError as e:
        print(filename + " has a syntax error! Interpreter output:", file=sys.stderr)
        print("  " + e.msg.strip(), file=sys.stderr)
        if enforce:
            raise DXSyntaxError(e.msg.strip())
|
OSError
|
dataset/ETHPy150Open dnanexus/dx-toolkit/src/python/dxpy/scripts/dx_build_app.py/_check_file_syntax
|
1,618
|
def _verify_app_source_dir_impl(src_dir, temp_dir, mode, enforce=True):
    """Performs syntax and lint checks on the app source.
    Precondition: the dxapp.json file exists and can be parsed.
    """
    _lint(os.path.join(src_dir, "dxapp.json"), mode)
    # Check that the entry point file parses as the type it is going to
    # be interpreted as. The extension is irrelevant.
    manifest = json.load(open(os.path.join(src_dir, "dxapp.json")))
    if "runSpec" in manifest:
        if "interpreter" not in manifest['runSpec']:
            raise dxpy.app_builder.AppBuilderException('runSpec.interpreter field was not present')
        if manifest['runSpec']['interpreter'] in ["python2.7", "bash"]:
            if "file" in manifest['runSpec']:
                entry_point_file = os.path.abspath(os.path.join(src_dir, manifest['runSpec']['file']))
                try:
                    _check_file_syntax(entry_point_file, temp_dir, override_lang=manifest['runSpec']['interpreter'], enforce=enforce)
                except __HOLE__ as e:
                    raise dxpy.app_builder.AppBuilderException(
                        'Could not open runSpec.file=%r. The problem was: %s' % (entry_point_file, e))
                except DXSyntaxError:
                    raise dxpy.app_builder.AppBuilderException('Entry point file %s has syntax errors, see above for details. Rerun with --no-check-syntax to proceed anyway.' % (entry_point_file,))
            elif "code" in manifest['runSpec']:
                try:
                    _check_syntax(manifest['runSpec']['code'], manifest['runSpec']['interpreter'], temp_dir, enforce=enforce)
                except DXSyntaxError:
                    raise dxpy.app_builder.AppBuilderException('Code in runSpec.code has syntax errors, see above for details. Rerun with --no-check-syntax to proceed anyway.')
        if 'execDepends' in manifest['runSpec']:
            if not isinstance(manifest['runSpec']['execDepends'], list):
                raise dxpy.app_builder.AppBuilderException('Expected runSpec.execDepends to be an array. Rerun with --no-check-syntax to proceed anyway.')
            if not all(isinstance(dep, dict) for dep in manifest['runSpec']['execDepends']):
                raise dxpy.app_builder.AppBuilderException('Expected runSpec.execDepends to be an array of hashes. Rerun with --no-check-syntax to proceed anyway.')
            if any(dep.get('package_manager', 'apt') != 'apt' for dep in manifest['runSpec']['execDepends']):
                if not isinstance(manifest.get('access'), dict) or 'network' not in manifest['access']:
                    msg = '\n'.join(['runSpec.execDepends specifies non-APT dependencies, but no network access spec is given.',
                                     'Add {"access": {"network": ["*"]}} to allow dependencies to install.',
                                     'See https://wiki.dnanexus.com/Developer-Tutorials/Request-Additional-App-Resources#Network-Access.',
                                     'Rerun with --no-check-syntax to proceed anyway.'])
                    raise dxpy.app_builder.AppBuilderException(msg)
    if 'authorizedUsers' in manifest:
        if not isinstance(manifest['authorizedUsers'], list) or isinstance(manifest['authorizedUsers'], basestring):
            raise dxpy.app_builder.AppBuilderException('Expected authorizedUsers to be a list of strings')
        for thing in manifest['authorizedUsers']:
            if thing != 'PUBLIC' and (not isinstance(thing, basestring) or not re.match("^(org-|user-)", thing)):
                raise dxpy.app_builder.AppBuilderException('authorizedUsers field contains an entry which is not either the string "PUBLIC" or a user or org ID')
    # Check all other files that are going to be in the resources tree.
    # For these we detect the language based on the filename extension.
    # Obviously this check can have false positives, since the app can
    # execute (or not execute!) all these files in whatever way it
    # wishes, e.g. it could use Python != 2.7 or some non-bash shell.
    # Consequently errors here are non-fatal.
    files_with_problems = []
    for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(src_dir, "resources"))):
        for filename in filenames:
            # On Mac OS, the resource fork for "FILE.EXT" gets tarred up
            # as a file named "._FILE.EXT". To a naive check this
            # appears to be a file of the same extension. Therefore, we
            # exclude these from syntax checking since they are likely
            # to not parse as whatever language they appear to be.
            if not filename.startswith("._"):
                try:
                    _check_file_syntax(os.path.join(dirpath, filename), temp_dir, enforce=True)
                except IOError as e:
                    raise dxpy.app_builder.AppBuilderException(
                        'Could not open file in resources directory %r. The problem was: %s' %
                        (os.path.join(dirpath, filename), e)
                    )
                except DXSyntaxError:
                    # Suppresses errors from _check_file_syntax so we
                    # only print a nice error message
                    files_with_problems.append(os.path.join(dirpath, filename))
    if files_with_problems:
        # Make a message of the form:
        #    "/path/to/my/app.py"
        # OR "/path/to/my/app.py and 3 other files"
        files_str = files_with_problems[0] if len(files_with_problems) == 1 else (files_with_problems[0] + " and " + str(len(files_with_problems) - 1) + " other file" + ("s" if len(files_with_problems) > 2 else ""))
        logging.warn('%s contained syntax errors, see above for details' % (files_str,))
|
IOError
|
dataset/ETHPy150Open dnanexus/dx-toolkit/src/python/dxpy/scripts/dx_build_app.py/_verify_app_source_dir_impl
|
1,619
|
def _subtractAndCompareToZero(a, b, op):
    """Helper function for comparison operators.
    Subtracts b from a, exactly if possible, and compares the
    result with 0 using op, in such a way that the comparison
    won't recurse. If the difference raises a TypeError, returns
    NotImplemented instead.
    """
    if isinstance(b, numbers.Complex) and b.imag == 0:
        b = b.real
    if isinstance(b, float):
        b = a.from_float(b)
    try:
        # XXX: If b <: Real but not <: Rational, this is likely
        # to fall back to a float. If the actual values differ by
        # less than MIN_FLOAT, this could falsely call them equal,
        # which would make <= inconsistent with ==. Better ways of
        # doing this are welcome.
        diff = a - b
    except __HOLE__:
        return NotImplemented
    if isinstance(diff, Rational):
        return op(diff.numerator, 0)
    return op(diff, 0)
|
TypeError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/fractions.py/Fraction._subtractAndCompareToZero
|
1,620
|
def serialize_comments(self, comments):
    result = {}
    serialized_comments = \
        super(ImageReviewUI, self).serialize_comments(comments)
    for serialized_comment in serialized_comments:
        try:
            position = '%(x)sx%(y)s+%(width)s+%(height)s' \
                       % serialized_comment
        except __HOLE__:
            # It's possible this comment was made before the review UI
            # was provided, meaning it has no data. If this is the case,
            # ignore this particular comment, since it doesn't have a
            # region.
            continue
        result.setdefault(position, []).append(serialized_comment)
    return result
|
KeyError
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/reviews/ui/image.py/ImageReviewUI.serialize_comments
|
1,621
|
def get_comment_thumbnail(self, comment):
    try:
        x = int(comment.extra_data['x'])
        y = int(comment.extra_data['y'])
        width = int(comment.extra_data['width'])
        height = int(comment.extra_data['height'])
    except (__HOLE__, ValueError):
        # This may be a comment from before we had review UIs. Or,
        # corrupted data. Either way, don't display anything.
        return None
    image_url = crop_image(comment.file_attachment.file,
                           x, y, width, height)
    if not urlparse(image_url).netloc:
        image_url = build_server_url(image_url)
    image_html = (
        '<img class="modified-image" src="%s" width="%s" height="%s" '
        'alt="%s" />'
        % (image_url, width, height, escape(comment.text)))
    if comment.diff_against_file_attachment_id:
        diff_against_image_url = crop_image(
            comment.diff_against_file_attachment.file,
            x, y, width, height)
        diff_against_image_html = (
            '<img class="orig-image" src="%s" width="%s" '
            'height="%s" alt="%s" />'
            % (diff_against_image_url, width, height,
               escape(comment.text)))
        return ('<div class="image-review-ui-diff-thumbnail">%s%s</div>'
                % (diff_against_image_html, image_html))
    else:
        return image_html
|
KeyError
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/reviews/ui/image.py/ImageReviewUI.get_comment_thumbnail
|
1,622
|
def camshift():
    cam = Camera()
    img = cam.getImage()
    d = Display(img.size())
    bb1 = getBBFromUser(cam,d)
    fs1=[]
    while True:
        try:
            img1 = cam.getImage()
            fs1 = img1.track("camshift",fs1,img,bb1,num_frames=5, nframes=60, lower=(0, 40, 40), upper=(80, 200, 200))
            fs1.drawBB()
            fs1.drawPath()
            fs1.showCoordinates()
            fs1.showSizeRatio()
            fs1.showPixelVelocity()
            fs1.showPixelVelocityRT()
            img1.show()
        except __HOLE__:
            print "Total number of frames tracked",
            print fs1.trackLength()
            print fs1.processTrack(foo)
            break
|
KeyboardInterrupt
|
dataset/ETHPy150Open sightmachine/SimpleCV/SimpleCV/examples/tracking/camshift.py/camshift
|
1,623
|
def getBBFromUser(cam, d):
    p1 = None
    p2 = None
    img = cam.getImage()
    while d.isNotDone():
        try:
            img = cam.getImage()
            img.save(d)
            dwn = d.leftButtonDownPosition()
            up = d.leftButtonUpPosition()
            if dwn:
                p1 = dwn
            if up:
                p2 = up
                break
            time.sleep(0.05)
        except __HOLE__:
            break
    print p1,p2
    if not p1 or not p2:
        return None
    xmax = np.max((p1[0],p2[0]))
    xmin = np.min((p1[0],p2[0]))
    ymax = np.max((p1[1],p2[1]))
    ymin = np.min((p1[1],p2[1]))
    print xmin,ymin,xmax,ymax
    return (xmin,ymin,xmax-xmin,ymax-ymin)
|
KeyboardInterrupt
|
dataset/ETHPy150Open sightmachine/SimpleCV/SimpleCV/examples/tracking/camshift.py/getBBFromUser
|
1,624
|
def finalize_order(request):
    '''Helper function that actually complete the order when the
    payment provider tells us so.
    '''
    order_id = provider.get_order_id(request)
    order = Order.objects.get(pk=order_id)
    #Order is already completed
    if order.payment_done:
        return
    #Simulate the cart for the order.complete, and order_handler that needs it
    try:
        cart_id = provider.get_cart_id(request)
        request.cart = Cart.objects.get(id=cart_id)
    except (NotImplementedError, Cart.DoesNotExist, __HOLE__):
        pass
    #Recreate an order form for the order handler
    data = checkout.initial_order_data(request)
    data["step"] = checkout.CHECKOUT_STEP_LAST
    order_form_class = get_callable(settings.SHOP_CHECKOUT_FORM_CLASS)
    form = order_form_class(request, step=checkout.CHECKOUT_STEP_LAST, data=data)
    form.instance = order
    form.full_clean()
    request.session["order"] = dict(form.cleaned_data)
    order.transaction_id = provider.get_transaction_id(request)
    order.payment_done = True
    order.complete(request)
    order_handler(request, form, order)
    checkout.send_order_email(request, order)
|
TypeError
|
dataset/ETHPy150Open thomasWajs/cartridge-external-payment/cartridge_external_payment/views.py/finalize_order
|
1,625
|
def serialize_session(self, data):
    """Serializes values for a session.
    :param data:
        A dict with session data.
    :returns:
        A list with session data.
    """
    try:
        assert len(data) >= len(self.session_attributes)
        return [data.get(k) for k in self.session_attributes]
    except __HOLE__:
        logging.warning(
            'Invalid user data: %r. Expected attributes: %r.' %
            (data, self.session_attributes))
        return None
|
AssertionError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/webapp2-2.3/webapp2_extras/auth.py/AuthStore.serialize_session
|
1,626
|
def deserialize_session(self, data):
    """Deserializes values for a session.
    :param data:
        A list with session data.
    :returns:
        A dict with session data.
    """
    try:
        assert len(data) >= len(self.session_attributes)
        return dict(zip(self.session_attributes, data))
    except __HOLE__:
        logging.warning(
            'Invalid user data: %r. Expected attributes: %r.' %
            (data, self.session_attributes))
        return None

# Validators --------------------------------------------------------------
|
AssertionError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/webapp2-2.3/webapp2_extras/auth.py/AuthStore.deserialize_session
|
1,627
|
def _generate_audio(songs, beats, new_beats, new_beats_cost, music_labels,
                    volume=None, volume_breakpoints=None,
                    springs=None, fade_in_len=3.0, fade_out_len=5.0):
    # assuming same sample rate for all songs
    logging.info("Building volume")
    if volume is not None and volume_breakpoints is not None:
        raise Exception("volume and volume_breakpoints cannot both be defined")
    if volume_breakpoints is None:
        if volume is None:
            volume = 1.0
        volume_array = np.array([volume])
    if volume_breakpoints is not None:
        volume_array = volume_breakpoints.to_array(songs[0].samplerate)
    result_volume = np.zeros(volume_array.shape)
    min_channels = min([x.channels for x in songs])
    comp = Composition(channels=min_channels)
    # currently assuming no transitions between different songs
    beat_length = np.mean([song.analysis[BEAT_DUR_KEY]
                           for song in songs])
    audio_segments = []
    segment_song_indicies = [new_beats[0][0]]
    current_seg = [0, 0]
    if new_beats[0][0] == 'p':
        current_seg = 'p'
    for i, (song_i, b) in enumerate(new_beats):
        if segment_song_indicies[-1] != song_i:
            segment_song_indicies.append(song_i)
        if current_seg == 'p' and song_i != 'p':
            current_seg = [i, i]
        elif current_seg != 'p' and song_i == 'p':
            audio_segments.append(current_seg)
            current_seg = 'p'
        elif current_seg != 'p':
            current_seg[1] = i
    if current_seg != 'p':
        audio_segments.append(current_seg)
    segment_song_indicies = [x for x in segment_song_indicies if x != 'p']
    beats = [np.array(b) for b in beats]
    score_start = 0
    current_loc = 0.0
    last_segment_beat = 0
    comp.add_tracks(songs)
    all_cf_locations = []
    aseg_fade_ins = []
    logging.info("Building audio")
    for (aseg, song_i) in zip(audio_segments, segment_song_indicies):
        segments = []
        # TODO: is this +1 correct?
        starts = np.array([x[1] for x in new_beats[aseg[0]:aseg[1] + 1]])
        bis = [np.nonzero(beats[song_i] == b)[0][0] for b in starts]
        dists = np.zeros(len(starts))
        durs = np.zeros(len(starts))
        for i, beat in enumerate(starts):
            if i < len(bis) - 1:
                if bis[i] + 1 != bis[i + 1]:
                    dists[i + 1] = 1
            if bis[i] + 1 >= len(beats[song_i]):
                # use the average beat duration if we don't know
                # how long the beat is supposed to be
                logging.warning("USING AVG BEAT DURATION IN SYNTHESIS -\
                    POTENTIALLY NOT GOOD")
                durs[i] = songs[song_i].analysis[BEAT_DUR_KEY]
            else:
                durs[i] = beats[song_i][bis[i] + 1] - beats[song_i][bis[i]]
        # add pause duration to current location
        # current_loc +=\
        #     (aseg[0] - last_segment_beat) *\
        #     song.analysis[BEAT_DUR_KEY]
        # catch up to the pause
        current_loc = max(
            aseg[0] * beat_length,
            current_loc)
        last_segment_beat = aseg[1] + 1
        cf_durations = []
        seg_start = starts[0]
        seg_start_loc = current_loc
        cf_locations = []
        segment_starts = [0]
        try:
            segment_starts.extend(np.where(dists == 1)[0])
        except:
            pass
        # print "segment starts", segment_starts
        for i, s_i in enumerate(segment_starts):
            if i == len(segment_starts) - 1:
                # last segment?
                seg_duration = np.sum(durs[s_i:])
            else:
                next_s_i = segment_starts[i + 1]
                seg_duration = np.sum(durs[s_i:next_s_i])
                cf_durations.append(durs[next_s_i])
                cf_locations.append(current_loc + seg_duration)
            seg_music_location = starts[s_i]
            seg = Segment(songs[song_i], current_loc,
                          seg_music_location, seg_duration)
            segments.append(seg)
            # update location for next segment
            current_loc += seg_duration
        # for i, start in enumerate(starts):
        #     dur = durs[i]
        #     current_loc += dur
        #     if i == 0 or dists[i - 1] == 0:
        #         pass
        #         # dur = durs[i]
        #         # current_loc += dur
        #     else:
        #         seg = Segment(song, seg_start_loc, seg_start,
        #                       current_loc - seg_start_loc)
        #         print "segment duration", current_loc - seg_start_loc
        #         segments.append(seg)
        #         # track = Track(wav_fn, t["name"])
        #         # comp.add_track(track)
        #         # dur = durs[i]
        #         cf_durations.append(dur)
        #         cf_locations.append(current_loc)
        #         seg_start_loc = current_loc
        #         seg_start = start
        #         # current_loc += dur
        # last_seg = Segment(song, seg_start_loc, seg_start,
        #                    current_loc - seg_start_loc)
        # segments.append(last_seg)
        comp.add_segments(segments)
        if segments[-1].comp_location + segments[-1].duration >\
                len(volume_array):
            diff = len(volume_array) -\
                (segments[-1].comp_location + segments[-1].duration)
            new_volume_array =\
                np.ones(segments[-1].comp_location + segments[-1].duration) *\
                volume_array[-1]
            new_volume_array[:len(volume_array)] = volume_array
            volume_array = new_volume_array
            result_volume = np.zeros(new_volume_array.shape)
        for i, seg in enumerate(segments[:-1]):
            logging.info(cf_durations[i], seg.duration_in_seconds,
                         segments[i + 1].duration_in_seconds)
            rawseg = comp.cross_fade(seg, segments[i + 1], cf_durations[i])
            # decrease volume along crossfades
            volume_frames = volume_array[
                rawseg.comp_location:rawseg.comp_location + rawseg.duration]
            raw_vol = RawVolume(rawseg, volume_frames)
            comp.add_dynamic(raw_vol)
            result_volume[rawseg.comp_location:
                          rawseg.comp_location + rawseg.duration] =\
                volume_frames
        s0 = segments[0]
        sn = segments[-1]
        if fade_in_len is not None:
            fi_len = min(fade_in_len, s0.duration_in_seconds)
            fade_in_len_samps = fi_len * s0.track.samplerate
            fade_in = comp.fade_in(s0, fi_len, fade_type="linear")
            aseg_fade_ins.append(fade_in)
        else:
            fade_in = None
        if fade_out_len is not None:
            fo_len = min(5.0, sn.duration_in_seconds)
            fade_out_len_samps = fo_len * sn.track.samplerate
            fade_out = comp.fade_out(sn, fade_out_len, fade_type="exponential")
        else:
            fade_out = None
        prev_end = 0.0
        for seg in segments:
            volume_frames = volume_array[
                seg.comp_location:seg.comp_location + seg.duration]
            # this can happen on the final segment:
            if len(volume_frames) == 0:
                volume_frames = np.array([prev_end] * seg.duration)
            elif len(volume_frames) < seg.duration:
                delta = [volume_frames[-1]] *\
                    (seg.duration - len(volume_frames))
                volume_frames = np.r_[volume_frames, delta]
            raw_vol = RawVolume(seg, volume_frames)
            comp.add_dynamic(raw_vol)
            try:
                result_volume[seg.comp_location:
                              seg.comp_location + seg.duration] = volume_frames
            except __HOLE__:
                diff = (seg.comp_location + seg.duration) - len(result_volume)
                result_volume = np.r_[result_volume, np.zeros(diff)]
                result_volume[seg.comp_location:
                              seg.comp_location + seg.duration] = volume_frames
            if len(volume_frames) != 0:
                prev_end = volume_frames[-1]
            # vol = Volume.from_segment(seg, volume)
            # comp.add_dynamic(vol)
        if fade_in is not None:
            result_volume[s0.comp_location:
                          s0.comp_location + fade_in_len_samps] *=\
                fade_in.to_array(channels=1).flatten()
        if fade_out is not None:
            result_volume[sn.comp_location + sn.duration - fade_out_len_samps:
                          sn.comp_location + sn.duration] *=\
                fade_out.to_array(channels=1).flatten()
        all_cf_locations.extend(cf_locations)
    # result labels
    label_time = 0.0
    pause_len = beat_length
    # pause_len = song.analysis[BEAT_DUR_KEY]
    result_full_labels = []
    prev_label = -1
    for beat_i, (song_i, beat) in enumerate(new_beats):
        if song_i == 'p':
            current_label = None
            if current_label != prev_label:
                result_full_labels.append(Label("pause", label_time))
            prev_label = None
            # label_time += pause_len
            # catch up
            label_time = max(
                (beat_i + 1) * pause_len,
                label_time)
        else:
            beat_i = np.where(np.array(beats[song_i]) == beat)[0][0]
            next_i = beat_i + 1
            current_label = music_labels[song_i][beat_i]
            if current_label != prev_label:
                if current_label is None:
                    result_full_labels.append(Label("none", label_time))
                else:
                    result_full_labels.append(Label(current_label, label_time))
            prev_label = current_label
            if (next_i >= len(beats[song_i])):
                logging.warning("USING AVG BEAT DURATION - "
                                "POTENTIALLY NOT GOOD")
                label_time += songs[song_i].analysis[BEAT_DUR_KEY]
            else:
                label_time += beats[song_i][next_i] - beat
    # result costs
    cost_time = 0.0
    result_cost = []
    for i, (song_i, b) in enumerate(new_beats):
        result_cost.append(Label(new_beats_cost[i], cost_time))
        if song_i == 'p':
            # cost_time += pause_len
            # catch up
            cost_time = max(
                (i + 1) * pause_len,
                cost_time)
        else:
            beat_i = np.where(np.array(beats[song_i]) == b)[0][0]
            next_i = beat_i + 1
            if (next_i >= len(beats[song_i])):
                cost_time += songs[song_i].analysis[BEAT_DUR_KEY]
            else:
                cost_time += beats[song_i][next_i] - b
    logging.info("Contracting pause springs")
    contracted = []
    min_contraction = 0.5
    if springs is not None:
        offset = 0.0
        for spring in springs:
            contracted_time, contracted_dur = comp.contract(
                spring.time - offset, spring.duration,
                min_contraction=min_contraction)
            if contracted_dur > 0:
                logging.info("Contracted", contracted_time,
                             "at", contracted_dur)
                # move all the volume frames back
                c_time_samps = contracted_time * segments[0].track.samplerate
                c_dur_samps = contracted_dur * segments[0].track.samplerate
                result_volume = np.r_[
                    result_volume[:c_time_samps],
                    result_volume[c_time_samps + c_dur_samps:]]
                # can't move anything EARLIER than contracted_time
                new_cf = []
                for cf in all_cf_locations:
                    if cf > contracted_time:
                        new_cf.append(
                            max(cf - contracted_dur, contracted_time))
                    else:
                        new_cf.append(cf)
                all_cf_locations = new_cf
                # for lab in result_full_labels:
                #     if lab.time > contracted_time + contracted_dur:
                #         lab.time -= contracted_dur
                first_label = True
                for lab_i, lab in enumerate(result_full_labels):
                    # is this contracted in a pause that already started?
                    # if lab_i + 1 < len(result_full_labels):
                    #     next_lab = result_full_labels[lab_i + 1]
                    #     if lab.time < contracted_time <= next_lab.time:
                    #         first_label = False
                    # if lab.time > contracted_time:
                    #     # TODO: fix this hack
                    #     if lab.name == "pause" and first_label:
                    #         pass
                    #     else:
                    #         lab.time -= contracted_dur
                    #     first_label = False
                    try:
                        if lab.time == contracted_time and\
                                result_full_labels[lab_i + 1].time -\
                                contracted_dur == lab.time:
                            logging.warning("LABEL HAS ZERO LENGTH", lab)
                    except:
                        pass
                    if lab.time > contracted_time:
                        logging.info("\tcontracting label", lab)
                        lab.time = max(
                            lab.time - contracted_dur, contracted_time)
                        # lab.time -= contracted_dur
                        logging.info("\t\tto", lab)
                new_result_cost = []
                for cost_lab in result_cost:
                    if cost_lab.time <= contracted_time:
                        # cost is before contracted time
                        new_result_cost.append(cost_lab)
                    elif contracted_time < cost_lab.time <=\
                            contracted_time + contracted_dur:
                        # cost is during contracted time
                        # remove these labels
                        if cost_lab.name > 0:
                            logging.warning("DELETING nonzero cost label",
                                            cost_lab.name, cost_lab.time)
                    else:
                        # cost is after contracted time
                        cost_lab.time = max(
                            cost_lab.time - contracted_dur, contracted_time)
                        # cost_lab.time -= contracted_dur
                        new_result_cost.append(cost_lab)
                # new_result_cost = []
                # first_label = True
                # # TODO: also this hack. bleh.
                # for cost_lab in result_cost:
                #     if cost_lab.time < contracted_time:
                #         new_result_cost.append(cost_lab)
                #     elif cost_lab.time > contracted_time and\
                #             cost_lab.time <= contracted_time +\
                #             contracted_dur:
                #         if first_label:
                #             cost_lab.time = contracted_time
                #             new_result_cost.append(cost_lab)
                #         elif cost_lab.name > 0:
                #             print "DELETING nonzero cost label:",\
                #                 cost_lab.name, cost_lab.time
                #         first_label = False
                #     elif cost_lab.time > contracted_time + contracted_dur:
                #         cost_lab.time -= contracted_dur
                #         new_result_cost.append(cost_lab)
                #         first_label = False
                result_cost = new_result_cost
                contracted.append(
                    Spring(contracted_time + offset, contracted_dur))
                offset += contracted_dur
    for fade in aseg_fade_ins:
        for spring in contracted:
            if (spring.time - 1 <
                    fade.comp_location_in_seconds <
                    spring.time + spring.duration + 1):
                result_volume[
                    fade.comp_location:
                    fade.comp_location + fade.duration] /=\
                    fade.to_array(channels=1).flatten()
                fade.fade_type = "linear"
                fade.duration_in_seconds = 2.0
                result_volume[
                    fade.comp_location:
                    fade.comp_location + fade.duration] *=\
                    fade.to_array(channels=1).flatten()
                logging.info("Changing fade at {}".format(
                    fade.comp_location_in_seconds))
    # for seg in comp.segments:
    #     print seg.comp_location, seg.duration
    # print
    # for dyn in comp.dynamics:
    #     print dyn.comp_location, dyn.duration
    # add all the segments to the composition
    # comp.add_segments(segments)
    # all_segs = []
    # for i, seg in enumerate(segments[:-1]):
    #     rawseg = comp.cross_fade(seg, segments[i + 1], cf_durations[i])
    #     all_segs.extend([seg, rawseg])
    #     # decrease volume along crossfades
    #     rawseg.track.frames *= music_volume
    # all_segs.append(segments[-1])
    # add dynamic for music
    # vol = Volume(song, 0.0,
    #              (last_seg.comp_location + last_seg.duration) /
    #              float(song.samplerate),
    #              volume)
    # comp.add_dynamic(vol)
    # cf durs?
    # durs
    return (comp, all_cf_locations, result_full_labels,
            result_cost, contracted, result_volume)
|
ValueError
|
dataset/ETHPy150Open ucbvislab/radiotool/radiotool/algorithms/retarget.py/_generate_audio
|
1,628
|
def get_remote_dir(remote_dir, local_dir='.', extension=None, verbose=False):
    if extension is not None:
        remote_dir = '{}/*.{}'.format(remote_dir, extension)
    gsutil = shutil.which('gsutil')
    cmd = [gsutil, '-m', 'cp', '-r', remote_dir, local_dir]
    if verbose:
        print(cmd)
    try:
        os.mkdir(local_dir)
    except __HOLE__ as e:
        warnings.warn("Can't create dir: {}".format(e))
    try:
        proc = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
        if verbose:
            print("Command completed: {}".format(proc.stdout))
    except Exception as e:
        warnings.warn("Can't run command: {}".format(e))
        sys.exit(1)
|
OSError
|
dataset/ETHPy150Open panoptes/POCS/scripts/gather_pec_data.py/get_remote_dir
|
1,629
|
def pytest_configure():
    from django.conf import settings
    settings.configure(
        DEBUG_PROPAGATE_EXCEPTIONS=True,
        DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3',
                               'NAME': ':memory:'}},
        SITE_ID=1,
        SECRET_KEY='not very secret in tests',
        USE_I18N=True,
        USE_L10N=True,
        STATIC_URL='/static/',
        ROOT_URLCONF='tests.urls',
        TEMPLATE_LOADERS=(),
        MIDDLEWARE_CLASSES=(),
        INSTALLED_APPS=(
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'rest_framework',
            'rest_framework_mongoengine',
            'tests',
        ),
        AUTHENTICATION_BACKENDS=(),
        PASSWORD_HASHERS=(),
    )
    from mongoengine import connect
    connect('test')
    try:
        import django
        django.setup()
    except __HOLE__:
        pass
|
AttributeError
|
dataset/ETHPy150Open umutbozkurt/django-rest-framework-mongoengine/tests/conftest.py/pytest_configure
|
1,630
|
def list(self):
    try:
        files = os.listdir(self.folder)
    except __HOLE__:
        files = []
    return files
|
IOError
|
dataset/ETHPy150Open tallstreet/Whoosh-AppEngine/src/whoosh/store.py/FileStorage.list
|
1,631
|
def get_column_name(self, index):
    """ Return the name of the column specified by the
    (zero-based) index. """
    if self.columns is not None:
        # if we have an explicit declaration then use it
        try:
            name = self.columns[index].label
        except __HOLE__:
            name = ''
    else:
        # otherwise return the index plus 1
        name = str(index + 1)
    return name
|
IndexError
|
dataset/ETHPy150Open enthought/pyface/pyface/ui/wx/grid/simple_grid_model.py/SimpleGridModel.get_column_name
|
1,632
|
def is_column_read_only(self, index):
    """ Return True if the column specified by the zero-based index
    is read-only. """
    # if there is no declaration then assume the column is not
    # read only
    read_only = False
    if self.columns is not None:
        # if we have an explicit declaration then use it
        try:
            read_only = self.columns[index].read_only
        except __HOLE__:
            pass
    return read_only
|
IndexError
|
dataset/ETHPy150Open enthought/pyface/pyface/ui/wx/grid/simple_grid_model.py/SimpleGridModel.is_column_read_only
|
1,633
|
def get_row_name(self, index):
    """ Return the name of the row specified by the
    (zero-based) index. """
    if self.rows is not None:
        # if we have an explicit declaration then use it
        try:
            name = self.rows[index].label
        except __HOLE__:
            name = str(index + 1)
    else:
        # otherwise return the index plus 1
        name = str(index + 1)
    return name
|
IndexError
|
dataset/ETHPy150Open enthought/pyface/pyface/ui/wx/grid/simple_grid_model.py/SimpleGridModel.get_row_name
|
1,634
|
def is_row_read_only(self, index):
    """ Return True if the row specified by the zero-based index
    is read-only. """
    # if there is no declaration then assume the row is not
    # read only
    read_only = False
    if self.rows is not None:
        # if we have an explicit declaration then use it
        try:
            read_only = self.rows[index].read_only
        except __HOLE__:
            pass
    return read_only
|
IndexError
|
dataset/ETHPy150Open enthought/pyface/pyface/ui/wx/grid/simple_grid_model.py/SimpleGridModel.is_row_read_only
|
1,635
|
def get_value(self, row, col):
    """ Return the value stored in the table at (row, col). """
    try:
        return self.data[row][col]
    except __HOLE__:
        pass
    return ''
|
IndexError
|
dataset/ETHPy150Open enthought/pyface/pyface/ui/wx/grid/simple_grid_model.py/SimpleGridModel.get_value
|
1,636
|
def is_cell_empty(self, row, col):
    """ Returns True if the cell at (row, col) has a None value,
    False otherwise."""
    if row >= self.get_row_count() or col >= self.get_column_count():
        empty = True
    else:
        try:
            value = self.get_value(row, col)
            empty = value is None
        except __HOLE__:
            empty = True
    return empty
|
IndexError
|
dataset/ETHPy150Open enthought/pyface/pyface/ui/wx/grid/simple_grid_model.py/SimpleGridModel.is_cell_empty
|
1,637
|
def _set_value(self, row, col, value):
    """ Sets the value of the cell at (row, col) to value.
    Raises a ValueError if the value is vetoed or the cell at
    (row, col) does not exist. """
    new_rows = 0
    try:
        self.data[row][col] = value
    except __HOLE__:
        # Add a new row.
        self.data.append([0] * self.GetNumberCols())
        self.data[row][col] = value
        new_rows = 1
    return new_rows
|
IndexError
|
dataset/ETHPy150Open enthought/pyface/pyface/ui/wx/grid/simple_grid_model.py/SimpleGridModel._set_value
|
1,638
|
def __get_data_column(self, col):
    """ Return a 1-d list of data from the column indexed by col. """
    row_count = self.get_row_count()
    coldata = []
    for row in range(row_count):
        try:
            coldata.append(self.get_value(row, col))
        except __HOLE__:
            coldata.append(None)
    return coldata
|
IndexError
|
dataset/ETHPy150Open enthought/pyface/pyface/ui/wx/grid/simple_grid_model.py/SimpleGridModel.__get_data_column
|
1,639
|
def __get_data_row(self, row):
    """ Return a 1-d list of data from the row indexed by row. """
    col_count = self.get_column_count()
    rowdata = []
    for col in range(col_count):
        try:
            rowdata.append(self.get_value(row, col))
        except __HOLE__:
            rowdata.append(None)
    return rowdata

# Private class
|
IndexError
|
dataset/ETHPy150Open enthought/pyface/pyface/ui/wx/grid/simple_grid_model.py/SimpleGridModel.__get_data_row
|
1,640
|
def __init__(self, app_title, app_banner, theme, services_service):
    self.oauth_modules = {}
    self._error_message = u''
    self.services_service = services_service
    self.app_title = app_title
    self.app_banner = app_banner
    self.theme = theme
    self.content = component.Component()  # workaround nagare weird behavior of call if on_answer registered on this component
    for source, cfg in self.config.iteritems():
        try:
            if cfg['activated'] == 'on':
                self.oauth_modules[source] = component.Component(
                    OAuthConnection(
                        oauth_providers.providers[source](
                            cfg['key'],
                            cfg['secret'],
                            SCOPES.get(source, ('profile', 'email'))
                        )
                    )
                )
        except __HOLE__:
            # source is not a provider entry
            continue
|
TypeError
|
dataset/ETHPy150Open Net-ng/kansha/kansha/authentication/oauth/forms.py/Login.__init__
|
1,641
|
def main():
PROJECT_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
output_dir = os.environ.get("TEST_COVERAGE_OUTPUT_DIR", os.path.join(PROJECT_ROOT, "coverage"))
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
import settings as sett
os.environ["DJANGO_SETTINGS_MODULE"] = sett.__name__
print >>sys.stderr, "Test coverage output will be stored in %s" % output_dir
if not os.path.exists(output_dir):
os.mkdir(output_dir)
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s', filename=os.path.join(output_dir, "tests.log"))
from django.conf import settings
# Start code coverage before anything else if necessary
use_coverage = hasattr(settings, 'COVERAGE_MODULES') and len(settings.COVERAGE_MODULES)
if use_coverage:
if len(sys.argv) > 1 and sys.argv[1] == "--branch":
sys.argv.pop(1)
cov = coverage(branch=True) # Enable super experimental branch support
else:
cov = coverage()
cov.use_cache(0) # Do not cache any of the coverage.py stuff
cov.exclude('^\s*$') # Exclude empty lines
cov.exclude('^\s*#.*$') # Exclude comment blocks
cov.exclude('^\s*(import|from)\s') # Exclude import statements
cov.start()
from django.conf import settings
from django.db.models import get_app, get_apps
# NOTE: Normally we'd use ``django.core.management.commands.test`` here but
# we want to have South's intelligence for applying database migrations or
# syncing everything directly (based on ``settings.SOUTH_TESTS_MIGRATE``).
# South's test Command is a subclass of the standard Django test Command so
# it's otherwise identical:
try:
from south.management.commands import test
except ImportError:
from django.core.management.commands import test
# Suppress debugging displays, etc. to test as real users will see it:
settings.DEBUG = False
settings.TEMPLATE_DEBUG = False
# This avoids things being cached when we attempt to regenerate them.
settings.CACHE_BACKEND = 'dummy:///'
# According to http://docs.djangoproject.com/en/1.0/topics/cache/#order-of-middleware-classes
# this should not be ahead of UpdateCacheMiddleware but to avoid this unresolved Django bug
# http://code.djangoproject.com/ticket/5176 we have to place SessionMiddleware first to avoid
# failures:
mc = list(settings.MIDDLEWARE_CLASSES)
try:
mc.remove('django.middleware.cache.FetchFromCacheMiddleware')
mc.remove('django.middleware.cache.UpdateCacheMiddleware')
except __HOLE__:
pass
settings.MIDDLEWARE_CLASSES = tuple(mc)
# If the user provided modules on the command-line we'll only test the
# listed modules. Otherwise we'll build a list of installed applications
# which we wrote and pretend the user entered that on the command-line
# instead.
test_labels = [ i for i in sys.argv[1:] if not i[0] == "-"]
if not test_labels:
test_labels = []
site_name = settings.SETTINGS_MODULE.split(".")[0]
for app in get_apps():
pkg = app.__package__ or app.__name__.replace(".models", "")
if pkg in settings.COVERAGE_MODULES:
test_labels.append(pkg)
else:
print >>sys.stderr, "Skipping tests for %s" % pkg
test_labels.sort()
print >>sys.stderr, "Automatically generated test labels for %s: %s" % (site_name, ", ".join(test_labels))
sys.argv.extend(test_labels)
settings.DEBUG = False
settings.TEMPLATE_DEBUG = False
command = test.Command()
rc = 0
sys.argv.insert(1, "test")
try:
command.run_from_argv(sys.argv)
except SystemExit, e:
rc = e.code
# Stop code coverage after tests have completed
if use_coverage:
cov.stop()
coverage_modules = filter(None, [
sys.modules[k] for k in sys.modules if any(
l for l in [ l.split(".")[0] for l in test_labels]
# Avoid issues with an empty models.py causing __package__ == None
if k.startswith(get_app(l).__package__ or get_app(l).__name__.replace(".models", ""))
)
])
if use_coverage:
# Print code metrics header
print ''
print '-------------------------------------------------------------------------'
print ' Unit Test Code Coverage Results'
print '-------------------------------------------------------------------------'
# Report code coverage metrics
cov.report(coverage_modules)
cov.html_report(coverage_modules, directory=output_dir)
cov.xml_report(coverage_modules, outfile=os.path.join(output_dir, "coverage.xml"))
# Print code metrics footer
print '-------------------------------------------------------------------------'
if rc != 0:
print >>sys.stderr, "Coverage report is not be accurate due to non-zero exit status: %d" % rc
sys.exit(rc)
|
ValueError
|
dataset/ETHPy150Open willhardy/django-seo/regressiontests/test-coverage.py/main
|
1,642
|
def get_tokens_unprocessed(self, text, stack=('root',)):
""" Split ``text`` into (tokentype, text) pairs.
Monkeypatched to store the final stack on the object itself.
The `text` parameter that gets passed is only the current line, so to
highlight things like multiline strings correctly, we need to retrieve
the state from the previous line (this is done in PygmentsHighlighter,
below), and use it to continue processing the current line.
"""
pos = 0
tokendefs = self._tokens
if hasattr(self, '_saved_state_stack'):
statestack = list(self._saved_state_stack)
else:
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if action is not None:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
pos += 1
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, u'\n'
continue
yield pos, Error, text[pos]
pos += 1
except __HOLE__:
break
self._saved_state_stack = list(statestack)
# Monkeypatch!
|
IndexError
|
dataset/ETHPy150Open enthought/pyface/pyface/ui/qt4/code_editor/pygments_highlighter.py/get_tokens_unprocessed
|
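The patched method above persists the lexer's state stack in `_saved_state_stack` so that line-at-a-time highlighting can resume multiline constructs. A minimal sketch of the intended call pattern, assuming the patch has been applied to a Pygments lexer class (stock Pygments tokenizes each call from 'root' instead of continuing):

from pygments.lexers import PythonLexer

lexer = PythonLexer()
# Line 1 opens a triple-quoted string; with the patch, the final state
# stack is saved on the lexer and seeds the call for line 2, so the
# second line is still tokenized as string content.
for line in ['s = """first line\n', 'still inside the string"""\n']:
    for pos, token, text in lexer.get_tokens_unprocessed(line):
        print((pos, token, text))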
1,643
|
def build(self, dist=False):
"""
Save the rendered output to the output file.
"""
logging.info("Building %s", self.path)
data = self.render()
# Make sure a folder for the output path exists
try:
os.makedirs(
os.path.dirname(
os.path.join(
self.site.paths["dist" if dist else "build"],
self.path
)
)
)
except __HOLE__:
pass
# Write the data to the output file
f = codecs.open(
os.path.join(
self.site.paths["dist" if dist else "build"],
self.path
),
'w',
'utf-8'
)
f.write(data)
f.close()
# Run all plugins
#self.site.pluginMethod('postBuildPage',
# self.site, self.paths['full-build'])
|
OSError
|
dataset/ETHPy150Open randomknowledge/Cactus_Refactored/cactus/page.py/Page.build
|
1,644
|
@permission_required("core.manage_shop")
def manage_delivery_times(request):
"""Dispatches to the first delivery time or to the form to add a delivery
time (if there is no delivery time yet).
"""
try:
delivery_time = DeliveryTime.objects.all()[0]
url = reverse("lfs_manage_delivery_time", kwargs={"id": delivery_time.id})
except __HOLE__:
url = reverse("lfs_manage_add_delivery_time")
return HttpResponseRedirect(url)
|
IndexError
|
dataset/ETHPy150Open diefenbach/django-lfs/lfs/manage/views/delivery_times.py/manage_delivery_times
|
1,645
|
def main():
sys.path.insert(0, os.getcwd())
# argument parser
debug, no_watch = argument_parser()
# log
make_logging(debug)
print_logo()
print "pfrock version %s " % pfrock.__version__
time.sleep(1)
# parser
config_server = PfrockConfigParser.do(pfrockfile)
# routes
if config_server:
route_list = RoutesMgr.get_routes(config_server)
# new frock
p_frock = PFrock(auto_reload=not no_watch, port=config_server.port)
else:
route_list = []
# new frock
p_frock = PFrock(auto_reload=not no_watch)
p_frock.add_watch(pfrockfile)
p_frock.add_handler(route_list)
# start
try:
p_frock.start()
except __HOLE__:
pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open knightliao/pfrock/pfrock/console_scripts.py/main
|
1,646
|
def file_corpus(filename, encoding='utf8', nltk_stop=True, stop_freq=1,
add_stop=None, decode=False, simple=False,
tokenizer=word_tokenize):
"""
`file_corpus` is a convenience function for generating Corpus
objects from a plain text corpus contained in a single string.
`file_corpus` will strip punctuation and arabic numerals outside
the range 1-29. All letters are made lowercase.
:param filename: File name of the plain text file.
:type filename: string-like
:param encoding: A string indicating the file encoding or 'detect',
in which case `chardet` is used to automatically guess the encoding.
Default is `utf8`.
:type encoding: string, optional
:param nltk_stop: If `True` then the corpus object is masked
using the NLTK English stop words. Default is `True`.
:type nltk_stop: boolean, optional
:param stop_freq: The upper bound for a word to be masked on
the basis of its collection frequency. Default is 1.
:type stop_freq: int, optional
:param add_stop: A list of stop words. Default is `None`.
:type add_stop: array-like, optional
:param decode: If `True` then unicode characters are converted to
ASCII. Default is `False`.
:type decode: boolean, optional
:returns: c : a Corpus object
Contains the tokenized corpus built from the input plain-text
corpus. Document tokens are named `documents`.
:See Also: :class:`vsm.corpus.Corpus`,
:meth:`file_tokenize`,
:meth:`vsm.corpus.util.apply_stoplist`
"""
if encoding == 'detect':
encoding = detect_encoding(filename)
try:
with open(filename, mode='r', encoding=encoding) as f:
text = f.read()
except __HOLE__:
encoding = detect_encoding(filename)
with open(filename, mode='r', encoding=encoding) as f:
text = f.read()
if decode:
text = unidecode(text)
words, tok = file_tokenize(text, simple=simple, tokenizer=tokenizer)
names, data = zip(*tok.items())
c = Corpus(words, context_data=data, context_types=names)
if nltk_stop or stop_freq or add_stop:
c = apply_stoplist(c, nltk_stop=nltk_stop,
freq=stop_freq, add_stop=add_stop)
return c
|
UnicodeDecodeError
|
dataset/ETHPy150Open inpho/vsm/vsm/extensions/corpusbuilders/corpusbuilders.py/file_corpus
|
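The open/except block above retries with a sniffed encoding when the declared one raises. A standalone sketch of that fallback, assuming `chardet` (which `detect_encoding` presumably wraps) is installed; the helper name is illustrative:

import io
import chardet

def read_text_with_fallback(filename, encoding='utf8'):
    # Try the declared encoding first; on failure, sniff the raw bytes
    # and retry with whatever chardet guesses.
    try:
        with io.open(filename, mode='r', encoding=encoding) as f:
            return f.read()
    except UnicodeDecodeError:
        with io.open(filename, mode='rb') as f:
            guess = chardet.detect(f.read())['encoding']
        with io.open(filename, mode='r', encoding=guess) as f:
            return f.read()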
1,647
|
def dir_corpus(plain_dir, chunk_name='article', encoding='utf8',
paragraphs=True, ignore=['.json','.log','.pickle'],
nltk_stop=True, stop_freq=1, add_stop=None, decode=False,
verbose=1, simple=False, tokenizer=word_tokenize):
"""
`dir_corpus` is a convenience function for generating Corpus
objects from a directory of plain text files.
`dir_corpus` will retain file-level tokenization and perform
sentence and word tokenizations. Optionally, it will provide
paragraph-level tokenizations.
It will also strip punctuation and arabic numerals outside the
range 1-29. All letters are made lowercase.
:param plain_dir: String containing directory containing a
plain-text corpus.
:type plain_dir: string-like
:param chunk_name: The name of the tokenization corresponding
to individual files. For example, if the files are pages
of a book, one might set `chunk_name` to `pages`. Default
is `articles`.
:type chunk_name: string-like, optional
:param encoding: A string indicating the file encoding or 'detect',
in which case `chardet` is used to automatically guess the encoding.
Default is `utf8`.
:type encoding: string, optional
:param paragraphs: If `True`, a paragraph-level tokenization
is included. Defaults to `True`.
:type paragraphs: boolean, optional
:param ignore: The list containing suffixes of files to be filtered.
The suffix strings are normally file types. Default is ['.json',
'.log','.pickle'].
:type ignore: list of strings, optional
:param nltk_stop: If `True` then the corpus object is masked
using the NLTK English stop words. Default is `True`.
:type nltk_stop: boolean, optional
:param stop_freq: The upper bound for a word to be masked on
the basis of its collection frequency. Default is 1.
:type stop_freq: int, optional
:param add_stop: A list of stop words. Default is `None`.
:type add_stop: array-like, optional
:param decode: If `True` then unicode characters are converted to
ASCII. Default is `False`.
:type decode: boolean, optional
:param verbose: Verbosity level. 1 prints a progress bar.
:type verbose: int, default 1
:returns: c : a Corpus object
Contains the tokenized corpus built from the input plain-text
corpus. Document tokens are named `documents`.
:See Also: :class:`vsm.corpus.Corpus`,
:meth:`dir_tokenize`,
:meth:`vsm.corpus.util.apply_stoplist`
"""
chunks = []
filenames = os.listdir(plain_dir)
filenames = filter_by_suffix(filenames, ignore)
filenames.sort()
for filename in filenames:
filename = os.path.join(plain_dir, filename)
if encoding == 'detect':
encoding = detect_encoding(filename)
try:
if decode:
with open(filename, mode='r', encoding=encoding) as f:
chunks.append(unidecode(f.read()))
else:
with open(filename, mode='r', encoding=encoding) as f:
chunks.append(f.read())
except __HOLE__:
encoding = detect_encoding(filename)
if decode:
with open(filename, mode='r', encoding=encoding) as f:
chunks.append(unidecode(f.read()))
else:
with open(filename, mode='r', encoding=encoding) as f:
chunks.append(f.read())
words, tok = dir_tokenize(chunks, filenames, chunk_name=chunk_name,
paragraphs=paragraphs, verbose=verbose,
simple=simple, tokenizer=tokenizer)
names, data = zip(*tok.items())
c = Corpus(words, context_data=data, context_types=names)
if nltk_stop or stop_freq or add_stop:
c = apply_stoplist(c, nltk_stop=nltk_stop,
freq=stop_freq, add_stop=add_stop)
return c
|
UnicodeDecodeError
|
dataset/ETHPy150Open inpho/vsm/vsm/extensions/corpusbuilders/corpusbuilders.py/dir_corpus
|
1,648
|
def coll_corpus(coll_dir, encoding='utf8', ignore=['.json', '.log', '.pickle'],
nltk_stop=True, stop_freq=1, add_stop=None,
decode=False, verbose=1, simple=False, tokenizer=word_tokenize):
"""
`coll_corpus` is a convenience function for generating Corpus
objects from a directory of plain text files.
It will also strip punctuation and arabic numerals outside the
range 1-29. All letters are made lowercase.
:param coll_dir: Directory containing a collection of books
which contain pages as plain-text files.
:type coll_dir: string-like
:param encoding: A string indicating the file encoding or 'detect',
in which case `chardet` is used to automatically guess the encoding.
Default is `utf8`.
:type encoding: string, optional
:param ignore: The list containing suffixes of files to be filtered.
The suffix strings are normally file types. Default is ['.json',
'.log','.pickle'].
:type ignore: list of strings, optional
:param nltk_stop: If `True` then the corpus object is masked
using the NLTK English stop words. Default is `True`.
:type nltk_stop: boolean, optional
:param stop_freq: The upper bound for a word to be masked on
the basis of its collection frequency. Default is 1.
:type stop_freq: int, optional
:param add_stop: A list of stop words. Default is `None`.
:type add_stop: array-like, optional
:param decode: If `True` then unicode characters are converted to
ASCII. Default is `False`.
:type decode: boolean, optional
:param verbose: Verbosity level. 1 prints a progress bar.
:type verbose: int, default 1
:returns: c : a Corpus object
Contains the tokenized corpus built from the plain-text files
in `coll_dir` corpus. Document tokens are named `documents`.
"""
books = []
book_names = os.listdir(coll_dir)
book_names = filter_by_suffix(book_names, ignore)
book_names.sort()
for book_name in book_names:
pages = []
book_path = os.path.join(coll_dir, book_name)
page_names = os.listdir(book_path)
page_names = filter_by_suffix(page_names, ignore)
page_names.sort()
for page_name in page_names:
page_file = book_name + '/' + page_name
page_name = os.path.join(book_path, page_name)
if encoding == 'detect':
encoding = detect_encoding(page_name)
try:
if decode:
with open(page_name, mode='r', encoding=encoding) as f:
pages.append((unidecode(f.read()), page_file))
else:
with open(page_name, mode='r', encoding=encoding) as f:
pages.append((f.read(), page_file))
except __HOLE__:
encoding = detect_encoding(page_name)
if decode:
with open(page_name, mode='r', encoding=encoding) as f:
pages.append((unidecode(f.read()), page_file))
else:
with open(page_name, mode='r', encoding=encoding) as f:
pages.append((f.read(), page_file))
books.append(pages)
words, tok = coll_tokenize(books, book_names, simple=simple,
tokenizer=tokenizer)
names, data = zip(*tok.items())
c = Corpus(words, context_data=data, context_types=names)
in_place_stoplist(c, nltk_stop=nltk_stop,
freq=stop_freq, add_stop=add_stop)
return c
|
UnicodeDecodeError
|
dataset/ETHPy150Open inpho/vsm/vsm/extensions/corpusbuilders/corpusbuilders.py/coll_corpus
|
1,649
|
def runtests(*test_args):
# Setup settings
if not settings.configured:
settings.configure(**SETTINGS)
# New Django 1.7 app registry setup
try:
from django import setup
setup()
except ImportError:
pass
# New Django 1.8 test runner
try:
from django.test.runner import DiscoverRunner as TestRunner
except __HOLE__:
from django.test.simple import DjangoTestSuiteRunner as TestRunner
test_runner = TestRunner(verbosity=1)
failures = test_runner.run_tests(['dpaste'])
if failures:
sys.exit(failures)
|
ImportError
|
dataset/ETHPy150Open bartTC/dpaste/runtests.py/runtests
|
1,650
|
def load_handlers(config, handler_names):
"""
Load handlers
"""
log = logging.getLogger('diamond')
handlers = []
if isinstance(handler_names, basestring):
handler_names = [handler_names]
for handler in handler_names:
log.debug('Loading Handler %s', handler)
try:
# Load Handler Class
cls = load_dynamic_class(handler, Handler)
cls_name = cls.__name__
# Initialize Handler config
handler_config = configobj.ConfigObj()
# Merge default Handler default config
handler_config.merge(config['handlers']['default'])
# Check if Handler config exists
if cls_name in config['handlers']:
# Merge Handler config section
handler_config.merge(config['handlers'][cls_name])
# Check for config file in config directory
if 'handlers_config_path' in config['server']:
configfile = os.path.join(
config['server']['handlers_config_path'],
cls_name) + '.conf'
if os.path.exists(configfile):
# Merge Collector config file
handler_config.merge(configobj.ConfigObj(configfile))
# Initialize Handler class
h = cls(handler_config)
handlers.append(h)
except (__HOLE__, SyntaxError):
# Log Error
log.warning("Failed to load handler %s. %s",
handler,
traceback.format_exc())
continue
return handlers
|
ImportError
|
dataset/ETHPy150Open BrightcoveOS/Diamond/src/diamond/utils/classes.py/load_handlers
|
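The handler config above is built up in layers: global defaults first, then the handler's own section, then an optional per-handler file, with later merges winning. A small sketch of the same layered merge with `configobj` (the keys and values here are made up):

import configobj

defaults = {'interval': '60', 'ttl': '120'}
handler_section = {'ttl': '300'}

merged = configobj.ConfigObj()
merged.merge(defaults)         # lowest priority
merged.merge(handler_section)  # per-handler section overrides defaults
print(merged['interval'])      # '60'
print(merged['ttl'])           # '300'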
1,651
|
def load_collectors(paths=None, filter=None):
"""
Scan for collectors to load from path
"""
# Initialize return value
collectors = {}
log = logging.getLogger('diamond')
if paths is None:
return
if isinstance(paths, basestring):
paths = paths.split(',')
paths = map(str.strip, paths)
load_include_path(paths)
for path in paths:
# Get a list of files in the directory, if the directory exists
if not os.path.exists(path):
raise OSError("Directory does not exist: %s" % path)
if path.endswith('tests') or path.endswith('fixtures'):
return collectors
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
subcollectors = load_collectors([fpath])
for key in subcollectors:
collectors[key] = subcollectors[key]
# Ignore anything that isn't a .py file
elif (os.path.isfile(fpath)
and len(f) > 3
and f[-3:] == '.py'
and f[0:4] != 'test'
and f[0] != '.'):
# Check filter
if filter and os.path.join(path, f) != filter:
continue
modname = f[:-3]
try:
# Import the module
mod = __import__(modname, globals(), locals(), ['*'])
except (KeyboardInterrupt, __HOLE__) as err:
log.error(
"System or keyboard interrupt "
"while loading module %s"
% modname)
if isinstance(err, SystemExit):
sys.exit(err.code)
raise KeyboardInterrupt
except:
# Log error
log.error("Failed to import module: %s. %s",
modname,
traceback.format_exc())
continue
# Find all classes defined in the module
for attrname in dir(mod):
attr = getattr(mod, attrname)
# Only attempt to load classes that are in fact classes,
# are Collectors, and are not the base Collector class
if (inspect.isclass(attr)
and issubclass(attr, Collector)
and attr != Collector):
if attrname.startswith('parent_'):
continue
# Get class name
fqcn = '.'.join([modname, attrname])
try:
# Load Collector class
cls = load_dynamic_class(fqcn, Collector)
# Add Collector class
collectors[cls.__name__] = cls
except Exception:
# Log error
log.error(
"Failed to load Collector: %s. %s",
fqcn, traceback.format_exc())
continue
# Return Collector classes
return collectors
|
SystemExit
|
dataset/ETHPy150Open BrightcoveOS/Diamond/src/diamond/utils/classes.py/load_collectors
|
1,652
|
def __init__(self, worker_count=None, *args, **kwargs):
"""Initializes a task executor.
This may take a bit to run, as the process pool is primed.
Args:
worker_count: Number of worker threads to use when building. None to use
as many processors as are available.
"""
super(MultiProcessTaskExecutor, self).__init__(*args, **kwargs)
self.worker_count = worker_count
self._waiting_deferreds = {}
try:
self._pool = multiprocessing.Pool(processes=self.worker_count,
initializer=_task_initializer)
except __HOLE__ as e: # pragma: no cover
print e
print 'Unable to initialize multiprocessing!'
if sys.platform == 'cygwin':
print ('Cygwin has known issues with multiprocessing and there\'s no '
'workaround. Boo!')
print 'Try running with -j1 to disable multiprocessing'
raise
|
OSError
|
dataset/ETHPy150Open google/anvil-build/anvil/task.py/MultiProcessTaskExecutor.__init__
|
1,653
|
def test_retry_on_failure(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
try:
fp = os.fdopen(fd, 'w+')
fp.write('''#!/bin/sh
# If stdin fails to get passed during one of the runs, make a note.
if ! grep -q foo
then
echo 'failure' > "$1"
fi
# If stdin has failed to get passed during this or a previous run, exit early.
if grep failure "$1"
then
exit 1
fi
runs="$(cat $1)"
if [ -z "$runs" ]
then
runs=0
fi
runs=$(($runs + 1))
echo $runs > "$1"
exit 1
''')
fp.close()
os.chmod(tmpfilename, 0o755)
try:
self.assertRaises(processutils.ProcessExecutionError,
utils.execute,
tmpfilename, tmpfilename2, attempts=10,
process_input=b'foo',
delay_on_retry=False)
except __HOLE__ as e:
if e.errno == errno.EACCES:
self.skipTest("Permissions error detected. "
"Are you running with a noexec /tmp?")
else:
raise
fp = open(tmpfilename2, 'r')
runs = fp.read()
fp.close()
self.assertNotEqual(runs.strip(), 'failure',
'stdin did not always get passed correctly')
runs = int(runs.strip())
self.assertEqual(10, runs,
'Ran %d times instead of 10.' % (runs,))
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
|
OSError
|
dataset/ETHPy150Open openstack/ironic-python-agent/ironic_python_agent/tests/unit/test_utils.py/ExecuteTestCase.test_retry_on_failure
|
1,654
|
def test_no_retry_on_success(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
try:
fp = os.fdopen(fd, 'w+')
fp.write('''#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
''')
fp.close()
os.chmod(tmpfilename, 0o755)
try:
utils.execute(tmpfilename,
tmpfilename2,
process_input=b'foo',
attempts=2)
except __HOLE__ as e:
if e.errno == errno.EACCES:
self.skipTest("Permissions error detected. "
"Are you running with a noexec /tmp?")
else:
raise
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
|
OSError
|
dataset/ETHPy150Open openstack/ironic-python-agent/ironic_python_agent/tests/unit/test_utils.py/ExecuteTestCase.test_no_retry_on_success
|
1,655
|
def current_knesset(self):
if self._current_knesset is None:
try:
self._current_knesset = self.get_query_set().order_by('-number')[0]
except __HOLE__:
#FIX: should document when and why this should happen
return None
return self._current_knesset
|
IndexError
|
dataset/ETHPy150Open ofri/Open-Knesset/mks/managers.py/KnessetManager.current_knesset
|
1,656
|
def callback(self, inputs, outputs, errors):
try:
for s in inputs:
if s == self.server:
try:
conn, addr = self.server.accept()
except socket.error, e:
if e[0] == 24: # ulimit maxfiles, need to raise ulimit
self._root.console_write('Maximum files reached, refused new connection.')
else:
raise socket.error, e
client = Client.Client(self._root, conn, addr, self._root.session_id)
self.addClient(client)
else:
try:
data = s.recv(1024)
if data:
if s in self.socketmap: # for threading, just need to pass this to a worker thread... remember to fix the problem for any calls to handler, and fix msg ids (handler.thread)
self.socketmap[s].Handle(data)
else:
print 'Problem, sockets are not being cleaned up properly.'
else:
raise socket.error, 'Connection closed.'
except socket.error:
self.removeSocket(s)
for s in outputs:
try:
self.socketmap[s].FlushBuffer()
except __HOLE__:
self.removeSocket(s)
except socket.error:
self.removeSocket(s)
except: self._root.error(traceback.format_exc())
|
KeyError
|
dataset/ETHPy150Open lunixbochs/uberserver/Dispatcher.py/Dispatcher.callback
|
1,657
|
def finishRemove(self, client, reason='Quit'):
if client.static or not client._protocol: return # static clients don't disconnect
client._protocol._remove(client, reason)
s = client.conn
if s in self.socketmap: del self.socketmap[s]
self.poller.unregister(s)
try:
s.shutdown(socket.SHUT_RDWR)
s.close()
except socket.error: #socket shut down by itself ;) probably got a bad file descriptor
try:
s.close()
except socket.error:
pass # in case shutdown was called but not close.
except __HOLE__:
pass
self._root.console_write('Client disconnected from %s, session ID was %s'%(client.ip_address, client.session_id))
|
AttributeError
|
dataset/ETHPy150Open lunixbochs/uberserver/Dispatcher.py/Dispatcher.finishRemove
|
1,658
|
@app.route('/_ping')
@app.route('/v1/_ping')
def ping():
headers = {
'X-Docker-Registry-Standalone': 'mirror' if mirroring.is_mirror()
else (cfg.standalone is True)
}
infos = {}
if cfg.debug:
# Versions
versions = infos['versions'] = {}
headers['X-Docker-Registry-Config'] = cfg.flavor
for name, module in sys.modules.items():
if name.startswith('_'):
continue
try:
version = module.__version__
except __HOLE__:
continue
versions[name] = version
versions['python'] = sys.version
# Hosts infos
infos['host'] = platform.uname()
infos['launch'] = sys.argv
return toolkit.response(infos, headers=headers)
|
AttributeError
|
dataset/ETHPy150Open docker/docker-registry/docker_registry/app.py/ping
|
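The version-collection loop relies on the convention that many packages expose `__version__`, and skips modules that do not via the AttributeError. The same idea in isolation:

import sys

versions = {}
for name, module in list(sys.modules.items()):
    if name.startswith('_'):
        continue
    try:
        versions[name] = module.__version__
    except AttributeError:
        # Most stdlib modules define no __version__; skip them.
        continue
versions['python'] = sys.version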
1,659
|
def load_obj_from_path(import_path, prefix=None, ld=dict()):
"""
import a python object from an import path
`import_path` - a python import path. For instance:
mypackage.module.func
or
mypackage.module.class
`prefix` (str) - a value to prepend to the import path
if it isn't already there. For instance:
load_obj_from_path('module.func', prefix='mypackage')
is the same as
load_obj_from_path('mypackage.module.func')
`ld` (dict) key:value data to pass to the logger if an error occurs
"""
if prefix and not import_path.startswith(prefix):
import_path = '.'.join([prefix, import_path])
log.debug(
'attempting to load a python object from an import path',
extra=dict(import_path=import_path, **ld))
try:
mod = importlib.import_module(import_path)
return mod # yay, we found a module. return it
except:
pass # try to extract an object from a module
try:
path, obj_name = import_path.rsplit('.', 1)
except __HOLE__:
log_raise(
("import path needs at least 1 period in your import path."
" An example import path is something like: module.obj"),
dict(import_path=import_path, **ld), InvalidImportPath)
try:
mod = importlib.import_module(path)
except ImportError:
newpath = path.replace(prefix, '', 1).lstrip('.')
log.debug(
"Could not load import path. Trying a different one",
extra=dict(oldpath=path, newpath=newpath))
path = newpath
mod = importlib.import_module(path)
try:
obj = getattr(mod, obj_name)
except AttributeError:
log_raise(
("object does not exist in given module."
" Your import path is not"
" properly defined because the given `obj_name` does not exist"),
dict(import_path=path, obj_name=obj_name, **ld),
InvalidImportPath)
return obj
|
ValueError
|
dataset/ETHPy150Open sailthru/relay/relay/util.py/load_obj_from_path
|
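Two hypothetical calls showing both code paths: a bare module import, and the rsplit into module plus trailing attribute:

# Whole module: importlib succeeds on the full path, module returned as-is.
json_mod = load_obj_from_path('json')

# Module plus attribute: 'os.path.join' fails as a module import, so the
# path is rsplit into module 'os.path' and attribute 'join'.
join = load_obj_from_path('os.path.join')
print(join('a', 'b'))  # 'a/b' on POSIX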
1,660
|
def next(itr, default=_undef):
"compat wrapper for next()"
if default is _undef:
return itr.next()
try:
return itr.next()
except __HOLE__:
return default
|
StopIteration
|
dataset/ETHPy150Open twisted/ldaptor/ldaptor/compat.py/next
|
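The wrapper mirrors the two-argument form of the builtin: without a default it propagates StopIteration, with one it swallows it.

it = iter([1])
print(next(it))          # 1
print(next(it, 'done'))  # 'done' -- iterator exhausted, default returned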
1,661
|
def tearDown(self):
os.chdir(self.startdir)
if not os.environ.get('OPENMDAO_KEEPDIRS', False):
try:
shutil.rmtree(self.tempdir)
except __HOLE__:
pass
|
OSError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO/openmdao/components/test/test_external_code.py/TestExternalCode.tearDown
|
1,662
|
def test_error_code_raise(self):
self.extcode.options['command'] = ['python', 'external_code_for_testing.py',
'external_code_output.txt', '--delay', '-3']
self.extcode.options['timeout'] = 1.0
self.extcode.options['external_input_files'] = ['external_code_for_testing.py', ]
dev_null = open(os.devnull, 'w')
self.top.setup(check=True, out_stream=dev_null)
try:
self.top.run()
except __HOLE__ as exc:
self.assertTrue('Traceback' in str(exc))
self.assertEqual(self.extcode.return_code, 1)
else:
self.fail('Expected RuntimeError')
|
RuntimeError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO/openmdao/components/test/test_external_code.py/TestExternalCode.test_error_code_raise
|
1,663
|
def test_badcmd(self):
# Set command to nonexistant path.
self.extcode.options['command'] = ['no-such-command', ]
self.top.setup(check=False)
try:
self.top.run()
except __HOLE__ as exc:
msg = "The command to be executed, 'no-such-command', cannot be found"
self.assertEqual(str(exc), msg)
self.assertEqual(self.extcode.return_code, -999999)
else:
self.fail('Expected ValueError')
|
ValueError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO/openmdao/components/test/test_external_code.py/TestExternalCode.test_badcmd
|
1,664
|
def test_nullcmd(self):
self.extcode.stdout = 'nullcmd.out'
self.extcode.stderr = STDOUT
self.top.setup(check=False)
try:
self.top.run()
except __HOLE__ as exc:
self.assertEqual(str(exc), 'Empty command list')
else:
self.fail('Expected ValueError')
finally:
if os.path.exists(self.extcode.stdout):
os.remove(self.extcode.stdout)
|
ValueError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO/openmdao/components/test/test_external_code.py/TestExternalCode.test_nullcmd
|
1,665
|
def check(self, agentConfig):
try:
os = w.Win32_PerfFormattedData_PerfOS_System()[0]
except AttributeError:
self.logger.info('Missing Win32_PerfFormattedData_PerfOS_System WMI class.'
' No process metrics will be returned.')
return
try:
cpu = w.Win32_PerfFormattedData_PerfOS_Processor(name="_Total")[0]
except __HOLE__:
self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.'
' No process metrics will be returned.')
return
if os.ProcessorQueueLength is not None:
self.save_sample('system.proc.queue_length', os.ProcessorQueueLength)
if os.Processes is not None:
self.save_sample('system.proc.count', os.Processes)
return self.get_metrics()
|
AttributeError
|
dataset/ETHPy150Open serverdensity/sd-agent/checks/system/win32.py/Processes.check
|
1,666
|
def check(self, agentConfig):
try:
os = w.Win32_OperatingSystem()[0]
except __HOLE__:
self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')
return
total = 0
free = 0
cached = 0
if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:
total = int(os.TotalVisibleMemorySize) / KB2MB
free = int(os.FreePhysicalMemory) / KB2MB
self.save_sample('system.mem.total', total)
self.save_sample('system.mem.free', free)
self.save_sample('system.mem.used', total - free)
mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]
if mem.CacheBytes is not None:
cached = int(mem.CacheBytes) / B2MB
self.save_sample('system.mem.cached', cached)
if mem.CommittedBytes is not None:
self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)
if mem.PoolPagedBytes is not None:
self.save_sample('system.mem.paged', int(mem.PoolPagedBytes) / B2MB)
if mem.PoolNonpagedBytes is not None:
self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)
usable = free + cached
self.save_sample('system.mem.usable', usable)
if total > 0:
pct_usable = float(usable) / total
self.save_sample('system.mem.pct_usable', pct_usable)
return self.get_metrics()
|
AttributeError
|
dataset/ETHPy150Open serverdensity/sd-agent/checks/system/win32.py/Memory.check
|
1,667
|
def check(self, agentConfig):
try:
cpu = w.Win32_PerfFormattedData_PerfOS_Processor()
except __HOLE__:
self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.'
' No CPU metrics will be returned.')
return
cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')
if cpu_interrupt is not None:
self.save_sample('system.cpu.interrupt', cpu_interrupt)
cpu_percent = psutil.cpu_times()
self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.NUM_CPUS)
self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.NUM_CPUS)
self.save_sample('system.cpu.system', 100 * cpu_percent.system / psutil.NUM_CPUS)
return self.get_metrics()
|
AttributeError
|
dataset/ETHPy150Open serverdensity/sd-agent/checks/system/win32.py/Cpu.check
|
1,668
|
def check(self, agentConfig):
try:
net = w.Win32_PerfFormattedData_Tcpip_NetworkInterface()
except __HOLE__:
self.logger.info('Missing Win32_PerfFormattedData_Tcpip_NetworkInterface WMI class.'
' No network metrics will be returned')
return
for iface in net:
name = self.normalize_device_name(iface.name)
if iface.BytesReceivedPerSec is not None:
self.save_sample('system.net.bytes_rcvd', iface.BytesReceivedPerSec,
device_name=name)
if iface.BytesSentPerSec is not None:
self.save_sample('system.net.bytes_sent', iface.BytesSentPerSec,
device_name=name)
return self.get_metrics()
|
AttributeError
|
dataset/ETHPy150Open serverdensity/sd-agent/checks/system/win32.py/Network.check
|
1,669
|
def check(self, agentConfig):
try:
disk = w.Win32_PerfFormattedData_PerfDisk_LogicalDisk()
except __HOLE__:
self.logger.info('Missing Win32_PerfFormattedData_PerfDisk_LogicalDisk WMI class.'
' No I/O metrics will be returned.')
return
blacklist_re = agentConfig.get('device_blacklist_re', None)
for device in disk:
name = self.normalize_device_name(device.name)
if should_ignore_disk(name, blacklist_re):
continue
if device.DiskWriteBytesPerSec is not None:
self.save_sample('system.io.wkb_s', int(device.DiskWriteBytesPerSec) / B2KB,
device_name=name)
if device.DiskWritesPerSec is not None:
self.save_sample('system.io.w_s', int(device.DiskWritesPerSec),
device_name=name)
if device.DiskReadBytesPerSec is not None:
self.save_sample('system.io.rkb_s', int(device.DiskReadBytesPerSec) / B2KB,
device_name=name)
if device.DiskReadsPerSec is not None:
self.save_sample('system.io.r_s', int(device.DiskReadsPerSec),
device_name=name)
if device.CurrentDiskQueueLength is not None:
self.save_sample('system.io.avg_q_sz', device.CurrentDiskQueueLength,
device_name=name)
return self.get_metrics()
|
AttributeError
|
dataset/ETHPy150Open serverdensity/sd-agent/checks/system/win32.py/IO.check
|
1,670
|
@property
def value(self):
"Returns a Python `date` object for the OFTDate field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return date(yy.value, mm.value, dd.value)
except (__HOLE__, OGRException):
return None
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/gis/gdal/field.py/OFTDate.value
|
1,671
|
@property
def value(self):
"Returns a Python `datetime` object for this OFTDateTime field."
# TODO: Adapt timezone information.
# See http://lists.osgeo.org/pipermail/gdal-dev/2006-February/007990.html
# The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
# 100=GMT, 104=GMT+1, 80=GMT-5, etc.
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
except (__HOLE__, OGRException):
return None
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/gis/gdal/field.py/OFTDateTime.value
|
1,672
|
@property
def value(self):
"Returns a Python `time` object for this OFTTime field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return time(hh.value, mn.value, ss.value)
except (__HOLE__, OGRException):
return None
# List fields are also just subclasses
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/gis/gdal/field.py/OFTTime.value
|
1,673
|
@register.filter
def namesake_methods(parent_klass, name):
namesakes = [m for m in parent_klass.get_methods() if m.name == name]
assert(namesakes)
# Get the methods in order of the klasses
try:
result = [next((m for m in namesakes if m.klass == parent_klass))]
namesakes.pop(namesakes.index(result[0]))
except __HOLE__:
result = []
for klass in parent_klass.get_all_ancestors():
# Move the namesakes from the methods to the results
try:
method = next((m for m in namesakes if m.klass == klass))
namesakes.pop(namesakes.index(method))
result.append(method)
except StopIteration:
pass
assert(not namesakes)
return result
|
StopIteration
|
dataset/ETHPy150Open refreshoxford/django-cbv-inspector/cbv/templatetags/cbv_tags.py/namesake_methods
|
1,674
|
@register.inclusion_tag('cbv/includes/nav.html')
def nav(version, module=None, klass=None):
other_versions = ProjectVersion.objects.filter(project=version.project).exclude(pk=version.pk)
context = {
'version': version,
}
if module:
context['this_module'] = module
if klass:
context['this_klass'] = klass
other_versions_of_klass = Klass.objects.filter(
name=klass.name,
module__project_version__in=other_versions,
)
other_versions_of_klass_dict = {x.module.project_version: x for x in other_versions_of_klass}
for other_version in other_versions:
try:
other_klass = other_versions_of_klass_dict[other_version]
except __HOLE__:
pass
else:
other_version.url = other_klass.get_absolute_url()
context['other_versions'] = other_versions
return context
|
KeyError
|
dataset/ETHPy150Open refreshoxford/django-cbv-inspector/cbv/templatetags/cbv_tags.py/nav
|
1,675
|
def run(self, channel, name, icon_emoji, message, *args, **kwargs):
try:
# This URL looks like this:
# http://hooks.slack.com/services/T024TTTTT/BBB72BBL/AZAAA9u0pA4ad666eMgbi555
# (not a real api url, don't try it :)
#
# You can get this url by adding an incoming webhook:
# https://nextdoor.slack.com/apps/new/A0F7XDUAZ-incoming-webhooks
url = os.environ['SIMPLE_SCHEDULER_SLACK_URL']
except __HOLE__:
logger.error('Environment variable SIMPLE_SCHEDULER_SLACK_URL is not specified. '
'So we cannot send slack message.')
raise KeyError('You have to set Environment variable SIMPLE_SCHEDULER_SLACK_URL first.')
else:
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=self.MAX_RETRIES)
session.mount('http://', adapter)
session.mount('https://', adapter)
message += ' // `sent from %s`' % socket.gethostname()
payload = {
'channel': channel,
'username': name,
'text': message,
'link_names': 1,
"mrkdwn": 1,
'icon_emoji': icon_emoji
}
session.request('POST', url, timeout=self.TIMEOUT,
headers={'content-type': 'application/json'},
data=json.dumps(payload))
|
KeyError
|
dataset/ETHPy150Open Nextdoor/ndscheduler/simple_scheduler/jobs/slack_job.py/SlackJob.run
|
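A sketch of the POST the job performs once SIMPLE_SCHEDULER_SLACK_URL is set; the channel, name and message values are made up, and the URL below is a placeholder rather than a real webhook:

import json
import os
import socket

import requests

os.environ.setdefault('SIMPLE_SCHEDULER_SLACK_URL',
                      'https://hooks.slack.com/services/T000/B000/XXXX')
url = os.environ['SIMPLE_SCHEDULER_SLACK_URL']
payload = {
    'channel': '#ops',
    'username': 'scheduler-bot',
    'text': 'nightly job finished // `sent from %s`' % socket.gethostname(),
    'link_names': 1,
    'mrkdwn': 1,
    'icon_emoji': ':robot_face:',
}
requests.post(url, timeout=10,
              headers={'content-type': 'application/json'},
              data=json.dumps(payload))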
1,676
|
def setup(hass, config): # pylint: disable=too-many-locals
"""Setup the MySensors component."""
if not validate_config(config,
{DOMAIN: [CONF_GATEWAYS]},
_LOGGER):
return False
if not all(CONF_DEVICE in gateway
for gateway in config[DOMAIN][CONF_GATEWAYS]):
_LOGGER.error('Missing required configuration items '
'in %s: %s', DOMAIN, CONF_DEVICE)
return False
import mysensors.mysensors as mysensors
version = str(config[DOMAIN].get(CONF_VERSION, DEFAULT_VERSION))
is_metric = (hass.config.temperature_unit == TEMP_CELSIUS)
persistence = config[DOMAIN].get(CONF_PERSISTENCE, True)
def setup_gateway(device, persistence_file, baud_rate, tcp_port):
"""Return gateway after setup of the gateway."""
try:
socket.inet_aton(device)
# valid ip address
gateway = mysensors.TCPGateway(
device, event_callback=None, persistence=persistence,
persistence_file=persistence_file, protocol_version=version,
port=tcp_port)
except __HOLE__:
# invalid ip address
gateway = mysensors.SerialGateway(
device, event_callback=None, persistence=persistence,
persistence_file=persistence_file, protocol_version=version,
baud=baud_rate)
gateway.metric = is_metric
gateway.debug = config[DOMAIN].get(CONF_DEBUG, False)
optimistic = config[DOMAIN].get(CONF_OPTIMISTIC, False)
gateway = GatewayWrapper(gateway, version, optimistic)
# pylint: disable=attribute-defined-outside-init
gateway.event_callback = gateway.callback_factory()
def gw_start(event):
"""Callback to trigger start of gateway and any persistence."""
gateway.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP,
lambda event: gateway.stop())
if persistence:
for node_id in gateway.sensors:
gateway.event_callback('persistence', node_id)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, gw_start)
return gateway
# Setup all devices from config
global GATEWAYS
GATEWAYS = {}
conf_gateways = config[DOMAIN][CONF_GATEWAYS]
if isinstance(conf_gateways, dict):
conf_gateways = [conf_gateways]
for index, gway in enumerate(conf_gateways):
device = gway[CONF_DEVICE]
persistence_file = gway.get(
CONF_PERSISTENCE_FILE,
hass.config.path('mysensors{}.pickle'.format(index + 1)))
baud_rate = gway.get(CONF_BAUD_RATE, DEFAULT_BAUD_RATE)
tcp_port = gway.get(CONF_TCP_PORT, DEFAULT_TCP_PORT)
GATEWAYS[device] = setup_gateway(
device, persistence_file, baud_rate, tcp_port)
for (component, discovery_service) in DISCOVERY_COMPONENTS:
# Ensure component is loaded
if not bootstrap.setup_component(hass, component, config):
return False
# Fire discovery event
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: discovery_service,
ATTR_DISCOVERED: {}})
return True
|
OSError
|
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/mysensors.py/setup
|
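`socket.inet_aton` doubles as the IPv4 test here: it raises on anything that is not a dotted address, which routes serial device paths to the serial gateway. The check in isolation (Python 3, where socket.error is an alias of OSError):

import socket

def looks_like_ipv4(device):
    # inet_aton parses dotted IPv4 notation only; a device path like
    # /dev/ttyUSB0 raises OSError instead.
    try:
        socket.inet_aton(device)
        return True
    except OSError:
        return False

print(looks_like_ipv4('192.168.1.20'))  # True  -> TCP gateway
print(looks_like_ipv4('/dev/ttyUSB0'))  # False -> serial gateway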
1,677
|
def getObjectType(
configId,
objectTypeRetriever=[
_post7objectTypeRetriever, _pre7objectTypeRetriever
]
):
try:
typeName = objectTypeRetriever[0](str(configId))
except __HOLE__, ae:
if 'getObjectType' not in ae.args:
raise
logger.warning(
'default method of retrieving object types has failed, '
'falling back to reflection-based mechanism'
)
# from now on, the default type retriever will be the one using
# reflection
objectTypeRetriever[0] = objectTypeRetriever[1]
typeName = getObjectType(configId)
getTypeInfo(typeName)
return typeName
|
AttributeError
|
dataset/ETHPy150Open WDR/WDR/lib/common/wdr/config.py/getObjectType
|
1,678
|
def test_not_implemented_plugin(self):
auth = authinfo.ApiAuth("username", "password")
hostname = "hostname"
plugin = dnsupdater.DnsUpdaterPlugin(auth, hostname)
try:
plugin.update_dns("10.1.1.1")
self.fail("Not implemented plugin should fail: "
"'NoneType' object has no attribute 'format'")
except __HOLE__ as e:
self.assertEqual(str(e), "'NoneType' object has no attribute "
"'format'",
"_get_base_url() should return 'NoneType'")
except Exception as e:
self.fail("_get_base_url() should return 'AttributeError'. "
"Got %s:%s" % (type(e).__name__, e))
|
AttributeError
|
dataset/ETHPy150Open pv8/noipy/test/test_noipy.py/GeneralTest.test_not_implemented_plugin
|
1,679
|
def is_subclass_at_all(cls, class_info):
"""Return whether ``cls`` is a subclass of ``class_info``.
Even if ``cls`` is not a class, don't crash. Return False instead.
"""
try:
return issubclass(cls, class_info)
except __HOLE__:
return False
|
TypeError
|
dataset/ETHPy150Open django-nose/django-nose/django_nose/utils.py/is_subclass_at_all
|
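issubclass raises TypeError when its first argument is not a class; the helper folds that into a plain False:

print(is_subclass_at_all(bool, int))  # True  -- bool subclasses int
print(is_subclass_at_all(42, int))    # False -- 42 is not a class, no crash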
1,680
|
def get_db_prep_lookup(self, lookup_type, value):
# If we are doing a lookup on a Related Field, we must be
# comparing object instances. The value should be the PK of value,
# not value itself.
def pk_trace(value):
# Value may be a primary key, or an object held in a relation.
# If it is an object, then we need to get the primary key value for
# that object. In certain conditions (especially one-to-one relations),
# the primary key may itself be an object - so we need to keep drilling
# down until we hit a value that can be used for a comparison.
v = value
try:
while True:
v = getattr(v, v._meta.pk.name)
except __HOLE__:
pass
return v
if lookup_type == 'exact':
return [pk_trace(value)]
if lookup_type == 'in':
return [pk_trace(v) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError, "Related Field has invalid lookup: %s" % lookup_type
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/related.py/RelatedField.get_db_prep_lookup
|
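pk_trace keeps dereferencing the primary-key attribute until it hits a plain value, which matters when the pk is itself a relation. A Django-free stand-in with hypothetical stub objects:

class _Meta(object):
    def __init__(self, pk_name):
        self.pk = type('PK', (), {'name': pk_name})()

class Stub(object):
    def __init__(self, pk_name, pk_value):
        self._meta = _Meta(pk_name)
        setattr(self, pk_name, pk_value)

inner = Stub('id', 7)          # ordinary integer pk
outer = Stub('inner', inner)   # pk is itself an object (one-to-one style)

v = outer
try:
    while True:
        v = getattr(v, v._meta.pk.name)
except AttributeError:
    pass
print(v)  # 7 -- drilled through both levels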
1,681
|
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
# Set the value of the related field
setattr(value, self.related.field.rel.get_related_field().attname, instance)
# Clear the cache, if it exists
try:
delattr(value, self.related.field.get_cache_name())
except __HOLE__:
pass
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/related.py/SingleRelatedObjectDescriptor.__set__
|
1,682
|
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.field.name
cache_name = self.field.get_cache_name()
try:
return getattr(instance, cache_name)
except __HOLE__:
val = getattr(instance, self.field.attname)
if val is None:
# If NULL is an allowed value, return it.
if self.field.null:
return None
raise self.field.rel.to.DoesNotExist
other_field = self.field.rel.get_related_field()
if other_field.rel:
params = {'%s__pk' % self.field.rel.field_name: val}
else:
params = {'%s__exact' % self.field.rel.field_name: val}
rel_obj = self.field.rel.to._default_manager.get(**params)
setattr(instance, cache_name, rel_obj)
return rel_obj
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/related.py/ReverseSingleRelatedObjectDescriptor.__get__
|
1,683
|
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self._field.name
# Set the value of the related field
try:
val = getattr(value, self.field.rel.get_related_field().attname)
except __HOLE__:
val = None
setattr(instance, self.field.attname, val)
# Clear the cache, if it exists
try:
delattr(instance, self.field.get_cache_name())
except AttributeError:
pass
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/related.py/ReverseSingleRelatedObjectDescriptor.__set__
|
1,684
|
def __init__(self, to, to_field=None, **kwargs):
try:
to_name = to._meta.object_name.lower()
except __HOLE__: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "ForeignKey(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
to_field = to_field or to._meta.pk.name
kwargs['verbose_name'] = kwargs.get('verbose_name', '')
if kwargs.has_key('edit_inline_type'):
import warnings
warnings.warn("edit_inline_type is deprecated. Use edit_inline instead.")
kwargs['edit_inline'] = kwargs.pop('edit_inline_type')
kwargs['rel'] = ManyToOneRel(to, to_field,
num_in_admin=kwargs.pop('num_in_admin', 3),
min_num_in_admin=kwargs.pop('min_num_in_admin', None),
max_num_in_admin=kwargs.pop('max_num_in_admin', None),
num_extra_on_change=kwargs.pop('num_extra_on_change', 1),
edit_inline=kwargs.pop('edit_inline', False),
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
lookup_overrides=kwargs.pop('lookup_overrides', None),
raw_id_admin=kwargs.pop('raw_id_admin', False))
Field.__init__(self, **kwargs)
self.db_index = True
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/related.py/ForeignKey.__init__
|
1,685
|
def __init__(self, to, to_field=None, **kwargs):
try:
to_name = to._meta.object_name.lower()
except __HOLE__: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "OneToOneField(%r) is invalid. First parameter to OneToOneField must be either a model, a model name, or the string %r" % (to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
to_field = to_field or to._meta.pk.name
kwargs['verbose_name'] = kwargs.get('verbose_name', '')
if kwargs.has_key('edit_inline_type'):
import warnings
warnings.warn("edit_inline_type is deprecated. Use edit_inline instead.")
kwargs['edit_inline'] = kwargs.pop('edit_inline_type')
kwargs['rel'] = OneToOneRel(to, to_field,
num_in_admin=kwargs.pop('num_in_admin', 0),
edit_inline=kwargs.pop('edit_inline', False),
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
lookup_overrides=kwargs.pop('lookup_overrides', None),
raw_id_admin=kwargs.pop('raw_id_admin', False))
kwargs['primary_key'] = True
IntegerField.__init__(self, **kwargs)
self.db_index = True
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/related.py/OneToOneField.__init__
|
1,686
|
def isValidIDList(self, field_data, all_data):
"Validates that the value is a valid list of foreign keys"
mod = self.rel.to
try:
pks = map(int, field_data.split(','))
except __HOLE__:
# the CommaSeparatedIntegerField validator will catch this error
return
objects = mod._default_manager.in_bulk(pks)
if len(objects) != len(pks):
badkeys = [k for k in pks if k not in objects]
raise validators.ValidationError, ngettext("Please enter valid %(self)s IDs. The value %(value)r is invalid.",
"Please enter valid %(self)s IDs. The values %(value)r are invalid.", len(badkeys)) % {
'self': self.verbose_name,
'value': len(badkeys) == 1 and badkeys[0] or tuple(badkeys),
}
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/related.py/ManyToManyField.isValidIDList
|
1,687
|
def __init__(self, to, field_name, num_in_admin=3, min_num_in_admin=None,
max_num_in_admin=None, num_extra_on_change=1, edit_inline=False,
related_name=None, limit_choices_to=None, lookup_overrides=None, raw_id_admin=False):
try:
to._meta
except __HOLE__: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
self.to, self.field_name = to, field_name
self.num_in_admin, self.edit_inline = num_in_admin, edit_inline
self.min_num_in_admin, self.max_num_in_admin = min_num_in_admin, max_num_in_admin
self.num_extra_on_change, self.related_name = num_extra_on_change, related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.lookup_overrides = lookup_overrides or {}
self.raw_id_admin = raw_id_admin
self.multiple = True
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/related.py/ManyToOneRel.__init__
|
1,688
|
def prepare(args, settings, parser):
settings['resource'] = BytesIO(b'')
settings['survey'] = True
settings['receive_postbacks'] = False
def get_ping_command(settings, unwrapped=None):
params = OrderedDict()
if settings['require_knock']:
params['knock'] = settings['knock']
if args.message:
params['message'] = args.message
if args.return_code:
params['return'] = args.return_code
return "curl -LSs 'http{ssl}://{host}:{port}/r{query_params}'".format(
ssl="s" if settings['ssl'] is not False else "",
host=settings['display_host'],
port=settings['display_port'],
query_params="?"+urllib.parse.urlencode(
params) if len(params)>0 else ""
)
settings['get_curlbomb_command'] = get_ping_command
def get_callback(request):
"""Callback that server runs on ping from client
request - the tornado.web.HTTPRequest from the client
"""
# Handle return code parameter:
return_code = request.arguments.get('return', [b"0"])[0]
message = request.arguments.get('message', [b""])[0].decode("utf-8")
try:
return_code = int(return_code)
except ValueError:
log.warn("Client ping specified non-integer return code: {}".format(
return_code))
return_code = 0
# Only change the return code if it's not 0.
# This way multiple clients can ping and
# all must be successful to return 0:
if return_code != 0 and not args.return_success:
settings['return_code'] = return_code
# Handle notification command (-c)
if args.command is not None:
command = args.command.format(
return_code=return_code, message='"{}"'.format(message.replace(r'"',r'\"')))
log.info("Running notification command: {}".format(shlex.split(command)))
out = subprocess.Popen(shlex.split(command),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
log.info("notification out: {}".format(out))
# Handle desktop notification (-n)
if args.notify:
try:
import notify2
notify2.init("curlbomb")
except __HOLE__:
log.error("Desktop notifications are disabled. Please install "
"python-notify2 package to enable.")
else:
notify2.Notification("Ping", message).show()
settings['get_callback'] = get_callback
|
ImportError
|
dataset/ETHPy150Open EnigmaCurry/curlbomb/curlbomb/ping.py/prepare
|
1,689
|
def run(self, edit):
view = self.view
sels = view.sel()
sel = sels[0]
if 'orgmode.link.internal' not in view.scope_name(sel.end()):
return
region = view.extract_scope(sel.end())
content = view.substr(region).strip()
if content.startswith('{{') and content.endswith('}}'):
content = '* %s' % content[2:-2]
found = self.view.find(content, region.end(), sublime.LITERAL)
if not found: # Try wrapping around buffer.
found = self.view.find(content, 0, sublime.LITERAL)
same = region.a == found.a and region.b == found.b
if not found or same:
sublime.status_message('No sibling found for: %s' % content)
return
found = view.extract_scope(found.begin())
sels.clear()
sels.add(sublime.Region(found.begin()))
try:
import show_at_center_and_blink
view.run_command('show_at_center_and_blink')
except __HOLE__:
view.show_at_center(found)
|
ImportError
|
dataset/ETHPy150Open danielmagnussons/orgmode/orgmode.py/OrgmodeCycleInternalLinkCommand.run
|
1,690
|
def run(self, commands=None, default_command=None, context=None):
"""
Context: A dict of namespaces as the key, and their objects as the
value. Used to easily inject code into the shell's runtime env.
"""
if commands:
self._commands.update(commands)
# HACK: Overriding the old shell isn't cool.
# Should do it by default.
from alchemist.commands import Shell
self._commands['shell'] = Shell(context=context)
if default_command is not None and len(sys.argv) == 1:
sys.argv.append(default_command)
try:
result = self.handle(sys.argv[0], sys.argv[1:])
except __HOLE__ as e:
result = e.code
sys.exit(result or 0)
# Monkey path flask-script (until it can better handle normal WSGI
# applications)
|
SystemExit
|
dataset/ETHPy150Open concordusapps/alchemist/alchemist/management.py/Manager.run
|
1,691
|
def _parse_author(commit_name, value, type):
try:
(real_name, rest) = value.split(' <', 1)
except ValueError:
msg = 'error parsing %s: no email address found' % (type,)
raise BadCommitError(commit_name, msg)
try:
(email, rest) = rest.split('> ', 1)
except ValueError:
msg = 'error parsing %s: unterminated email address' % (type,)
raise BadCommitError(commit_name, msg)
try:
timestamp = _parse_timestamp(rest)
except __HOLE__:
msg = 'error parsing %s: malformatted timestamp' % (type,)
raise BadCommitError(commit_name, msg)
return AuthorInfo(real_name, email, timestamp)
|
ValueError
|
dataset/ETHPy150Open facebookarchive/git-review/src/gitreview/git/commit.py/_parse_author
|
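Author lines follow git's 'Name <email> timestamp tz' layout, so the parser peels the real name, then the email, and hands the remainder to _parse_timestamp. The splits on a sample line:

value = 'Ada Lovelace <ada@example.com> 1400000000 +0000'
real_name, rest = value.split(' <', 1)
email, rest = rest.split('> ', 1)
print(real_name)  # 'Ada Lovelace'
print(email)      # 'ada@example.com'
print(rest)       # '1400000000 +0000' -- passed to _parse_timestamp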
1,692
|
def _parse_header(commit_name, header):
tree = None
parents = []
author = None
committer = None
# We accept the headers in any order.
# git itself requires them to be tree, parents, author, committer
for line in header.split('\n'):
try:
(name, value) = line.split(' ', 1)
except __HOLE__:
msg = 'bad commit header line %r' % (line)
raise BadCommitError(commit_name, msg)
if name == 'tree':
if tree:
msg = 'multiple trees specified'
raise BadCommitError(commit_name, msg)
tree = value
elif name == 'parent':
parents.append(value)
elif name == 'author':
if author:
msg = 'multiple authors specified'
raise BadCommitError(commit_name, msg)
author = _parse_author(commit_name, value, name)
elif name == 'committer':
if committer:
msg = 'multiple committers specified'
raise BadCommitError(commit_name, msg)
committer = _parse_author(commit_name, value, name)
else:
msg = 'unknown header field %r' % (name,)
raise BadCommitError(commit_name, msg)
if not tree:
msg = 'no tree specified'
raise BadCommitError(commit_name, msg)
if not author:
msg = 'no author specified'
raise BadCommitError(commit_name, msg)
if not committer:
msg = 'no committer specified'
raise BadCommitError(commit_name, msg)
return (tree, parents, author, committer)
|
ValueError
|
dataset/ETHPy150Open facebookarchive/git-review/src/gitreview/git/commit.py/_parse_header
|
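For reference, the header text _parse_header walks looks like the output of 'git cat-file commit' with the body stripped; the per-line split is what turns a malformed line into a ValueError. The hashes below are illustrative, not real objects:

header = (
    'tree 9bedf67800b2923982bdf60c89c57ce6fd2d9a1c\n'
    'parent 3fc0ea0a0c5ffe41e2d787ee3794fcd83b303eba\n'
    'author A U Thor <author@example.com> 1112911993 -0700\n'
    'committer C O Mitter <committer@example.com> 1112912053 -0700'
)
for line in header.split('\n'):
    name, value = line.split(' ', 1)  # a one-word line raises ValueError here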
1,693
|
def get_commit(repo, name):
# Handle the special internal commit names COMMIT_INDEX and COMMIT_WD
if name == constants.COMMIT_INDEX:
return get_index_commit(repo)
elif name == constants.COMMIT_WD:
return get_working_dir_commit(repo)
# Get the SHA1 value for this commit.
sha1 = repo.getCommitSha1(name)
# Run "git cat-file commit <name>"
cmd = ['cat-file', 'commit', str(name)]
out = repo.runSimpleGitCmd(cmd)
# Split the header and body
try:
(header, body) = out.split('\n\n', 1)
except __HOLE__:
# split() resulted in just one value
# Treat it as headers, with an empty body
header = out
if header and header[-1] == '\n':
header = header[:-1]
body = ''
# Parse the header
(tree, parents, author, committer) = _parse_header(name, header)
return Commit(repo, sha1, tree, parents, author, committer, body)
|
ValueError
|
dataset/ETHPy150Open facebookarchive/git-review/src/gitreview/git/commit.py/get_commit
|
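The ValueError branch above exists because a commit with an empty message has no blank line separating header from body, so split('\n\n', 1) returns a single element. A self-contained illustration:

with_body = 'tree abc\nauthor x\n\nFix the frobnicator\n'
header, body = with_body.split('\n\n', 1)   # two parts, no exception
no_body = 'tree abc\nauthor x\n'
try:
    header, body = no_body.split('\n\n', 1)
except ValueError:                           # only one part came back
    header, body = no_body.rstrip('\n'), ''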
1,694
|
def fetch_val_for_key(key, delete_key=False):
"""Return the overriding config value for a key.
A successful search returns a string value.
    An unsuccessful search raises a KeyError.
    The (decreasing) priority order is:
    - THEANO_FLAGS
    - ~/.theanorc
"""
# first try to find it in the FLAGS
try:
if delete_key:
return THEANO_FLAGS_DICT.pop(key)
return THEANO_FLAGS_DICT[key]
except __HOLE__:
pass
# next try to find it in the config file
# config file keys can be of form option, or section.option
key_tokens = key.rsplit('.', 1)
if len(key_tokens) > 2:
raise KeyError(key)
if len(key_tokens) == 2:
section, option = key_tokens
else:
section, option = 'global', key
try:
try:
return theano_cfg.get(section, option)
except ConfigParser.InterpolationError:
return theano_raw_cfg.get(section, option)
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
raise KeyError(key)
|
KeyError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/configparser.py/fetch_val_for_key
|
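A simplified model of the lookup order fetch_val_for_key implements; the dict and mapping below stand in for Theano's THEANO_FLAGS_DICT and parsed rc file, and the section handling is reduced to its essentials:

flags = {'device': 'gpu0'}               # from the THEANO_FLAGS env var
rcfile = {('global', 'device'): 'cpu'}   # from ~/.theanorc

def lookup(key):
    if key in flags:                     # env var has top priority
        return flags[key]
    section, _, option = key.rpartition('.')
    try:
        return rcfile[(section or 'global', option)]
    except KeyError:
        raise KeyError(key)

assert lookup('device') == 'gpu0'        # THEANO_FLAGS wins over the rc file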
1,695
|
def AddConfigVar(name, doc, configparam, root=config, in_c_key=True):
"""Add a new variable to theano.config
    :type name: string of the form "[section0.[section1.[etc]]].option"
:param name: the full name for this configuration variable.
:type doc: string
:param doc: What does this variable specify?
:type configparam: ConfigParam instance
:param configparam: an object for getting and setting this configuration
parameter
:type root: object
:param root: used for recursive calls -- do not provide an argument for
this parameter.
:type in_c_key: boolean
:param in_c_key: If True, then whenever this config option changes, the
key associated to compiled C modules also changes, i.e. it may trigger a
compilation of these modules (this compilation will only be partial if it
turns out that the generated C code is unchanged). Set this option to False
only if you are confident this option should not affect C code compilation.
:returns: None
"""
# This method also performs some of the work of initializing ConfigParam
# instances
if root is config:
# only set the name in the first call, not the recursive ones
configparam.fullname = name
sections = name.split('.')
if len(sections) > 1:
# set up a subobject
if not hasattr(root, sections[0]):
# every internal node in the config tree is an instance of its own
# unique class
class SubObj(object):
_i_am_a_config_class = True
setattr(root.__class__, sections[0], SubObj())
newroot = getattr(root, sections[0])
if (not getattr(newroot, '_i_am_a_config_class', False) or
isinstance(newroot, type)):
raise TypeError(
'Internal config nodes must be config class instances',
newroot)
return AddConfigVar('.'.join(sections[1:]), doc, configparam,
root=newroot, in_c_key=in_c_key)
else:
if hasattr(root, name):
raise AttributeError('This name is already taken',
configparam.fullname)
configparam.doc = doc
configparam.in_c_key = in_c_key
# Trigger a read of the value from config files and env vars
    # This allows filtering out invalid values supplied by the user.
if not callable(configparam.default):
configparam.__get__(root, type(root), delete_key=True)
else:
        # We do not want to evaluate the default value now when it is
        # a callable; defer it until the value is actually read.
try:
fetch_val_for_key(configparam.fullname)
# The user provided a value, filter it now.
configparam.__get__(root, type(root), delete_key=True)
except __HOLE__:
pass
setattr(root.__class__, sections[0], configparam)
_config_var_list.append(configparam)
|
KeyError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/configparser.py/AddConfigVar
|
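A toy version of the dotted-name recursion in AddConfigVar: each 'section.' prefix becomes a node hung off the root, and the final component holds the value. Unlike the real code, which sets attributes on root.__class__, this sketch sets them on the instance:

class _Node(object):
    pass

def add_var(root, name, value):
    head, _, rest = name.partition('.')
    if rest:
        if not hasattr(root, head):
            setattr(root, head, _Node())
        add_var(getattr(root, head), rest, value)
    else:
        setattr(root, head, value)

cfg = _Node()
add_var(cfg, 'scan.allow_gc', True)
assert cfg.scan.allow_gc is True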
1,696
|
def __get__(self, cls, type_, delete_key=False):
if cls is None:
return self
if not hasattr(self, 'val'):
try:
val_str = fetch_val_for_key(self.fullname,
delete_key=delete_key)
self.is_default = False
except __HOLE__:
if callable(self.default):
val_str = self.default()
else:
val_str = self.default
self.__set__(cls, val_str)
# print "RVAL", self.val
return self.val
|
KeyError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/configparser.py/ConfigParam.__get__
|
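Stripped of the Theano plumbing, __get__ is a cache-on-first-read descriptor: the first access resolves the value (env/config or default) and pins it in self.val; later accesses return the cached value. A bare-bones equivalent, with the external lookup replaced by a plain default:

class LazyParam(object):
    def __init__(self, default):
        self.default = default
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self   # accessed on the class, not an instance
        if not hasattr(self, 'val'):
            # First read: resolve and cache. The cache lives on the
            # descriptor itself, so it is shared, as in the original.
            self.val = self.default() if callable(self.default) else self.default
        return self.val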
1,697
|
@expose("read-char", [default(values.W_InputPort, None)], simple=False)
def read_char(w_port, env, cont):
try:
return do_read_one(w_port, False, False, env, cont)
except __HOLE__:
raise SchemeException("read-char: string is not a well-formed UTF-8 encoding")
|
UnicodeDecodeError
|
dataset/ETHPy150Open samth/pycket/pycket/prims/input_output.py/read_char
|
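The UnicodeDecodeError these port primitives translate into a SchemeException comes from decoding a truncated multi-byte sequence; read-byte and peek-char below guard against it the same way. A two-line demonstration (0xC3 opens a two-byte UTF-8 character):

try:
    b'\xc3'.decode('utf-8')   # first byte of a two-byte sequence, cut short
except UnicodeDecodeError:
    print('string is not a well-formed UTF-8 encoding')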
1,698
|
@expose("read-byte", [default(values.W_InputPort, None)], simple=False)
def read_byte(w_port, env, cont):
try:
return do_read_one(w_port, True, False, env, cont)
except __HOLE__:
raise SchemeException("read-byte: string is not a well-formed UTF-8 encoding")
|
UnicodeDecodeError
|
dataset/ETHPy150Open samth/pycket/pycket/prims/input_output.py/read_byte
|
1,699
|
@expose("peek-char", [default(values.W_InputPort, None),
default(values.W_Fixnum, values.W_Fixnum.ZERO)],
simple=False)
def peek_char(w_port, w_skip, env, cont):
try:
return do_peek(w_port, False, w_skip.value, env, cont)
except __HOLE__:
raise SchemeException("peek-char: string is not a well-formed UTF-8 encoding")
|
UnicodeDecodeError
|
dataset/ETHPy150Open samth/pycket/pycket/prims/input_output.py/peek_char
|