| Unnamed: 0 (int64, 0-10k) | function (string, 79-138k chars) | label (string, 20 classes) | info (string, 42-261 chars) |
|---|---|---|---|
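Each record below follows the four-column layout above: a running row index, a Python function whose exception-handler type has been masked as __HOLE__, the masked exception class as the label, and the function's source path in the ETHPy150 corpus. As a minimal sketch (assuming the table has been exported to a local CSV with these exact column names; the file name is hypothetical), the label distribution can be inspected like this:

import pandas as pd

# Hypothetical local export of this table; adjust the path to your copy.
df = pd.read_csv("ethpy150_holes.csv")

# Each row pairs a function containing the __HOLE__ placeholder with the
# exception class that belongs in that position.
print(df.columns.tolist())           # ['Unnamed: 0', 'function', 'label', 'info']
print(df["label"].value_counts())    # frequency of each of the 20 exception classes
print(df.loc[0, "function"][:200])   # preview one masked function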
7,100
|
def last_run(self):
    "Returns the last completed run or None"
    try:
        query = RunInfoHistory.objects.filter(subject=self)
        return query.order_by('-completed')[0]
    except __HOLE__:
        return None
|
IndexError
|
dataset/ETHPy150Open seantis/seantis-questionnaire/questionnaire/models.py/Subject.last_run
|
7,101
|
def is_last(self):
    try:
        return self.questionnaire.questionsets()[-1] == self
    except __HOLE__:
        # should only occur if not yet saved
        return True
|
NameError
|
dataset/ETHPy150Open seantis/seantis-questionnaire/questionnaire/models.py/QuestionSet.is_last
|
7,102
|
def is_first(self):
    try:
        return self.questionnaire.questionsets()[0] == self
    except __HOLE__:
        # should only occur if not yet saved
        return True
|
NameError
|
dataset/ETHPy150Open seantis/seantis-questionnaire/questionnaire/models.py/QuestionSet.is_first
|
7,103
|
def remove_tags(self, tags):
    if not self.tags:
        return
    current_tags = self.tags.split(',')
    for tag in tags:
        try:
            current_tags.remove(tag)
        except __HOLE__:
            pass
    self.tags = ",".join(current_tags)
|
ValueError
|
dataset/ETHPy150Open seantis/seantis-questionnaire/questionnaire/models.py/RunInfo.remove_tags
|
7,104
|
def split_answer(self):
    """
    Decode stored answer value and return as a list of choices.
    Any freeform value will be returned in a list as the last item.
    Calling code should be tolerant of freeform answers outside
    of additional [] if data has been stored in plain text format
    """
    try:
        return json.loads(self.answer)
    except __HOLE__:
        # this was likely saved as plain text, try to guess what the
        # value(s) were
        if 'multiple' in self.question.type:
            return self.answer.split('; ')
        else:
            return [self.answer]
|
ValueError
|
dataset/ETHPy150Open seantis/seantis-questionnaire/questionnaire/models.py/Answer.split_answer
|
7,105
|
def embed_interactive(**kwargs):
    """Embed an interactive terminal into a running python process
    """
    if 'state' not in kwargs:
        kwargs['state'] = state
    if 'conf' not in kwargs:
        kwargs['conf'] = conf
    try:
        import IPython
        ipython_config = IPython.Config()
        ipython_config.TerminalInteractiveShell.confirm_exit = False
        if int(IPython.__version__.split(".")[0]) < 3:
            IPython.embed(config=ipython_config,
                          banner1='',
                          user_ns=kwargs)
        else:
            IPython.embed(config=ipython_config,
                          banner1='',
                          local_ns=kwargs)
    except __HOLE__:
        import readline # pylint: disable=unused-variable
        import code
        code.InteractiveConsole(kwargs).interact()
|
ImportError
|
dataset/ETHPy150Open duerrp/pyexperiment/pyexperiment/utils/interactive.py/embed_interactive
|
7,106
|
def _runQueued(self, _TXresult, _TXIntent):
    self._aTB_numPendingTransmits -= 1
    try:
        nextTransmit = self._aTB_queuedPendingTransmits.pop()
    except __HOLE__:
        return
    self._submitTransmit(nextTransmit)
|
IndexError
|
dataset/ETHPy150Open godaddy/Thespian/thespian/system/transport/asyncTransportBase.py/asyncTransportBase._runQueued
|
7,107
|
def __init__(self):
    try:
        # Needed for Python 3, see [http://bugs.python.org/issue21265]
        super(NoInterpolationParser, self).__init__(interpolation=None)
    except __HOLE__:
        # Python 2.x
        cp.ConfigParser.__init__(self)
|
TypeError
|
dataset/ETHPy150Open SmokinCaterpillar/pypet/pypet/pypetlogging.py/NoInterpolationParser.__init__
|
7,108
|
def get_django_recorder():
    try:
        import django
        from django.conf import settings
        from pyavatax.models import AvaTaxRecord
    except __HOLE__:
        return MockDjangoRecorder
    else:
        if hasattr(settings, 'NO_PYAVATAX_INTEGRATION') and settings.NO_PYAVATAX_INTEGRATION:
            return MockDjangoRecorder
        else:
            class RealDjangoRecorder(object):
                @staticmethod
                def failure(doc, response):
                    AvaTaxRecord.objects.create(doc_code=getattr(doc, 'DocCode', None), failure_details=response._details)
                @staticmethod
                def success(doc):
                    AvaTaxRecord.objects.filter(doc_code=getattr(doc, 'DocCode', None)).update(success_on=timezone.now())
            return RealDjangoRecorder
|
ImportError
|
dataset/ETHPy150Open activefrequency/pyavatax/pyavatax/django_integration.py/get_django_recorder
|
7,109
|
def value_from_raw(self, raw):
    if raw.value is None:
        return raw.missing_value('Missing sort key')
    try:
        return int(raw.value.strip())
    except __HOLE__:
        return raw.bad_value('Bad sort key value')
|
ValueError
|
dataset/ETHPy150Open lektor/lektor-archive/lektor/types/special.py/SortKeyType.value_from_raw
|
7,110
|
def non_string_iterable(obj):
    try:
        iter(obj)
    except __HOLE__:
        return False
    else:
        return not isinstance(obj, str)

# Minimal self test. You'll need a bunch of ICO files in the current working
# directory in order for this to work...
|
TypeError
|
dataset/ETHPy150Open timeyyy/system_tray/system_tray/tray2.py/non_string_iterable
|
7,111
|
def __init__(self, replayFile):
    # The replayFile can be either the name of a file or any object that has a 'read()' method.
    self.mpq = mpyq.MPQArchive(replayFile)
    self.buildStormReplay = protocol15405.decode_replay_header(self.mpq.header['user_data_header']['content'])['m_version']['m_baseBuild']
    try:
        self.protocol = __import__('s2protocol' + '.protocol%s' % self.buildStormReplay, fromlist=['protocol2'])
    except __HOLE__:
        raise Exception('Unsupported build number: %i' % self.buildStormReplay)

# Returns a unique string that is shared among all of the 10+ replays involved in this match
|
ImportError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.__init__
|
7,112
|
def getUniqueMatchId(self):
    try:
        return self.matchId
    except __HOLE__:
        self.matchId = "todo"
        return self.matchId
|
AttributeError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.getUniqueMatchId
|
7,113
|
def getReplayInitData(self):
    try:
        return self.replayInitData
    except __HOLE__:
        self.replayInitData = self.protocol.decode_replay_initdata(self.mpq.read_file('replay.initData'))
        return self.replayInitData
|
AttributeError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.getReplayInitData
|
7,114
|
def getReplayDetails(self):
    try:
        return self.replayDetails
    except __HOLE__:
        self.replayDetails = self.protocol.decode_replay_details(self.mpq.read_file('replay.details'))
        return self.replayDetails

# returns array indexed by user ID
|
AttributeError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.getReplayDetails
|
7,115
|
def getReplayPlayers(self):
    try:
        return self.replayPlayers
    except __HOLE__:
        self.players = [None] * 10
        for i, player in enumerate(self.getReplayDetails()['m_playerList']):
            #TODO: confirm that m_workingSetSlotId == i always
            toon = player['m_toon']
            player['toon_id'] = "%i-%s-%i-%i" % (toon['m_region'], toon['m_programId'], toon['m_realm'], toon['m_id'])
            # The m_controlPlayerId is the field value to reference this player in the tracker events
            player['m_controlPlayerId'] = i+1
            self.players[i] = player
        return self.players

# returns array indexed by user ID
|
AttributeError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.getReplayPlayers
|
7,116
|
def getPlayerSpawnInfo(self):
    try:
        return self.playerSpawnInfo
    except __HOLE__:
        self.playerSpawnInfo = [None] * 10
        players = self.getReplayPlayers()
        playerIdToUserId = {}
        for event in self.getReplayTrackerEvents():
            if event['_event'] == 'NNet.Replay.Tracker.SPlayerSetupEvent':
                playerIdToUserId[event['m_playerId']] = event['m_userId']
            elif event['_event'] == 'NNet.Replay.Tracker.SUnitBornEvent':
                playerId = event['m_controlPlayerId']
                if (playerIdToUserId.has_key(playerId)):
                    playerIndex = playerIdToUserId[playerId] # always playerId-1 so far, but this is safer
                    self.playerSpawnInfo[playerIndex] = {
                        'hero': event['m_unitTypeName'],
                        'unit_tag': event['m_unitTag']
                    }
                    del playerIdToUserId[playerId]
                    if len(playerIdToUserId) == 0:
                        break
        return self.playerSpawnInfo
|
AttributeError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.getPlayerSpawnInfo
|
7,117
|
def getReplayMessageEvents(self):
    try:
        return self.replayMessageEvents
    except __HOLE__:
        messageGenerator = self.protocol.decode_replay_message_events(self.mpq.read_file('replay.message.events'))
        self.replayMessageEvents = []
        for event in messageGenerator:
            self.replayMessageEvents.append(event)
        return self.replayMessageEvents
|
AttributeError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.getReplayMessageEvents
|
7,118
|
def getMapName(self):
    try:
        return self.mapName
    except __HOLE__:
        self.mapName = self.getReplayDetails()['m_title']
        return self.mapName
|
AttributeError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.getMapName
|
7,119
|
def getMatchUTCTimestamp(self):
    try:
        return self.utcTimestamp
    except __HOLE__:
        self.utcTimestamp = (self.getReplayDetails()['m_timeUTC'] / 10000000) - 11644473600
        return self.utcTimestamp
|
AttributeError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.getMatchUTCTimestamp
|
7,120
|
def getChat(self):
    try:
        return self.chat
    except __HOLE__:
        self.chat = []
        for messageEvent in self.getReplayMessageEvents():
            if (messageEvent['_event'] != 'NNet.Game.SChatMessage'):
                continue
            userId = messageEvent['_userid']['m_userId']
            chatData = {
                't': self.getMatchUTCTimestamp() + messageEvent['_gameloop'] / 16,
                'user': userId,
                'msg': messageEvent['m_string'],
            }
            self.chat.append(chatData)
        return self.chat
|
AttributeError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.getChat
|
7,121
|
def getReplayGameEvents(self):
    try:
        return self.replayGameEvents
    except __HOLE__:
        generator = self.protocol.decode_replay_game_events(self.mpq.read_file('replay.game.events'))
        self.replayGameEvents = []
        for event in generator:
            self.replayGameEvents.append(event)
        return self.replayGameEvents
|
AttributeError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.getReplayGameEvents
|
7,122
|
def getReplayGameEventsDebug(self):
    try:
        return self.replayGameEventsDebug
    except __HOLE__:
        generator = self.protocol.decode_replay_game_events_debug(self.mpq.read_file('replay.game.events'))
        self.replayGameEvents = []
        try:
            i = 0
            for event in generator:
                event['index'] = i
                if (i >= 25000):
                    return self.replayGameEvents
                i = i + 1
        except CorruptedError as e:
            self.replayGameEvents.append({'error': str(e)});
        return self.replayGameEvents
|
AttributeError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.getReplayGameEventsDebug
|
7,123
|
def getReplayTrackerEvents(self):
    try:
        return self.replayTrackerEvents
    except __HOLE__:
        generator = self.protocol.decode_replay_tracker_events(self.mpq.read_file('replay.tracker.events'))
        self.replayTrackerEvents = []
        for event in generator:
            if event.has_key('m_unitTagIndex') and event.has_key('m_unitTagRecycle'):
                event['m_unitTag'] = self.protocol.unit_tag(event['m_unitTagIndex'], event['m_unitTagRecycle'])
            self.replayTrackerEvents.append(event)
        return self.replayTrackerEvents
|
AttributeError
|
dataset/ETHPy150Open karlgluck/heroes-of-the-storm-replay-parser/api/StormReplayParser.py/StormReplayParser.getReplayTrackerEvents
|
7,124
|
@view_config(route_name='shortener_v1', renderer='jsonp')
def url_shortener(request):
    incoming_url = request.params.get('url')
    if not incoming_url:
        raise httpexceptions.HTTPBadRequest()
    try:
        url_handler = Url(request, url=incoming_url)
    except __HOLE__:
        raise httpexceptions.HTTPBadRequest()
    try:
        short_url = url_handler.shorten()
    except ShortenGenerationError:
        raise httpexceptions.HTTPInternalServerError()
    return short_url
|
ValueError
|
dataset/ETHPy150Open gustavofonseca/nurl/nurl/views.py/url_shortener
|
7,125
|
def setUp(self):
'''
No need to add a dummy foo.txt to muddy up the github repo, just make
our own fileserver root on-the-fly.
'''
def _new_dir(path):
'''
Add a new dir at ``path`` using os.makedirs. If the directory
already exists, remove it recursively and then try to create it
again.
'''
try:
os.makedirs(path)
except __HOLE__ as exc:
if exc.errno == errno.EEXIST:
# Just in case a previous test was interrupted, remove the
# directory and try adding it again.
shutil.rmtree(path)
os.makedirs(path)
else:
raise
# Crete the FS_ROOT
for saltenv in SALTENVS:
saltenv_root = os.path.join(FS_ROOT, saltenv)
# Make sure we have a fresh root dir for this saltenv
_new_dir(saltenv_root)
path = os.path.join(saltenv_root, 'foo.txt')
with salt.utils.fopen(path, 'w') as fp_:
fp_.write(
'This is a test file in the \'{0}\' saltenv.\n'
.format(saltenv)
)
subdir_abspath = os.path.join(saltenv_root, SUBDIR)
os.makedirs(subdir_abspath)
for subdir_file in SUBDIR_FILES:
path = os.path.join(subdir_abspath, subdir_file)
with salt.utils.fopen(path, 'w') as fp_:
fp_.write(
'This is file \'{0}\' in subdir \'{1} from saltenv '
'\'{2}\''.format(subdir_file, SUBDIR, saltenv)
)
# Create the CACHE_ROOT
_new_dir(CACHE_ROOT)
|
OSError
|
dataset/ETHPy150Open saltstack/salt/tests/integration/fileclient_test.py/FileclientTest.setUp
|
7,126
|
def notify(self, request_payload):
"""Initiates a review by placing a message on the message queue."""
self.celery.conf.BROKER_URL = self.settings['BROKER_URL']
review_settings = {
'max_comments': self.settings['max_comments'],
}
payload = {
'request': request_payload,
'review_settings': review_settings,
'session': self._login_user(self.settings['user']),
'url': self._rb_url(),
}
if 'tool_profile_id' in request_payload:
tool_profile_id = request_payload.get('tool_profile_id')
try:
profile = Profile.objects.get(pk=tool_profile_id)
except __HOLE__:
logging.error('Error: Profile %s does not exist.',
tool_profile_id)
return
else:
logging.error('Error: Tool profile ID must be specified.')
return
review_settings['ship_it'] = profile.ship_it
review_settings['comment_unmodified'] = profile.comment_unmodified
review_settings['open_issues'] = profile.open_issues
payload['review_settings'] = review_settings
try:
self.celery.send_task(
'reviewbot.tasks.ProcessReviewRequest',
[payload, profile.tool_settings],
queue='%s.%s' % (profile.tool.entry_point,
profile.tool.version))
except:
raise
|
ObjectDoesNotExist
|
dataset/ETHPy150Open reviewboard/ReviewBot/extension/reviewbotext/extension.py/ReviewBotExtension.notify
|
7,127
|
def print_size(self, text):
size_total = int(self.params.get('upload.maximum_size'))
size_data_total = int(self.params.get('upload.maximum_data_size'))
size_regex = self.params.get('recipe.size.regex')
pattern = re.compile(size_regex, re.M)
result = pattern.findall(text)
if result:
try:
int(result[0])
except TypeError:
result = result[0][:2]
size = sum(int(n) for n in result)
size_percent = size / size_total * 100
size = regular_numner(size)
size_total = regular_numner(size_total)
size_percent = '%.1f' % size_percent
txt = 'Sketch uses {0} bytes ({1}%) '
txt += 'of program storage space. Maximum is {2} bytes.\\n'
self.message_queue.put(txt, size, size_percent, size_total)
size_regex_data = self.params.get('recipe.size.regex.data', '')
if size_regex_data and size_data_total:
pattern = re.compile(size_regex_data, re.M)
result = pattern.findall(text)
if result:
try:
int(result[0])
except __HOLE__:
result = result[0][1:]
size_data = sum(int(n) for n in result)
size_data_percent = size_data / size_data_total * 100
size_data_remain = size_data_total - size_data
size_data = regular_numner(size_data)
size_data_remain = regular_numner(size_data_remain)
size_data_total = regular_numner(size_data_total)
size_data_percent = '%.1f' % size_data_percent
txt = 'Global variables use {0} bytes ({1}%) of dynamic memory, '
txt += 'leaving {2} bytes for local variables. '
txt += 'Maximum is {3} bytes.\\n'
self.message_queue.put(txt, size_data, size_data_percent,
size_data_remain, size_data_total)
|
TypeError
|
dataset/ETHPy150Open Robot-Will/Stino/stino/pyarduino/arduino_compiler.py/Compiler.print_size
|
7,128
|
def is_process_alive(pid):
    """Sends null signal to a process to check if it's alive"""
    try:
        # Sending the null signal (sig. 0) to the process will check
        # pid's validity.
        os.kill(pid, 0)
    except __HOLE__, e:
        # Access denied, but process is alive
        return e.errno == errno.EPERM
    except:
        return False
    else:
        return True
|
OSError
|
dataset/ETHPy150Open YelpArchive/pushmanager/pushmanager/core/pid.py/is_process_alive
|
7,129
|
def kill_processes(pids):
    while pids:
        pid = pids.pop()
        if is_process_alive(pid):
            try:
                logging.info("Sending SIGKILL to PID: %d" % pid)
                os.kill(pid, 9)
            except __HOLE__, e:
                if e.errno == errno.ESRCH:
                    # process is dead already, no need to do anything
                    pass
                else:
                    raise
            else:
                # We'll check if the process is dead in a later iteration
                pids.insert(0, pid)
|
OSError
|
dataset/ETHPy150Open YelpArchive/pushmanager/pushmanager/core/pid.py/kill_processes
|
7,130
|
def check(path):
    try:
        logging.info("Checking pidfile '%s'", path)
        pids = [int(pid) for pid in open(path).read().strip().split(' ')]
        kill_processes(pids)
    except __HOLE__, (code, text):
        if code == errno.ENOENT:
            logging.warning("pidfile '%s' not found" % path)
        else:
            raise
|
IOError
|
dataset/ETHPy150Open YelpArchive/pushmanager/pushmanager/core/pid.py/check
|
7,131
|
def mkdir(path):
    try:
        os.mkdir(path)
    except __HOLE__:
        pass
|
OSError
|
dataset/ETHPy150Open arq5x/gemini/gemini/gemini_bcolz.py/mkdir
|
7,132
|
def filter(db, query, user_dict):
# these should be translated to a bunch or or/and statements within gemini
# so they are supported, but must be translated before getting here.
if query == "False" or query is None or query is False:
return []
if "any(" in query or "all(" in query or \
("sum(" in query and not query.startswith("sum(") and query.count("sum(") == 1):
return None
user_dict['where'] = np.where
if query.startswith("not "):
# "~" is not to numexpr.
query = "~" + query[4:]
sum_cmp = False
if query.startswith("sum("):
assert query[-1].isdigit()
query, sum_cmp = query[4:].rsplit(")", 1)
query = "(%s) %s" % (query, sum_cmp)
query = query.replace(".", "__")
query = " & ".join("(%s)" % token for token in query.split(" and "))
query = " | ".join("(%s)" % token for token in query.split(" or "))
import database
conn, metadata = database.get_session_metadata(db)
samples = get_samples(metadata)
# convert gt_col[index] to gt_col__sample_name
patt = "(%s)\[(\d+)\]" % "|".join((g[0] for g in gt_cols_types))
def subfn(x):
"""Turn gt_types[1] into gt_types__sample"""
field, idx = x.groups()
return "%s__%s" % (field, fix_sample_name(samples[int(idx)]))
query = re.sub(patt, subfn, query)
if os.environ.get('GEMINI_DEBUG') == 'TRUE':
print >>sys.stderr, query[:250] + "..."
carrays = load(db, query=query)
if len(carrays) == 0 or max(len(carrays[c]) for c in carrays) == 0 or \
any(not any(carrays[c]) for c in carrays):
# need this 2nd check above because of the place-holders in load()
raise NoGTIndexException
# loop through and create a cache of "$gt__$sample"
for gt_col in carrays:
if not gt_col in query: continue
for i, sample_array in enumerate(carrays[gt_col]):
sample = fix_sample_name(samples[i])
if not sample in query: continue
user_dict["%s__%s" % (gt_col, sample)] = sample_array
# had to special-case count. it won't quite be as efficient
if "|count|" in query:
tokens = query[2:-2].split("|count|")
icmp = tokens[-1]
# a list of carrays, so not in memory.
res = [bcolz.eval(tok, user_dict=user_dict) for tok in tokens[:-1]]
# in memory after this, but just a single axis array.
res = np.sum(res, axis=0)
res = ne.evaluate('res%s' % icmp)
else:
res = bcolz.eval(query, user_dict=user_dict)
try:
if res.shape[0] == 1 and len(res.shape) > 1:
res = res[0]
except __HOLE__:
return []
variant_ids, = np.where(res)
#variant_ids = np.array(list(bcolz.eval(query, user_dict=user_dict,
# vm="numexpr").wheretrue()))
# variant ids are 1-based.
if len(variant_ids) > 0:
return 1 + variant_ids
else:
return []
|
AttributeError
|
dataset/ETHPy150Open arq5x/gemini/gemini/gemini_bcolz.py/filter
|
7,133
|
def test_validate_bug_tracker(self):
    """Testing bug tracker url form field validation"""
    # Invalid - invalid format specification types
    self.assertRaises(ValidationError, validate_bug_tracker, "%20")
    self.assertRaises(ValidationError, validate_bug_tracker, "%d")
    # Invalid - too many format specification types
    self.assertRaises(ValidationError, validate_bug_tracker, "%s %s")
    # Invalid - no format specification types
    self.assertRaises(ValidationError, validate_bug_tracker, "www.a.com")
    # Valid - Escaped %'s, with a valid format specification type
    try:
        validate_bug_tracker("%%20%s")
    except __HOLE__:
        self.assertFalse(True, "validate_bug_tracker() raised a "
                               "ValidationError when no error was "
                               "expected.")
|
ValidationError
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/admin/tests.py/ValidatorTests.test_validate_bug_tracker
|
7,134
|
def geocode(self, query, **kwargs):
"""
Given a string to search for, return the results from OpenCage's Geocoder.
:param string query: String to search for
:returns: Dict results
:raises InvalidInputError: if the query string is not a unicode string
:raises RateLimitExceededError: if you have exceeded the number of queries you can make. Exception says when you can try again
:raises UnknownError: if something goes wrong with the OpenCage API
"""
if six.PY2:
# py3 doesn't have unicode() function, and instead we check the text_type later
try:
query = unicode(query)
except UnicodeDecodeError:
raise InvalidInputError(bad_value=query)
if not isinstance(query, six.text_type):
raise InvalidInputError(bad_value=query)
data = {
'q': query,
'key': self.key
}
# Add user parameters
data.update(kwargs)
url = self.url
response = requests.get(url, params=data)
if (response.status_code == 402 or response.status_code == 429):
# Rate limit exceeded
reset_time = datetime.utcfromtimestamp(response.json()['rate']['reset'])
raise RateLimitExceededError(reset_to=int(response.json()['rate']['limit']), reset_time=reset_time)
elif response.status_code == 500:
raise UnknownError("500 status code from API")
try:
response_json = response.json()
except __HOLE__:
raise UnknownError("Non-JSON result from server")
if 'results' not in response_json:
raise UnknownError("JSON from API doesn't have a 'results' key")
return floatify_latlng(response_json['results'])
|
ValueError
|
dataset/ETHPy150Open OpenCageData/python-opencage-geocoder/opencage/geocoder.py/OpenCageGeocode.geocode
|
7,135
|
def float_if_float(float_string):
    try:
        float_val = float(float_string)
        return float_val
    except __HOLE__:
        return float_string
|
ValueError
|
dataset/ETHPy150Open OpenCageData/python-opencage-geocoder/opencage/geocoder.py/float_if_float
|
7,136
|
def manipulator_validator_unique(f, opts, self, field_data, all_data):
"Validates that the value is unique for this field."
lookup_type = f.get_validator_unique_lookup_type()
try:
old_obj = self.manager.get(**{lookup_type: field_data})
except __HOLE__:
return
if getattr(self, 'original_object', None) and self.original_object._get_pk_val() == old_obj._get_pk_val():
return
raise validators.ValidationError, gettext("%(optname)s with this %(fieldname)s already exists.") % {'optname': capfirst(opts.verbose_name), 'fieldname': f.verbose_name}
# A guide to Field parameters:
#
# * name: The name of the field specifed in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
|
ObjectDoesNotExist
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/__init__.py/manipulator_validator_unique
|
7,137
|
def get_db_prep_lookup(self, lookup_type, value):
"Returns field's value prepared for database lookup."
if lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte', 'month', 'day', 'search'):
return [value]
elif lookup_type in ('range', 'in'):
return value
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [prep_for_like_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
try:
value = int(value)
except __HOLE__:
raise ValueError("The __year lookup type requires an integer argument")
return ['%s-01-01 00:00:00' % value, '%s-12-31 23:59:59.999999' % value]
raise TypeError("Field has invalid lookup: %s" % lookup_type)
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/__init__.py/Field.get_db_prep_lookup
|
7,138
|
def to_python(self, value):
    if value is None:
        return value
    try:
        return int(value)
    except (TypeError, __HOLE__):
        raise validators.ValidationError, gettext("This value must be an integer.")
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/__init__.py/AutoField.to_python
|
7,139
|
def to_python(self, value):
    if value is None:
        return value
    if isinstance(value, datetime.datetime):
        return value.date()
    if isinstance(value, datetime.date):
        return value
    validators.isValidANSIDate(value, None)
    try:
        return datetime.date(*time.strptime(value, '%Y-%m-%d')[:3])
    except __HOLE__:
        raise validators.ValidationError, gettext('Enter a valid date in YYYY-MM-DD format.')
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/__init__.py/DateField.to_python
|
7,140
|
def to_python(self, value):
    if value is None:
        return value
    if isinstance(value, datetime.datetime):
        return value
    if isinstance(value, datetime.date):
        return datetime.datetime(value.year, value.month, value.day)
    try: # Seconds are optional, so try converting seconds first.
        return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6])
    except ValueError:
        try: # Try without seconds.
            return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M')[:5])
        except __HOLE__: # Try without hour/minutes/seconds.
            try:
                return datetime.datetime(*time.strptime(value, '%Y-%m-%d')[:3])
            except ValueError:
                raise validators.ValidationError, gettext('Enter a valid date/time in YYYY-MM-DD HH:MM format.')
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/fields/__init__.py/DateTimeField.to_python
|
7,141
|
def update_result(self, context): # NOQA
super(GlbCorp, self).update_result(context)
self.monitor.stop()
iteration = 0
results = []
with open(self.logcat_log) as fh:
try:
line = fh.next()
result_lines = []
while True:
if OLD_RESULT_START_REGEX.search(line):
self.preamble_regex = OLD_PREAMBLE_REGEX
self.result_start_regex = OLD_RESULT_START_REGEX
elif NEW_RESULT_START_REGEX.search(line):
self.preamble_regex = NEW_PREAMBLE_REGEX
self.result_start_regex = NEW_RESULT_START_REGEX
if self.result_start_regex and self.result_start_regex.search(line):
result_lines.append('{')
line = fh.next()
while self.preamble_regex.search(line):
result_lines.append(self.preamble_regex.sub('', line))
line = fh.next()
try:
result = json.loads(''.join(result_lines))
results.append(result)
if iteration:
suffix = '_{}'.format(iteration)
else:
suffix = ''
for sub_result in result['results']:
frames = sub_result['score']
elapsed_time = sub_result['elapsed_time'] / 1000
fps = frames / elapsed_time
context.result.add_metric('score' + suffix, frames, 'frames')
context.result.add_metric('fps' + suffix, fps)
except ValueError:
self.logger.warning('Could not parse result for iteration {}'.format(iteration))
result_lines = []
iteration += 1
line = fh.next()
except __HOLE__:
pass # EOF
if results:
outfile = os.path.join(context.output_directory, 'glb-results.json')
with open(outfile, 'wb') as wfh:
json.dump(results, wfh, indent=4)
|
StopIteration
|
dataset/ETHPy150Open ARM-software/workload-automation/wlauto/workloads/glbcorp/__init__.py/GlbCorp.update_result
|
7,142
|
def postOptions(self):
    if not self['control-node']:
        raise UsageError("Control node address must be provided.")
    if not self['cert-directory']:
        raise UsageError("Certificates directory must be provided.")
    if self['wait'] is not None:
        try:
            self['wait'] = int(self['wait'])
        except __HOLE__:
            raise UsageError("The wait timeout must be an integer.")
|
ValueError
|
dataset/ETHPy150Open ClusterHQ/flocker/benchmark/cluster_cleanup.py/ScriptOptions.postOptions
|
7,143
|
def _calculate_log_likelihood(self):
#if self.m == None:
# Give error message
R = zeros((self.n, self.n))
X, Y = array(self.X), array(self.Y)
thetas = 10.**self.thetas
#weighted distance formula
for i in range(self.n):
R[i, i+1:self.n] = e**(-sum(thetas*(X[i] - X[i+1:self.n])**2., 1))
R = R*(1.0 - self.nugget)
R = R + R.T + eye(self.n)
self.R = R
one = ones(self.n)
try:
self.R_fact = cho_factor(R)
rhs = vstack([Y, one]).T
R_fact = (self.R_fact[0].T, not self.R_fact[1])
cho = cho_solve(R_fact, rhs).T
self.mu = dot(one, cho[0])/dot(one, cho[1])
ymdotone = Y - dot(one, self.mu)
self.sig2 = dot(ymdotone, cho_solve(self.R_fact,
(ymdotone)))/self.n
#self.log_likelihood = -self.n/2.*log(self.sig2)-1./2.*log(abs(det(self.R)+1.e-16))-sum(thetas)
self.log_likelihood = -self.n/2.*log(self.sig2) - \
1./2.*log(abs(det(self.R) + 1.e-16))
except (linalg.LinAlgError, __HOLE__):
#------LSTSQ---------
self.R_fact = None # reset this to none, so we know not to use cholesky
# self.R = self.R+diag([10e-6]*self.n) # improve conditioning[Booker et al., 1999]
rhs = vstack([Y, one]).T
lsq = lstsq(self.R.T, rhs)[0].T
self.mu = dot(one, lsq[0])/dot(one, lsq[1])
ymdotone = Y - dot(one, self.mu)
self.sig2 = dot(ymdotone, lstsq(self.R, ymdotone)[0])/self.n
self.log_likelihood = -self.n/2.*log(self.sig2) - \
1./2.*log(abs(det(self.R) + 1.e-16))
#print self.log_likelihood
|
ValueError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/surrogatemodels/kriging_surrogate.py/KrigingSurrogate._calculate_log_likelihood
|
7,144
|
def getheightwidth(self):
    """ getwidth() -> (int, int)
    Return the height and width of the console in characters
    https://groups.google.com/forum/#!msg/comp.lang.python/CpUszNNXUQM/QADpl11Z-nAJ"""
    try:
        return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
    except __HOLE__:
        height, width = struct.unpack(
            "hhhh", ioctl(0, termios.TIOCGWINSZ ,"\000"*8))[0:2]
        if not height:
            return 25, 80
        return height, width
|
KeyError
|
dataset/ETHPy150Open gapato/livestreamer-curses/src/livestreamer_curses/streamlist.py/StreamList.getheightwidth
|
7,145
|
def check_stopped_streams(self):
    finished = self.q.get_finished()
    for f in finished:
        for s in self.streams:
            try:
                i = self.filtered_streams.index(s)
            except __HOLE__:
                continue
            if f == s['id']:
                self.set_footer('Stream {0} has stopped'.format(s['name']))
                if i == self.pads[self.current_pad].getyx()[0]:
                    attr = curses.A_REVERSE
                else:
                    attr = curses.A_NORMAL
                self.pads['streams'].addstr(i, PLAYING_FIELD_OFFSET,
                                            self.config.INDICATORS[s['online']], attr)
                self.refresh_current_pad()
|
ValueError
|
dataset/ETHPy150Open gapato/livestreamer-curses/src/livestreamer_curses/streamlist.py/StreamList.check_stopped_streams
|
7,146
|
def run_from_command_line():
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    try:
        relation_name = iepy.instance.rules.RELATION
    except AttributeError:
        logging.error("RELATION not defined in rules file")
        sys.exit(1)
    try:
        relation = models.Relation.objects.get(name=relation_name)
    except __HOLE__:
        logging.error("Relation {!r} not found".format(relation_name))
        sys.exit(1)
    # Load rules
    rules = load_rules()
    # Load evidences
    evidences = CandidateEvidenceManager.candidates_for_relation(relation)
    # Run the pipeline
    iextractor = RuleBasedCore(relation, rules)
    iextractor.start()
    iextractor.process()
    predictions = iextractor.predict(evidences)
    output.dump_output_loop(predictions)
|
ObjectDoesNotExist
|
dataset/ETHPy150Open machinalis/iepy/iepy/instantiation/iepy_rules_runner.py/run_from_command_line
|
7,147
|
@staticmethod
def _sign_string(message, private_key_file=None, private_key_string=None):
"""
Signs a string for use with Amazon CloudFront. Requires the M2Crypto
library be installed.
"""
try:
from M2Crypto import EVP
except __HOLE__:
raise NotImplementedError("Boto depends on the python M2Crypto "
"library to generate signed URLs for "
"CloudFront")
# Make sure only one of private_key_file and private_key_string is set
if private_key_file and private_key_string:
raise ValueError("Only specify the private_key_file or the private_key_string not both")
if not private_key_file and not private_key_string:
raise ValueError("You must specify one of private_key_file or private_key_string")
# if private_key_file is a file object read the key string from there
if isinstance(private_key_file, file):
private_key_string = private_key_file.read()
# Now load key and calculate signature
if private_key_string:
key = EVP.load_key_string(private_key_string)
else:
key = EVP.load_key(private_key_file)
key.reset_context(md='sha1')
key.sign_init()
key.sign_update(str(message))
signature = key.sign_final()
return signature
|
ImportError
|
dataset/ETHPy150Open darcyliu/storyboard/boto/cloudfront/distribution.py/Distribution._sign_string
|
7,148
|
def describe_directory(self, path):
    """
    Returns a dictionary of {filename: {attributes}} for all files
    on the remote system (where the MLSD command is supported).
    :param path: full path to the remote directory
    :type path: str
    """
    conn = self.get_conn()
    conn.cwd(path)
    try:
        # only works in Python 3
        files = dict(conn.mlsd())
    except __HOLE__:
        files = dict(mlsd(conn))
    return files
|
AttributeError
|
dataset/ETHPy150Open airbnb/airflow/airflow/contrib/hooks/ftp_hook.py/FTPHook.describe_directory
|
7,149
|
def CreateServer(self):
    with open(self.filename) as stream:
        appinfo_external = appinfo_includes.Parse(stream)
    appengine_config = vmconfig.BuildVmAppengineEnvConfig()
    vmstub.Register(vmstub.VMStub(appengine_config.default_ticket))
    if 'googleclouddebugger' in sys.modules:
        try:
            googleclouddebugger.AttachDebugger()
        except Exception as e:
            logging.warn('Exception while initializing Cloud Debugger: %s',
                         traceback.format_exc(e))
    try:
        import appengine_config as user_appengine_config
    except __HOLE__:
        pass
    app = meta_app.FullyWrappedApp(appinfo_external, appengine_config)
    self.server = self.server_class(self.host, self.port, app,
                                    appinfo_external)
    logging.info('Configured server on %s:%s', self.host, self.port)
|
ImportError
|
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/ext/vmruntime/vmservice.py/VmService.CreateServer
|
7,150
|
@classmethod
def check(cls):
    super(YouTubeView, cls).check()
    try:
        from gdata.youtube.service import YouTubeService
    except ImportError:
        raise DisableView('YouTubeView requires "gdata" library '
                          '(http://pypi.python.org/pypi/gdata)')
    try:
        django_settings.ADMINFILES_YOUTUBE_USER
    except __HOLE__:
        raise DisableView('YouTubeView requires '
                          'ADMINFILES_YOUTUBE_USER setting')
|
AttributeError
|
dataset/ETHPy150Open carljm/django-adminfiles/adminfiles/views.py/YouTubeView.check
|
7,151
|
@classmethod
def check(cls):
    super(FlickrView, cls).check()
    try:
        import flickrapi
    except __HOLE__:
        raise DisableView('FlickrView requires the "flickrapi" library '
                          '(http://pypi.python.org/pypi/flickrapi)')
    try:
        django_settings.ADMINFILES_FLICKR_USER
        django_settings.ADMINFILES_FLICKR_API_KEY
    except AttributeError:
        raise DisableView('FlickrView requires '
                          'ADMINFILES_FLICKR_USER and '
                          'ADMINFILES_FLICKR_API_KEY settings')
|
ImportError
|
dataset/ETHPy150Open carljm/django-adminfiles/adminfiles/views.py/FlickrView.check
|
7,152
|
@classmethod
def check(cls):
    super(VimeoView, cls).check()
    try:
        django_settings.ADMINFILES_VIMEO_USER
    except AttributeError:
        raise DisableView('VimeoView requires '
                          'ADMINFILES_VIMEO_USER setting')
    try:
        cls.pages = django_settings.ADMINFILES_VIMEO_PAGES
    except __HOLE__:
        cls.pages = 1
    if cls.pages > 3:
        cls.pages = 3
|
AttributeError
|
dataset/ETHPy150Open carljm/django-adminfiles/adminfiles/views.py/VimeoView.check
|
7,153
|
def _get_videos(self, url):
    import urllib2
    try:
        import xml.etree.ElementTree as ET
    except __HOLE__:
        import elementtree.ElementTree as ET
    request = urllib2.Request(url)
    request.add_header('User-Agent', 'django-adminfiles/0.x')
    root = ET.parse(urllib2.build_opener().open(request)).getroot()
    videos = []
    for v in root.findall('video'):
        videos.append({
            'title': v.find('title').text,
            'upload_date': v.find('upload_date').text.split()[0],
            'description': v.find('description').text,
            'thumb': v.find('thumbnail_small').text,
            'url': v.find('url').text,
        })
    return videos
|
ImportError
|
dataset/ETHPy150Open carljm/django-adminfiles/adminfiles/views.py/VimeoView._get_videos
|
7,154
|
def get_enabled_browsers():
    """
    Check the ADMINFILES_BROWSER_VIEWS setting and return a list of
    instantiated browser views that have the necessary
    dependencies/configuration to run.
    """
    global _enabled_browsers_cache
    if _enabled_browsers_cache is not None:
        return _enabled_browsers_cache
    enabled = []
    for browser_path in settings.ADMINFILES_BROWSER_VIEWS:
        try:
            view_class = import_browser(browser_path)
        except __HOLE__:
            continue
        if not issubclass(view_class, BaseView):
            continue
        browser = view_class
        try:
            browser.check()
        except DisableView:
            continue
        enabled.append(browser)
    _enabled_browsers_cache = enabled
    return enabled
|
ImportError
|
dataset/ETHPy150Open carljm/django-adminfiles/adminfiles/views.py/get_enabled_browsers
|
7,155
|
def __enter__(self):
    try:
        return next(self.gen)
    except __HOLE__:
        raise RuntimeError("generator didn't yield")
|
StopIteration
|
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/contextlib.py/_GeneratorContextManager.__enter__
|
7,156
|
def __exit__(self, type, value, traceback):
    if type is None:
        try:
            next(self.gen)
        except __HOLE__:
            return
        else:
            raise RuntimeError("generator didn't stop")
    else:
        if value is None:
            # Need to force instantiation so we can reliably
            # tell if we get the same exception back
            value = type()
        try:
            self.gen.throw(type, value, traceback)
            raise RuntimeError("generator didn't stop after throw()")
        except StopIteration as exc:
            # Suppress the exception *unless* it's the same exception that
            # was passed to throw(). This prevents a StopIteration
            # raised inside the "with" statement from being suppressed
            return exc is not value
        except:
            # only re-raise if it's *not* the exception that was
            # passed to throw(), because __exit__() must not raise
            # an exception unless __exit__() itself failed. But throw()
            # has to raise the exception to signal propagation, so this
            # fixes the impedance mismatch between the throw() protocol
            # and the __exit__() protocol.
            #
            if sys.exc_info()[1] is not value:
                raise
|
StopIteration
|
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/contextlib.py/_GeneratorContextManager.__exit__
|
7,157
|
def push(self, exit):
    """Registers a callback with the standard __exit__ method signature
    Can suppress exceptions the same way __exit__ methods can.
    Also accepts any object with an __exit__ method (registering a call
    to the method instead of the object itself)
    """
    # We use an unbound method rather than a bound method to follow
    # the standard lookup behaviour for special methods
    _cb_type = type(exit)
    try:
        exit_method = _cb_type.__exit__
    except __HOLE__:
        # Not a context manager, so assume its a callable
        self._exit_callbacks.append(exit)
    else:
        self._push_cm_exit(exit, exit_method)
    return exit # Allow use as a decorator
|
AttributeError
|
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/contextlib.py/ExitStack.push
|
7,158
|
def write_config(self):
    """ Generates the shader configuration for the common inputs """
    content = self._input_ubo.generate_shader_code()
    try:
        # Try to write the temporary file
        with open("/$$rptemp/$$main_scene_data.inc.glsl", "w") as handle:
            handle.write(content)
    except __HOLE__ as msg:
        self.error("Failed to write common resources shader configuration!", msg)
|
IOError
|
dataset/ETHPy150Open tobspr/RenderPipeline/rpcore/common_resources.py/CommonResources.write_config
|
7,159
|
def _cache_support(self, expire_time, fragm_name, vary_on, lineno, caller):
    try:
        expire_time = int(expire_time)
    except (ValueError, __HOLE__):
        raise TemplateSyntaxError('"%s" tag got a non-integer timeout '
                                  'value: %r' % (list(self.tags)[0], expire_time), lineno)
    cache_key = make_template_fragment_key(fragm_name, vary_on)
    value = cache.get(cache_key)
    if value is None:
        value = caller()
        cache.set(cache_key, force_text(value), expire_time)
    else:
        value = force_text(value)
    return value
|
TypeError
|
dataset/ETHPy150Open niwinz/django-jinja/django_jinja/builtins/extensions.py/CacheExtension._cache_support
|
7,160
|
def test_ckdtree_pickle():
    # test if it is possible to pickle
    # a cKDTree
    try:
        import cPickle as pickle
    except __HOLE__:
        import pickle
    np.random.seed(0)
    n = 50
    k = 4
    points = np.random.randn(n, k)
    T1 = cKDTree(points)
    tmp = pickle.dumps(T1)
    T2 = pickle.loads(tmp)
    T1 = T1.query(points, k=5)[-1]
    T2 = T2.query(points, k=5)[-1]
    assert_array_equal(T1, T2)
|
ImportError
|
dataset/ETHPy150Open scipy/scipy/scipy/spatial/tests/test_kdtree.py/test_ckdtree_pickle
|
7,161
|
def test_ckdtree_pickle_boxsize():
    # test if it is possible to pickle a periodic
    # cKDTree
    try:
        import cPickle as pickle
    except __HOLE__:
        import pickle
    np.random.seed(0)
    n = 50
    k = 4
    points = np.random.uniform(size=(n, k))
    T1 = cKDTree(points, boxsize=1.0)
    tmp = pickle.dumps(T1)
    T2 = pickle.loads(tmp)
    T1 = T1.query(points, k=5)[-1]
    T2 = T2.query(points, k=5)[-1]
    assert_array_equal(T1, T2)
|
ImportError
|
dataset/ETHPy150Open scipy/scipy/scipy/spatial/tests/test_kdtree.py/test_ckdtree_pickle_boxsize
|
7,162
|
def test_ckdtree_box_upper_bounds():
    data = np.linspace(0, 2, 10).reshape(-1, 1)
    try:
        cKDTree(data, leafsize=1, boxsize=1.0)
    except __HOLE__:
        return
    raise AssertionError("ValueError is not raised")
|
ValueError
|
dataset/ETHPy150Open scipy/scipy/scipy/spatial/tests/test_kdtree.py/test_ckdtree_box_upper_bounds
|
7,163
|
def test_ckdtree_box_lower_bounds():
    data = np.linspace(-1, 1, 10)
    try:
        cKDTree(data, leafsize=1, boxsize=1.0)
    except __HOLE__:
        return
    raise AssertionError("ValueError is not raised")
|
ValueError
|
dataset/ETHPy150Open scipy/scipy/scipy/spatial/tests/test_kdtree.py/test_ckdtree_box_lower_bounds
|
7,164
|
def test_ckdtree_memuse():
# unit test adaptation of gh-5630
try:
import resource
except __HOLE__:
# resource is not available on Windows with Python 2.6
return
# Make some data
dx, dy = 0.05, 0.05
y, x = np.mgrid[slice(1, 5 + dy, dy),
slice(1, 5 + dx, dx)]
z = np.sin(x)**10 + np.cos(10 + y*x) * np.cos(x)
z_copy = np.empty_like(z)
z_copy[:] = z
# Place FILLVAL in z_copy at random number of random locations
FILLVAL = 99.
mask = np.random.randint(0, z.size, np.random.randint(50) + 5)
z_copy.flat[mask] = FILLVAL
igood = np.vstack(np.where(x != FILLVAL)).T
ibad = np.vstack(np.where(x == FILLVAL)).T
mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
# burn-in
for i in range(10):
tree = cKDTree(igood)
# count memleaks while constructing and querying cKDTree
num_leaks = 0
for i in range(100):
mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
tree = cKDTree(igood)
dist, iquery = tree.query(ibad, k=4, p=2)
new_mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if new_mem_use > mem_use:
num_leaks += 1
# ideally zero leaks, but errors might accidentally happen
# outside cKDTree
assert_(num_leaks < 10)
|
ImportError
|
dataset/ETHPy150Open scipy/scipy/scipy/spatial/tests/test_kdtree.py/test_ckdtree_memuse
|
7,165
|
@c3bottles.route("/report", methods=("GET", "POST"))
@c3bottles.route("/<int:number>")
def report(number=None):
if number:
dp = DropPoint.get(number)
else:
dp = DropPoint.get(request.values.get("number"))
if not dp or dp.removed:
return render_template(
"error.html",
heading="Error!",
text="Drop point not found.",
)
state = request.values.get("state")
if state:
if g.no_anonymous_reporting and g.user.is_anonymous:
abort(401)
from model.report import Report
try:
Report(dp=dp, state=state)
except __HOLE__ as e:
return render_template(
"error.html",
text="Errors occurred while processing your report:",
errors=[v for d in e.args for v in d.values()]
)
else:
db.session.commit()
return render_template(
"success.html",
heading="Thank you!",
text="Your report has been received successfully."
)
else:
return render_template(
"report.html",
dp=dp
)
# vim: set expandtab ts=4 sw=4:
|
ValueError
|
dataset/ETHPy150Open der-michik/c3bottles/view/report.py/report
|
7,166
|
def on_iter_next(self, rowref):
    n = self.on_get_path(rowref)
    try:
        rowref = self._model_data[n+1]
    except __HOLE__, msg:
        rowref = None
    return rowref
|
IndexError
|
dataset/ETHPy150Open anandology/pyjamas/pygtkweb/demos/071-generictreemodel.py/MyTreeModel.on_iter_next
|
7,167
|
def __getitem__(self, name):
    try:
        return dict.__getitem__(self, name)
    except __HOLE__:
        if self.parent:
            return self.parent[name]
        else:
            raise
|
KeyError
|
dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/puliclient/__init__.py/HierarchicalDict.__getitem__
|
7,168
|
def parseCallable(targetCall, name, user_args, user_kwargs):
'''
'''
#
# Check callable given
#
import inspect
if not (inspect.ismethod(targetCall) or inspect.isfunction(targetCall)):
raise GraphError("Callable must be a function or method.")
# Common args
callableArgs = {}
callableArgs['sysPath'] = sys.path
callableArgs['moduleName'] = targetCall.__module__
# Ensure params can be serialized i.e. not object, instance or function
try:
callableArgs['user_args'] = json.dumps(user_args)
callableArgs['user_kwargs'] = json.dumps(user_kwargs)
except __HOLE__:
raise GraphError("Error: invalid parameters (not JSON serializable): %s" % params)
# Specific args
if inspect.isfunction(targetCall):
callableArgs['execType'] = 'function'
callableArgs['funcName'] = targetCall.__name__
taskName = name if name != "" else callableArgs['funcName']
if inspect.ismethod(targetCall):
callableArgs['execType'] = 'method'
callableArgs['className'] = inspect.getmro(targetCall.im_class)[0].__name__
callableArgs['methodName'] = targetCall.__name__
taskName = name if name != "" else callableArgs['className'] + "." + callableArgs['methodName']
return (taskName, callableArgs)
|
TypeError
|
dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/puliclient/__init__.py/parseCallable
|
7,169
|
def execNode(self, pCommand):
"""
| Emulate the execution of a command on a worker node.
| 2 possible execution mode: with a subprocess or direct
| - Calls the "commandwatcher.py" script used by the worker \
process to keep a similar behaviour
| Command output and error messages are left in stdout/stderr to \
give the user a proper feedback of its command
| - Create CommandWatcherObject in current exec
:param pCommand: dict containing the command description and arguments
:raise: GraphExecInterrupt when a keyboard interrupt is raised
"""
print ""
#from octopus.commandwatcher import commandwatcher
commandId = pCommand["execid"]
# taskId = pCommand["taskid"]
runner = pCommand["runner"]
runnerPackages = pCommand.get("runnerPackages", "undefined")
validationExpression = pCommand["validationExpression"]
print(pCommand)
#
# SECURE WAY: start a subprocess
#
#####
# CommandWatcher call, arguments expected are:
# - python executable
# - flag "u" to load commands from a file
# - file to load
# - a communication port to contact a worker if execution is done in remote contact (executed via puli's worker)
# - a command id
# - a runner class
# - an optionnal validation expression
# from octopus.commandwatcher import commandwatcher
# scriptFile = commandwatcher.__file__
# pythonExecutable = sys.executable
# args = [
# pythonExecutable,
# "-u",
# scriptFile,
# "",
# "0",
# str(commandId),
# runner,
# validationExpression,
# ]
# args.extend(('%s=%s' % (str(name), str(value)) for (name, value) in pCommand["arguments"].items()))
# #### normalize environment~-> TOCHECK peut etre a supprimer justement pour garder l'env en execution locale (attention au
# #### call subprocess qui ajoute envN
# envN = {}
# envN["PYTHONPATH"] = "/s/apps/lin/vfx_test_apps/OpenRenderManagement/Puli/src"
# for key in pCommand["environment"]:
# envN[str(key)] = str(pCommand["environment"][key])
# # print("Starting subprocess, log: %r, args: %r" % (logFile, args) )
# try:
# proc = subprocess.Popen(args, bufsize=-1, stdin=None, stdout=None,
# stderr=None, close_fds=True,
# env=envN)
# proc.wait()
# except Exception,e:
# print("Impossible to start subprocess: %r" % e)
# raise e
# except KeyboardInterrupt:
# sys.exit(0)
# print ""
#
# DIRECT CALL: create CommandWatcher object
#
from octopus.commandwatcher.commandwatcher import CommandWatcher
try:
result = CommandWatcher("",
"0",
commandId,
runner,
runnerPackages,
validationExpression,
pCommand["arguments"])
return result.finalState
except __HOLE__:
print("\n")
print("Exit event caught: exiting CommandWatcher...\n")
# sys.exit(0)
raise GraphExecInterrupt
|
KeyboardInterrupt
|
dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/puliclient/__init__.py/Graph.execNode
|
7,170
|
def getTaskIndex(self, task):
    try:
        return self.taskMem[task]
    except __HOLE__:
        return self.addTask(task)
|
KeyError
|
dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/puliclient/__init__.py/GraphDumper.getTaskIndex
|
7,171
|
@classmethod
def generateQuaggaBoot(cls, node, services):
''' Generate a shell script used to boot the Quagga daemons.
'''
try:
quagga_bin_search = node.session.cfg['quagga_bin_search']
quagga_sbin_search = node.session.cfg['quagga_sbin_search']
except __HOLE__:
quagga_bin_search = '"/usr/local/bin /usr/bin /usr/lib/quagga"'
quagga_sbin_search = '"/usr/local/sbin /usr/sbin /usr/lib/quagga"'
return """\
#!/bin/sh
# auto-generated by zebra service (quagga.py)
QUAGGA_CONF=%s
QUAGGA_SBIN_SEARCH=%s
QUAGGA_BIN_SEARCH=%s
QUAGGA_STATE_DIR=%s
QUAGGA_USER=%s
QUAGGA_GROUP=%s
searchforprog()
{
prog=$1
searchpath=$@
ret=
for p in $searchpath; do
if [ -x $p/$prog ]; then
ret=$p
break
fi
done
echo $ret
}
confcheck()
{
CONF_DIR=`dirname $QUAGGA_CONF`
# if /etc/quagga exists, point /etc/quagga/Quagga.conf -> CONF_DIR
if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/Quagga.conf ]; then
ln -s $CONF_DIR/Quagga.conf /etc/quagga/Quagga.conf
fi
# if /etc/quagga exists, point /etc/quagga/vtysh.conf -> CONF_DIR
if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/vtysh.conf ]; then
ln -s $CONF_DIR/vtysh.conf /etc/quagga/vtysh.conf
fi
}
waitforvtyfiles()
{
for f in "$@"; do
count=1
until [ -e $QUAGGA_STATE_DIR/$f ]; do
if [ $count -eq 10 ]; then
echo "ERROR: vty file not found: $QUAGGA_STATE_DIR/$f" >&2
return 1
fi
sleep 0.1
count=$(($count + 1))
done
done
}
bootdaemon()
{
QUAGGA_SBIN_DIR=$(searchforprog $1 $QUAGGA_SBIN_SEARCH)
if [ "z$QUAGGA_SBIN_DIR" = "z" ]; then
echo "ERROR: Quagga's '$1' daemon not found in search path:"
echo " $QUAGGA_SBIN_SEARCH"
return 1
fi
flags=""
if [ "$1" != "zebra" ]; then
waitforvtyfiles zebra.vty
fi
if [ "$1" = "xpimd" ] && \\
grep -E -q '^[[:space:]]*router[[:space:]]+pim6[[:space:]]*$' $QUAGGA_CONF; then
flags="$flags -6"
fi
$QUAGGA_SBIN_DIR/$1 $flags -u $QUAGGA_USER -g $QUAGGA_GROUP -d
}
bootvtysh()
{
QUAGGA_BIN_DIR=$(searchforprog $1 $QUAGGA_BIN_SEARCH)
if [ "z$QUAGGA_BIN_DIR" = "z" ]; then
echo "ERROR: Quagga's '$1' daemon not found in search path:"
echo " $QUAGGA_SBIN_SEARCH"
return 1
fi
vtyfiles="zebra.vty"
for r in rip ripng ospf6 ospf bgp babel; do
if grep -q "^router \<${r}\>" $QUAGGA_CONF; then
vtyfiles="$vtyfiles ${r}d.vty"
fi
done
if grep -E -q '^[[:space:]]*router[[:space:]]+pim6?[[:space:]]*$' $QUAGGA_CONF; then
vtyfiles="$vtyfiles xpimd.vty"
fi
# wait for Quagga daemon vty files to appear before invoking vtysh
waitforvtyfiles $vtyfiles
$QUAGGA_BIN_DIR/vtysh -b
}
confcheck
if [ "x$1" = "x" ]; then
echo "ERROR: missing the name of the Quagga daemon to boot"
exit 1
elif [ "$1" = "vtysh" ]; then
bootvtysh $1
else
bootdaemon $1
fi
""" % (cls._configs[0], quagga_sbin_search, quagga_bin_search, \
QUAGGA_STATE_DIR, QUAGGA_USER, QUAGGA_GROUP)
|
KeyError
|
dataset/ETHPy150Open coreemu/core/daemon/core/services/quagga.py/Zebra.generateQuaggaBoot
|
7,172
|
def watch(self, key, recurse=False, timeout=0, index=None):
ret = {
'key': key,
'value': None,
'changed': False,
'mIndex': 0,
'dir': False
}
try:
result = self.read(key, recursive=recurse, wait=True, timeout=timeout, waitIndex=index)
except EtcdUtilWatchTimeout:
try:
result = self.read(key)
except etcd.EtcdKeyNotFound:
log.debug("etcd: key was not created while watching")
return ret
except __HOLE__:
return {}
if result and getattr(result, "dir"):
ret['dir'] = True
ret['value'] = getattr(result, 'value')
ret['mIndex'] = getattr(result, 'modifiedIndex')
return ret
except (etcd.EtcdConnectionFailed, MaxRetryError):
# This gets raised when we can't contact etcd at all
log.error("etcd: failed to perform 'watch' operation on key {0} due to connection error".format(key))
return {}
except ValueError:
return {}
if recurse:
ret['key'] = getattr(result, 'key', None)
ret['value'] = getattr(result, 'value', None)
ret['dir'] = getattr(result, 'dir', None)
ret['changed'] = True
ret['mIndex'] = getattr(result, 'modifiedIndex')
return ret
|
ValueError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/etcd_util.py/EtcdClient.watch
|
7,173
|
def get(self, key, recurse=False):
    try:
        result = self.read(key, recursive=recurse)
    except etcd.EtcdKeyNotFound:
        # etcd already logged that the key wasn't found, no need to do
        # anything here but return
        return None
    except etcd.EtcdConnectionFailed:
        log.error("etcd: failed to perform 'get' operation on key {0} due to connection error".format(key))
        return None
    except __HOLE__:
        return None
    return getattr(result, 'value', None)
|
ValueError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/etcd_util.py/EtcdClient.get
|
7,174
|
def read(self, key, recursive=False, wait=False, timeout=None, waitIndex=None):
try:
if waitIndex:
result = self.client.read(key, recursive=recursive, wait=wait, timeout=timeout, waitIndex=waitIndex)
else:
result = self.client.read(key, recursive=recursive, wait=wait, timeout=timeout)
except (etcd.EtcdConnectionFailed, etcd.EtcdKeyNotFound) as err:
log.error("etcd: {0}".format(err))
raise
except ReadTimeoutError:
# For some reason, we have to catch this directly. It falls through
# from python-etcd because it's trying to catch
# urllib3.exceptions.ReadTimeoutError and strangely, doesn't catch.
# This can occur from a watch timeout that expires, so it may be 'expected'
# behavior. See issue #28553
if wait:
# Wait timeouts will throw ReadTimeoutError, which isn't bad
log.debug("etcd: Timed out while executing a wait")
raise EtcdUtilWatchTimeout("Watch on {0} timed out".format(key))
log.error("etcd: Timed out")
raise etcd.EtcdConnectionFailed("Connection failed")
except MaxRetryError as err:
# Same issue as ReadTimeoutError. When it 'works', python-etcd
# throws EtcdConnectionFailed, so we'll do that for it.
log.error("etcd: Could not connect")
raise etcd.EtcdConnectionFailed("Could not connect to etcd server")
except etcd.EtcdException as err:
# EtcdValueError inherits from ValueError, so we don't want to accidentally
# catch this below on ValueError and give a bogus error message
log.error("etcd: {0}".format(err))
raise
except __HOLE__:
# python-etcd doesn't fully support python 2.6 and ends up throwing this for *any* exception because
# it uses the newer {} format syntax
log.error("etcd: error. python-etcd does not fully support python 2.6, no error information available")
raise
except Exception as err:
log.error('etcd: uncaught exception {0}'.format(err))
raise
return result
|
ValueError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/etcd_util.py/EtcdClient.read
|
7,175
|
def write(self, key, value, ttl=None, directory=False):
    # directories can't have values, but have to have it passed
    if directory:
        value = None
    try:
        result = self.client.write(key, value, ttl=ttl, dir=directory)
    except (etcd.EtcdNotFile, etcd.EtcdNotDir, etcd.EtcdRootReadOnly, __HOLE__) as err:
        log.error('etcd: {0}'.format(err))
        return None
    except MaxRetryError as err:
        log.error("etcd: Could not connect to etcd server: {0}".format(err))
        return None
    except Exception as err:
        log.error('etcd: uncaught exception {0}'.format(err))
        raise
    if directory:
        return getattr(result, 'dir')
    else:
        return getattr(result, 'value')
|
ValueError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/etcd_util.py/EtcdClient.write
|
7,176
|
def ls(self, path):
    ret = {}
    try:
        items = self.read(path)
    except (etcd.EtcdKeyNotFound, __HOLE__):
        return {}
    except etcd.EtcdConnectionFailed:
        log.error("etcd: failed to perform 'ls' operation on path {0} due to connection error".format(path))
        return None
    for item in items.children:
        if item.dir is True:
            if item.key == path:
                continue
            dir_name = '{0}/'.format(item.key)
            ret[dir_name] = {}
        else:
            ret[item.key] = item.value
    return {path: ret}
|
ValueError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/etcd_util.py/EtcdClient.ls
|
7,177
|
def delete(self, key, recursive=False):
    try:
        if self.client.delete(key, recursive=recursive):
            return True
        else:
            return False
    except (etcd.EtcdNotFile, etcd.EtcdRootReadOnly, etcd.EtcdDirNotEmpty, etcd.EtcdKeyNotFound, __HOLE__) as err:
        log.error('etcd: {0}'.format(err))
        return None
    except MaxRetryError as err:
        log.error('etcd: Could not connect to etcd server: {0}'.format(err))
        return None
    except Exception as err:
        log.error('etcd: uncaught exception {0}'.format(err))
        raise
|
ValueError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/etcd_util.py/EtcdClient.delete
|
7,178
|
def tree(self, path):
    '''
    .. versionadded:: 2014.7.0
    Recurse through etcd and return all values
    '''
    ret = {}
    try:
        items = self.read(path)
    except (etcd.EtcdKeyNotFound, __HOLE__):
        return None
    except etcd.EtcdConnectionFailed:
        log.error("etcd: failed to perform 'tree' operation on path {0} due to connection error".format(path))
        return None
    for item in items.children:
        comps = str(item.key).split('/')
        if item.dir is True:
            if item.key == path:
                continue
            ret[comps[-1]] = self.tree(item.key)
        else:
            ret[comps[-1]] = item.value
    return ret
|
ValueError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/etcd_util.py/EtcdClient.tree
|
7,179
|
def load(self):
    try:
        s = MongoSession.objects(session_key=self.session_key,
                                 expire_date__gt=datetime_now)[0]
        if MONGOENGINE_SESSION_DATA_ENCODE:
            return self.decode(force_text(s.session_data))
        else:
            return s.session_data
    except (__HOLE__, SuspiciousOperation):
        self._session_key = None
        return {}
|
IndexError
|
dataset/ETHPy150Open MongoEngine/django-mongoengine/django_mongoengine/sessions.py/SessionStore.load
|
7,180
|
def main():
    try:
        hostname = raw_input("Enter remote host to test: ")
    except __HOLE__:
        hostname = input("Enter remote host to test: ")
    home_dir = (path.expanduser('~'))
    key_file = "{}/.ssh/cisco_rsa".format(home_dir)
    cisco_test = {
        'ip': hostname,
        'username': 'testuser2',
        'device_type': 'cisco_ios',
        'use_keys': True,
        'key_file': key_file,
        'verbose': False}
    net_connect = ConnectHandler(**cisco_test)
    print()
    print("Checking prompt: ")
    print(net_connect.find_prompt())
    print()
    print("Testing show ip int brief: ")
    output = net_connect.send_command("show ip int brief")
    print(output)
    print()
|
NameError
|
dataset/ETHPy150Open ktbyers/netmiko/tests/test_cisco_w_key.py/main
|
7,181
|
def send(self, input):
    if not self.stdin:
        return None
    try:
        x = msvcrt.get_osfhandle(self.stdin.fileno())
        (errCode, written) = WriteFile(x, input)
    except __HOLE__:
        return self._close('stdin')
    except (subprocess.pywintypes.error, Exception), why:
        if why[0] in (109, errno.ESHUTDOWN):
            return self._close('stdin')
        raise
    return written
|
ValueError
|
dataset/ETHPy150Open owtf/owtf/framework/shell/async_subprocess.py/AsyncPopen.send
|
7,182
|
def _recv(self, which, maxsize):
    conn, maxsize = self.get_conn_maxsize(which, maxsize)
    if conn is None:
        return None
    try:
        x = msvcrt.get_osfhandle(conn.fileno())
        (read, nAvail, nMessage) = PeekNamedPipe(x, 0)
        if maxsize < nAvail:
            nAvail = maxsize
        if nAvail > 0:
            (errCode, read) = ReadFile(x, nAvail, None)
    except __HOLE__:
        return self._close(which)
    except (subprocess.pywintypes.error, Exception), why:
        if why[0] in (109, errno.ESHUTDOWN):
            return self._close(which)
        raise
    if self.universal_newlines:
        read = self._translate_newlines(read)
    return read
|
ValueError
|
dataset/ETHPy150Open owtf/owtf/framework/shell/async_subprocess.py/AsyncPopen._recv
|
7,183
|
def send(self, input):
    if not self.stdin:
        return None
    if not select.select([], [self.stdin], [], 0)[1]:
        return 0
    try:
        written = os.write(self.stdin.fileno(), input)
    except __HOLE__, why:
        if why[0] == errno.EPIPE: #broken pipe
            return self._close('stdin')
        raise
    return written
|
OSError
|
dataset/ETHPy150Open owtf/owtf/framework/shell/async_subprocess.py/AsyncPopen.send
|
7,184
|
@blueprint.route('/login', methods=['GET','POST'])
def login():
    """
    Ask for a username (no password required)
    Sets a cookie
    """
    # Get the URL to redirect to after logging in
    next_url = utils.routing.get_request_arg('next') or \
        flask.request.referrer or flask.url_for('.home')
    if flask.request.method == 'GET':
        return flask.render_template('login.html', next=next_url)
    # Validate username
    username = utils.routing.get_request_arg('username').strip()
    try:
        utils.auth.validate_username(username)
    except __HOLE__ as e:
        # Invalid username
        flask.flash(e.message, 'danger')
        return flask.render_template('login.html', next=next_url)
    # Valid username
    response = flask.make_response(flask.redirect(next_url))
    response.set_cookie('username', username)
    return response
|
ValueError
|
dataset/ETHPy150Open NVIDIA/DIGITS/digits/views.py/login
|
7,185
|
def _PopLine(self):
    while len(self._current_line) <= self._column:
        try:
            self._current_line = next(self._lines)
        except __HOLE__:
            self._current_line = ''
            self._more_lines = False
            return
        else:
            self._line += 1
            self._column = 0
|
StopIteration
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/_Tokenizer._PopLine
|
7,186
|
def ConsumeInt32(self):
    """Consumes a signed 32bit integer number.
    Returns:
      The integer parsed.
    Raises:
      ParseError: If a signed 32bit integer couldn't be consumed.
    """
    try:
        result = ParseInteger(self.token, is_signed=True, is_long=False)
    except __HOLE__ as e:
        raise self._ParseError(str(e))
    self.NextToken()
    return result
|
ValueError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/_Tokenizer.ConsumeInt32
|
7,187
|
def ConsumeUint32(self):
    """Consumes an unsigned 32bit integer number.
    Returns:
      The integer parsed.
    Raises:
      ParseError: If an unsigned 32bit integer couldn't be consumed.
    """
    try:
        result = ParseInteger(self.token, is_signed=False, is_long=False)
    except __HOLE__ as e:
        raise self._ParseError(str(e))
    self.NextToken()
    return result
|
ValueError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/_Tokenizer.ConsumeUint32
|
7,188
|
def ConsumeInt64(self):
    """Consumes a signed 64bit integer number.
    Returns:
      The integer parsed.
    Raises:
      ParseError: If a signed 64bit integer couldn't be consumed.
    """
    try:
        result = ParseInteger(self.token, is_signed=True, is_long=True)
    except __HOLE__ as e:
        raise self._ParseError(str(e))
    self.NextToken()
    return result
|
ValueError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/_Tokenizer.ConsumeInt64
|
7,189
|
def ConsumeUint64(self):
    """Consumes an unsigned 64bit integer number.
    Returns:
      The integer parsed.
    Raises:
      ParseError: If an unsigned 64bit integer couldn't be consumed.
    """
    try:
        result = ParseInteger(self.token, is_signed=False, is_long=True)
    except __HOLE__ as e:
        raise self._ParseError(str(e))
    self.NextToken()
    return result
|
ValueError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/_Tokenizer.ConsumeUint64
|
7,190
|
def ConsumeFloat(self):
    """Consumes an floating point number.
    Returns:
      The number parsed.
    Raises:
      ParseError: If a floating point number couldn't be consumed.
    """
    try:
        result = ParseFloat(self.token)
    except __HOLE__ as e:
        raise self._ParseError(str(e))
    self.NextToken()
    return result
|
ValueError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/_Tokenizer.ConsumeFloat
|
7,191
|
def ConsumeBool(self):
    """Consumes a boolean value.
    Returns:
      The bool parsed.
    Raises:
      ParseError: If a boolean value couldn't be consumed.
    """
    try:
        result = ParseBool(self.token)
    except __HOLE__ as e:
        raise self._ParseError(str(e))
    self.NextToken()
    return result
|
ValueError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/_Tokenizer.ConsumeBool
|
7,192
|
def ConsumeString(self):
    """Consumes a string value.
    Returns:
      The string parsed.
    Raises:
      ParseError: If a string value couldn't be consumed.
    """
    the_bytes = self.ConsumeByteString()
    try:
        return six.text_type(the_bytes, 'utf-8')
    except __HOLE__ as e:
        raise self._StringParseError(e)
|
UnicodeDecodeError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/_Tokenizer.ConsumeString
|
7,193
|
def _ConsumeSingleByteString(self):
    """Consume one token of a string literal.
    String literals (whether bytes or text) can come in multiple adjacent
    tokens which are automatically concatenated, like in C or Python. This
    method only consumes one token.
    Raises:
      ParseError: When the wrong format data is found.
    """
    text = self.token
    if len(text) < 1 or text[0] not in ('\'', '"'):
        raise self._ParseError('Expected string but found: %r' % (text,))
    if len(text) < 2 or text[-1] != text[0]:
        raise self._ParseError('String missing ending quote: %r' % (text,))
    try:
        result = text_encoding.CUnescape(text[1:-1])
    except __HOLE__ as e:
        raise self._ParseError(str(e))
    self.NextToken()
    return result
|
ValueError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/_Tokenizer._ConsumeSingleByteString
|
7,194
|
def ConsumeEnum(self, field):
    try:
        result = ParseEnum(field, self.token)
    except __HOLE__ as e:
        raise self._ParseError(str(e))
    self.NextToken()
    return result
|
ValueError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/_Tokenizer.ConsumeEnum
|
7,195
|
def ParseInteger(text, is_signed=False, is_long=False):
    """Parses an integer.
    Args:
      text: The text to parse.
      is_signed: True if a signed integer must be parsed.
      is_long: True if a long integer must be parsed.
    Returns:
      The integer value.
    Raises:
      ValueError: Thrown Iff the text is not a valid integer.
    """
    # Do the actual parsing. Exception handling is propagated to caller.
    try:
        # We force 32-bit values to int and 64-bit values to long to make
        # alternate implementations where the distinction is more significant
        # (e.g. the C++ implementation) simpler.
        if is_long:
            result = int(text, 0)
        else:
            result = int(text, 0)
    except __HOLE__:
        raise ValueError('Couldn\'t parse integer: %s' % text)
    # Check if the integer is sane. Exceptions handled by callers.
    checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
    checker.CheckValue(result)
    return result
|
ValueError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/ParseInteger
|
7,196
|
def ParseFloat(text):
    """Parse a floating point number.
    Args:
      text: Text to parse.
    Returns:
      The number parsed.
    Raises:
      ValueError: If a floating point number couldn't be parsed.
    """
    try:
        # Assume Python compatible syntax.
        return float(text)
    except ValueError:
        # Check alternative spellings.
        if _FLOAT_INFINITY.match(text):
            if text[0] == '-':
                return float('-inf')
            else:
                return float('inf')
        elif _FLOAT_NAN.match(text):
            return float('nan')
        else:
            # assume '1.0f' format
            try:
                return float(text.rstrip('f'))
            except __HOLE__:
                raise ValueError('Couldn\'t parse float: %s' % text)
|
ValueError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/ParseFloat
|
7,197
|
def ParseEnum(field, value):
    """Parse an enum value.
    The value can be specified by a number (the enum value), or by
    a string literal (the enum name).
    Args:
      field: Enum field descriptor.
      value: String value.
    Returns:
      Enum value number.
    Raises:
      ValueError: If the enum value could not be parsed.
    """
    enum_descriptor = field.enum_type
    try:
        number = int(value, 0)
    except __HOLE__:
        # Identifier.
        enum_value = enum_descriptor.values_by_name.get(value, None)
        if enum_value is None:
            raise ValueError(
                'Enum type "%s" has no value named %s.' % (
                    enum_descriptor.full_name, value))
    else:
        # Numeric value.
        enum_value = enum_descriptor.values_by_number.get(number, None)
        if enum_value is None:
            raise ValueError(
                'Enum type "%s" has no value with number %d.' % (
                    enum_descriptor.full_name, number))
    return enum_value.number
|
ValueError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/externals/google/protobuf/text_format.py/ParseEnum
|
7,198
|
@defer.inlineCallbacks
def on_GET(self, request, user_id, filter_id):
    target_user = UserID.from_string(user_id)
    requester = yield self.auth.get_user_by_req(request)
    if target_user != requester.user:
        raise AuthError(403, "Cannot get filters for other users")
    if not self.hs.is_mine(target_user):
        raise SynapseError(400, "Can only get filters for local users")
    try:
        filter_id = int(filter_id)
    except:
        raise SynapseError(400, "Invalid filter_id")
    try:
        filter = yield self.filtering.get_user_filter(
            user_localpart=target_user.localpart,
            filter_id=filter_id,
        )
        defer.returnValue((200, filter.get_filter_json()))
    except __HOLE__:
        raise SynapseError(400, "No such filter")
|
KeyError
|
dataset/ETHPy150Open matrix-org/synapse/synapse/rest/client/v2_alpha/filter.py/GetFilterRestServlet.on_GET
|
7,199
|
def getMelRepresentation( args, recursionLimit=None, maintainDicts=True):
    """Will return a list which contains each element of the iterable 'args' converted to a mel-friendly representation.
    :Parameters:
        recursionLimit : int or None
            If an element of args is itself iterable, recursionLimit specifies the depth to which iterable elements
            will recursively search for objects to convert; if ``recursionLimit==0``, only the elements
            of args itself will be searched for PyNodes - if it is 1, iterables within args will have getMelRepresentation called
            on them, etc. If recursionLimit==None, then there is no limit to recursion depth.
        maintainDicts : bool
            In general, all iterables will be converted to tuples in the returned copy - however, if maintainDicts==True,
            then iterables for which ``util.isMapping()`` returns True will be returned as dicts.
    """
    if recursionLimit:
        recursionLimit -= 1
    if maintainDicts and util.isMapping(args):
        newargs = dict(args)
        argIterable = args.iteritems()
        isList = False
    else:
        newargs = list(args)
        argIterable = enumerate(args)
        isList = True
    for index, value in argIterable:
        try:
            newargs[index] = value.__melobject__()
        except __HOLE__:
            if ( (not recursionLimit) or recursionLimit >= 0) and util.isIterable(value):
                # ...otherwise, recurse if not at recursion limit and it's iterable
                newargs[index] = getMelRepresentation(value, recursionLimit, maintainDicts)
    if isList:
        newargs = tuple(newargs)
    return newargs
|
AttributeError
|
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.5/pymel/internal/pmcmds.py/getMelRepresentation
|