Unnamed: 0
int64
0
10k
function
stringlengths
79
138k
label
stringclasses
20 values
info
stringlengths
42
261
7,200
def addWrappedCmd(cmdname, cmd=None): if cmd is None: cmd = getattr(maya.cmds, cmdname) #if cmd.__name__ == 'dummyFunc': print cmdname def wrappedCmd(*args, **kwargs): # we must get the cmd each time, because maya delays loading of functions until they are needed. # if we don't reload we'll keep the dummyFunc around new_cmd = getattr(maya.cmds, cmdname) #print args, kwargs # convert args to mel-friendly representation new_args = getMelRepresentation(args) # flatten list. this is necessary for list of components. see Issue 71. however, be sure that it's not an empty list/tuple if len(new_args) == 1 and util.isIterable(new_args[0]) and len(new_args[0]): #isinstance( new_args[0], (tuple, list) ): new_args = new_args[0] new_kwargs = getMelRepresentation(kwargs) #print new_args, new_kwargs try: res = new_cmd(*new_args, **new_kwargs) except objectErrorType, e: m = objectErrorReg.match(str(e)) if m: import pymel.core.general obj = m.group(1) raise pymel.core.general._objectError(obj) else: # re-raise error raise # when editing, some of maya.cmds functions return empty strings and some return idiotic statements like 'Values Edited'. # however, for UI's in particular, people use the edit command to get a pymel class for existing objects. # return None when we get an empty string try: if res=='' and kwargs.get('edit', kwargs.get('e', False) ): return None except __HOLE__: pass return res wrappedCmd.__doc__ = cmd.__doc__ oldname = getattr(cmd, '__name__', None) if isinstance(oldname, str): # Don't use cmd.__name__, as this could be 'stubFunc' wrappedCmd.__name__ = getCmdName(cmd) else: wrappedCmd.__name__ = str(cmdname) # for debugging, to make sure commands got wrapped... #wrappedCmd = _testDecorator(wrappedCmd) # so that we can identify that this is a wrapped maya command setattr( _thisModule, cmdname, wrappedCmd ) #globals()[cmdname] = wrappedCmd
AttributeError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.5/pymel/internal/pmcmds.py/addWrappedCmd
7,201
def removeWrappedCmd(cmdname): try: del cmdname except __HOLE__: warnings.warn("%s not found in %s" % (cmdname, __name__))
NameError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.5/pymel/internal/pmcmds.py/removeWrappedCmd
7,202
def test_expect_not_to_be_here(): try: expect.not_to_be_here() except __HOLE__: err = sys.exc_info()[1] expect(err).to_be_an_error() expect(err).to_be_an_error_like(AssertionError) expect(err).to_have_an_error_message_of("Should not have gotten this far.") else: assert False, "Should not have gotten this far."
AssertionError
dataset/ETHPy150Open heynemann/preggy/tests/test_be_here.py/test_expect_not_to_be_here
7,203
def test_expect_not_to_be_here_with_message(): try: expect.not_to_be_here("qweqwe") except __HOLE__: err = sys.exc_info()[1] expect(err).to_be_an_error() expect(err).to_be_an_error_like(AssertionError) expect(err).to_have_an_error_message_of("Should not have gotten this far (qweqwe).") else: assert False, "Should not have gotten this far."
AssertionError
dataset/ETHPy150Open heynemann/preggy/tests/test_be_here.py/test_expect_not_to_be_here_with_message
7,204
def scan_on_disk(node, env, path=()): """ Scans a directory for on-disk files and directories therein. Looking up the entries will add these to the in-memory Node tree representation of the file system, so all we have to do is just that and then call the in-memory scanning function. """ try: flist = node.fs.listdir(node.abspath) except (__HOLE__, OSError): return [] e = node.Entry for f in filter(do_not_scan, flist): # Add ./ to the beginning of the file name so if it begins with a # '#' we don't look it up relative to the top-level directory. e('./' + f) return scan_in_memory(node, env, path)
IOError
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Scanner/Dir.py/scan_on_disk
7,205
def scan_in_memory(node, env, path=()): """ "Scans" a Node.FS.Dir for its in-memory entries. """ try: entries = node.entries except __HOLE__: # It's not a Node.FS.Dir (or doesn't look enough like one for # our purposes), which can happen if a target list containing # mixed Node types (Dirs and Files, for example) has a Dir as # the first entry. return [] entry_list = sorted(filter(do_not_scan, list(entries.keys()))) return [entries[n] for n in entry_list] # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
AttributeError
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Scanner/Dir.py/scan_in_memory
7,206
def _read_to_buffer(self): """Reads from the socket and appends the result to the read buffer. Returns the number of bytes read. Returns 0 if there is nothing to read (i.e. the read returns EWOULDBLOCK or equivalent). On error closes the socket and raises an exception. """ try: chunk = self.read_from_fd() except (socket.error, __HOLE__, OSError) as e: # ssl.SSLError is a subclass of socket.error if e.args[0] == errno.ECONNRESET: # Treat ECONNRESET as a connection close rather than # an error to minimize log spam (the exception will # be available on self.error for apps that care). self.close(exc_info=True) return self.close(exc_info=True) raise if chunk is None: return 0 self._read_buffer.append(chunk) self._read_buffer_size += len(chunk) if self._read_buffer_size >= self.max_buffer_size: gen_log.error("Reached maximum read buffer size") self.close() raise IOError("Reached maximum read buffer size") return len(chunk)
IOError
dataset/ETHPy150Open D-L/SimpleBookMarks/src/tornado/iostream.py/BaseIOStream._read_to_buffer
7,207
def _do_ssl_handshake(self): # Based on code from test_ssl.py in the python stdlib try: self._handshake_reading = False self._handshake_writing = False self.socket.do_handshake() except ssl.SSLError as err: if err.args[0] == ssl.SSL_ERROR_WANT_READ: self._handshake_reading = True return elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE: self._handshake_writing = True return elif err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN): return self.close(exc_info=True) elif err.args[0] == ssl.SSL_ERROR_SSL: try: peer = self.socket.getpeername() except Exception: peer = '(not connected)' gen_log.warning("SSL Error on %d %s: %s", self.socket.fileno(), peer, err) return self.close(exc_info=True) raise except socket.error as err: if err.args[0] in (errno.ECONNABORTED, errno.ECONNRESET): return self.close(exc_info=True) except __HOLE__: # On Linux, if the connection was reset before the call to # wrap_socket, do_handshake will fail with an # AttributeError. return self.close(exc_info=True) else: self._ssl_accepting = False if not self._verify_cert(self.socket.getpeercert()): self.close() return if self._ssl_connect_callback is not None: callback = self._ssl_connect_callback self._ssl_connect_callback = None self._run_callback(callback)
AttributeError
dataset/ETHPy150Open D-L/SimpleBookMarks/src/tornado/iostream.py/SSLIOStream._do_ssl_handshake
7,208
def read_from_fd(self): try: chunk = os.read(self.fd, self.read_chunk_size) except (__HOLE__, OSError) as e: if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN): return None elif e.args[0] == errno.EBADF: # If the writing half of a pipe is closed, select will # report it as readable but reads will fail with EBADF. self.close(exc_info=True) return None else: raise if not chunk: self.close() return None return chunk
IOError
dataset/ETHPy150Open D-L/SimpleBookMarks/src/tornado/iostream.py/PipeIOStream.read_from_fd
7,209
def on_done(self, text): try: line = int(text) if self.window.active_view(): self.window.active_view().run_command("goto_line", {"line": line} ) except __HOLE__: pass
ValueError
dataset/ETHPy150Open JT5D/Alfred-Popclip-Sublime/Sublime Text 2/Default/goto_line.py/PromptGotoLineCommand.on_done
7,210
def __init__ ( self, optDict, dom, node ): threading.Thread.__init__(self) self.uptodate_FLAG = False for elem in vars(optDict): setattr(self, elem, getattr(optDict, elem)) self.dom = dom self.node = node self.siteParser = SiteParserFactory.getInstance(self) try: self.siteParser.parseSite() # create download directory if not found try: if os.path.exists(self.downloadPath) is False: os.makedirs(self.downloadPath) except __HOLE__: print("""Unable to create download directory. There may be a file with the same name, or you may not have permissions to write there.""") raise except self.siteParser.NoUpdates: self.uptodate_FLAG = True print ("Manga ("+self.manga+") up-to-date.") print('\n')
OSError
dataset/ETHPy150Open jiaweihli/manga_downloader/src/parsers/thread.py/SiteParserThread.__init__
7,211
@property def labels(self): # this was index_int, but that's not a very good name... if hasattr(self.index, 'labels'): return self.index.labels else: # pandas version issue here # Compat code for the labels -> codes change in pandas 0.15 # FIXME: use .codes directly when we don't want to support # pandas < 0.15 tmp = pd.Categorical(self.index) try: labl = tmp.codes except __HOLE__: labl = tmp.labels # Old pandsd return labl[None]
AttributeError
dataset/ETHPy150Open statsmodels/statsmodels/statsmodels/tools/grouputils.py/Grouping.labels
7,212
@never_cache def monitor(request): """View for services monitor.""" status = {} # Note: To add a new component to the services monitor, do your # testing and then add a name -> list of output tuples map to # status. # Check memcached. memcache_results = [] try: for cache_name, cache_props in settings.CACHES.items(): result = True backend = cache_props['BACKEND'] location = cache_props['LOCATION'] # LOCATION can be a string or a list of strings if isinstance(location, basestring): location = location.split(';') if 'memcache' in backend: for loc in location: # TODO: this doesn't handle unix: variant ip, port = loc.split(':') result = test_memcached(ip, int(port)) memcache_results.append( (INFO, '%s:%s %s' % (ip, port, result))) if not memcache_results: memcache_results.append((ERROR, 'memcache is not configured.')) elif len(memcache_results) < 2: memcache_results.append( (ERROR, ('You should have at least 2 memcache servers. ' 'You have %s.' % len(memcache_results)))) else: memcache_results.append((INFO, 'memcached servers look good.')) except Exception as exc: memcache_results.append( (ERROR, 'Exception while looking at memcached: %s' % str(exc))) status['memcached'] = memcache_results # Check Libraries and versions libraries_results = [] try: Image.new('RGB', (16, 16)).save(StringIO.StringIO(), 'JPEG') libraries_results.append((INFO, 'PIL+JPEG: Got it!')) except Exception as exc: libraries_results.append( (ERROR, 'PIL+JPEG: Probably missing: ' 'Failed to create a jpeg image: %s' % exc)) status['libraries'] = libraries_results # Check file paths. msg = 'We want read + write.' 
filepaths = ( (settings.USER_AVATAR_PATH, os.R_OK | os.W_OK, msg), (settings.IMAGE_UPLOAD_PATH, os.R_OK | os.W_OK, msg), (settings.THUMBNAIL_UPLOAD_PATH, os.R_OK | os.W_OK, msg), (settings.GALLERY_IMAGE_PATH, os.R_OK | os.W_OK, msg), (settings.GALLERY_IMAGE_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg), (settings.GALLERY_VIDEO_PATH, os.R_OK | os.W_OK, msg), (settings.GALLERY_VIDEO_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg), (settings.GROUP_AVATAR_PATH, os.R_OK | os.W_OK, msg), ) filepath_results = [] for path, perms, notes in filepaths: path = os.path.join(settings.MEDIA_ROOT, path) path_exists = os.path.isdir(path) path_perms = os.access(path, perms) if path_exists and path_perms: filepath_results.append( (INFO, '%s: %s %s %s' % (path, path_exists, path_perms, notes))) status['filepaths'] = filepath_results # Check RabbitMQ. rabbitmq_results = [] try: rabbit_conn = establish_connection(connect_timeout=5) rabbit_conn.connect() rabbitmq_results.append( (INFO, 'Successfully connected to RabbitMQ.')) except (socket.error, __HOLE__) as exc: rabbitmq_results.append( (ERROR, 'Error connecting to RabbitMQ: %s' % str(exc))) except Exception as exc: rabbitmq_results.append( (ERROR, 'Exception while looking at RabbitMQ: %s' % str(exc))) status['RabbitMQ'] = rabbitmq_results # Check ES. es_results = [] try: es_utils.get_doctype_stats(es_utils.all_read_indexes()[0]) es_results.append( (INFO, ('Successfully connected to ElasticSearch and index ' 'exists.'))) except es_utils.ES_EXCEPTIONS as exc: es_results.append( (ERROR, 'ElasticSearch problem: %s' % str(exc))) except Exception as exc: es_results.append( (ERROR, 'Exception while looking at ElasticSearch: %s' % str(exc))) status['ElasticSearch'] = es_results # Check Celery. # start = time.time() # pong = celery.task.ping() # rabbit_results = r = {'duration': time.time() - start} # status_summary['rabbit'] = pong == 'pong' and r['duration'] < 1 # Check Redis. 
redis_results = [] if hasattr(settings, 'REDIS_BACKENDS'): for backend in settings.REDIS_BACKENDS: try: redis_client(backend) redis_results.append((INFO, '%s: Pass!' % backend)) except RedisError: redis_results.append((ERROR, '%s: Fail!' % backend)) status['Redis'] = redis_results status_code = 200 status_summary = {} for component, output in status.items(): if ERROR in [item[0] for item in output]: status_code = 500 status_summary[component] = False else: status_summary[component] = True return render(request, 'services/monitor.html', { 'component_status': status, 'status_summary': status_summary}, status=status_code)
IOError
dataset/ETHPy150Open mozilla/kitsune/kitsune/sumo/views.py/monitor
7,213
def add_user(self, username, raise_on_error=False, **kwargs): """Add a user to the directory Args: username: The account username raise_on_error: optional (default: False) **kwargs: key-value pairs: password: mandatory email: mandatory first_name: optional last_name: optional display_name: optional active: optional (default True) Returns: True: Succeeded False: If unsuccessful """ # Check that mandatory elements have been provided if 'password' not in kwargs: raise ValueError("missing password") if 'email' not in kwargs: raise ValueError("missing email") # Populate data with default and mandatory values. # A KeyError means a mandatory value was not provided, # so raise a ValueError indicating bad args. try: data = { "name": username, "first-name": username, "last-name": username, "display-name": username, "email": kwargs["email"], "password": {"value": kwargs["password"]}, "active": True } except __HOLE__: return ValueError # Remove special case 'password' del(kwargs["password"]) # Put values from kwargs into data for k, v in kwargs.items(): new_k = k.replace("_", "-") if new_k not in data: raise ValueError("invalid argument %s" % k) data[new_k] = v response = self._post(self.rest_url + "/user", data=json.dumps(data)) if response.status_code == 201: return True if raise_on_error: raise RuntimeError(response.json()['message']) return False
KeyError
dataset/ETHPy150Open pycontribs/python-crowd/crowd.py/CrowdServer.add_user
7,214
def time_of_last_submission(self): if self.last_submission_time is None and self.num_of_submissions > 0: try: last_submission = self.instances.\ filter(deleted_at__isnull=True).latest("date_created") except __HOLE__: pass else: self.last_submission_time = last_submission.date_created self.save() return self.last_submission_time
ObjectDoesNotExist
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/logger/models/xform.py/XForm.time_of_last_submission
7,215
def time_of_last_submission_update(self): try: # we also consider deleted instances in this case return self.instances.latest("date_modified").date_modified except __HOLE__: pass
ObjectDoesNotExist
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/logger/models/xform.py/XForm.time_of_last_submission_update
7,216
def update_profile_num_submissions(sender, instance, **kwargs): profile_qs = User.profile.get_queryset() try: profile = profile_qs.select_for_update()\ .get(pk=instance.user.profile.pk) except __HOLE__: pass else: profile.num_of_submissions -= instance.num_of_submissions if profile.num_of_submissions < 0: profile.num_of_submissions = 0 profile.save()
ObjectDoesNotExist
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/logger/models/xform.py/update_profile_num_submissions
7,217
@staticmethod def _get_credentials(request): authorization = AUTHORIZATION(request.environ) try: authmeth, auth = authorization.split(' ', 1) except __HOLE__: # not enough values to unpack return None if authmeth.lower() == 'basic': try: auth = auth.strip().decode('base64') except binascii.Error: # can't decode return None try: login, password = auth.split(':', 1) except ValueError: # not enough values to unpack return None return {'login':login, 'password':password} return None
ValueError
dataset/ETHPy150Open SMFOSS/CheesePrism/cheeseprism/auth.py/BasicAuthenticationPolicy._get_credentials
7,218
def dataReceived(self, data): """ Protocol.dataReceived. Translates bytes into lines, and calls lineReceived (or rawDataReceived, depending on mode.) """ self.__buffer = self.__buffer+data while self.line_mode and not self.paused: try: line, self.__buffer = self.__buffer.split(self.delimiter, 1) except __HOLE__: if len(self.__buffer) > self.MAX_LENGTH: line, self.__buffer = self.__buffer, '' return self.lineLengthExceeded(line) break else: linelength = len(line) if linelength > self.MAX_LENGTH: exceeded = line + self.__buffer self.__buffer = '' return self.lineLengthExceeded(exceeded) why = self.lineReceived(line) if why or self.transport and self.transport.disconnecting: return why else: if not self.paused: data=self.__buffer self.__buffer='' if data: return self.rawDataReceived(data)
ValueError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/protocols/basic.py/LineReceiver.dataReceived
7,219
def stringReceived(self, string): """ Choose a protocol phase function and call it. Call back to the appropriate protocol phase; this begins with the function proto_init and moves on to proto_* depending on what each proto_* function returns. (For example, if self.proto_init returns 'foo', then self.proto_foo will be the next function called when a protocol message is received. """ try: pto = 'proto_'+self.state statehandler = getattr(self,pto) except __HOLE__: log.msg('callback',self.state,'not found') else: self.state = statehandler(string) if self.state == 'done': self.transport.loseConnection()
AttributeError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/protocols/basic.py/StatefulStringProtocol.stringReceived
7,220
def test_taggit_serializer_field(self): correct_value = ["1", "2", "3"] serializer_field = serializers.TagListSerializerField() correct_value = serializer_field.to_internal_value(correct_value) assert type(correct_value) is list incorrect_value = "123" try: incorrect_value = serializer_field.to_internal_value( incorrect_value) except __HOLE__: pass
ValidationError
dataset/ETHPy150Open glemmaPaul/django-taggit-serializer/tests/test_serializers.py/TestTaggit_serializer.test_taggit_serializer_field
7,221
def add_available(self, filename): # quick verification that it exists and can be read try: filename = filename.decode('utf-8') f = open(filename) f.close() except __HOLE__ as e: raise LogError('Could not read log file "%s" (%s)' % (filename, e)) logger.debug('Adding log file %s', filename) return self.available.add(filename)
IOError
dataset/ETHPy150Open Jahaja/psdash/psdash/log.py/Logs.add_available
7,222
def get_available(self): available = [] to_remove = [] for filename in self.available: try: log = self.get(filename) available.append(log) except __HOLE__: logger.info('Failed to get "%s", removing from available logs', filename) to_remove.append(filename) if to_remove: map(self.remove_available, to_remove) return available
IOError
dataset/ETHPy150Open Jahaja/psdash/psdash/log.py/Logs.get_available
7,223
def migrate_ptc_db(old_db_dir, new_db_dir): old_db_path = os.path.join(old_db_dir, "ptcwallet.db") if not os.path.exists(old_db_path): return True old_db = leveldb.LevelDB(old_db_path) try: p_key = old_db.Get("private_key") new_db = unqlite.UnQLite(os.path.join(new_db_dir, "ptcwallet.db")) new_db['private_key'] = p_key except __HOLE__: pass
KeyError
dataset/ETHPy150Open lbryio/lbry/lbrynet/db_migrator/migrate0to1.py/migrate_ptc_db
7,224
def migrate_lbryfile_manager_db(old_db_dir, new_db_dir): old_db_path = os.path.join(old_db_dir, "lbryfiles.db") if not os.path.exists(old_db_path): return True old_db = leveldb.LevelDB(old_db_path) new_db = sqlite3.connect(os.path.join(new_db_dir, "lbryfile_info.db")) c = new_db.cursor() c.execute("create table if not exists lbry_file_options (" + " blob_data_rate real, " + " status text," + " stream_hash text," " foreign key(stream_hash) references lbry_files(stream_hash)" + ")") new_db.commit() LBRYFILE_STATUS = "t" LBRYFILE_OPTIONS = "o" c = new_db.cursor() for k, v in old_db.RangeIter(): key_type, stream_hash = json.loads(k) if key_type == LBRYFILE_STATUS: try: rate = json.loads(old_db.Get(json.dumps((LBRYFILE_OPTIONS, stream_hash))))[0] except __HOLE__: rate = None c.execute("insert into lbry_file_options values (?, ?, ?)", (rate, v, stream_hash)) new_db.commit() new_db.close()
KeyError
dataset/ETHPy150Open lbryio/lbry/lbrynet/db_migrator/migrate0to1.py/migrate_lbryfile_manager_db
7,225
def create_oauth_token(expiration=None, scope=None, key=None, secret=None, name=None, output=True): """ Script to obtain an OAuth token from Trello. Must have TRELLO_API_KEY and TRELLO_API_SECRET set in your environment To set the token's expiration, set TRELLO_EXPIRATION as a string in your environment settings (eg. 'never'), otherwise it will default to 30 days. More info on token scope here: https://trello.com/docs/gettingstarted/#getting-a-token-from-a-user """ request_token_url = 'https://trello.com/1/OAuthGetRequestToken' authorize_url = 'https://trello.com/1/OAuthAuthorizeToken' access_token_url = 'https://trello.com/1/OAuthGetAccessToken' expiration = expiration or os.environ.get('TRELLO_EXPIRATION', "30days") scope = scope or os.environ.get('TRELLO_SCOPE', 'read,write') trello_key = key or os.environ['TRELLO_API_KEY'] trello_secret = secret or os.environ['TRELLO_API_SECRET'] name = name or os.environ.get('TRELLO_NAME', 'py-trello') # Step 1: Get a request token. This is a temporary token that is used for # having the user authorize an access token and to sign the request to obtain # said access token. session = OAuth1Session(client_key=trello_key, client_secret=trello_secret) response = session.fetch_request_token(request_token_url) resource_owner_key, resource_owner_secret = response.get('oauth_token'), response.get('oauth_token_secret') if output: print("Request Token:") print(" - oauth_token = %s" % resource_owner_key) print(" - oauth_token_secret = %s" % resource_owner_secret) print("") # Step 2: Redirect to the provider. Since this is a CLI script we do not # redirect. In a web application you would redirect the user to the URL # below. 
print("Go to the following link in your browser:") print("{authorize_url}?oauth_token={oauth_token}&scope={scope}&expiration={expiration}&name={name}".format( authorize_url=authorize_url, oauth_token=resource_owner_key, expiration=expiration, scope=scope, name=name )) # After the user has granted access to you, the consumer, the provider will # redirect you to whatever URL you have told them to redirect to. You can # usually define this in the oauth_callback argument as well. # Python 3 compatibility (raw_input was renamed to input) try: inputFunc = raw_input except __HOLE__: inputFunc = input accepted = 'n' while accepted.lower() == 'n': accepted = inputFunc('Have you authorized me? (y/n) ') oauth_verifier = inputFunc('What is the PIN? ') # Step 3: Once the consumer has redirected the user back to the oauth_callback # URL you can request the access token the user has approved. You use the # request token to sign this request. After this is done you throw away the # request token and use the access token returned. You should store this # access token somewhere safe, like a database, for future use. session = OAuth1Session(client_key=trello_key, client_secret=trello_secret, resource_owner_key=resource_owner_key, resource_owner_secret=resource_owner_secret, verifier=oauth_verifier) access_token = session.fetch_access_token(access_token_url) if output: print("Access Token:") print(" - oauth_token = %s" % access_token['oauth_token']) print(" - oauth_token_secret = %s" % access_token['oauth_token_secret']) print("") print("You may now access protected resources using the access tokens above.") print("") return access_token
NameError
dataset/ETHPy150Open sarumont/py-trello/trello/util.py/create_oauth_token
7,226
def add_handler(self, handler): if not hasattr(handler, "add_parent"): raise TypeError("expected BaseHandler instance, got %r" % type(handler)) added = False for meth in dir(handler): if meth in ["redirect_request", "do_open", "proxy_open"]: # oops, coincidental match continue i = meth.find("_") protocol = meth[:i] condition = meth[i+1:] if condition.startswith("error"): j = condition.find("_") + i + 1 kind = meth[j+1:] try: kind = int(kind) except __HOLE__: pass lookup = self.handle_error.get(protocol, {}) self.handle_error[protocol] = lookup elif condition == "open": kind = protocol lookup = self.handle_open elif condition == "response": kind = protocol lookup = self.process_response elif condition == "request": kind = protocol lookup = self.process_request else: continue handlers = lookup.setdefault(kind, []) if handlers: bisect.insort(handlers, handler) else: handlers.append(handler) added = True if added: # XXX why does self.handlers need to be sorted? bisect.insort(self.handlers, handler) handler.add_parent(self)
ValueError
dataset/ETHPy150Open babble/babble/include/jython/Lib/urllib2.py/OpenerDirector.add_handler
7,227
def get_authorization(self, req, chal): try: realm = chal['realm'] nonce = chal['nonce'] qop = chal.get('qop') algorithm = chal.get('algorithm', 'MD5') # mod_digest doesn't send an opaque, even though it isn't # supposed to be optional opaque = chal.get('opaque', None) except __HOLE__: return None H, KD = self.get_algorithm_impls(algorithm) if H is None: return None user, pw = self.passwd.find_user_password(realm, req.get_full_url()) if user is None: return None # XXX not implemented yet if req.has_data(): entdig = self.get_entity_digest(req.get_data(), chal) else: entdig = None A1 = "%s:%s:%s" % (user, realm, pw) A2 = "%s:%s" % (req.get_method(), # XXX selector: what about proxies and full urls req.get_selector()) if qop == 'auth': self.nonce_count += 1 ncvalue = '%08x' % self.nonce_count cnonce = self.get_cnonce(nonce) noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2)) respdig = KD(H(A1), noncebit) elif qop is None: respdig = KD(H(A1), "%s:%s" % (nonce, H(A2))) else: # XXX handle auth-int. raise URLError("qop '%s' is not supported." % qop) # XXX should the partial digests be encoded too? base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ 'response="%s"' % (user, realm, nonce, req.get_selector(), respdig) if opaque: base += ', opaque="%s"' % opaque if entdig: base += ', digest="%s"' % entdig base += ', algorithm="%s"' % algorithm if qop: base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce) return base
KeyError
dataset/ETHPy150Open babble/babble/include/jython/Lib/urllib2.py/AbstractDigestAuthHandler.get_authorization
7,228
def check_exists(fips_dir) : """test if xcodebuild is in the path :returns: True if xcodebuild is in the path """ try : subprocess.check_output(['xcodebuild', '-version']) return True except (__HOLE__, subprocess.CalledProcessError) : return False #------------------------------------------------------------------------------
OSError
dataset/ETHPy150Open floooh/fips/mod/tools/xcodebuild.py/check_exists
7,229
def run_clean(fips_dir, build_dir) : """run the special 'clean' target :params build_dir: directory where the xcode project file is located :returns: True if xcodebuild returns successful """ try : res = subprocess.call('xcodebuild clean', cwd=build_dir, shell=True) return res == 0 except (__HOLE__, subprocess.CalledProcessError) : return False
OSError
dataset/ETHPy150Open floooh/fips/mod/tools/xcodebuild.py/run_clean
7,230
def manager(self, model): '''Retrieve the :class:`Manager` for ``model`` which can be any of the values valid for the :meth:`model` method.''' try: return self.router[model] except __HOLE__: meta = getattr(model, '_meta', model) if meta.type == 'structure': # this is a structure if hasattr(model, 'model'): structure_model = model.model if structure_model: return self.manager(structure_model) else: manager = self.router.structure(model) if manager: return manager raise InvalidTransaction('"%s" not valid in this session' % meta)
KeyError
dataset/ETHPy150Open lsbardel/python-stdnet/stdnet/odm/session.py/Session.manager
7,231
def mangle(name, klass): if not name.startswith('__'): return name if len(name) + 2 >= MANGLE_LEN: return name if name.endswith('__'): return name try: i = 0 while klass[i] == '_': i = i + 1 except __HOLE__: return name klass = klass[i:] tlen = len(klass) + len(name) if tlen > MANGLE_LEN: klass = klass[:MANGLE_LEN-tlen] return "_%s%s" % (klass, name)
IndexError
dataset/ETHPy150Open anandology/pyjamas/pgen/lib2to3/compiler/misc.py/mangle
7,232
def main(argv): if len(argv) < 2: print "Usage: %s History" % __program__ sys.exit(1) encoding = locale.getpreferredencoding() if encoding.upper() != "UTF-8": print "%s requires an UTF-8 capable console/terminal" % __program__ sys.exit(1) files_to_process = [] for input_glob in argv[1:]: files_to_process += glob.glob(input_glob) for input_file in files_to_process: chrome = ChromeParser(open(input_file)) for timestamp, entry_type, url, data1, data2, data3 in chrome.Parse(): try: date_string = datetime.datetime(1970, 1, 1) date_string += datetime.timedelta(microseconds=timestamp) date_string = u"%s +00:00" % (date_string) except __HOLE__: date_string = timestamp except ValueError: date_string = timestamp output_string = u"%s\t%s\t%s\t%s\t%s\t%s" % ( date_string, entry_type, url, data1, data2, data3) print output_string.encode("UTF-8")
TypeError
dataset/ETHPy150Open google/grr/grr/parsers/chrome_history.py/main
7,233
def safe_decode(text, incoming=None, errors='strict'): """Decodes incoming text/bytes string using `incoming` if they're not already unicode. :param incoming: Text's current encoding :param errors: Errors handling policy. See here for valid values http://docs.python.org/2/library/codecs.html :returns: text or a unicode `incoming` encoded representation of it. :raises TypeError: If text is not an instance of str """ if not isinstance(text, (six.string_types, six.binary_type)): raise TypeError("%s can't be decoded" % type(text)) if isinstance(text, six.text_type): return text if not incoming: incoming = (sys.stdin.encoding or sys.getdefaultencoding()) try: return text.decode(incoming, errors) except __HOLE__: # Note(flaper87) If we get here, it means that # sys.stdin.encoding / sys.getdefaultencoding # didn't return a suitable encoding to decode # text. This happens mostly when global LANG # var is not set correctly and there's no # default encoding. In this case, most likely # python will use ASCII or ANSI encoders as # default encodings but they won't be capable # of decoding non-ASCII characters. # # Also, UTF-8 is being used since it's an ASCII # extension. return text.decode('utf-8', errors)
UnicodeDecodeError
dataset/ETHPy150Open openstack/python-rackclient/rackclient/openstack/common/strutils.py/safe_decode
7,234
def string_to_bytes(text, unit_system='IEC', return_int=False): """Converts a string into an float representation of bytes. The units supported for IEC :: Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it) KB, KiB, MB, MiB, GB, GiB, TB, TiB The units supported for SI :: kb(it), Mb(it), Gb(it), Tb(it) kB, MB, GB, TB Note that the SI unit system does not support capital letter 'K' :param text: String input for bytes size conversion. :param unit_system: Unit system for byte size conversion. :param return_int: If True, returns integer representation of text in bytes. (default: decimal) :returns: Numerical representation of text in bytes. :raises ValueError: If text has an invalid value. """ try: base, reg_ex = UNIT_SYSTEM_INFO[unit_system] except __HOLE__: msg = _('Invalid unit system: "%s"') % unit_system raise ValueError(msg) match = reg_ex.match(text) if match: magnitude = float(match.group(1)) unit_prefix = match.group(2) if match.group(3) in ['b', 'bit']: magnitude /= 8 else: msg = _('Invalid string format: %s') % text raise ValueError(msg) if not unit_prefix: res = magnitude else: res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix]) if return_int: return int(math.ceil(res)) return res
KeyError
dataset/ETHPy150Open openstack/python-rackclient/rackclient/openstack/common/strutils.py/string_to_bytes
7,235
def readRegister(address): try: byteval = i2c.readU8(address) if (debug): print("TSL2561.readRegister: returned 0x%02X from reg 0x%02X" % (byteval, address)) return byteval except __HOLE__: print("TSL2561.readRegister: error reading byte from reg 0x%02X" % address) return -1
IOError
dataset/ETHPy150Open DexterInd/GrovePi/Software/Python/grove_i2c_digital_light_sensor/grove_i2c_digital_light_sensor.py/readRegister
7,236
def writeRegister(address, val): try: i2c.write8(address, val) if (debug): print("TSL2561.writeRegister: wrote 0x%02X to reg 0x%02X" % (val, address)) except __HOLE__: print("TSL2561.writeRegister: error writing byte to reg 0x%02X" % address) return -1
IOError
dataset/ETHPy150Open DexterInd/GrovePi/Software/Python/grove_i2c_digital_light_sensor/grove_i2c_digital_light_sensor.py/writeRegister
7,237
def _check_result(self, response, result): try: error = result.get('error', None) reason = result.get('reason', None) except __HOLE__: error = None reason = '' # This is here because couchdb can return http 201 # but containing a list of conflict errors if error == 'conflict' or error == "file_exists": raise exceptions.Conflict(reason or "Conflict") if response.status_code > 205: if response.status_code == 404 or error == 'not_found': raise exceptions.NotFound(reason or 'Not found') elif error == 'bad_request': raise exceptions.BadRequest(reason or "Bad request") raise exceptions.GenericError(result)
AttributeError
dataset/ETHPy150Open histrio/py-couchdb/pycouchdb/resource.py/Resource._check_result
7,238
def handle(self, *args, **options): email = options['email'] try: profile = UserProfile.objects.get(email=email) except __HOLE__: raise CommandError("No such user.") if options['grant']: if profile.has_perm(options['permission'], profile.realm): raise CommandError("User already has permission for this realm.") else: if options['ack']: do_change_is_admin(profile, True, permission=options['permission']) print("Done!") else: print("Would have granted %s %s rights for %s" % (email, options['permission'], profile.realm.domain)) else: if profile.has_perm(options['permission'], profile.realm): if options['ack']: do_change_is_admin(profile, False, permission=options['permission']) print("Done!") else: print("Would have removed %s's %s rights on %s" % (email, options['permission'], profile.realm.domain)) else: raise CommandError("User did not have permission for this realm!")
ValidationError
dataset/ETHPy150Open zulip/zulip/zerver/management/commands/knight.py/Command.handle
7,239
def image_exists(fn): try: with open(fn) as f: pass except __HOLE__ as e: return False return True
IOError
dataset/ETHPy150Open kencoken/imsearch-tools/imsearchtools/process/imutils.py/image_exists
7,240
def run(*tests): root = tk.Tk() root.title('IDLE htest') root.resizable(0, 0) _initializeTkVariantTests(root) # a scrollable Label like constant width text widget. frameLabel = tk.Frame(root, padx=10) frameLabel.pack() text = tk.Text(frameLabel, wrap='word') text.configure(bg=root.cget('bg'), relief='flat', height=4, width=70) scrollbar = tk.Scrollbar(frameLabel, command=text.yview) text.config(yscrollcommand=scrollbar.set) scrollbar.pack(side='right', fill='y', expand=False) text.pack(side='left', fill='both', expand=True) test_list = [] # List of tuples of the form (spec, callable widget) if tests: for test in tests: test_spec = globals()[test.__name__ + '_spec'] test_spec['name'] = test.__name__ test_list.append((test_spec, test)) else: for k, d in globals().items(): if k.endswith('_spec'): test_name = k[:-5] test_spec = d test_spec['name'] = test_name mod = import_module('idlelib.' + test_spec['file']) test = getattr(mod, test_name) test_list.append((test_spec, test)) test_name = [tk.StringVar('')] callable_object = [None] test_kwds = [None] def next(): if len(test_list) == 1: next_button.pack_forget() test_spec, callable_object[0] = test_list.pop() test_kwds[0] = test_spec['kwds'] test_kwds[0]['parent'] = root test_name[0].set('Test ' + test_spec['name']) text.configure(state='normal') # enable text editing text.delete('1.0','end') text.insert("1.0",test_spec['msg']) text.configure(state='disabled') # preserve read-only property def run_test(): widget = callable_object[0](**test_kwds[0]) try: print(widget.result) except __HOLE__: pass button = tk.Button(root, textvariable=test_name[0], command=run_test) button.pack() next_button = tk.Button(root, text="Next", command=next) next_button.pack() next() root.mainloop()
AttributeError
dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/idlelib/idle_test/htest.py/run
7,241
def read_args(docstring): lines = iter(docstring.splitlines(False)) rec_line = '' try: # Find the "Args:" line line = next(lines) while line.strip() != 'Args:': line = next(lines) indent_header = get_indent(line) indent_item = None # Loop on rest of lines, adding indented lines to the previous one line = next(lines) while line.strip(): indent = get_indent(line) line = line.strip() if indent_item is None: indent_item = indent if indent_item <= indent_header: break rec_line = line elif indent > indent_item: rec_line += ' ' + line elif indent < indent_item: break elif rec_line: yield rec_line rec_line = line line = next(lines) except __HOLE__: pass if rec_line: yield rec_line
StopIteration
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/packages/tensorflow/init.py/read_args
7,242
def run(self, name): driver = self._get_driver() # go from name to Node Object try: node = [n for n in driver.list_nodes() if n.extra['hostname'] == name][0] except __HOLE__: raise Exception("Node with name {} not found in Softlayer".format(name)) # destroy the node self.logger.info('Destroying node...') node = driver.destroy_node(node) self.logger.info('Node successfully destroyed: {}'.format(node)) return
IndexError
dataset/ETHPy150Open StackStorm/st2contrib/packs/softlayer/actions/destroy_instance.py/SoftlayerDeleteInstance.run
7,243
def process_request(self, request): try: real_ip = request.META['HTTP_X_FORWARDED_FOR'] except __HOLE__: return None else: # pragma: no cover # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs. The # client's IP will be the first one. real_ip = real_ip.split(",")[0].strip() request.META['REMOTE_ADDR'] = real_ip
KeyError
dataset/ETHPy150Open erikr/happinesspackets/happinesspackets/utils/middleware.py/SetRemoteAddrFromForwardedFor.process_request
7,244
def post(self, action, object, attrs, extra_items=None): """ Post: send an action to the OpenSRS API Parameters: action - the name of the action (ie. sw_register, name_suggest, etc) object - the object type to operate on (ie. domain, trust_service) attrs - a data struct to construct the attributes from (see example) extra_items - any extra top level items (ie. registrant_ip) Example: opensrs.post("sw_register", "domain", attrs={ "domain": "example.com", "auto_renew": 1, "link_domains": 0, "reg_type": "new", "contact_set": { "owner": { ... }, "admin": { ... }, "billing": { ... }, "tech": { ... }, }, "nameserver_list": [ { "sortorder": 1, "name": "ns1.fatbox.ca", }, { "sortorder": 2, "name": "ns2.fatbox.ca", }, ], }, extra_items = { "registrant_ip": "1.2.3.4", }, ) """ def xml_to_data(elm, is_list=False): """ This converts an element that has a bunch of 'item' tags as children into a Python data structure. If is_list is true it is assumed that the child items all have numeric indices and should be treated as a list, else they are treated as a dict """ if is_list: data = [] else: data = {} for child in elm: if child.tag == 'item': if len(child) > 0: if child[0].tag == 'dt_assoc': new_data = xml_to_data(child[0]) elif child[0].tag == 'dt_array': new_data = xml_to_data(child[0], is_list=True) else: new_data = str(child.text) key = child.get('key') if is_list: data.insert(int(key), new_data) else: data[key] = new_data return data def data_to_xml(elm, key, data): """ data_to_xml adds a item sub element to elm and then sets the text if its not a list or dict, otherwise it recurses """ item = SubElement(elm, 'item', { 'key': key }) if isinstance(data, dict): data_to_dt_assoc(item, data) elif isinstance(data, list): data_to_dt_array(item, data) else: item.text = str(data) return item def data_to_dt_assoc(elm, data): """ Adds an associative array of data in the format that opensrs requires, uses data_to_xml to recurse """ _dt_assoc = SubElement(elm, 'dt_assoc') for key in 
data.keys(): data_to_xml(_dt_assoc, key, data[key]) def data_to_dt_array(elm, list): """ Adds an list of data in the format that opensrs requires, uses data_to_xml to recurse """ _dt_array = SubElement(elm, 'dt_array') key = 0 for ent in list: data_to_xml(_dt_array, str(key), ent) key += 1 # build our XML structure env = Element("OPS_envelope") # add the header header = SubElement(env, 'header') version = SubElement(header, 'version') version.text = str(OPENSRS_VERSION) # add the body body = SubElement(env, 'body') data_block = SubElement(body, 'data_block') # build our parameters params = { 'protocol': 'XCP', 'action': action, 'object': object, 'attributes': attrs, } # add the extra items if we've been provided any if isinstance(extra_items, dict): params.update(extra_items) data_to_dt_assoc(data_block, params); data = "%s%s" % (OPENSRS_XML_HEADER, tostring(env)) # create our signature: # MD5(MD5(data + private_key)+private_key) signature = hashlib.md5("%s%s" % (hashlib.md5("%s%s" % (data, self.private_key)).hexdigest(), self.private_key)).hexdigest() # send our post try: resp, content = self.H.request(self.server, "POST", body=data, headers={ 'Content-Type': 'text/xml', 'X-Username': self.username, 'X-Signature': signature, 'Content-Length': str(len(data)), }) except httplib2.ServerNotFoundError: raise OpenSRSHTTPException("DNS is not working for us.") except __HOLE__: raise OpenSRSHTTPException("Are we offline?") if resp.status == 200: # parse the XML response dom = fromstring(content) # check the version version = dom.find('header/version') if version == None: raise OpenSRSXMLException("Response did not contain a version") if version.text > OPENSRS_VERSION: raise OpenSRSXMLException("Response version is newer than we understand! 
Response: %s -- Supported: %s" % (version.text, OPENSRS_VERSION)) # find our response data data_block = dom.find('body/data_block/dt_assoc') if data_block == None: raise OpenSRSXMLException("Response did not contain valid data (could not find body/data_block/dt_assoc)") # convert data = xml_to_data(data_block) return data else: raise OpenSRSHTTPException("Status returned from POST was not 200")
AttributeError
dataset/ETHPy150Open fatbox/OpenSRS-py/opensrs/__init__.py/OpenSRS.post
7,245
def test_subclass_extension_category(self): """ Category subclass does not extend base class. This test demonstrates that traits allows subclassing of a category class, but that the traits from the subclass are not actually added to the base class of the Category. Seems like the declaration of the subclass (BasePlusPlus) should fail. """ try: x = self.base.pp self.fail(msg="base.pp should have thrown AttributeError " "as Category subclassing is not supported.") except __HOLE__: pass basepp = BasePlusPlus() return
AttributeError
dataset/ETHPy150Open enthought/traits/traits/tests/test_category.py/CategoryTestCase.test_subclass_extension_category
7,246
def zephyr_bulk_subscribe(subs): try: zephyr._z.subAll(subs) except __HOLE__: # Since we haven't added the subscription to # current_zephyr_subs yet, we can just return (so that we'll # continue processing normal messages) and we'll end up # retrying the next time the bot checks its subscriptions are # up to date. logger.exception("Error subscribing to streams (will retry automatically):") logger.warning("Streams were: %s" % ([cls for cls, instance, recipient in subs],)) return try: actual_zephyr_subs = [cls for (cls, _, _) in zephyr._z.getSubscriptions()] except IOError: logger.exception("Error getting current Zephyr subscriptions") # Don't add anything to current_zephyr_subs so that we'll # retry the next time we check for streams to subscribe to # (within 15 seconds). return for (cls, instance, recipient) in subs: if cls not in actual_zephyr_subs: logger.error("Zephyr failed to subscribe us to %s; will retry" % (cls,)) try: # We'll retry automatically when we next check for # streams to subscribe to (within 15 seconds), but # it's worth doing 1 retry immediately to avoid # missing 15 seconds of messages on the affected # classes zephyr._z.sub(cls, instance, recipient) except IOError: pass else: current_zephyr_subs.add(cls)
IOError
dataset/ETHPy150Open zulip/zulip/bots/zephyr_mirror_backend.py/zephyr_bulk_subscribe
7,247
def maybe_kill_child(): try: if child_pid is not None: os.kill(child_pid, signal.SIGTERM) except __HOLE__: # We don't care if the child process no longer exists, so just log the error logger.exception("")
OSError
dataset/ETHPy150Open zulip/zulip/bots/zephyr_mirror_backend.py/maybe_kill_child
7,248
def maybe_restart_mirroring_script(): if os.stat(os.path.join(options.root_path, "stamps", "restart_stamp")).st_mtime > start_time or \ ((options.user == "tabbott" or options.user == "tabbott/extra") and os.stat(os.path.join(options.root_path, "stamps", "tabbott_stamp")).st_mtime > start_time): logger.warning("") logger.warning("zephyr mirroring script has been updated; restarting...") maybe_kill_child() try: zephyr._z.cancelSubs() except __HOLE__: # We don't care whether we failed to cancel subs properly, but we should log it logger.exception("") while True: try: os.execvp(os.path.join(options.root_path, "user_root", "zephyr_mirror_backend.py"), sys.argv) except Exception: logger.exception("Error restarting mirroring script; trying again... Traceback:") time.sleep(1)
IOError
dataset/ETHPy150Open zulip/zulip/bots/zephyr_mirror_backend.py/maybe_restart_mirroring_script
7,249
def parse_zephyr_body(zephyr_data): try: (zsig, body) = zephyr_data.split("\x00", 1) except __HOLE__: (zsig, body) = ("", zephyr_data) return (zsig, body)
ValueError
dataset/ETHPy150Open zulip/zulip/bots/zephyr_mirror_backend.py/parse_zephyr_body
7,250
def parse_crypt_table(zephyr_class, instance): try: crypt_table = open(os.path.join(os.environ["HOME"], ".crypt-table")) except __HOLE__: return None for line in crypt_table.readlines(): if line.strip() == "": # Ignore blank lines continue match = re.match("^crypt-(?P<class>[^:]+):\s+((?P<algorithm>(AES|DES)):\s+)?(?P<keypath>\S+)$", line) if match is None: # Malformed crypt_table line logger.debug("Invalid crypt_table line!") continue groups = match.groupdict() if groups['class'].lower() == zephyr_class and 'keypath' in groups and \ groups.get("algorithm") == "AES": return groups["keypath"] return None
IOError
dataset/ETHPy150Open zulip/zulip/bots/zephyr_mirror_backend.py/parse_crypt_table
7,251
def zephyr_init_autoretry(): backoff = zulip.RandomExponentialBackoff() while backoff.keep_going(): try: # zephyr.init() tries to clear old subscriptions, and thus # sometimes gets a SERVNAK from the server zephyr.init() backoff.succeed() return except __HOLE__: logger.exception("Error initializing Zephyr library (retrying). Traceback:") backoff.fail() quit_failed_initialization("Could not initialize Zephyr library, quitting!")
IOError
dataset/ETHPy150Open zulip/zulip/bots/zephyr_mirror_backend.py/zephyr_init_autoretry
7,252
def zephyr_load_session_autoretry(session_path): backoff = zulip.RandomExponentialBackoff() while backoff.keep_going(): try: session = open(session_path, "r").read() zephyr._z.initialize() zephyr._z.load_session(session) zephyr.__inited = True return except __HOLE__: logger.exception("Error loading saved Zephyr session (retrying). Traceback:") backoff.fail() quit_failed_initialization("Could not load saved Zephyr session, quitting!")
IOError
dataset/ETHPy150Open zulip/zulip/bots/zephyr_mirror_backend.py/zephyr_load_session_autoretry
7,253
def zephyr_subscribe_autoretry(sub): backoff = zulip.RandomExponentialBackoff() while backoff.keep_going(): try: zephyr.Subscriptions().add(sub) backoff.succeed() return except __HOLE__: # Probably a SERVNAK from the zephyr server, but log the # traceback just in case it's something else logger.exception("Error subscribing to personals (retrying). Traceback:") backoff.fail() quit_failed_initialization("Could not subscribe to personals, quitting!")
IOError
dataset/ETHPy150Open zulip/zulip/bots/zephyr_mirror_backend.py/zephyr_subscribe_autoretry
7,254
def die_gracefully(signal, frame): if CURRENT_STATE == States.ZulipToZephyr or CURRENT_STATE == States.ChildSending: # this is a child process, so we want os._exit (no clean-up necessary) os._exit(1) if CURRENT_STATE == States.ZephyrToZulip and not options.use_sessions: try: # zephyr=>zulip processes may have added subs, so run cancelSubs zephyr._z.cancelSubs() except __HOLE__: # We don't care whether we failed to cancel subs properly, but we should log it logger.exception("") sys.exit(1)
IOError
dataset/ETHPy150Open zulip/zulip/bots/zephyr_mirror_backend.py/die_gracefully
7,255
def __init__(self, params): timeout = params.get('timeout', 300) try: timeout = int(timeout) except (__HOLE__, TypeError): timeout = 300 self.default_timeout = timeout
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/cache/backends/base.py/BaseCache.__init__
7,256
def write(s): def _retry(func, *args): while True: try: func(*args) except __HOLE__, e: if e.errno != errno.EAGAIN: raise else: break _retry(sys.stdout.write, s) _retry(sys.stdout.flush)
IOError
dataset/ETHPy150Open gooli/termenu/ansi.py/write
7,257
def run(self): """ get json with build config from path """ path = self.path or CONTAINER_BUILD_JSON_PATH try: with open(path, 'r') as build_cfg_fd: build_cfg_json = json.load(build_cfg_fd) except ValueError: self.log.error("couldn't decode json from file '%s'", path) return None except __HOLE__: self.log.error("couldn't read json from file '%s'", path) return None else: return self.substitute_configuration(build_cfg_json)
IOError
dataset/ETHPy150Open projectatomic/atomic-reactor/atomic_reactor/plugins/input_path.py/PathInputPlugin.run
7,258
def parseDate(self, dateStr): def parseAgo(s): [amount, unit] = s.split(' ') try: amount = float(amount) except ValueError: raise SearchParseError("Expected a number, got %s" % amount) if amount <= 0: raise SearchParseError("Expected a positive number, got %s" % amount) unitRe = re.compile('^'+unit) keys = [k for k in TimeSearchStmt.amounts.keys() if unitRe.match(k)] if len(keys) == 0: raise SearchParseError("Time unit unknown: %s" % unit) elif len(keys) > 1: raise SearchParseError("Time unit ambiguous: %s matches %s" % (unit, keys)) return round(time.time()) - TimeSearchStmt.amounts[keys[0]] * amount def guessDate(unknownEntries, year=None): def guessStrMonth(s): monthRe = re.compile('^'+s) keys = [k for k in TimeSearchStmt.months.keys() if monthRe.match(k)] if len(keys) == 0: raise SearchParseError("Unknown month: %s" % s) elif len(keys) > 1: raise SearchParseError("Ambiguous month: %s matches %s" % (s, keys)) return TimeSearchStmt.months[keys[0]] if not year: m = None # First heuristic: if month comes first, then year comes last try: e0 = int(unknownEntries[0]) except ValueError: m = guessStrMonth(unknownEntries[0]) try: d = int(unknownEntries[1]) except __HOLE__: raise SearchParseError("Expected day, got %s" % unknownEntries[1]) try: y = int(unknownEntries[2]) except ValueError: raise SearchParseError("Expected year, got %s" % unknownEntries[2]) return (y, m, d) # Second heuristic: if month comes last, then year comes first try: e2 = int(unknownEntries[2]) except ValueError: m = guessStrMonth(unknownEntries[2]) try: d = int(unknownEntries[1]) except ValueError: raise SearchParseError("Expected day, got %s" % unknownEntries[1]) try: y = int(unknownEntries[0]) except ValueError: raise SearchParseError("Expected year, got %s" % unknownEntries[0]) return (y, m, d) # If month is the middle one, decide day and year by size # (year is largest, hopefully year was entered using 4 digits) try: e1 = int(unknownEntries[1]) except ValueError: m = 
guessStrMonth(unknownEntries[1]) try: d = int(unknownEntries[2]) except ValueError: raise SearchParseError("Expected day or year, got %s" % unknownEntries[2]) try: y = int(unknownEntries[0]) except ValueError: raise SearchParseError("Expected year or year, got %s" % unknownEntries[0]) return (max(y,d), m, min(y, d)) lst = [(e0,0),(e1,1),(e2,2)] lst.sort() return guessDate([str(lst[0][0]), str(lst[1][0])], year=e2) # We know year, decide month using similar heuristics - try string month first, # then decide which is possible try: e0 = int(unknownEntries[0]) except ValueError: m = guessStrMonth(unknownEntries[0]) try: d = int(unknownEntries[1]) except ValueError: raise SearchParseError("Expected day, got %s" % unknownEntries[1]) return (year, m, d) try: e1 = int(unknownEntries[1]) except ValueError: m = guessStrMonth(unknownEntries[1]) try: d = int(unknownEntries[0]) except ValueError: raise SearchParseError("Expected day, got %s" % unknownEntries[0]) return (year, m, d) if e0 > 12: return (year, e1, e0) else: return (year, e0, e1) dateStr = dateStr.lower().lstrip().rstrip() if dateStr.endswith(" ago"): return parseAgo(dateStr[:-4]) if dateStr == "yesterday": lst = list(time.localtime(round(time.time()) - TimeSearchStmt.oneDay)) # Reset hour, minute, second lst[3] = 0 lst[4] = 0 lst[5] = 0 return time.mktime(lst) if dateStr == "today": lst = list(time.localtime()) # Reset hour, minute, second lst[3] = 0 lst[4] = 0 lst[5] = 0 return time.mktime(lst) if dateStr.startswith("this "): rest = dateStr[5:] lst = list(time.localtime(round(time.time()))) if rest == "minute": lst[5] = 0 elif rest == "hour": lst[5] = 0 lst[4] = 0 elif rest == "day": lst[5] = 0 lst[4] = 0 lst[3] = 0 elif rest == "week": # weeks start on monday lst[5] = 0 lst[4] = 0 lst[3] = 0 # This hack saves me the hassle of computing negative days, months, etc lst = list(time.localtime(time.mktime(lst) - TimeSearchStmt.oneDay * lst[6])) elif rest == "month": lst[5] = 0 lst[4] = 0 lst[3] = 0 lst[2] = 1 elif 
rest == "year": lst[5] = 0 lst[4] = 0 lst[3] = 0 lst[2] = 1 lst[1] = 1 return time.mktime(lst) result = [x.match(dateStr) for x in TimeSearchStmt.dateRE] this = list(time.localtime()) def setTwoDate(g): d = guessDate(g, year=this[0]) this[0] = d[0] this[1] = d[1] this[2] = d[2] def setThreeDate(g): d = guessDate(g) this[0] = d[0] this[1] = d[1] this[2] = d[2] def setTwoTime(g): this[3] = int(g[0]) this[4] = int(g[1]) this[5] = 0 def setThreeTime(g): this[3] = int(g[0]) this[4] = int(g[1]) this[5] = int(g[2]) if result[0]: setTwoDate(result[0].groups()) setTwoTime([0,0]) elif result[1]: setThreeDate(result[1].groups()) setTwoTime([0,0]) elif result[2]: setTwoTime(result[2].groups()) elif result[3]: setThreeTime(result[3].groups()) elif result[4]: g = result[4].groups() setTwoDate([g[0], g[1]]) setTwoTime([g[2], g[3]]) elif result[5]: g = result[5].groups() setTwoDate([g[0], g[1]]) setThreeTime([g[2], g[3], g[4]]) elif result[6]: g = result[6].groups() setThreeDate([g[0], g[1], g[2]]) setTwoTime([g[3], g[4]]) elif result[7]: g = result[7].groups() setThreeDate([g[0], g[1], g[2]]) setThreeTime([g[3], g[4], g[5]]) elif result[8]: g = result[8].groups() setTwoTime([g[0], g[1]]) setTwoDate([g[2], g[3]]) elif result[9]: g = result[9].groups() setTwoTime([g[0], g[1]]) setThreeDate([g[2], g[3], g[4]]) elif result[10]: g = result[10].groups() setThreeTime([g[0], g[1], g[2]]) setTwoDate([g[3], g[4]]) elif result[11]: g = result[11].groups() setThreeTime([g[0], g[1], g[2]]) setThreeDate([g[3], g[4],g[5]]) else: raise SearchParseError("Expected a date, got '%s'" % dateStr) return time.mktime(this)
ValueError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/query/version.py/TimeSearchStmt.parseDate
7,259
@internationalizeDocstring def sample(self, irc, msg, args, num, things): """<num> <arg> [<arg> ...] Randomly chooses <num> items out of the arguments given. """ try: samp = random.sample(things, num) irc.reply(' '.join(samp)) except __HOLE__ as e: irc.error('%s' % (e,))
ValueError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Utilities/plugin.py/Utilities.sample
7,260
def ensure_tree(path): """Create a directory (and any ancestor directories required) :param path: Directory to create """ try: os.makedirs(path) except __HOLE__ as exc: if exc.errno == errno.EEXIST: if not os.path.isdir(path): raise else: raise
OSError
dataset/ETHPy150Open Havate/havate-openstack/proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/openstack/common/fileutils.py/ensure_tree
7,261
def delete_if_exists(path): """Delete a file, but ignore file not found error. :param path: File to delete """ try: os.unlink(path) except __HOLE__ as e: if e.errno == errno.ENOENT: return else: raise
OSError
dataset/ETHPy150Open Havate/havate-openstack/proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/openstack/common/fileutils.py/delete_if_exists
7,262
def __init__(self, cself = None): if cself != None: for header_name in [x + '_header' for x in self.all_headers]: try: setattr(self, header_name, getattr(cself, header_name).getCopy()) except __HOLE__: pass self.a_headers = [x for x in cself.a_headers] return self.a_headers = []
AttributeError
dataset/ETHPy150Open sippy/b2bua/sippy/SdpMediaDescription.py/SdpMediaDescription.__init__
7,263
def test_no_reuse(): x = T.lvector() y = T.lvector() f = theano.function([x, y], x + y) # provide both inputs in the first call f(numpy.ones(10, dtype='int64'), numpy.ones(10, dtype='int64')) try: f(numpy.ones(10)) except __HOLE__: return assert not 'should not get here'
TypeError
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/tests/test_gc.py/test_no_reuse
7,264
def smoke_test_element_creation(): # grab the set of elements represented by 'Z' elements = sorted([elm for abbrev, elm in six.iteritems(basic) if isinstance(abbrev, int)]) for e in elements: sym = e.sym name = e.name # make sure that the elements can be initialized with Z or any # combination of element symbols or the element name inits = [sym, sym.upper(), sym.lower(), sym.swapcase(), name, name.upper(), name.lower(), name.swapcase()] # loop over the initialization routines to smoketest element creation for init in inits: elem = BasicElement(init) # create an element with the Z value elem = BasicElement(e.Z) str(elem) # obtain all attribute fields of the Element to ensure it is # behaving correctly for field in element._fields: tuple_attr = getattr(basic[e.Z], field) elem_attr_dct = elem[str(field)] elem_attr = getattr(elem, field) # shield the assertion from any elements whose density is # unknown try: if np.isnan(tuple_attr): continue except __HOLE__: pass assert_equal(elem_attr_dct, tuple_attr) assert_equal(elem_attr, tuple_attr) assert_equal(elem_attr_dct, elem_attr) # test the comparators for e1, e2 in zip(elements, elements[1:]): # compare prev_element to element assert_equal(e1.__lt__(e2), True) assert_equal(e1 < e2, True) assert_equal(e1.__eq__(e2), False) assert_equal(e1 == e2, False) assert_equal(e1 >= e2, False) assert_equal(e1 > e2, False) # compare element to prev_element assert_equal(e2 < e1, False) assert_equal(e2.__lt__(e1), False) assert_equal(e2 <= e1, False) assert_equal(e2.__eq__(e1), False) assert_equal(e2 == e1, False) assert_equal(e2 >= e1, True) assert_equal(e2 > e1, True)
TypeError
dataset/ETHPy150Open scikit-beam/scikit-beam/skbeam/core/constants/tests/test_basic.py/smoke_test_element_creation
7,265
def reload_database( self ): # {{{ # create the xapian handlers self.db = xapian.WritableDatabase( self.database,xapian.DB_CREATE_OR_OPEN ) self.qp = xapian.QueryParser() self.qp.set_database(self.db) # needed for incremental search self.qp.set_stemmer( xapian.Stem( self.language ) ) self.qp.set_stemming_strategy( self.qp.STEM_SOME ) self.qp.add_prefix( "title","S" ) self.tg = xapian.TermGenerator() self.tg.set_stemmer( xapian.Stem( self.language ) ) try: self.tg.set_stemming_strategy( self.tg.STEM_SOME ) except __HOLE__: pass self.e = xapian.Enquire(self.db) self.sorted_e = xapian.Enquire(self.db) # Value 2 is the lowercase form of the title self.sorted_e.set_sort_by_value(2,False) #}}}
AttributeError
dataset/ETHPy150Open cwoac/nvim/python/nvim.py/Nvimdb.reload_database
7,266
def floyd_warshall_numpy(G, nodelist=None, weight='weight'): """Find all-pairs shortest path lengths using Floyd's algorithm. Parameters ---------- G : NetworkX graph nodelist : list, optional The rows and columns are ordered by the nodes in nodelist. If nodelist is None then the ordering is produced by G.nodes(). weight: string, optional (default= 'weight') Edge data key corresponding to the edge weight. Returns ------- distance : NumPy matrix A matrix of shortest path distances between nodes. If there is no path between to nodes the corresponding matrix entry will be Inf. Notes ------ Floyd's algorithm is appropriate for finding shortest paths in dense graphs or graphs with negative weights when Dijkstra's algorithm fails. This algorithm can still fail if there are negative cycles. It has running time O(n^3) with running space of O(n^2). """ try: import numpy as np except __HOLE__: raise ImportError(\ "to_numpy_matrix() requires numpy: http://scipy.org/ ") A = nx.to_numpy_matrix(G, nodelist=nodelist, multigraph_weight=min, weight=weight) n,m = A.shape I = np.identity(n) A[A==0] = np.inf # set zero entries to inf A[I==1] = 0 # except diagonal which should be zero for i in range(n): A = np.minimum(A, A[i,:] + A[:,i]) return A
ImportError
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/algorithms/shortest_paths/dense.py/floyd_warshall_numpy
7,267
def __init__(self, uri=None, **kwargs): """Initialize the socket and initialize pdb.""" params = DEFAULT_PARAMS.copy() params.update(parse_irc_uri(uri)) params.update(kwargs) # Backup stdin and stdout before replacing them by the socket handle self.old_stdout = sys.stdout self.old_stdin = sys.stdin self.read_timeout = 0.1 if not params.get('limit_access_to'): raise NoAllowedNicknamesSelected( "You must specify a list of nicknames that are allowed " "to interact with the debugger using the " "`limit_access_to` keyword argument." ) elif isinstance(params.get('limit_access_to'), six.string_types): params['limit_access_to'] = [params.get('limit_access_to')] connect_params = {} if not params.get('nickname'): params['nickname'] = socket.gethostname().split('.')[0] if not params.get('channel'): raise NoChannelSelected( "You must specify a channel to connect to using the " "`channel` keyword argument." ) if params.get('ssl'): connect_params['connect_factory'] = ( Factory(wrapper=ssllib.wrap_socket) ) # Writes to stdout are forbidden in mod_wsgi environments try: logger.info( "ircpdb has connected to %s:%s on %s\n", params.get('server'), params.get('port'), params.get('channel') ) except __HOLE__: pass r_pipe, w_pipe = os.pipe() # The A pipe is from the bot to pdb self.p_A_pipe = os.fdopen(r_pipe, 'r') self.b_A_pipe = os.fdopen(w_pipe, 'w') r_pipe, w_pipe = os.pipe() # The B pipe is from pdb to the bot self.b_B_pipe = os.fdopen(r_pipe, 'r') self.p_B_pipe = os.fdopen(w_pipe, 'w') pdb.Pdb.__init__( self, stdin=self.p_A_pipe, stdout=self.p_B_pipe, ) self.bot = IrcpdbBot( channel=params.get('channel'), nickname=params.get('nickname'), server=params.get('server'), port=params.get('port'), password=params.get('password'), limit_access_to=params.get('limit_access_to'), message_wait_seconds=params.get('message_wait_seconds'), dpaste_minimum_response_length=( params.get('dpaste_minimum_response_length') ), activation_timeout=params.get('activation_timeout'), **connect_params )
IOError
dataset/ETHPy150Open coddingtonbear/ircpdb/ircpdb/debugger.py/Ircpdb.__init__
7,268
def shutdown(self): """Revert stdin and stdout, close the socket.""" sys.stdout = self.old_stdout sys.stdin = self.old_stdin pipes = [ self.p_A_pipe, self.p_B_pipe, self.b_A_pipe, self.b_B_pipe ] for pipe in pipes: try: pipe.close() except __HOLE__: logger.warning( "IOError encountered while closing a pipe; messages " "may have been lost." ) self.bot.disconnect()
IOError
dataset/ETHPy150Open coddingtonbear/ircpdb/ircpdb/debugger.py/Ircpdb.shutdown
7,269
def ps(cmd): """Expects a ps command with a -o argument and parse the result returning only the value of interest. """ if not LINUX: cmd = cmd.replace(" --no-headers ", " ") if SUNOS: cmd = cmd.replace("-o command", "-o comm") cmd = cmd.replace("-o start", "-o stime") p = subprocess.Popen(cmd, shell=1, stdout=subprocess.PIPE) output = p.communicate()[0].strip() if PY3: output = str(output, sys.stdout.encoding) if not LINUX: output = output.split('\n')[1].strip() try: return int(output) except __HOLE__: return output
ValueError
dataset/ETHPy150Open giampaolo/psutil/psutil/tests/test_posix.py/ps
7,270
def test_exe(self): ps_pathname = ps("ps --no-headers -o command -p %s" % self.pid).split(' ')[0] psutil_pathname = psutil.Process(self.pid).exe() try: self.assertEqual(ps_pathname, psutil_pathname) except __HOLE__: # certain platforms such as BSD are more accurate returning: # "/usr/local/bin/python2.7" # ...instead of: # "/usr/local/bin/python" # We do not want to consider this difference in accuracy # an error. adjusted_ps_pathname = ps_pathname[:len(ps_pathname)] self.assertEqual(ps_pathname, adjusted_ps_pathname)
AssertionError
dataset/ETHPy150Open giampaolo/psutil/psutil/tests/test_posix.py/TestProcess.test_exe
7,271
def test_redis_storage(self): if os.environ.get('NONETWORK'): return try: from kivy.storage.redisstore import RedisStore from redis.exceptions import ConnectionError try: params = dict(db=15) self._do_store_test_empty(RedisStore(params)) self._do_store_test_filled(RedisStore(params)) except ConnectionError: pass except __HOLE__: pass
ImportError
dataset/ETHPy150Open kivy/kivy/kivy/tests/test_storage.py/StorageTestCase.test_redis_storage
7,272
@arg('--flavor', default = None, metavar = '<flavor>', help = "Flavor ID (see 'cloudservers flavors'). Defaults to 256MB RAM instance.") @arg('--image', default = None, metavar = '<image>', help = "Image ID (see 'cloudservers images'). Defaults to Ubuntu 10.04 LTS.") @arg('--ipgroup', default = None, metavar = '<group>', help = "IP group name or ID (see 'cloudservers ipgroup-list'). DEPRICATED in OpenStack") @arg('--meta', metavar = "<key=value>", action = 'append', default = [], help = "Record arbitrary key/value metadata. May be give multiple times.") @arg('--file', metavar = "<dst-path=src-path>", action = 'append', dest = 'files', default = [], help = "Store arbitrary files from <src-path> locally to <dst-path> "\ "on the new server. You may store up to 5 files.") @arg('--key', metavar = '<path>', nargs = '?', const = AUTO_KEY, help = "Key the server with an SSH keypair. Looks in ~/.ssh for a key, "\ "or takes an explicit <path> to one.") @arg('name', metavar='<name>', help='Name for the new server') def do_boot(self, args): """Boot a new server.""" flavor = args.flavor or self.compute.flavors.find(ram=256) image = args.image or self.compute.images.find(name="Ubuntu 10.04 LTS (lucid)") # Map --ipgroup <name> to an ID. # XXX do this for flavor/image? 
if args.ipgroup: ipgroup = self._find_ipgroup(args.ipgroup) else: ipgroup = None metadata = dict(v.split('=') for v in args.meta) files = {} for f in args.files: dst, src = f.split('=', 1) try: files[dst] = open(src) except __HOLE__, e: raise CommandError("Can't open '%s': %s" % (src, e)) if args.key is AUTO_KEY: possible_keys = [os.path.join(os.path.expanduser('~'), '.ssh', k) for k in ('id_dsa.pub', 'id_rsa.pub')] for k in possible_keys: if os.path.exists(k): keyfile = k break else: raise CommandError("Couldn't find a key file: tried ~/.ssh/id_dsa.pub or ~/.ssh/id_rsa.pub") elif args.key: keyfile = args.key else: keyfile = None if keyfile: try: files['/root/.ssh/authorized_keys2'] = open(keyfile) except IOError, e: raise CommandError("Can't open '%s': %s" % (keyfile, e)) server = self.compute.servers.create(args.name, image, flavor, ipgroup, metadata, files) print_dict(server._info)
IOError
dataset/ETHPy150Open jacobian-archive/openstack.compute/openstack/compute/shell.py/ComputeShell.do_boot
7,273
@lru_cache(maxsize=1) def _get_cinder_version(): try: return pkg_resources.get_distribution('python-cinderclient').parsed_version except __HOLE__: return '00000001', '00000001', '00000001', '*final'
ValueError
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/iaas/backend.py/_get_cinder_version
7,274
@lru_cache(maxsize=1) def _get_neutron_version(): try: return pkg_resources.get_distribution('python-neutronclient').parsed_version except __HOLE__: return '00000002', '00000003', '00000009', '*final'
ValueError
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/iaas/backend.py/_get_neutron_version
7,275
@lru_cache(maxsize=1) def _get_nova_version(): try: return pkg_resources.get_distribution('python-novaclient').parsed_version except __HOLE__: return '00000002', '00000020', '00000000', '*final'
ValueError
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/iaas/backend.py/_get_nova_version
7,276
def create_tenant_session(self, credentials): try: self.session = self.Session(self, **credentials) except __HOLE__ as e: logger.error('Failed to create OpenStack session.') six.reraise(CloudBackendError, e) return self.session
AttributeError
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/iaas/backend.py/OpenStackClient.create_tenant_session
7,277
def get_resource_stats(self, auth_url): logger.debug('About to get statistics for auth_url: %s', auth_url) try: session = self.create_session(keystone_url=auth_url) nova = self.create_nova_client(session) stats = self.get_hypervisors_statistics(nova) # XXX a temporary workaround for https://bugs.launchpad.net/nova/+bug/1333520 if 'vcpus' in stats: nc_settings = getattr(settings, 'NODECONDUCTOR', {}) openstacks = nc_settings.get('OPENSTACK_OVERCOMMIT', ()) try: openstack = next(o for o in openstacks if o['auth_url'] == auth_url) cpu_overcommit_ratio = openstack.get('cpu_overcommit_ratio', 1) except __HOLE__ as e: logger.debug('Failed to find OpenStack overcommit values for Keystone URL %s', auth_url) cpu_overcommit_ratio = 1 stats['vcpus'] = stats['vcpus'] * cpu_overcommit_ratio except (nova_exceptions.ClientException, keystone_exceptions.ClientException) as e: logger.exception('Failed to get statistics for auth_url: %s', auth_url) six.reraise(CloudBackendError, e) else: logger.debug('Successfully for auth_url: %s was successfully taken', auth_url) return stats
StopIteration
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/iaas/backend.py/OpenStackBackend.get_resource_stats
7,278
def provision_instance(self, instance, backend_flavor_id, system_volume_id=None, data_volume_id=None): logger.info('About to boot instance %s', instance.uuid) try: membership = instance.cloud_project_membership image = membership.cloud.images.get( template=instance.template, ) session = self.create_session(membership=membership) nova = self.create_nova_client(session) cinder = self.create_cinder_client(session) neutron = self.create_neutron_client(session) # verify if the internal network to connect to exists try: neutron.show_network(membership.internal_network_id) except neutron_exceptions.NeutronClientException: logger.exception('Internal network with id of %s was not found', membership.internal_network_id) raise CloudBackendError('Unable to find network to attach instance to') # instance key name and fingerprint are optional if instance.key_name: safe_key_name = self.sanitize_key_name(instance.key_name) matching_keys = [ key for key in nova.keypairs.findall(fingerprint=instance.key_fingerprint) if key.name.endswith(safe_key_name) ] matching_keys_count = len(matching_keys) if matching_keys_count >= 1: if matching_keys_count > 1: # TODO: warning as we trust that fingerprint+name combo is unique. Potentially reconsider. logger.warning('Found %d public keys with fingerprint "%s", expected exactly one.' + 'Taking the first one', matching_keys_count, instance.key_fingerprint) backend_public_key = matching_keys[0] elif matching_keys_count == 0: logger.error('Found no public keys with fingerprint "%s", expected exactly one', instance.key_fingerprint) # It is possible to fix this situation with OpenStack admin account. So not failing here. # Error log is expected to be addressed. # TODO: consider failing provisioning/putting this check into serializer/pre-save. 
# reset failed key name/fingerprint instance.key_name = '' # has to be an empty string, if None will fail on MySQL backend instance.key_fingerprint = '' backend_public_key = None else: backend_public_key = matching_keys[0] else: backend_public_key = None backend_flavor = nova.flavors.get(backend_flavor_id) if not system_volume_id: system_volume_name = '{0}-system'.format(instance.name) logger.info('Creating volume %s for instance %s', system_volume_name, instance.uuid) # TODO: need to update system_volume_size as well for the data to be precise size = self.get_backend_disk_size(instance.system_volume_size) system_volume = cinder.volumes.create( size=size, display_name=system_volume_name, display_description='', imageRef=image.backend_id, ) system_volume_id = system_volume.id if not data_volume_id: data_volume_name = '{0}-data'.format(instance.name) logger.info('Creating volume %s for instance %s', data_volume_name, instance.uuid) # TODO: need to update data_volume_size as well for the data to be precise size = self.get_backend_disk_size(instance.data_volume_size) data_volume = cinder.volumes.create( size=size, display_name=data_volume_name, display_description='', ) data_volume_id = data_volume.id if not self._wait_for_volume_status(system_volume_id, cinder, 'available', 'error'): logger.error( 'Failed to boot instance %s: timed out waiting for system volume %s to become available', instance.uuid, system_volume_id, ) raise CloudBackendError('Timed out waiting for instance %s to boot' % instance.uuid) if not self._wait_for_volume_status(data_volume_id, cinder, 'available', 'error'): logger.error( 'Failed to boot instance %s: timed out waiting for data volume %s to become available', instance.uuid, data_volume_id, ) raise CloudBackendError('Timed out waiting for instance %s to boot' % instance.uuid) security_group_ids = instance.security_groups.values_list('security_group__backend_id', flat=True) server_create_parameters = dict( name=instance.name, image=None, # Boot 
from volume, see boot_index below flavor=backend_flavor, block_device_mapping_v2=[ { 'boot_index': 0, 'destination_type': 'volume', 'device_type': 'disk', 'source_type': 'volume', 'uuid': system_volume_id, 'delete_on_termination': True, }, { 'destination_type': 'volume', 'device_type': 'disk', 'source_type': 'volume', 'uuid': data_volume_id, 'delete_on_termination': True, }, # This should have worked by creating an empty volume. # But, as always, OpenStack doesn't work as advertised: # see https://bugs.launchpad.net/nova/+bug/1347499 # equivalent nova boot options would be # --block-device source=blank,dest=volume,size=10,type=disk # { # 'destination_type': 'blank', # 'device_type': 'disk', # 'source_type': 'image', # 'uuid': backend_image.id, # 'volume_size': 10, # 'shutdown': 'remove', # }, ], nics=[ {'net-id': membership.internal_network_id} ], key_name=backend_public_key.name if backend_public_key is not None else None, security_groups=security_group_ids, ) if membership.availability_zone: server_create_parameters['availability_zone'] = membership.availability_zone if instance.user_data: server_create_parameters['userdata'] = instance.user_data server = nova.servers.create(**server_create_parameters) instance.backend_id = server.id instance.system_volume_id = system_volume_id instance.data_volume_id = data_volume_id instance.save() if not self._wait_for_instance_status(server.id, nova, 'ACTIVE'): logger.error( 'Failed to boot instance %s: timed out while waiting for instance to become online', instance.uuid, ) raise CloudBackendError('Timed out waiting for instance %s to boot' % instance.uuid) instance.start_time = timezone.now() instance.save() logger.debug('About to infer internal ip addresses of instance %s', instance.uuid) try: server = nova.servers.get(server.id) fixed_address = server.addresses.values()[0][0]['addr'] except (nova_exceptions.ClientException, __HOLE__, IndexError): logger.exception('Failed to infer internal ip addresses of instance %s', 
instance.uuid) else: instance.internal_ips = fixed_address instance.save() logger.info('Successfully inferred internal ip addresses of instance %s', instance.uuid) # Floating ips initialization self.push_floating_ip_to_instance(server, instance, nova) except (glance_exceptions.ClientException, cinder_exceptions.ClientException, nova_exceptions.ClientException, neutron_exceptions.NeutronClientException) as e: logger.exception('Failed to boot instance %s', instance.uuid) event_logger.instance.error( 'Virtual machine {instance_name} creation has failed.', event_type='iaas_instance_creation_failed', event_context={'instance': instance}) six.reraise(CloudBackendError, e) else: logger.info('Successfully booted instance %s', instance.uuid) event_logger.instance.info( 'Virtual machine {instance_name} has been created.', event_type='iaas_instance_creation_succeeded', event_context={'instance': instance}) event_logger.instance.info( 'Virtual machine {instance_name} has been started.', event_type='iaas_instance_start_succeeded', event_context={'instance': instance}) licenses = instance.instance_licenses.all() event_logger.instance_licenses.info( 'Licenses added to VM with name {instance_name}.', event_type='iaas_instance_licenses_added', event_context={ 'instance': instance, 'licenses_types': [l.template_license.license_type for l in licenses], 'licenses_services_types': [l.template_license.service_type for l in licenses], } )
KeyError
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/iaas/backend.py/OpenStackBackend.provision_instance
7,279
def push_floating_ip_to_instance(self, server, instance, nova): if not instance.external_ips or not instance.internal_ips: return logger.debug('About to add external ip %s to instance %s', instance.external_ips, instance.uuid) membership = instance.cloud_project_membership try: floating_ip = membership.floating_ips.get( status__in=('BOOKED', 'DOWN'), address=instance.external_ips, backend_network_id=membership.external_network_id ) server.add_floating_ip(address=instance.external_ips, fixed_address=instance.internal_ips) except ( nova_exceptions.ClientException, ObjectDoesNotExist, MultipleObjectsReturned, KeyError, __HOLE__, ): logger.exception('Failed to add external ip %s to instance %s', instance.external_ips, instance.uuid) instance.set_erred() instance.save() else: floating_ip.status = 'ACTIVE' floating_ip.save() logger.info('Successfully added external ip %s to instance %s', instance.external_ips, instance.uuid)
IndexError
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/iaas/backend.py/OpenStackBackend.push_floating_ip_to_instance
7,280
def release_floating_ip_from_instance(self, instance): if not instance.external_ips: return membership = instance.cloud_project_membership try: floating_ip = membership.floating_ips.get( status='ACTIVE', address=instance.external_ips, backend_network_id=membership.external_network_id ) except (__HOLE__, MultipleObjectsReturned): logger.warning('Failed to release floating ip %s from instance %s', instance.external_ips, instance.uuid) else: floating_ip.status = 'DOWN' floating_ip.save() logger.info('Successfully released floating ip %s from instance %s', instance.external_ips, instance.uuid)
ObjectDoesNotExist
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/iaas/backend.py/OpenStackBackend.release_floating_ip_from_instance
7,281
def _get_instance_volumes(self, nova, cinder, backend_instance_id): try: attached_volume_ids = [ v.volumeId for v in nova.volumes.get_server_volumes(backend_instance_id) ] if len(attached_volume_ids) != 2: logger.info('Skipping instance %s, only instances with 2 volumes are supported, found %d', backend_instance_id, len(attached_volume_ids)) raise LookupError attached_volumes = [ cinder.volumes.get(volume_id) for volume_id in attached_volume_ids ] # Blessed be OpenStack developers for returning booleans as strings system_volume = next(v for v in attached_volumes if v.bootable == 'true') data_volume = next(v for v in attached_volumes if v.bootable == 'false') except (cinder_exceptions.ClientException, __HOLE__) as e: logger.info('Skipping instance %s, failed to fetch volumes', backend_instance_id) six.reraise(LookupError, e) else: return system_volume, data_volume
StopIteration
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/iaas/backend.py/OpenStackBackend._get_instance_volumes
7,282
def _get_instance_template(self, system_volume, membership, backend_instance_id): try: image_id = system_volume.volume_image_metadata['image_id'] return models.Template.objects.get( images__backend_id=image_id, images__cloud__cloudprojectmembership=membership, ) except (__HOLE__, AttributeError): logger.info('Skipping instance %s, failed to infer template', backend_instance_id) raise LookupError except (models.Template.DoesNotExist, models.Template.MultipleObjectsReturned): logger.info('Skipping instance %s, failed to infer template', backend_instance_id) raise LookupError
KeyError
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/iaas/backend.py/OpenStackBackend._get_instance_template
7,283
def _get_flavor_info(self, nova, backend_instance): try: flavor_id = backend_instance.flavor['id'] flavor = nova.flavors.get(flavor_id) except (__HOLE__, AttributeError): logger.info('Skipping instance %s, failed to infer flavor info', backend_instance.id) raise LookupError except nova_exceptions.ClientException as e: logger.info('Skipping instance %s, failed to infer flavor info', backend_instance.id) six.reraise(LookupError, e) else: cores = flavor.vcpus ram = self.get_core_ram_size(flavor.ram) return cores, ram, flavor.name
KeyError
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/iaas/backend.py/OpenStackBackend._get_flavor_info
7,284
def _get_instance_start_time(self, instance): try: launch_time = instance.to_dict()['OS-SRV-USG:launched_at'] d = dateparse.parse_datetime(launch_time) except (KeyError, __HOLE__): return None else: # At the moment OpenStack does not provide any timezone info, # but in future it might do. if timezone.is_naive(d): d = timezone.make_aware(d, timezone.utc) return d
ValueError
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/iaas/backend.py/OpenStackBackend._get_instance_start_time
7,285
def __getitem__(self, key): try: return super(Resampler, self).__getitem__(key) except (__HOLE__, com.AbstractMethodError): # compat for deprecated if isinstance(self.obj, com.ABCSeries): return self._deprecated()[key] raise
KeyError
dataset/ETHPy150Open pydata/pandas/pandas/tseries/resample.py/Resampler.__getitem__
7,286
def _gotitem(self, key, ndim, subset=None): """ sub-classes to define return a sliced object Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ self._set_binner() grouper = self.grouper if subset is None: subset = self.obj grouped = groupby(subset, by=None, grouper=grouper, axis=self.axis) # try the key selection try: return grouped[key] except __HOLE__: return grouped
KeyError
dataset/ETHPy150Open pydata/pandas/pandas/tseries/resample.py/Resampler._gotitem
7,287
def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs): """ revaluate the obj with a groupby aggregation """ if grouper is None: self._set_binner() grouper = self.grouper obj = self._selected_obj try: grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis) except __HOLE__: # panel grouper grouped = PanelGroupBy(obj, grouper=grouper, axis=self.axis) try: result = grouped.aggregate(how, *args, **kwargs) except Exception: # we have a non-reducing function # try to evaluate result = grouped.apply(how, *args, **kwargs) result = self._apply_loffset(result) return self._wrap_result(result)
TypeError
dataset/ETHPy150Open pydata/pandas/pandas/tseries/resample.py/Resampler._groupby_and_aggregate
7,288
def get_profile(self): """ Returns site-specific profile for this user. Raises SiteProfileNotAvailable if this site does not allow profiles. When using the App Engine authentication framework, users are created automatically. """ from django.contrib.auth.models import SiteProfileNotAvailable if not hasattr(self, '_profile_cache'): from django.conf import settings if not hasattr(settings, "AUTH_PROFILE_MODULE"): raise SiteProfileNotAvailable try: app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.') model = models.get_model(app_label, model_name) self._profile_cache = model.all().filter("user =", self).get() if not self._profile_cache: raise model.DoesNotExist except (__HOLE__, ImproperlyConfigured): raise SiteProfileNotAvailable return self._profile_cache
ImportError
dataset/ETHPy150Open CollabQ/CollabQ/appengine_django/auth/models.py/User.get_profile
7,289
def get_ensembl_db_name(self, ens_prefix, trackVersion): '''Used by __init__(), obtains Ensembl database name matching the specified UCSC genome version''' ucsc_versions = sqlgraph.SQLTableMultiNoCache(trackVersion, serverInfo=self.ucsc_server) ucsc_versions._distinct_key = 'db' cursor = self.ens_server.cursor() for t in ucsc_versions[self.ucsc_db]: # search rows until success if ens_prefix is None: # Note: this assumes 'source' in hgFixed.trackVersion contains # the URI of the Ensembl data set and that the last path component # of that URI is the species name of that data set. try: ens_prefix1 = t.source.split('/')[-2] except __HOLE__: continue else: ens_prefix1 = ens_prefix cursor.execute("show databases like '%s_core_%s_%%'" % (ens_prefix1, t.version)) try: return cursor.fetchall()[0][0] except IndexError: pass raise KeyError( "Genome %s doesn't exist or has got no Ensembl data at UCSC" % self.ucsc_db)
IndexError
dataset/ETHPy150Open cjlee112/pygr/pygr/apps/ucsc_ensembl_annot.py/UCSCEnsemblInterface.get_ensembl_db_name
7,290
def get_annot_db(self, table, primaryKey='name', sliceAttrDict=dict(id='chrom', start='chromStart', stop='chromEnd')): '''generic method to obtain an AnnotationDB for any annotation table in UCSC, e.g. snp130. If your target table has non-standard name, start, end columns, specify them in the primaryKey and sliceAttrDict args. Saves table as named attribute on this package object.''' try: # return existing db if already cached here return getattr(self, table) except __HOLE__: pass sliceDB = sqlgraph.SQLTable(self.ucsc_db + '.' + table, primaryKey=primaryKey, serverInfo=self.ucsc_server, itemClass=UCSCSeqIntervalRow) annoDB = annotation.AnnotationDB(sliceDB, self.genome_seq, checkFirstID=False, sliceAttrDict=sliceAttrDict) setattr(self, table, annoDB) # cache this db on named attribute return annoDB
AttributeError
dataset/ETHPy150Open cjlee112/pygr/pygr/apps/ucsc_ensembl_annot.py/UCSCEnsemblInterface.get_annot_db
7,291
def __getitem__(self, k): try: return self.data[k] except __HOLE__: # Not cached yet, extract the exon from transcript data. transcripts = self.gRes.ens_transcripts_of_exons_map2[ self.gRes.ens_exon_stable_id[k]].keys() self.data.update(transcripts[0].get_exon_slices()) # Cache whole transcript interval to speed sequence access self.gRes.genome_seq.cacheHint({transcripts[0].id: (transcripts[0].txStart, transcripts[0].txEnd)}, transcripts[0]) return self.data[k]
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/apps/ucsc_ensembl_annot.py/EnsemblExonOnDemandSliceDB.__getitem__
7,292
def channel(self, channel_id=None): try: return self.channels[channel_id] except __HOLE__: return Channel(self, channel_id)
KeyError
dataset/ETHPy150Open celery/kombu/kombu/transport/amqplib.py/Connection.channel
7,293
def do_POST(self): form = cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ={'REQUEST_METHOD': 'POST'}) if self.path == '/node_jobs': ip_addr = None for item in form.list: if item.name == 'host': # if it looks like IP address, skip resolving if re.match('^\d+[\.\d]+$', item.value): ip_addr = item.value else: try: ip_addr = socket.gethostbyname(item.value) except: ip_addr = item.value break self._dispy_ctx._cluster_lock.acquire() cluster_infos = [(name, cluster_info) for name, cluster_info in self._dispy_ctx._clusters.items()] self._dispy_ctx._cluster_lock.release() jobs = [] node = None for name, cluster_info in cluster_infos: cluster_node = cluster_info.status.get(ip_addr, None) if not cluster_node: continue if node: node.jobs_done += cluster_node.jobs_done node.cpu_time += cluster_node.cpu_time node.update_time = max(node.update_time, cluster_node.update_time) else: node = copy.copy(cluster_node) cluster_jobs = cluster_info.cluster.node_jobs(ip_addr) # args and kwargs are sent as strings in Python, # so an object's __str__ or __repr__ is used if provided; # TODO: check job is in _dispy_ctx's jobs? 
jobs.extend([{'uid': id(job), 'job_id': str(job.id), 'args': ', '.join(str(arg) for arg in job.args), 'kwargs': ', '.join('%s=%s' % (key, val) for key, val in job.kwargs.items()), 'sched_time_ms': int(1000 * job.start_time), 'cluster': name} for job in cluster_jobs]) self.send_response(200) self.send_header('Content-Type', 'application/json; charset=utf-8') self.end_headers() if node and node.avail_info: node.avail_info = node.avail_info.__dict__ self.wfile.write(json.dumps({'node': node.__dict__, 'jobs': jobs}).encode()) return elif self.path == '/cancel_jobs': uids = [] for item in form.list: if item.name == 'uid': try: uids.append(int(item.value)) except __HOLE__: logger.debug('Cancel job uid "%s" is invalid', item.value) self._dispy_ctx._cluster_lock.acquire() cluster_jobs = [(cluster_info.cluster, cluster_info.jobs.get(uid, None)) for cluster_info in self._dispy_ctx._clusters.values() for uid in uids] self._dispy_ctx._cluster_lock.release() cancelled = [] for cluster, job in cluster_jobs: if not job: continue if cluster.cancel(job) == 0: cancelled.append(id(job)) self.send_response(200) self.send_header('Content-Type', 'application/json; charset=utf-8') self.end_headers() self.wfile.write(json.dumps(cancelled).encode()) return elif self.path == '/add_node': node = {'host': '', 'port': None, 'cpus': 0, 'cluster': None} node_id = None cluster = None for item in form.list: if item.name == 'host': node['host'] = item.value elif item.name == 'cluster': node['cluster'] = item.value elif item.name == 'port': node['port'] = item.value elif item.name == 'cpus': try: node['cpus'] = int(item.value) except: pass elif item.name == 'id': node_id = item.value if node['host']: self._dispy_ctx._cluster_lock.acquire() clusters = [cluster_info.cluster for name, cluster_info in self._dispy_ctx._clusters.items() if name == node['cluster'] or not node['cluster']] self._dispy_ctx._cluster_lock.release() for cluster in clusters: cluster.allocate_node(node) self.send_response(200) 
self.send_header('Content-Type', 'text/html') self.end_headers() node['id'] = node_id self.wfile.write(json.dumps(node).encode()) return elif self.path == '/set_poll_sec': for item in form.list: if item.name != 'timeout': continue try: timeout = int(item.value) if timeout < 1: timeout = 0 except: logger.warning('HTTP client %s: invalid timeout "%s" ignored', self.client_address[0], item.value) timeout = 0 self._dispy_ctx._poll_sec = timeout self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() return elif self.path == '/set_cpus': node_cpus = {} for item in form.list: self._dispy_ctx._cluster_lock.acquire() for cluster_info in self._dispy_ctx._clusters.values(): node = cluster_info.status.get(item.name, None) if node: node_cpus[item.name] = cluster_info.cluster.set_node_cpus( item.name, item.value) if node_cpus[item.name] >= 0: break self._dispy_ctx._cluster_lock.release() self.send_response(200) self.send_header('Content-Type', 'application/json; charset=utf-8') self.end_headers() self.wfile.write(json.dumps(node_cpus).encode()) return logger.debug('Bad POST request from %s: %s', self.client_address[0], self.path) self.send_error(400) return
ValueError
dataset/ETHPy150Open pgiri/dispy/py2/dispy/httpd.py/DispyHTTPServer._HTTPRequestHandler.do_POST
7,294
def LoadClientConfigFile(self, client_config_file=None): """Loads client configuration file downloaded from APIs console. Loads client config file from path in settings if not specified. :param client_config_file: path of client config file to read. :type client_config_file: str. :raises: InvalidConfigError """ if client_config_file is None: client_config_file = self.settings['client_config_file'] try: client_type, client_info = clientsecrets.loadfile(client_config_file) except clientsecrets.InvalidClientSecretsError as error: raise InvalidConfigError('Invalid client secrets file %s' % error) if not client_type in (clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED): raise InvalidConfigError('Unknown client_type of client config file') try: config_index = ['client_id', 'client_secret', 'auth_uri', 'token_uri'] for config in config_index: self.client_config[config] = client_info[config] self.client_config['revoke_uri'] = client_info.get('revoke_uri') self.client_config['redirect_uri'] = client_info['redirect_uris'][0] except __HOLE__: raise InvalidConfigError('Insufficient client config in file')
KeyError
dataset/ETHPy150Open googledrive/PyDrive/pydrive/auth.py/GoogleAuth.LoadClientConfigFile
7,295
def LoadClientConfigSettings(self):
    """Loads client configuration from settings file.

    Copies every key named in ``self.CLIENT_CONFIGS_LIST`` from
    ``self.settings['client_config']`` into ``self.client_config``.

    :raises: InvalidConfigError if any required key is missing.
    """
    for config in self.CLIENT_CONFIGS_LIST:
        try:
            self.client_config[config] = self.settings['client_config'][config]
        except KeyError:  # was __HOLE__: required key absent from settings
            raise InvalidConfigError('Insufficient client config in settings')
KeyError
dataset/ETHPy150Open googledrive/PyDrive/pydrive/auth.py/GoogleAuth.LoadClientConfigSettings
7,296
def create_script_tagnames(in_file, out_file):
    """Generate a Wonderware Archestra scrAssignIO script from a tag list.

    Created on 15 Oct 2014 for use with Wonderware Archestra. Reads field
    attribute names (one per line) from ``in_file`` and appends to
    ``out_file`` the DIM declarations, alias-database existence checks and
    input-source assignment ``If`` statements used by the scrAssignIO script.

    USAGE: %prog in_file.txt out_file.txt

    :param in_file: path of text file listing field attributes, one per line.
    :param out_file: path of text file the generated script is appended to.
    :returns: 1 on success, 2 on a read/write error.

    @author: Roan Fourie
    @mail: roanfourie@gmail.com
    """
    return_data = 0
    try:
        # Read the attribute names once (newline stripped); they are reused
        # for each generated section instead of re-reading the input file.
        with open(in_file, 'rt') as fi:
            tags = [line.rstrip('\n') for line in fi]

        # A single append-mode handle replaces the original per-line
        # open/seek/close cycle; 'with' guarantees the file is closed.
        with open(out_file, 'at') as fo:
            fo.seek(0, 2)  # position at end of an existing file

            # --- header and variable declarations ---
            fo.write("'Wait two execution counts to make sure script executes\n"
                     "If Me.scrAssignIO.ExecutionCnt > 2 Then\n"
                     "\t'Declare variables\n"
                     "\tDIM Datasource AS STRING;\n"
                     "\tDIM AliasDB AS INDIRECT;\n")
            for tag in tags:
                fo.write('\tDIM ' + tag + '_Exist AS INTEGER;\n')

            # --- datasource binding and alias-database existence checks ---
            fo.write("\n\n'Datasource is the input/output source you want to "
                     "allocate to your field attribute\n"
                     '\tDatasource = Me.PLCPath + "." + Me.Tagname;\n'
                     "\t'Create the alias list that is in the Device "
                     "Integration object using the PLCPath uda to make it "
                     "more dynamic\n"
                     '\tAliasDB.bindto(Me.PLCPath + ".AliasDataBase");\n'
                     "\t'The exist variables will return an integer value "
                     "when the string exists in the alias database in the DI "
                     "object\n"
                     "\t'A string comparison is done between the Alias DB "
                     "list and the Attribute to see if the attribute exists "
                     "in the Alias DB\n"
                     "\t'The StringChar function looks at the ASCII value "
                     "for a quote which seperates the alias tags from each "
                     "other in the aliasDB\n")
            for tag in tags:
                fo.write('\t' + tag + '_Exist = StringInString(AliasDB, '
                         '(StringChar(34) + Me.TagName + ".' + tag +
                         '" + StringChar(34)),1,0);\n')

            # --- input-source assignment per attribute ---
            fo.write("\n\n\t'The value is then assigned to the DIN Field "
                     "Attribute input source or unassigned if it is not in "
                     "the AliasDB\n")
            for tag in tags:
                fo.write('\tIf %s_Exist > 0 Then\n'
                         '\t\tMe.%s.Input.InputSource = Datasource + ".%s";\n'
                         '\tElse\n'
                         '\t\tMe.%s.Input.InputSource = "---";\n'
                         '\tEndIf;\n\n' % (tag, tag, tag, tag))

            # --- footer: one-shot trigger and optional troubleshooting log ---
            fo.write("\n'The script is then finished by setting the trigger "
                     "bit to true and it will stop executing\n"
                     "Me.AssignIOCmd = True;\n"
                     "EndIf;\n"
                     "'A Log Message flag is added to see if the script "
                     "executed in the SMC - This can be excluded and is for "
                     "troubleshooting\n"
                     "'LogMessage (Me.Tagname + \"Finished\");\n")
    except IOError:  # was __HOLE__: input unreadable or output unwritable
        # NOTE: the original also had `finally: fi.close()`, which raised
        # NameError when the very first open() failed; 'with' removes the
        # need for any manual close.
        print("Error in reading/writing file.")
        return_data = 2
    else:
        print('Operation completed successfully.')
        return_data = 1
    print("done")
    return return_data
IOError
dataset/ETHPy150Open RoanFourie/ArchestrA-Tools/aaTools/aaCreateScript.py/create_script_tagnames
7,297
def get_language_info(lang_code):
    """Return Django's ``LANG_INFO`` entry for ``lang_code``.

    :param lang_code: language code such as ``'en'`` or ``'pt-br'``.
    :returns: the info dict registered in ``django.conf.locale.LANG_INFO``.
    :raises: KeyError with a descriptive message for unknown codes.
    """
    # Imported lazily so module import does not require locale data.
    from django.conf.locale import LANG_INFO
    try:
        return LANG_INFO[lang_code]
    except KeyError:  # was __HOLE__: re-raise with a clearer message
        raise KeyError("Unknown language code %r." % lang_code)
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/utils/translation/__init__.py/get_language_info
7,298
def teardown():
    """Remove the temporary test directory, warning instead of failing.

    Best-effort cleanup: a failed removal (e.g. directory already gone or
    permission denied) is reported but deliberately not re-raised.
    """
    try:
        shutil.rmtree(TMPDIR)
    except OSError:  # was __HOLE__: rmtree failures surface as OSError
        print('Failed to remove {}'.format(TMPDIR))
OSError
dataset/ETHPy150Open bkg/django-spillway/runtests.py/teardown
7,299
def runtests():
    """Configure Django if needed, run the ``tests`` suite and exit.

    Always calls :func:`teardown` before exiting; the process exit status
    is the test runner's status, or 1 on an unexpected error.
    """
    if not settings.configured:
        settings.configure(**DEFAULT_SETTINGS)
    # Compatibility with Django 1.7's stricter initialization
    if hasattr(django, 'setup'):
        django.setup()
    # Make the package under test importable regardless of the cwd.
    parent = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, parent)
    try:
        from django.test.runner import DiscoverRunner
        runner_class = DiscoverRunner
    except ImportError:  # was __HOLE__: Django < 1.6 fallback runner
        from django.test.simple import DjangoTestSuiteRunner
        runner_class = DjangoTestSuiteRunner
    try:
        status = runner_class(
            verbosity=1, interactive=True, failfast=False).run_tests(['tests'])
    except Exception:  # narrowed from bare except: still maps errors to 1
        status = 1
    finally:
        teardown()
    sys.exit(status)
ImportError
dataset/ETHPy150Open bkg/django-spillway/runtests.py/runtests