Dataset columns:
Unnamed: 0: int64 (values 0 to 10k)
function: string (lengths 79 to 138k)
label: string (20 distinct classes)
info: string (lengths 42 to 261)
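
Each row below pairs a Python function in which the caught exception name has been replaced by the placeholder __HOLE__ (the function column) with the masked exception class (the label column) and the path of the originating ETHPy150Open file (the info column). As a minimal sketch of how such a dump might be consumed, assuming it was exported to a CSV file with exactly these four columns (the file name is hypothetical):

import pandas as pd

# Assumption: the records below come from a CSV export with the four columns
# listed above; "exception_samples.csv" is a hypothetical file name.
df = pd.read_csv("exception_samples.csv")

# Restore one sample by substituting the masked exception class back into the source.
row = df.iloc[0]
restored = row["function"].replace("__HOLE__", row["label"])

print(row["label"], row["info"])
print(restored[:200])

Every record shown in this section contains a single __HOLE__ placeholder, so the same substitution restores each sample listed here.
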
100
@classmethod def getInstance(cls): if cls.instance != None: return cls.instance else: cls.instance = TestPlanSettings() try: configFile = sys.argv[1] except __HOLE__: exit("Please specify the configuration file") cls.instance._config = ConfigParser.ConfigParser() assert len(cls.instance._config.read(configFile)) > 0, "Unable to read the config file" return cls.instance
IndexError
dataset/ETHPy150Open azoft-dev-team/imagrium/src/core/testplan_settings.py/TestPlanSettings.getInstance
101
def __init__(self, key_name, data_type, *args, **kwargs): subspec = kwargs.pop('subspec', None) super(KeyTransform, self).__init__(*args, **kwargs) self.key_name = key_name self.data_type = data_type try: output_field = self.TYPE_MAP[data_type] except __HOLE__: # pragma: no cover raise ValueError("Invalid data_type '{}'".format(data_type)) if data_type == 'BINARY': self.output_field = output_field(spec=subspec) else: self.output_field = output_field
KeyError
dataset/ETHPy150Open adamchainz/django-mysql/django_mysql/models/fields/dynamic.py/KeyTransform.__init__
102
def workerProcess(self): """Loop getting clients from the shared queue and process them""" if self.postForkCallback: self.postForkCallback() while self.isRunning.value: try: client = self.serverTransport.accept() self.serveClient(client) except (KeyboardInterrupt, __HOLE__): return 0 except Exception, x: logging.exception(x)
SystemExit
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/thrift-0.9.1/src/server/TProcessPoolServer.py/TProcessPoolServer.workerProcess
103
def serve(self): """Start workers and put into queue""" # this is a shared state that can tell the workers to exit when False self.isRunning.value = True # first bind and listen to the port self.serverTransport.listen() # fork the children for i in range(self.numWorkers): try: w = Process(target=self.workerProcess) w.daemon = True w.start() self.workers.append(w) except Exception, x: logging.exception(x) # wait until the condition is set by stop() while True: self.stopCondition.acquire() try: self.stopCondition.wait() break except (SystemExit, __HOLE__): break except Exception, x: logging.exception(x) self.isRunning.value = False
KeyboardInterrupt
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/thrift-0.9.1/src/server/TProcessPoolServer.py/TProcessPoolServer.serve
104
def num_row_elements(row): '''Get number of elements in CSV row.''' try: rowset = set(row) rowset.discard('') return len(rowset) except __HOLE__: return 0
TypeError
dataset/ETHPy150Open xesscorp/KiPart/kipart/common.py/num_row_elements
105
def get_part_num(csv_reader): '''Get the part number from a row of the CSV file.''' part_num = get_nonblank_row(csv_reader) try: part_num = set(part_num) part_num.discard('') return part_num.pop() except __HOLE__: return None
TypeError
dataset/ETHPy150Open xesscorp/KiPart/kipart/common.py/get_part_num
106
def save_normalized_image(path, data): image_parser = ImageFile.Parser() try: image_parser.feed(data) image = image_parser.close() except __HOLE__: raise return False image.thumbnail(MAX_IMAGE_SIZE, Image.ANTIALIAS) if image.mode != 'RGB': image = image.convert('RGB') image.save(path) return True
IOError
dataset/ETHPy150Open bboe/flask-image-uploader/app.py/save_normalized_image
107
def run(self): if self._cancelled: return conn = None try: conn = self.try_reconnect() except Exception as exc: try: next_delay = next(self.schedule) except __HOLE__: # the schedule has been exhausted next_delay = None # call on_exception for logging purposes even if next_delay is None if self.on_exception(exc, next_delay): if next_delay is None: log.warning( "Will not continue to retry reconnection attempts " "due to an exhausted retry schedule") else: self.scheduler.schedule(next_delay, self.run) else: if not self._cancelled: self.on_reconnection(conn) self.callback(*(self.callback_args), **(self.callback_kwargs)) finally: if conn: conn.close()
StopIteration
dataset/ETHPy150Open datastax/python-driver/cassandra/pool.py/_ReconnectionHandler.run
108
def call(xc, p, qname, contextItem, args): try: cfSig = xc.modelXbrl.modelCustomFunctionSignatures[qname, len(args)] if cfSig is not None and cfSig.customFunctionImplementation is not None: return callCfi(xc, p, qname, cfSig, contextItem, args) elif qname in xc.customFunctions: # plug in method custom functions return xc.customFunctions[qname](xc, p, contextItem, args) # use plug-in's method elif qname not in customFunctions: # compiled functions in this module raise fnFunctionNotAvailable return customFunctions[qname](xc, p, contextItem, args) except (fnFunctionNotAvailable, __HOLE__): raise XPathContext.FunctionNotAvailable("custom function:{0}".format(str(qname)))
KeyError
dataset/ETHPy150Open Arelle/Arelle/arelle/FunctionCustom.py/call
109
@memoize_default() def _get_under_cursor_stmt(self, cursor_txt, start_pos=None): tokenizer = source_tokens(cursor_txt) r = Parser(self._grammar, cursor_txt, tokenizer=tokenizer) try: # Take the last statement available that is not an endmarker. # And because it's a simple_stmt, we need to get the first child. stmt = r.module.children[-2].children[0] except (__HOLE__, IndexError): return None user_stmt = self._parser.user_stmt() if user_stmt is None: # Set the start_pos to a pseudo position, that doesn't exist but # works perfectly well (for both completions in docstrings and # statements). pos = start_pos or self._pos else: pos = user_stmt.start_pos stmt.move(pos[0] - 1, pos[1]) # Moving the offset. stmt.parent = self._parser.user_scope() return stmt
AttributeError
dataset/ETHPy150Open davidhalter/jedi/jedi/api/__init__.py/Script._get_under_cursor_stmt
110
def _goto(self, add_import_name=False): """ Used for goto_assignments and usages. :param add_import_name: Add the the name (if import) to the result. """ def follow_inexistent_imports(defs): """ Imports can be generated, e.g. following `multiprocessing.dummy` generates an import dummy in the multiprocessing module. The Import doesn't exist -> follow. """ definitions = set(defs) for d in defs: if isinstance(d.parent, tree.Import) \ and d.start_pos == (0, 0): i = imports.ImportWrapper(self._evaluator, d.parent).follow(is_goto=True) definitions.remove(d) definitions |= follow_inexistent_imports(i) return definitions goto_path = self._user_context.get_path_under_cursor() context = self._user_context.get_context() user_stmt = self._parser.user_stmt() user_scope = self._parser.user_scope() stmt = self._get_under_cursor_stmt(goto_path) if stmt is None: return [] if user_scope is None: last_name = None else: # Try to use the parser if possible. last_name = user_scope.name_for_position(self._pos) if last_name is None: last_name = stmt while not isinstance(last_name, tree.Name): try: last_name = last_name.children[-1] except __HOLE__: # Doesn't have a name in it. return [] if next(context) in ('class', 'def'): # The cursor is on a class/function name. user_scope = self._parser.user_scope() definitions = set([user_scope.name]) elif isinstance(user_stmt, tree.Import): s, name = helpers.get_on_import_stmt(self._evaluator, self._user_context, user_stmt) definitions = self._evaluator.goto(name) else: # The Evaluator.goto function checks for definitions, but since we # use a reverse tokenizer, we have new name_part objects, so we # have to check the user_stmt here for positions. if isinstance(user_stmt, tree.ExprStmt) \ and isinstance(last_name.parent, tree.ExprStmt): for name in user_stmt.get_defined_names(): if name.start_pos <= self._pos <= name.end_pos: return [name] defs = self._evaluator.goto(last_name) definitions = follow_inexistent_imports(defs) return definitions
AttributeError
dataset/ETHPy150Open davidhalter/jedi/jedi/api/__init__.py/Script._goto
111
def _analysis(self): def check_types(types): for typ in types: try: f = typ.iter_content except __HOLE__: pass else: check_types(f()) #statements = set(chain(*self._parser.module().used_names.values())) nodes, imp_names, decorated_funcs = \ analysis.get_module_statements(self._parser.module()) # Sort the statements so that the results are reproducible. for n in imp_names: imports.ImportWrapper(self._evaluator, n).follow() for node in sorted(nodes, key=lambda obj: obj.start_pos): check_types(self._evaluator.eval_element(node)) for dec_func in decorated_funcs: er.Function(self._evaluator, dec_func).get_decorated_func() ana = [a for a in self._evaluator.analysis if self.path == a.path] return sorted(set(ana), key=lambda x: x.line)
AttributeError
dataset/ETHPy150Open davidhalter/jedi/jedi/api/__init__.py/Script._analysis
112
def _simple_complete(self, path, dot, like): user_stmt = self._parser.user_stmt_with_whitespace() is_simple_path = not path or re.search('^[\w][\w\d.]*$', path) if isinstance(user_stmt, tree.Import) or not is_simple_path: return super(Interpreter, self)._simple_complete(path, dot, like) else: class NamespaceModule(object): def __getattr__(_, name): for n in self.namespaces: try: return n[name] except __HOLE__: pass raise AttributeError() def __dir__(_): gen = (n.keys() for n in self.namespaces) return list(set(chain.from_iterable(gen))) paths = path.split('.') if path else [] namespaces = (NamespaceModule(), builtins) for p in paths: old, namespaces = namespaces, [] for n in old: try: namespaces.append(getattr(n, p)) except Exception: pass completion_names = [] for namespace in namespaces: for name in dir(namespace): if name.lower().startswith(like.lower()): scope = self._parser.module() n = FakeName(name, scope) completion_names.append(n) return completion_names
KeyError
dataset/ETHPy150Open davidhalter/jedi/jedi/api/__init__.py/Interpreter._simple_complete
113
def get_class_alias(klass): """ Tries to find a suitable L{pyamf.ClassAlias} subclass for C{klass}. """ for k, v in pyamf.ALIAS_TYPES.iteritems(): for kl in v: try: if issubclass(klass, kl): return k except __HOLE__: # not a class if hasattr(kl, '__call__'): if kl(klass) is True: return k
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/PyAMF-0.6.1/pyamf/util/__init__.py/get_class_alias
114
@login_required @require_POST @xframe_options_sameorigin def up_image_async(request, model_name, object_pk): """Upload all images in request.FILES.""" # Verify the model agaist our white-list if model_name not in ALLOWED_MODELS: message = _('Model not allowed.') return HttpResponseBadRequest( json.dumps({'status': 'error', 'message': message})) # Get the model m = get_model(*model_name.split('.')) # Then look up the object by pk try: obj = m.objects.get(pk=object_pk) except __HOLE__: message = _('Object does not exist.') return HttpResponseNotFound( json.dumps({'status': 'error', 'message': message})) try: file_info = upload_imageattachment(request, obj) except FileTooLargeError as e: return HttpResponseBadRequest( json.dumps({'status': 'error', 'message': e.args[0]})) if isinstance(file_info, dict) and 'thumbnail_url' in file_info: return HttpResponse( json.dumps({'status': 'success', 'file': file_info})) message = _('Invalid or no image received.') return HttpResponseBadRequest( json.dumps({'status': 'error', 'message': message, 'errors': file_info}))
ObjectDoesNotExist
dataset/ETHPy150Open mozilla/kitsune/kitsune/upload/views.py/up_image_async
115
def __init__(self, environ): script_name = base.get_script_name(environ) path_info = force_unicode(environ.get('PATH_INFO', u'/')) if not path_info or path_info == script_name: # Sometimes PATH_INFO exists, but is empty (e.g. accessing # the SCRIPT_NAME URL without a trailing slash). We really need to # operate as if they'd requested '/'. Not amazingly nice to force # the path like this, but should be harmless. # # (The comparison of path_info to script_name is to work around an # apparent bug in flup 1.0.1. Se Django ticket #8490). path_info = u'/' self.environ = environ self.path_info = path_info self.path = '%s%s' % (script_name, path_info) self.META = environ self.META['PATH_INFO'] = path_info self.META['SCRIPT_NAME'] = script_name self.method = environ['REQUEST_METHOD'].upper() self._post_parse_error = False if type(socket._fileobject) is type and isinstance(self.environ['wsgi.input'], socket._fileobject): # Under development server 'wsgi.input' is an instance of # socket._fileobject which hangs indefinitely on reading bytes past # available count. To prevent this it's wrapped in LimitedStream # that doesn't read past Content-Length bytes. # # This is not done for other kinds of inputs (like flup's FastCGI # streams) beacuse they don't suffer from this problem and we can # avoid using another wrapper with its own .read and .readline # implementation. # # The type check is done because for some reason, AppEngine # implements _fileobject as a function, not a class. try: content_length = int(self.environ.get('CONTENT_LENGTH', 0)) except (ValueError, __HOLE__): content_length = 0 self._stream = LimitedStream(self.environ['wsgi.input'], content_length) else: self._stream = self.environ['wsgi.input'] self._read_started = False
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/core/handlers/wsgi.py/WSGIRequest.__init__
116
def __call__(self, environ, start_response): from django.conf import settings # Set up middleware if needed. We couldn't do this earlier, because # settings weren't available. if self._request_middleware is None: self.initLock.acquire() try: try: # Check that middleware is still uninitialised. if self._request_middleware is None: self.load_middleware() except: # Unload whatever middleware we got self._request_middleware = None raise finally: self.initLock.release() set_script_prefix(base.get_script_name(environ)) signals.request_started.send(sender=self.__class__) try: try: request = self.request_class(environ) except __HOLE__: logger.warning('Bad Request (UnicodeDecodeError)', exc_info=sys.exc_info(), extra={ 'status_code': 400, } ) response = http.HttpResponseBadRequest() else: response = self.get_response(request) finally: signals.request_finished.send(sender=self.__class__) try: status_text = STATUS_CODE_TEXT[response.status_code] except KeyError: status_text = 'UNKNOWN STATUS CODE' status = '%s %s' % (response.status_code, status_text) response_headers = [(str(k), str(v)) for k, v in response.items()] for c in response.cookies.values(): response_headers.append(('Set-Cookie', str(c.output(header='')))) start_response(status, response_headers) return response
UnicodeDecodeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/core/handlers/wsgi.py/WSGIHandler.__call__
117
def _setup_flavor(env, flavor): """Setup a flavor, providing customization hooks to modify CloudBioLinux installs. Specify flavor as a name, in which case we look it up in the standard flavor directory (contrib/flavor/your_flavor), or as an absolute path to a flavor directory outside of cloudbiolinux. """ env.flavor = Flavor(env) env.flavor_dir = None if flavor: # set the directory for flavor customizations if os.path.isabs(flavor) or os.path.exists(flavor): flavor_dir = flavor else: flavor_dir = os.path.join(os.path.dirname(__file__), '..', 'contrib', 'flavor', flavor) assert os.path.exists(flavor_dir), \ "Did not find directory {0} for flavor {1}".format(flavor_dir, flavor) env.flavor_dir = flavor_dir flavor_name = os.path.split(flavor_dir)[-1] # Reinstantiate class if custom defined import cloudbio.flavor try: env.flavor = getattr(cloudbio.flavor, flavor_name.capitalize())(env) except __HOLE__: pass env.flavor.name = flavor_name # Load python customizations to base configuration if present for ext in ["", "flavor"]: py_flavor = os.path.split(os.path.realpath(flavor_dir))[1] + ext flavor_custom_py = os.path.join(flavor_dir, "{0}.py".format(py_flavor)) if os.path.exists(flavor_custom_py): sys.path.append(flavor_dir) mod = __import__(py_flavor, fromlist=[py_flavor]) env.logger.info(env.flavor) env.logger.info("This is a %s flavor" % env.flavor.name)
AttributeError
dataset/ETHPy150Open chapmanb/cloudbiolinux/cloudbio/utils.py/_setup_flavor
118
@staticmethod def try_registered_completion(ctxt, symname, completions): debugging = ctxt.get_binding('*DEBUG*', False) if ctxt.remainder or completions is None: return False try: completer = ctxt.get_completer(symname) except __HOLE__: return False if debugging: print "Trying completer %r with %r" % (completer, ctxt) try: new_compls = completer(ctxt) except Exception: if debugging: import traceback traceback.print_exc() return False if debugging: print "got %r" % (new_compls,) completions.update(new_compls) return True
KeyError
dataset/ETHPy150Open francelabs/datafari/cassandra/pylib/cqlshlib/pylexotron.py/matcher.try_registered_completion
119
def match(self, ctxt, completions): prevname = ctxt.productionname try: rule = ctxt.get_production_by_name(self.arg) except __HOLE__: raise ValueError("Can't look up production rule named %r" % (self.arg,)) output = rule.match(ctxt.with_production_named(self.arg), completions) return [c.with_production_named(prevname) for c in output]
KeyError
dataset/ETHPy150Open francelabs/datafari/cassandra/pylib/cqlshlib/pylexotron.py/rule_reference.match
120
def validate_port_or_colon_separated_port_range(port_range): """Accepts a port number or a single-colon separated range.""" if port_range.count(':') > 1: raise ValidationError(_("One colon allowed in port range")) ports = port_range.split(':') for port in ports: try: if int(port) not in range(-1, 65536): raise ValidationError(_("Not a valid port number")) except __HOLE__: raise ValidationError(_("Port number must be integer"))
ValueError
dataset/ETHPy150Open CiscoSystems/avos/horizon/utils/validators.py/validate_port_or_colon_separated_port_range
121
def __eq__(self, other): try: return self.__dict__ == other.__dict__ except __HOLE__: return False
AttributeError
dataset/ETHPy150Open Teradata/PyTd/teradata/datatypes.py/Interval.__eq__
122
def __eq__(self, other): try: return self.__dict__ == other.__dict__ except __HOLE__: return False
AttributeError
dataset/ETHPy150Open Teradata/PyTd/teradata/datatypes.py/Period.__eq__
123
def test_private_field_access_raises_exception(self): try: self.proxy._private_field.get() self.fail('Should raise AttributeError exception') except __HOLE__: pass except Exception: self.fail('Should raise AttributeError exception')
AttributeError
dataset/ETHPy150Open jodal/pykka/tests/field_access_test.py/FieldAccessTest.test_private_field_access_raises_exception
124
@cached_property def is_good_choice(self): if self.biblio_name=="title": try: if self.biblio_value.isupper(): return False except __HOLE__: #some titles are ints, apparently return False return True
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/biblio.py/BiblioRow.is_good_choice
125
@cached_property def display_year(self): try: return str(self.year) except (__HOLE__, UnicodeEncodeError): return None
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/biblio.py/Biblio.display_year
126
@cached_property def calculated_host(self): try: return self.repository.split(" ")[0].lower() except __HOLE__: return None
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/biblio.py/Biblio.calculated_host
127
@cached_property def display_authors(self): try: auths = ",".join(self.authors.split(",")[0:3]) if auths.isupper(): auths = auths.title() if len(auths) < len(self.authors): auths += " et al." except __HOLE__: auths = None return auths
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/biblio.py/Biblio.display_authors
128
@cached_property def author_list(self): try: auth_list = self.authors.split(",") except __HOLE__: auth_list = [] ret = [] for auth in auth_list: my_auth = auth.strip() try: if my_auth.isupper(): my_auth = my_auth.title() except AttributeError: pass ret.append(my_auth) return ret
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/biblio.py/Biblio.author_list
129
@cached_property def display_title(self): try: ret = self.title except __HOLE__: ret = "no title available" try: if ret.isupper(): ret = ret.title() except AttributeError: #some titles are ints, apparently pass return ret
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/biblio.py/Biblio.display_title
130
@cached_property def display_host(self): try: return self.journal except AttributeError: try: return self.repository except __HOLE__: return ''
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/biblio.py/Biblio.display_host
131
@cached_property def free_fulltext_host(self): try: return self._get_url_host(self.free_fulltext_url) except __HOLE__: return None
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/biblio.py/Biblio.free_fulltext_host
132
def create(vm_): ''' Create a single VM from a data dict ''' try: # Check for required profile parameters before sending any API calls. if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'parallels', vm_['profile'], vm_=vm_) is False: return False except __HOLE__: pass # Since using "provider: <provider-engine>" is deprecated, alias provider # to use driver: "driver: <provider-engine>" if 'provider' in vm_: vm_['driver'] = vm_.pop('provider') salt.utils.cloud.fire_event( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], }, transport=__opts__['transport'] ) log.info('Creating Cloud VM {0}'.format(vm_['name'])) try: data = create_node(vm_) except Exception as exc: log.error( 'Error creating {0} on PARALLELS\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: \n{1}'.format( vm_['name'], str(exc) ), # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) return False name = vm_['name'] if not wait_until(name, 'CREATED'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} start(vm_['name'], call='action') if not wait_until(name, 'STARTED'): return {'Error': 'Unable to start {0}, command timed out'.format(name)} def __query_node_data(vm_name): data = show_instance(vm_name, call='action') if 'public-ip' not in data['network']: # Trigger another iteration return return data try: data = salt.utils.cloud.wait_for_ip( __query_node_data, update_args=(vm_['name'],), timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=5 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', vm_, __opts__, default=5), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(str(exc)) comps = data['network']['public-ip']['address'].split('/') public_ip = comps[0] vm_['ssh_host'] = public_ip ret = salt.utils.cloud.bootstrap(vm_, __opts__) log.info('Created Cloud VM \'{0[name]}\''.format(vm_)) log.debug( '\'{0[name]}\' VM creation details:\n{1}'.format( vm_, pprint.pformat(data) ) ) salt.utils.cloud.fire_event( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['driver'], }, transport=__opts__['transport'] ) return data
AttributeError
dataset/ETHPy150Open saltstack/salt/salt/cloud/clouds/parallels.py/create
133
def render(self, request): """ Renders dojo.data compatible JSON if self.data is a QuerySet, falls back to standard JSON. """ callback = request.GET.get('callback', None) try: indent = int(request.GET['indent']) except (KeyError, __HOLE__): indent = None data = self.construct() if isinstance(self.data, QuerySet): unicode_lookup_table = dict() [unicode_lookup_table.__setitem__(item.pk, unicode(item)) \ for item in self.data] for dict_item in data: try: id = dict_item['id'] except KeyError: raise KeyError('The handler of the model that you want '\ 'to emit as DojoData needs to expose the `id` field!') else: dict_item.setdefault('_unicode', unicode_lookup_table[id]) data = { 'identifier': 'id', 'items': data, 'label': '_unicode', 'numRows': self.data.count(), } serialized_data = json.dumps(data, ensure_ascii=False, cls=DateTimeAwareJSONEncoder, indent=indent) if callback and is_valid_jsonp_callback_value(callback): return '%s(%s)' % (callback, serialized_data) return serialized_data
ValueError
dataset/ETHPy150Open klipstein/dojango/dojango/data/piston/emitters.py/DojoDataEmitter.render
134
def convert(self, text): """Convert the given text.""" # Main function. The order in which other subs are called here is # essential. Link and image substitutions need to happen before # _EscapeSpecialChars(), so that any *'s or _'s in the <a> # and <img> tags get encoded. # Clear the global hashes. If we don't clear these, you get conflicts # from other articles when generating a page which contains more than # one article (e.g. an index page that shows the N most recent # articles): self.reset() if not isinstance(text, unicode): #TODO: perhaps shouldn't presume UTF-8 for string input? text = unicode(text, 'utf-8') if self.use_file_vars: # Look for emacs-style file variable hints. emacs_vars = self._get_emacs_vars(text) if "markdown-extras" in emacs_vars: splitter = re.compile("[ ,]+") for e in splitter.split(emacs_vars["markdown-extras"]): if '=' in e: ename, earg = e.split('=', 1) try: earg = int(earg) except __HOLE__: pass else: ename, earg = e, None self.extras[ename] = earg # Standardize line endings: text = re.sub("\r\n|\r", "\n", text) # Make sure $text ends with a couple of newlines: text += "\n\n" # Convert all tabs to spaces. text = self._detab(text) # Strip any lines consisting only of spaces and tabs. # This makes subsequent regexen easier to write, because we can # match consecutive blank lines with /\n+/ instead of something # contorted like /[ \t]*\n+/ . text = self._ws_only_line_re.sub("", text) if self.safe_mode: text = self._hash_html_spans(text) # Turn block-level HTML blocks into hash entries text = self._hash_html_blocks(text, raw=True) # Strip link definitions, store in hashes. if "footnotes" in self.extras: # Must do footnotes first because an unlucky footnote defn # looks like a link defn: # [^4]: this "looks like a link defn" text = self._strip_footnote_definitions(text) text = self._strip_link_definitions(text) text = self._run_block_gamut(text) text = self._unescape_special_chars(text) if "footnotes" in self.extras: text = self._add_footnotes(text) if self.safe_mode: text = self._unhash_html_spans(text) text += "\n" return text
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/markdown/markdown2.py/Markdown.convert
135
def _get_emacs_vars(self, text): """Return a dictionary of emacs-style local variables. Parsing is done loosely according to this spec (and according to some in-practice deviations from this): http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables """ emacs_vars = {} SIZE = pow(2, 13) # 8kB # Search near the start for a '-*-'-style one-liner of variables. head = text[:SIZE] if "-*-" in head: match = self._emacs_oneliner_vars_pat.search(head) if match: emacs_vars_str = match.group(1) assert '\n' not in emacs_vars_str emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';') if s.strip()] if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]: # While not in the spec, this form is allowed by emacs: # -*- Tcl -*- # where the implied "variable" is "mode". This form # is only allowed if there are no other variables. emacs_vars["mode"] = emacs_var_strs[0].strip() else: for emacs_var_str in emacs_var_strs: try: variable, value = emacs_var_str.strip().split(':', 1) except __HOLE__: log.debug("emacs variables error: malformed -*- " "line: %r", emacs_var_str) continue # Lowercase the variable name because Emacs allows "Mode" # or "mode" or "MoDe", etc. emacs_vars[variable.lower()] = value.strip() tail = text[-SIZE:] if "Local Variables" in tail: match = self._emacs_local_vars_pat.search(tail) if match: prefix = match.group("prefix") suffix = match.group("suffix") lines = match.group("content").splitlines(0) #print "prefix=%r, suffix=%r, content=%r, lines: %s"\ # % (prefix, suffix, match.group("content"), lines) # Validate the Local Variables block: proper prefix and suffix # usage. for i, line in enumerate(lines): if not line.startswith(prefix): log.debug("emacs variables error: line '%s' " "does not use proper prefix '%s'" % (line, prefix)) return {} # Don't validate suffix on last line. Emacs doesn't care, # neither should we. if i != len(lines)-1 and not line.endswith(suffix): log.debug("emacs variables error: line '%s' " "does not use proper suffix '%s'" % (line, suffix)) return {} # Parse out one emacs var per line. continued_for = None for line in lines[:-1]: # no var on the last line ("PREFIX End:") if prefix: line = line[len(prefix):] # strip prefix if suffix: line = line[:-len(suffix)] # strip suffix line = line.strip() if continued_for: variable = continued_for if line.endswith('\\'): line = line[:-1].rstrip() else: continued_for = None emacs_vars[variable] += ' ' + line else: try: variable, value = line.split(':', 1) except ValueError: log.debug("local variables error: missing colon " "in local variables entry: '%s'" % line) continue # Do NOT lowercase the variable name, because Emacs only # allows "mode" (and not "Mode", "MoDe", etc.) in this block. value = value.strip() if value.endswith('\\'): value = value[:-1].rstrip() continued_for = variable else: continued_for = None emacs_vars[variable] = value # Unquote values. for var, val in emacs_vars.items(): if len(val) > 1 and (val.startswith('"') and val.endswith('"') or val.startswith('"') and val.endswith('"')): emacs_vars[var] = val[1:-1] return emacs_vars # Cribbed from a post by Bart Lateur: # <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/markdown/markdown2.py/Markdown._get_emacs_vars
136
def _hash_html_blocks(self, text, raw=False): """Hashify HTML blocks We only want to do this for block-level HTML tags, such as headers, lists, and tables. That's because we still want to wrap <p>s around "paragraphs" that are wrapped in non-block-level tags, such as anchors, phrase emphasis, and spans. The list of tags we're looking for is hard-coded. @param raw {boolean} indicates if these are raw HTML blocks in the original source. It makes a difference in "safe" mode. """ if '<' not in text: return text # Pass `raw` value into our calls to self._hash_html_block_sub. hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw) # First, look for nested blocks, e.g.: # <div> # <div> # tags for inner block must be indented. # </div> # </div> # # The outermost tags must start at the left margin for this to match, and # the inner nested divs must be indented. # We need to do this before the next, more liberal match, because the next # match will start at the first `<div>` and stop at the first `</div>`. text = self._strict_tag_block_re.sub(hash_html_block_sub, text) # Now match more liberally, simply from `\n<tag>` to `</tag>\n` text = self._liberal_tag_block_re.sub(hash_html_block_sub, text) # Special case just for <hr />. It was easier to make a special # case than to make the other regex more complicated. if "<hr" in text: _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width) text = _hr_tag_re.sub(hash_html_block_sub, text) # Special case for standalone HTML comments: if "<!--" in text: start = 0 while True: # Delimiters for next comment block. try: start_idx = text.index("<!--", start) except ValueError, ex: break try: end_idx = text.index("-->", start_idx) + 3 except __HOLE__, ex: break # Start position for next comment block search. start = end_idx # Validate whitespace before comment. if start_idx: # - Up to `tab_width - 1` spaces before start_idx. for i in range(self.tab_width - 1): if text[start_idx - 1] != ' ': break start_idx -= 1 if start_idx == 0: break # - Must be preceded by 2 newlines or hit the start of # the document. if start_idx == 0: pass elif start_idx == 1 and text[0] == '\n': start_idx = 0 # to match minute detail of Markdown.pl regex elif text[start_idx-2:start_idx] == '\n\n': pass else: break # Validate whitespace after comment. # - Any number of spaces and tabs. while end_idx < len(text): if text[end_idx] not in ' \t': break end_idx += 1 # - Must be following by 2 newlines or hit end of text. if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'): continue # Escape and hash (must match `_hash_html_block_sub`). html = text[start_idx:end_idx] if raw and self.safe_mode: html = self._sanitize_html(html) key = _hash_text(html) self.html_blocks[key] = html text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:] if "xml" in self.extras: # Treat XML processing instructions and namespaced one-liner # tags as if they were block HTML tags. E.g., if standalone # (i.e. are their own paragraph), the following do not get # wrapped in a <p> tag: # <?foo bar?> # # <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/> _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width) text = _xml_oneliner_re.sub(hash_html_block_sub, text) return text
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/markdown/markdown2.py/Markdown._hash_html_blocks
137
def _do_links(self, text): """Turn Markdown link shortcuts into XHTML <a> and <img> tags. This is a combination of Markdown.pl's _DoAnchors() and _DoImages(). They are done together because that simplified the approach. It was necessary to use a different approach than Markdown.pl because of the lack of atomic matching support in Python's regex engine used in $g_nested_brackets. """ MAX_LINK_TEXT_SENTINEL = 300 # `anchor_allowed_pos` is used to support img links inside # anchors, but not anchors inside anchors. An anchor's start # pos must be `>= anchor_allowed_pos`. anchor_allowed_pos = 0 curr_pos = 0 while True: # Handle the next link. # The next '[' is the start of: # - an inline anchor: [text](url "title") # - a reference anchor: [text][id] # - an inline img: ![text](url "title") # - a reference img: ![text][id] # - a footnote ref: [^id] # (Only if 'footnotes' extra enabled) # - a footnote defn: [^id]: ... # (Only if 'footnotes' extra enabled) These have already # been stripped in _strip_footnote_definitions() so no # need to watch for them. # - a link definition: [id]: url "title" # These have already been stripped in # _strip_link_definitions() so no need to watch for them. # - not markup: [...anything else... try: start_idx = text.index('[', curr_pos) except __HOLE__: break text_length = len(text) # Find the matching closing ']'. # Markdown.pl allows *matching* brackets in link text so we # will here too. Markdown.pl *doesn't* currently allow # matching brackets in img alt text -- we'll differ in that # regard. bracket_depth = 0 for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL, text_length)): ch = text[p] if ch == ']': bracket_depth -= 1 if bracket_depth < 0: break elif ch == '[': bracket_depth += 1 else: # Closing bracket not found within sentinel length. # This isn't markup. curr_pos = start_idx + 1 continue link_text = text[start_idx+1:p] # Possibly a footnote ref? if "footnotes" in self.extras and link_text.startswith("^"): normed_id = re.sub(r'\W', '-', link_text[1:]) if normed_id in self.footnotes: self.footnote_ids.append(normed_id) result = '<sup class="footnote-ref" id="fnref-%s">' \ '<a href="#fn-%s">%s</a></sup>' \ % (normed_id, normed_id, len(self.footnote_ids)) text = text[:start_idx] + result + text[p+1:] else: # This id isn't defined, leave the markup alone. curr_pos = p+1 continue # Now determine what this is by the remainder. p += 1 if p == text_length: return text # Inline anchor or img? if text[p] == '(': # attempt at perf improvement match = self._tail_of_inline_link_re.match(text, p) if match: # Handle an inline anchor or img. is_img = start_idx > 0 and text[start_idx-1] == "!" if is_img: start_idx -= 1 url, title = match.group("url"), match.group("title") if url and url[0] == '<': url = url[1:-1] # '<url>' -> 'url' # We've got to encode these to avoid conflicting # with italics/bold. url = url.replace('*', g_escape_table['*']) \ .replace('_', g_escape_table['_']) if title: title_str = ' title="%s"' \ % title.replace('*', g_escape_table['*']) \ .replace('_', g_escape_table['_']) \ .replace('"', '&quot;') else: title_str = '' if is_img: result = '<img src="%s" alt="%s"%s%s' \ % (url, link_text.replace('"', '&quot;'), title_str, self.empty_element_suffix) curr_pos = start_idx + len(result) text = text[:start_idx] + result + text[match.end():] elif start_idx >= anchor_allowed_pos: result_head = '<a href="%s"%s>' % (url, title_str) result = '%s%s</a>' % (result_head, link_text) # <img> allowed from curr_pos on, <a> from # anchor_allowed_pos on. 
curr_pos = start_idx + len(result_head) anchor_allowed_pos = start_idx + len(result) text = text[:start_idx] + result + text[match.end():] else: # Anchor not allowed here. curr_pos = start_idx + 1 continue # Reference anchor or img? else: match = self._tail_of_reference_link_re.match(text, p) if match: # Handle a reference-style anchor or img. is_img = start_idx > 0 and text[start_idx-1] == "!" if is_img: start_idx -= 1 link_id = match.group("id").lower() if not link_id: link_id = link_text.lower() # for links like [this][] if link_id in self.urls: url = self.urls[link_id] # We've got to encode these to avoid conflicting # with italics/bold. url = url.replace('*', g_escape_table['*']) \ .replace('_', g_escape_table['_']) title = self.titles.get(link_id) if title: title = title.replace('*', g_escape_table['*']) \ .replace('_', g_escape_table['_']) title_str = ' title="%s"' % title else: title_str = '' if is_img: result = '<img src="%s" alt="%s"%s%s' \ % (url, link_text.replace('"', '&quot;'), title_str, self.empty_element_suffix) curr_pos = start_idx + len(result) text = text[:start_idx] + result + text[match.end():] elif start_idx >= anchor_allowed_pos: result = '<a href="%s"%s>%s</a>' \ % (url, title_str, link_text) result_head = '<a href="%s"%s>' % (url, title_str) result = '%s%s</a>' % (result_head, link_text) # <img> allowed from curr_pos on, <a> from # anchor_allowed_pos on. curr_pos = start_idx + len(result_head) anchor_allowed_pos = start_idx + len(result) text = text[:start_idx] + result + text[match.end():] else: # Anchor not allowed here. curr_pos = start_idx + 1 else: # This id isn't defined, leave the markup alone. curr_pos = match.end() continue # Otherwise, it isn't markup. curr_pos = start_idx + 1 return text
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/markdown/markdown2.py/Markdown._do_links
138
def _get_pygments_lexer(self, lexer_name): try: from pygments import lexers, util except __HOLE__: return None try: return lexers.get_lexer_by_name(lexer_name) except util.ClassNotFound: return None
ImportError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/markdown/markdown2.py/Markdown._get_pygments_lexer
139
def _regex_from_encoded_pattern(s): """'foo' -> re.compile(re.escape('foo')) '/foo/' -> re.compile('foo') '/foo/i' -> re.compile('foo', re.I) """ if s.startswith('/') and s.rfind('/') != 0: # Parse it: /PATTERN/FLAGS idx = s.rfind('/') pattern, flags_str = s[1:idx], s[idx+1:] flag_from_char = { "i": re.IGNORECASE, "l": re.LOCALE, "s": re.DOTALL, "m": re.MULTILINE, "u": re.UNICODE, } flags = 0 for char in flags_str: try: flags |= flag_from_char[char] except __HOLE__: raise ValueError("unsupported regex flag: '%s' in '%s' " "(must be one of '%s')" % (char, s, ''.join(flag_from_char.keys()))) return re.compile(s[1:idx], flags) else: # not an encoded regex return re.compile(re.escape(s)) # Recipe: dedent (0.1.2)
KeyError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/markdown/markdown2.py/_regex_from_encoded_pattern
140
def __call__(self, *args): try: return self.cache[args] except KeyError: self.cache[args] = value = self.func(*args) return value except __HOLE__: # uncachable -- for instance, passing a list as an argument. # Better to not cache than to blow up entirely. return self.func(*args)
TypeError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/markdown/markdown2.py/_memoized.__call__
141
def main(argv=None): if argv is None: argv = sys.argv if not logging.root.handlers: logging.basicConfig() usage = "usage: %prog [PATHS...]" version = "%prog "+__version__ parser = optparse.OptionParser(prog="markdown2", usage=usage, version=version, description=cmdln_desc, formatter=_NoReflowFormatter()) parser.add_option("-v", "--verbose", dest="log_level", action="store_const", const=logging.DEBUG, help="more verbose output") parser.add_option("--encoding", help="specify encoding of text content") parser.add_option("--html4tags", action="store_true", default=False, help="use HTML 4 style for empty element tags") parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode", help="sanitize literal HTML: 'escape' escapes " "HTML meta chars, 'replace' replaces with an " "[HTML_REMOVED] note") parser.add_option("-x", "--extras", action="append", help="Turn on specific extra features (not part of " "the core Markdown spec). Supported values: " "'code-friendly' disables _/__ for emphasis; " "'code-color' adds code-block syntax coloring; " "'link-patterns' adds auto-linking based on patterns; " "'footnotes' adds the footnotes syntax;" "'xml' passes one-liner processing instructions and namespaced XML tags;" "'pyshell' to put unindented Python interactive shell sessions in a <code> block.") parser.add_option("--use-file-vars", help="Look for and use Emacs-style 'markdown-extras' " "file var to turn on extras. See " "<http://code.google.com/p/python-markdown2/wiki/Extras>.") parser.add_option("--link-patterns-file", help="path to a link pattern file") parser.add_option("--self-test", action="store_true", help="run internal self-tests (some doctests)") parser.add_option("--compare", action="store_true", help="run against Markdown.pl as well (for testing)") parser.set_defaults(log_level=logging.INFO, compare=False, encoding="utf-8", safe_mode=None, use_file_vars=False) opts, paths = parser.parse_args() log.setLevel(opts.log_level) if opts.self_test: return _test() if opts.extras: extras = {} for s in opts.extras: splitter = re.compile("[,;: ]+") for e in splitter.split(s): if '=' in e: ename, earg = e.split('=', 1) try: earg = int(earg) except ValueError: pass else: ename, earg = e, None extras[ename] = earg else: extras = None if opts.link_patterns_file: link_patterns = [] f = open(opts.link_patterns_file) try: for i, line in enumerate(f.readlines()): if not line.strip(): continue if line.lstrip().startswith("#"): continue try: pat, href = line.rstrip().rsplit(None, 1) except __HOLE__: raise MarkdownError("%s:%d: invalid link pattern line: %r" % (opts.link_patterns_file, i+1, line)) link_patterns.append( (_regex_from_encoded_pattern(pat), href)) finally: f.close() else: link_patterns = None from os.path import join, dirname, abspath markdown_pl = join(dirname(dirname(abspath(__file__))), "test", "Markdown.pl") for path in paths: if opts.compare: print "==== Markdown.pl ====" perl_cmd = 'perl %s "%s"' % (markdown_pl, path) o = os.popen(perl_cmd) perl_html = o.read() o.close() sys.stdout.write(perl_html) print "==== markdown2.py ====" html = markdown_path(path, encoding=opts.encoding, html4tags=opts.html4tags, safe_mode=opts.safe_mode, extras=extras, link_patterns=link_patterns, use_file_vars=opts.use_file_vars) sys.stdout.write( html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')) if opts.compare: print "==== match? %r ====" % (perl_html == html)
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/markdown/markdown2.py/main
142
def file_infos(names=None): """ iterates over storage files metadata. note: we put the storage name into the metadata as 'id' :param names: None means "all items" otherwise give a list of storage item names """ storage = current_app.storage if names is None: names = list(storage) for name in names: try: with storage.open(name) as item: meta = dict(item.meta) if delete_if_lifetime_over(item, name): continue meta['id'] = name yield meta except (__HOLE__, IOError) as e: if e.errno != errno.ENOENT: raise
OSError
dataset/ETHPy150Open bepasty/bepasty-server/bepasty/views/filelist.py/file_infos
143
def __getitem__(self, key): try: i = self.keylist.index(key) except __HOLE__: raise KeyError return self.valuelist[i]
ValueError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_userdict.py/SeqDict.__getitem__
144
def __setitem__(self, key, value): try: i = self.keylist.index(key) self.valuelist[i] = value except __HOLE__: self.keylist.append(key) self.valuelist.append(value)
ValueError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_userdict.py/SeqDict.__setitem__
145
def __delitem__(self, key): try: i = self.keylist.index(key) except __HOLE__: raise KeyError self.keylist.pop(i) self.valuelist.pop(i)
ValueError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_userdict.py/SeqDict.__delitem__
146
def AddTransfer(self, throttle_name, token_count): """Add a count to the amount this thread has transferred. Each time a thread transfers some data, it should call this method to note the amount sent. The counts may be rotated if sufficient time has passed since the last rotation. Args: throttle_name: The name of the throttle to add to. token_count: The number to add to the throttle counter. """ self.VerifyThrottleName(throttle_name) transferred = self.transferred[throttle_name] try: transferred[id(threading.currentThread())] += token_count except __HOLE__: thread = threading.currentThread() raise ThreadNotRegisteredError( 'Unregistered thread accessing throttled datastore stub: id = %s\n' 'name = %s' % (id(thread), thread.getName())) if self.last_rotate[throttle_name] + self.ROTATE_PERIOD < self.get_time(): self._RotateCounts(throttle_name)
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/ext/remote_api/throttle.py/Throttle.AddTransfer
147
def get_language(language_code): for tag in normalize_language_tag(language_code): tag = tag.replace('-','_') # '-' not valid in module names if tag in _languages: return _languages[tag] try: module = __import__(tag, globals(), locals(), level=0) except __HOLE__: try: module = __import__(tag, globals(), locals(), level=1) except ImportError: continue _languages[tag] = module return module return None
ImportError
dataset/ETHPy150Open timonwong/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/docutils/parsers/rst/languages/__init__.py/get_language
148
def main(): print("%s (server) #v%s\n" % (NAME, VERSION)) parser = optparse.OptionParser(version=VERSION) parser.add_option("-c", dest="config_file", default=CONFIG_FILE, help="configuration file (default: '%s')" % os.path.split(CONFIG_FILE)[-1]) options, _ = parser.parse_args() read_config(options.config_file) if config.USE_SSL: try: import OpenSSL except ImportError: if subprocess.mswindows: exit("[!] please install 'pyopenssl' (e.g. 'pip install pyopenssl')") else: msg, _ = "[!] please install 'pyopenssl'", platform.linux_distribution()[0].lower() for distro, install in {("fedora", "centos"): "sudo yum install pyOpenSSL", ("debian", "ubuntu"): "sudo apt-get install python-openssl"}.items(): if _ in distro: msg += " (e.g. '%s')" % install break exit(msg) if not config.SSL_PEM or not os.path.isfile(config.SSL_PEM): hint = "openssl req -new -x509 -keyout %s -out %s -days 365 -nodes -subj '/O=%s CA/C=EU'" % (config.SSL_PEM or "server.pem", config.SSL_PEM or "server.pem", NAME) exit("[!] invalid configuration value for 'SSL_PEM' ('%s')\n[?] (hint: \"%s\")" % (config.SSL_PEM, hint)) def update_timer(): retries = 0 while retries < CHECK_CONNECTION_MAX_RETRIES and not check_connection(): sys.stdout.write("[!] can't update because of lack of network connection (waiting..." if not retries else '.') sys.stdout.flush() time.sleep(10) retries += 1 if retries: print(")") if retries == CHECK_CONNECTION_MAX_RETRIES: print("[x] going to continue without update") else: if config.USE_SERVER_UPDATE_TRAILS: update_trails() update_ipcat() thread = threading.Timer(config.UPDATE_PERIOD, update_timer) thread.daemon = True thread.start() if config.UDP_ADDRESS and config.UDP_PORT: if check_sudo() is False: exit("[!] please run '%s' with sudo/Administrator privileges when using 'UDP_ADDRESS' configuration value" % __file__) create_log_directory() start_logd(address=config.UDP_ADDRESS, port=config.UDP_PORT, join=False) try: update_timer() start_httpd(address=config.HTTP_ADDRESS, port=config.HTTP_PORT, pem=config.SSL_PEM if config.USE_SSL else None, join=True) except __HOLE__: print("\r[x] stopping (Ctrl-C pressed)")
KeyboardInterrupt
dataset/ETHPy150Open stamparm/maltrail/server.py/main
149
def nP(n, k=None, replacement=False): """Return the number of permutations of ``n`` items taken ``k`` at a time. Possible values for ``n``:: integer - set of length ``n`` sequence - converted to a multiset internally multiset - {element: multiplicity} If ``k`` is None then the total of all permutations of length 0 through the number of items represented by ``n`` will be returned. If ``replacement`` is True then a given item can appear more than once in the ``k`` items. (For example, for 'ab' permutations of 2 would include 'aa', 'ab', 'ba' and 'bb'.) The multiplicity of elements in ``n`` is ignored when ``replacement`` is True but the total number of elements is considered since no element can appear more times than the number of elements in ``n``. Examples ======== >>> from sympy.functions.combinatorial.numbers import nP >>> from sympy.utilities.iterables import multiset_permutations, multiset >>> nP(3, 2) 6 >>> nP('abc', 2) == nP(multiset('abc'), 2) == 6 True >>> nP('aab', 2) 3 >>> nP([1, 2, 2], 2) 3 >>> [nP(3, i) for i in range(4)] [1, 3, 6, 6] >>> nP(3) == sum(_) True When ``replacement`` is True, each item can have multiplicity equal to the length represented by ``n``: >>> nP('aabc', replacement=True) 121 >>> [len(list(multiset_permutations('aaaabbbbcccc', i))) for i in range(5)] [1, 3, 9, 27, 81] >>> sum(_) 121 References ========== .. [1] http://en.wikipedia.org/wiki/Permutation See Also ======== sympy.utilities.iterables.multiset_permutations """ try: n = as_int(n) except __HOLE__: return Integer(_nP(_multiset_histogram(n), k, replacement)) return Integer(_nP(n, k, replacement))
ValueError
dataset/ETHPy150Open sympy/sympy/sympy/functions/combinatorial/numbers.py/nP
150
def nT(n, k=None): """Return the number of ``k``-sized partitions of ``n`` items. Possible values for ``n``:: integer - ``n`` identical items sequence - converted to a multiset internally multiset - {element: multiplicity} Note: the convention for ``nT`` is different than that of ``nC`` and ``nP`` in that here an integer indicates ``n`` *identical* items instead of a set of length ``n``; this is in keeping with the ``partitions`` function which treats its integer-``n`` input like a list of ``n`` 1s. One can use ``range(n)`` for ``n`` to indicate ``n`` distinct items. If ``k`` is None then the total number of ways to partition the elements represented in ``n`` will be returned. Examples ======== >>> from sympy.functions.combinatorial.numbers import nT Partitions of the given multiset: >>> [nT('aabbc', i) for i in range(1, 7)] [1, 8, 11, 5, 1, 0] >>> nT('aabbc') == sum(_) True >>> [nT("mississippi", i) for i in range(1, 12)] [1, 74, 609, 1521, 1768, 1224, 579, 197, 50, 9, 1] Partitions when all items are identical: >>> [nT(5, i) for i in range(1, 6)] [1, 2, 2, 1, 1] >>> nT('1'*5) == sum(_) True When all items are different: >>> [nT(range(5), i) for i in range(1, 6)] [1, 15, 25, 10, 1] >>> nT(range(5)) == sum(_) True References ========== .. [1] http://undergraduate.csse.uwa.edu.au/units/CITS7209/partition.pdf See Also ======== sympy.utilities.iterables.partitions sympy.utilities.iterables.multiset_partitions """ from sympy.utilities.enumerative import MultisetPartitionTraverser if isinstance(n, SYMPY_INTS): # assert n >= 0 # all the same if k is None: return sum(_nT(n, k) for k in range(1, n + 1)) return _nT(n, k) if not isinstance(n, _MultisetHistogram): try: # if n contains hashable items there is some # quick handling that can be done u = len(set(n)) if u == 1: return nT(len(n), k) elif u == len(n): n = range(u) raise TypeError except __HOLE__: n = _multiset_histogram(n) N = n[_N] if k is None and N == 1: return 1 if k in (1, N): return 1 if k == 2 or N == 2 and k is None: m, r = divmod(N, 2) rv = sum(nC(n, i) for i in range(1, m + 1)) if not r: rv -= nC(n, m)//2 if k is None: rv += 1 # for k == 1 return rv if N == n[_ITEMS]: # all distinct if k is None: return bell(N) return stirling(N, k) m = MultisetPartitionTraverser() if k is None: return m.count_partitions(n[_M]) # MultisetPartitionTraverser does not have a range-limited count # method, so need to enumerate and count tot = 0 for discard in m.enum_range(n[_M], k-1, k): tot += 1 return tot
TypeError
dataset/ETHPy150Open sympy/sympy/sympy/functions/combinatorial/numbers.py/nT
151
def __getattr__(self, key): try: return getattr(self.comparator, key) except __HOLE__: raise AttributeError( 'Neither %r object nor %r object has an attribute %r' % ( type(self).__name__, type(self.comparator).__name__, key) )
AttributeError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/sql/elements.py/ColumnElement.__getattr__
152
@_generative def bindparams(self, *binds, **names_to_values): """Establish the values and/or types of bound parameters within this :class:`.TextClause` construct. Given a text construct such as:: from sqlalchemy import text stmt = text("SELECT id, name FROM user WHERE name=:name " "AND timestamp=:timestamp") the :meth:`.TextClause.bindparams` method can be used to establish the initial value of ``:name`` and ``:timestamp``, using simple keyword arguments:: stmt = stmt.bindparams(name='jack', timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)) Where above, new :class:`.BindParameter` objects will be generated with the names ``name`` and ``timestamp``, and values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``, respectively. The types will be inferred from the values given, in this case :class:`.String` and :class:`.DateTime`. When specific typing behavior is needed, the positional ``*binds`` argument can be used in which to specify :func:`.bindparam` constructs directly. These constructs must include at least the ``key`` argument, then an optional value and type:: from sqlalchemy import bindparam stmt = stmt.bindparams( bindparam('name', value='jack', type_=String), bindparam('timestamp', type_=DateTime) ) Above, we specified the type of :class:`.DateTime` for the ``timestamp`` bind, and the type of :class:`.String` for the ``name`` bind. In the case of ``name`` we also set the default value of ``"jack"``. Additional bound parameters can be supplied at statement execution time, e.g.:: result = connection.execute(stmt, timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)) The :meth:`.TextClause.bindparams` method can be called repeatedly, where it will re-use existing :class:`.BindParameter` objects to add new information. For example, we can call :meth:`.TextClause.bindparams` first with typing information, and a second time with value information, and it will be combined:: stmt = text("SELECT id, name FROM user WHERE name=:name " "AND timestamp=:timestamp") stmt = stmt.bindparams( bindparam('name', type_=String), bindparam('timestamp', type_=DateTime) ) stmt = stmt.bindparams( name='jack', timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5) ) .. versionadded:: 0.9.0 The :meth:`.TextClause.bindparams` method supersedes the argument ``bindparams`` passed to :func:`~.expression.text`. """ self._bindparams = new_params = self._bindparams.copy() for bind in binds: try: existing = new_params[bind.key] except __HOLE__: raise exc.ArgumentError( "This text() construct doesn't define a " "bound parameter named %r" % bind.key) else: new_params[existing.key] = bind for key, value in names_to_values.items(): try: existing = new_params[key] except KeyError: raise exc.ArgumentError( "This text() construct doesn't define a " "bound parameter named %r" % key) else: new_params[key] = existing._with_value(value)
KeyError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/sql/elements.py/TextClause.bindparams
153
def __init__(self, whens, value=None, else_=None): """Produce a ``CASE`` expression. The ``CASE`` construct in SQL is a conditional object that acts somewhat analogously to an "if/then" construct in other languages. It returns an instance of :class:`.Case`. :func:`.case` in its usual form is passed a list of "when" constructs, that is, a list of conditions and results as tuples:: from sqlalchemy import case stmt = select([users_table]).\\ where( case( [ (users_table.c.name == 'wendy', 'W'), (users_table.c.name == 'jack', 'J') ], else_='E' ) ) The above statement will produce SQL resembling:: SELECT id, name FROM user WHERE CASE WHEN (name = :name_1) THEN :param_1 WHEN (name = :name_2) THEN :param_2 ELSE :param_3 END When simple equality expressions of several values against a single parent column are needed, :func:`.case` also has a "shorthand" format used via the :paramref:`.case.value` parameter, which is passed a column expression to be compared. In this form, the :paramref:`.case.whens` parameter is passed as a dictionary containing expressions to be compared against keyed to result expressions. The statement below is equivalent to the preceding statement:: stmt = select([users_table]).\\ where( case( {"wendy": "W", "jack": "J"}, value=users_table.c.name, else_='E' ) ) The values which are accepted as result values in :paramref:`.case.whens` as well as with :paramref:`.case.else_` are coerced from Python literals into :func:`.bindparam` constructs. SQL expressions, e.g. :class:`.ColumnElement` constructs, are accepted as well. To coerce a literal string expression into a constant expression rendered inline, use the :func:`.literal_column` construct, as in:: from sqlalchemy import case, literal_column case( [ ( orderline.c.qty > 100, literal_column("'greaterthan100'") ), ( orderline.c.qty > 10, literal_column("'greaterthan10'") ) ], else_=literal_column("'lessthan10'") ) The above will render the given constants without using bound parameters for the result values (but still for the comparison values), as in:: CASE WHEN (orderline.qty > :qty_1) THEN 'greaterthan100' WHEN (orderline.qty > :qty_2) THEN 'greaterthan10' ELSE 'lessthan10' END :param whens: The criteria to be compared against, :paramref:`.case.whens` accepts two different forms, based on whether or not :paramref:`.case.value` is used. In the first form, it accepts a list of 2-tuples; each 2-tuple consists of ``(<sql expression>, <value>)``, where the SQL expression is a boolean expression and "value" is a resulting value, e.g.:: case([ (users_table.c.name == 'wendy', 'W'), (users_table.c.name == 'jack', 'J') ]) In the second form, it accepts a Python dictionary of comparison values mapped to a resulting value; this form requires :paramref:`.case.value` to be present, and values will be compared using the ``==`` operator, e.g.:: case( {"wendy": "W", "jack": "J"}, value=users_table.c.name ) :param value: An optional SQL expression which will be used as a fixed "comparison point" for candidate values within a dictionary passed to :paramref:`.case.whens`. :param else\_: An optional SQL expression which will be the evaluated result of the ``CASE`` construct if all expressions within :paramref:`.case.whens` evaluate to false. When omitted, most databases will produce a result of NULL if none of the "when" expressions evaulate to true. 
""" try: whens = util.dictlike_iteritems(whens) except __HOLE__: pass if value is not None: whenlist = [ (_literal_as_binds(c).self_group(), _literal_as_binds(r)) for (c, r) in whens ] else: whenlist = [ (_no_literals(c).self_group(), _literal_as_binds(r)) for (c, r) in whens ] if whenlist: type_ = list(whenlist[-1])[-1].type else: type_ = None if value is None: self.value = None else: self.value = _literal_as_binds(value) self.type = type_ self.whens = whenlist if else_ is not None: self.else_ = _literal_as_binds(else_) else: self.else_ = None
TypeError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/sql/elements.py/Case.__init__
154
def _column_as_key(element): if isinstance(element, util.string_types): return element if hasattr(element, '__clause_element__'): element = element.__clause_element__() try: return element.key except __HOLE__: return None
AttributeError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/sql/elements.py/_column_as_key
155
def __init__(self, array): """ It takes as an optional input argument the array of the input :data:`parameters` defined in the parameter file. The current implemented types are 'flat' (default), and 'gaussian', which expect also a mean and sigma. Possible extension would take a 'external', needing to read an external file to read for the definition. The entry 'prior' of the dictionary :data:`mcmc_parameters` will hold an instance of this class. It defines one main function, called :func:`draw_from_prior`, that returns a number within the prior volume. """ rd.seed() # Test the length of the array, and initialize the type. if len(array) == 6: # Default behaviour, flat prior self.prior_type = 'flat' else: self.prior_type = array[6].lower() # in case of a gaussian prior, one expects two more entries, mu and # sigma if self.prior_type == 'gaussian': try: self.mu = array[7] self.sigma = array[8] except __HOLE__: raise io_mp.ConfigurationError( "You asked for a gaussian prior, but provided no " + "mean nor sigma. Please add them in the parameter " + "file.") # Store boundaries for convenient access later # Put all fields that are -1 to None to avoid confusion later on. self.prior_range = [a if not((a is -1) or (a is None)) else None for a in deepcopy(array[1:3])]
IndexError
dataset/ETHPy150Open baudren/montepython_public/montepython/prior.py/Prior.__init__
156
def generic_float(self, name, values, default=None):
    def validate(s, value):
        try:
            value = str(float(value))
        except __HOLE__:
            raise ValidationError(
                "{0} {1} could not be coerced into a float".format(
                    name, value)
            )
        setattr(s, name, value)
        return s

    bundle = {
        'name': name,
        'values': values,
        'handler': validate
    }
    return bundle
ValueError
dataset/ETHPy150Open mozilla/inventory/mcsv/resolver.py/Generics.generic_float
157
@meta def primary_attribute(self, **kwargs): def _primary_attribute(s, header, value, **kwargs): try: _, s._primary_attr = map( lambda s: s.strip(), header.split('%') ) except __HOLE__: raise ValidationError( "The primary_attribute header must be in the form " "'primary_attribute%<system-attribute-header>'" ) s._primary_value = getattr( self.get_related(header, value, sys_models.System), s._primary_attr ) return s bundle = { 'name': 'primary_attribute', 'filter_fields': ['asset_tag', 'hostname'], 'values': ['primary_attribute'], 'handler': _primary_attribute, } return bundle
ValueError
dataset/ETHPy150Open mozilla/inventory/mcsv/resolver.py/Resolver.primary_attribute
158
def get_targets(extensions): try: targets = list(extensions.map(lambda ext: ext.plugin())) except __HOLE__: targets = [] return targets
RuntimeError
dataset/ETHPy150Open openstack-infra/subunit2sql/subunit2sql/shell.py/get_targets
159
def execute(self): try: self.op["start_time"] = time.time() self.check_dependencies() handler_func = self.op["callback"] ret = handler_func(self.op["name"], self.args, node=self.op["node"], verbose=self.verbose, quiet=self.quiet, output_dir=self.output_dir, method=self.method, color=self.color, send_output=self.send_output) self.log.debug("op %s returns: %r", self.op["name"], ret) self.op["result"] = ret except (__HOLE__, errors.Error) as error: # SystemExit is what argh produces with invalid args self.log.error("%s/%s [%s] failed: %s: %s", self.op["node"].name, self.op["config"].name, self.op["name"], error.__class__.__name__, error) self.op["result"] = "%s: %s" % (error.__class__.__name__, error) except BaseException as error: self.log.error("%s/%s [%s] failed: %s: %s", self.op["node"].name, self.op["config"].name, self.op["name"], error.__class__.__name__, error) self.op["result"] = "Unhandled error: %s: %s" % ( error.__class__.__name__, error) self.log.exception("task exception") raise finally: self.op["stop_time"] = time.time()
SystemExit
dataset/ETHPy150Open ohmu/poni/poni/tool.py/ControlTask.execute
160
@argh_named("script") @arg_verbose @argh.arg('script', metavar="FILE", type=str, help='script file path or "-" (a single minus-sign) for stdin') @argh.arg('variable', type=str, nargs="*", help="'name=[type:]value'") @expects_obj def handle_script(self, arg): """run commands from a script file""" try: if arg.script != "-": script_text = open(arg.script).read() else: script_text = sys.stdin.read() except (OSError, IOError) as error: raise errors.Error("%s: %s" % (error.__class__.__name__, error)) variables = dict(util.parse_prop(var) for var in arg.variable) variables['current_script_dir'] = os.path.dirname(arg.script) match = re.search(r"^\s*#\s+poni\.template\s*:\s*(\w+)", script_text, re.MULTILINE) engine = match.group(1) if match else "cheetah" script_text = template.render(engine=engine, source_text=script_text, variables=variables) lines = script_text.splitlines() def wrap(args): if " " in args: return repr(args) else: return args def set_repo_path(sub_arg): self.tune_arg_namespace(sub_arg) sub_arg.root_dir = arg.root_dir lines = self.preprocess_script_lines(lines) for i, line in enumerate(lines): args = shlex.split(line, comments=True) if not args: continue if arg.verbose: print("$ " + " ".join(wrap(a) for a in args)) # strip arguments following "--" # TODO: this code is now in two places, refactor namespace = argparse.Namespace() try: extra_loc = args.index("--") namespace.extras = args[extra_loc + 1:] args = args[:extra_loc] except __HOLE__: namespace.extras = [] start = time.time() self.parser.dispatch(argv=args, pre_call=set_repo_path, namespace=namespace) stop = time.time() if namespace.time_op: # pylint: disable=E1101 self.task_times.add_task("L%d" % (i + 1), line, start, stop, args=args)
ValueError
dataset/ETHPy150Open ohmu/poni/poni/tool.py/Tool.handle_script
161
@argh_named("control") @arg_verbose @arg_full_match @arg_flag("-n", "--no-deps", help="do not run dependency tasks") @arg_flag("-i", "--ignore-missing", help="do not fail in case no matching operations are found") @arg_quiet @arg_output_dir @arg_flag("-t", "--clock-tasks", dest="show_times", help="show timeline of execution for each tasks") @argh.arg("-j", "--jobs", metavar="N", type=int, help="max concurrent tasks (default: unlimited)") @argh.arg('pattern', type=str, help='config search pattern') @arg_host_access_method @argh.arg('operation', type=str, help='operation to execute') @expects_obj def handle_control(self, arg): """config control operation""" confman = self.get_confman(arg.root_dir, reset_cache=False) manager = self.get_manager(confman) self.collect_all(manager) # collect all possible control operations all_configs = list(confman.find_config(".", all_configs=True)) all_ops = [] provider = {} for conf_node, conf in all_configs: plugin = conf.get_plugin() if not plugin: # skip pluginless configs self.log.debug("skipping pluginless: %s:%s", conf_node.name, conf.name) continue elif conf_node.get_tree_property("template", False): # skip template nodes self.log.debug("skipping template node: %s:%s", conf_node.name, conf.name) continue for op in plugin.iter_control_operations(conf_node, conf): all_ops.append(op) for feature in op["provides"]: ops = provider.setdefault(feature, []) ops.append(op) handled = set() def add_all_required_ops(op): key = (op["node"].name, op["config"].name, op["name"]) if key in handled: return handled.add(key) node = op["node"] conf = op["config"] tasks[(node.name, conf.name, op["name"])] = op reqs = [(True, req) for req in op["requires"]] reqs.extend((False, req) for req in op["optional_requires"]) for must_have, feature in reqs: try: provider_ops = provider[feature] except __HOLE__: if not must_have: # this feature is optional, missing provider is ok continue raise errors.OperationError( "%s/%s operation %r depends on feature %r, " "which is not provided by any config" % ( node.name, conf.name, arg.operation, feature)) depends = op.setdefault("depends", []) for dep_op in provider_ops: depends.append(dep_op) add_all_required_ops(dep_op) # select user-specified ops and their dependencies from the full list tasks = {} comparison = core.ConfigMatch(arg.pattern, full_match=arg.full_match) for op in all_ops: node = op["node"] conf = op["config"] # control op name, node name, config name, all must match if ((arg.operation != op["name"]) or not comparison.match_node(node.name) or not comparison.match_config(conf.name)): continue op["run"] = True # only explicit targets are marked for running add_all_required_ops(op) if not tasks: if arg.ignore_missing: self.log.info("no matching operations found: --ignore-missing specified, ok!") return else: raise errors.UserError("no matching operations found") if arg.no_deps: # filter out the implicit dependency tasks for op in all_ops: depends = op.get("depends", []) for dep_op in depends[:]: if not dep_op.get("run"): depends.remove(dep_op) # assign tasks runner = work.Runner(max_jobs=arg.jobs) logger = self.log.info if arg.verbose else self.log.debug for op_id, op in tasks.items(): run = op.get("run") or (not arg.no_deps) op["run"] = run if not run: continue plugin = op["plugin"] logger("scheduled to run: %s/%s [%s]", op["node"].name, op["config"].name, op["name"]) task = ControlTask(op, arg.extras, verbose=arg.verbose, quiet=arg.quiet, output_dir=arg.output_dir, method=arg.method, color=arg.color) runner.add_task(task) # 
execute tasks runner.run_all() # collect results results = [task.op.get("result") for task in runner.stopped] failed = [r for r in results if r] skipped_count = sum(1 for op in tasks.values() if not op["run"]) ran_count = len(tasks) - skipped_count assert len(results) == ran_count # add task times to report for i, task in enumerate(runner.stopped): task_name = "%s/%s" % (task.op["node"].name, task.op["config"].name) self.task_times.add_task(i, task_name, task.op["start_time"], task.op["stop_time"]) if arg.verbose: for task in runner.stopped: res = task.op["result"] if res: self.log.error("FAILED: %s/%s [%s]: %r", task.op["node"].name, task.op["config"].name, task.op["name"], task.op["result"]) self.log.debug("all tasks finished: %r", results) if failed: raise errors.ControlError( "[%d/%d] control tasks failed (%d skipped)" % ( len(failed), ran_count, skipped_count)) else: self.log.info( "all [%d] control tasks finished successfully (%d skipped)", ran_count, skipped_count)
KeyError
dataset/ETHPy150Open ohmu/poni/poni/tool.py/Tool.handle_control
162
@argh_named("cp") @arg_verbose @arg_full_match @arg_host_access_method @arg_flag("-d", "--create-dest-dir", help="create missing remote target directories") @arg_flag("-r", "--recursive", help="copy directories recursively") @argh.arg('source', type=str, nargs="+", help='source file/dir to copy') @arg_target_nodes @argh.arg('dest_dir', type=str, help='destination remote directory') @expects_obj def handle_remote_cp(self, arg): """copy file(s) to remote node(s)""" def pp(pth): """pretty-print paths to output-safe ascii""" return repr(str(pth))[1:-1] # sanity check for source in arg.source: try: st = os.stat(source) if not arg.recursive and stat.S_ISDIR(st.st_mode): raise errors.UserError("copy source {0} is a directory and no -r specified".format( pp(source))) except __HOLE__ as err: raise errors.UserError("invalid copy source {0}: {1.__class__.__name__}: {1}".format( # pylint: disable=E1306 pp(source), err)) def copy_file_or_dir(node, remote, source_path, dest_dir): try: remote.stat(dest_dir) except errors.RemoteError as err: if not "Errno 2" in str(err): raise errors.UserError("{0}: unexpected error checking target directory {1}: {2.__class__.__name__}: {2}".format( # pylint: disable=E1306 node.name, pp(dest_dir), err)) elif not arg.create_dest_dir: raise errors.UserError("{0}: Remote directory {1} does not exist. (use -d to create it)".format( node.name, pp(dest_dir))) remote.makedirs(dest_dir) if os.path.isdir(source_path): source_paths = [os.path.join(source_path, name) for name in os.listdir(source_path)] else: source_paths = [source_path] for file_path in source_paths: dest_path = os.path.join(dest_dir, os.path.basename(file_path)) lstat = os.lstat(file_path.stat) if stat.S_ISDIR(lstat.st_mode): copy_file_or_dir(node, remote, os.path.join(source_path, os.path.basename(file_path)), dest_path) continue if arg.verbose: self.log.info("copying: %s -> %s:%s [%s]", pp(file_path), node.addr(), pp(dest_dir), node.name) remote.put_file(file_path, dest_path) remote.utime(dest_path, (int(lstat.st_mtime), int(lstat.st_mtime))) def copy_op(arg, node, remote): for source in arg.source: copy_file_or_dir(node, remote, source, arg.dest_dir) copy_op.doc = "cp" confman = self.get_confman(arg.root_dir, reset_cache=False) self.remote_op(confman, arg, copy_op)
OSError
dataset/ETHPy150Open ohmu/poni/poni/tool.py/Tool.handle_remote_cp
163
@argh_named("update") @arg_full_match @argh.arg('target', type=str, help='target systems/nodes (regexp)') @expects_obj def handle_cloud_update(self, arg): """update node cloud instance properties""" confman = self.get_confman(arg.root_dir) for node in confman.find(arg.target, full_match=arg.full_match): cloud_prop = node.get("cloud", {}) if not cloud_prop.get("instance"): continue provider = self.sky.get_provider(cloud_prop) updates = provider.wait_instances([cloud_prop], wait_state=None) try: update = updates[cloud_prop["instance"]] except __HOLE__: raise errors.Error( "TODO: did not get update from cloud provider for %r" % cloud_prop["instance"]) changes = node.log_update(update) if changes: change_str = ", ".join(("%s=%r (from %r)" % (c[0], c[2], c[1])) for c in changes) self.log.info("%s: updated: %s", node.name, change_str) node.save()
KeyError
dataset/ETHPy150Open ohmu/poni/poni/tool.py/Tool.handle_cloud_update
164
def run(self, args=None): def adjust_logging(arg): """tune the logging before executing commands""" self.tune_arg_namespace(arg) if arg.time_log and os.path.exists(arg.time_log): self.task_times.load(arg.time_log) if arg.debug: logging.getLogger().setLevel(logging.DEBUG) else: # paramiko is very talkative even at ERROR level... paramiko_logger = logging.getLogger('paramiko.transport') paramiko_logger.setLevel(logging.CRITICAL) # boto blabbers http errors at ERROR severity... boto_logger = logging.getLogger('boto') boto_logger.setLevel(logging.CRITICAL) # strip arguments following "--" args = args or sys.argv[1:] namespace = argparse.Namespace() try: extra_loc = args.index("--") namespace.extras = args[extra_loc + 1:] args = args[:extra_loc] except __HOLE__: namespace.extras = [] try: start = time.time() exit_code = self.parser.dispatch(argv=args, pre_call=adjust_logging, raw_output=True, namespace=namespace) stop = time.time() if namespace.time_op: # pylint: disable=E1101 op_name = namespace.time_op if (namespace.time_op != "-") else (" ".join(args)) # pylint: disable=E1101 self.task_times.add_task("C", op_name, start, stop, args=args) except KeyboardInterrupt: self.log.error("*** terminated by keyboard ***") if namespace.pass_thru_exceptions: # pylint: disable=E1101 raise return -1 except errors.Error as error: self.log.error("%s: %s", error.__class__.__name__, error) if namespace.pass_thru_exceptions: # pylint: disable=E1101 raise return -1 finally: if namespace.time_log: # pylint: disable=E1101 self.task_times.save(namespace.time_log) # pylint: disable=E1101 rcontrol_all.manager.cleanup() return exit_code
ValueError
dataset/ETHPy150Open ohmu/poni/poni/tool.py/Tool.run
165
def technical_404_response(request, exception): "Create a technical 404 error response. The exception should be the Http404." try: error_url = exception.args[0]['path'] except (IndexError, TypeError, KeyError): error_url = request.path_info[1:] # Trim leading slash try: tried = exception.args[0]['tried'] except (IndexError, __HOLE__, KeyError): tried = [] else: if (not tried # empty URLconf or (request.path == '/' and len(tried) == 1 # default URLconf and len(tried[0]) == 1 and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')): return default_urlconf(request) urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) if isinstance(urlconf, types.ModuleType): urlconf = urlconf.__name__ caller = '' try: resolver_match = resolve(request.path) except Resolver404: pass else: obj = resolver_match.func if hasattr(obj, '__name__'): caller = obj.__name__ elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'): caller = obj.__class__.__name__ if hasattr(obj, '__module__'): module = obj.__module__ caller = '%s.%s' % (module, caller) try: from leonardo.module.web.models import Page feincms_page = Page.objects.for_request(request, best_match=True) template = feincms_page.theme.template except: feincms_page = None slug = None template = None else: # nested path is not allowed for this time try: slug = request.path_info.split("/")[-2:-1][0] except KeyError: raise Exception("Nested path is not allowed !") c = RequestContext(request, { 'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': error_url, 'urlpatterns': tried, 'reason': force_bytes(exception, errors='replace'), 'request': request, 'settings': get_safe_settings(), 'raising_view_name': caller, 'feincms_page': feincms_page, 'template': template or 'base.html', 'standalone': True, 'slug': slug, }) try: t = render_to_string('404_technical.html', c) except: from django.views.debug import TECHNICAL_404_TEMPLATE t = Template(TECHNICAL_404_TEMPLATE).render(c) return HttpResponseNotFound(t, content_type='text/html')
TypeError
dataset/ETHPy150Open django-leonardo/django-leonardo/leonardo/views/debug.py/technical_404_response
166
def transport(world, shinydata, filename, path): """Write shinydata to a file under the given file_name. """ if not os.path.exists(path): try: os.mkdir(path) except Exception, e: world.log.error('EXPORT FAILED: ' + str(e)) raise SportError('Error accessing the export directory for areas.') filepath = os.path.join(path, filename) try: f = open(filepath, 'w') except __HOLE__, e: world.log.debug(str(e)) raise SportError('Error writing to file. Check the logfile for details') else: f.write(shinydata) finally: f.close() return 'Export complete! Your file can be found at:\n%s' % filepath
IOError
dataset/ETHPy150Open shinymud/ShinyMUD/src/shinymud/lib/sport_plugins/transports/save_file.py/transport
167
def test_merge_dict_request(self): data = { 'name': 'miao', 'random_input': [1, 2, 3] } # Django test submits data as multipart-form by default, # which results in request.data being a MergeDict. # Wrote UserNoMergeDictViewSet to raise an exception (return 400) # if request.data ends up as MergeDict, is not a dict, or # is a dict of lists. request = Request(self.rf.post('/groups/', data)) try: response = self.view(request) self.assertEqual(response.status_code, 201) except __HOLE__ as e: message = '{0}'.format(e) if 'request.FILES' not in message: self.fail('Unexpected error: %s' % message) # otherwise, this is a known DRF 3.2 bug
NotImplementedError
dataset/ETHPy150Open AltSchool/dynamic-rest/tests/test_viewsets.py/TestMergeDictConvertsToDict.test_merge_dict_request
168
def getCmdInfoBasic( command ): typemap = { 'string' : unicode, 'length' : float, 'float' : float, 'angle' : float, 'int' : int, 'unsignedint' : int, 'on|off' : bool, 'script' : callable, 'name' : 'PyNode' } flags = {} shortFlags = {} removedFlags = {} try: lines = cmds.help( command ).split('\n') except RuntimeError: pass else: synopsis = lines.pop(0) # certain commands on certain platforms have an empty first line if not synopsis: synopsis = lines.pop(0) #_logger.debug(synopsis) if lines: lines.pop(0) # 'Flags' #_logger.debug(lines) for line in lines: line = line.replace( '(Query Arg Mandatory)', '' ) line = line.replace( '(Query Arg Optional)', '' ) tokens = line.split() try: tokens.remove('(multi-use)') multiuse = True except __HOLE__: multiuse = False #_logger.debug(tokens) if len(tokens) > 1 and tokens[0].startswith('-'): args = [ typemap.get(x.lower(), util.uncapitalize(x) ) for x in tokens[2:] ] numArgs = len(args) # lags with no args in mel require a boolean val in python if numArgs == 0: args = bool # numArgs will stay at 0, which is the number of mel arguments. # this flag should be renamed to numMelArgs #numArgs = 1 elif numArgs == 1: args = args[0] longname = str(tokens[1][1:]) shortname = str(tokens[0][1:]) if longname in keyword.kwlist: removedFlags[ longname ] = shortname longname = shortname elif shortname in keyword.kwlist: removedFlags[ shortname ] = longname shortname = longname #sometimes the longname is empty, so we'll use the shortname for both elif longname == '': longname = shortname flags[longname] = { 'longname' : longname, 'shortname' : shortname, 'args' : args, 'numArgs' : numArgs, 'docstring' : '' } if multiuse: flags[longname].setdefault('modes', []).append('multiuse') shortFlags[shortname] = longname #except: # pass #_logger.debug("could not retrieve command info for", command) res = { 'flags': flags, 'shortFlags': shortFlags, 'description' : '', 'example': '', 'type' : 'other' } if removedFlags: res['removedFlags'] = removedFlags return res
ValueError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/internal/cmdcache.py/getCmdInfoBasic
169
def getCmdInfo( command, version='8.5', python=True ): """Since many maya Python commands are builtins we can't get use getargspec on them. besides most use keyword args that we need the precise meaning of ( if they can be be used with edit or query flags, the shortnames of flags, etc) so we have to parse the maya docs""" from parsers import CommandDocParser, mayaDocsLocation basicInfo = getCmdInfoBasic(command) try: docloc = mayaDocsLocation(version) if python: docloc = os.path.join( docloc , 'CommandsPython/%s.html' % (command) ) else: docloc = os.path.join( docloc , 'Commands/%s.html' % (command) ) f = open( docloc ) parser = CommandDocParser(command) parser.feed( f.read() ) f.close() example = parser.example example = example.rstrip() if python: pass # start with basic info, gathered using mel help command, then update with info parsed from docs # we copy because we need access to the original basic info below flags = basicInfo['flags'].copy() flags.update( parser.flags ) if command in secondaryFlags: for secondaryFlag, defaultValue, modifiedList in secondaryFlags[command]: #_logger.debug(command, "2nd", secondaryFlag) flags[secondaryFlag]['modified'] = modifiedList #_logger.debug(sorted(modifiedList)) #_logger.debug(sorted(parser.flags.keys())) for primaryFlag in modifiedList: #_logger.debug(command, "1st", primaryFlag) if 'secondaryFlags' in parser.flags[primaryFlag]: flags[primaryFlag]['secondaryFlags'].append(secondaryFlag) else: flags[primaryFlag]['secondaryFlags'] = [secondaryFlag] # add shortname lookup #_logger.debug((command, sorted( basicInfo['flags'].keys() ))) #_logger.debug((command, sorted( flags.keys() ))) # args and numArgs is more reliable from mel help command than from parsed docs, # so, here we put that back in place and create shortflags. # also use original 'multiuse' info... for flag, flagData in flags.items(): basicFlagData = basicInfo.get('flags', {}).get(flag) if basicFlagData: if 'args' in basicFlagData and 'numargs' in basicFlagData: flagData['args'] = basicFlagData['args'] flagData['numArgs'] = basicFlagData['numArgs'] if ( 'multiuse' in basicFlagData.get('modes', []) and 'multiuse' not in flagData.get('modes', [])): flagData.setdefault('modes', []).append('multiuse') shortFlags = basicInfo['shortFlags'] res = { 'flags': flags, 'shortFlags': shortFlags, 'description' : parser.description, 'example': example } try: res['removedFlags'] = basicInfo['removedFlags'] except KeyError: pass return res except __HOLE__: #_logger.debug("could not find docs for %s" % command) return basicInfo #raise IOError, "cannot find maya documentation directory"
IOError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/internal/cmdcache.py/getCmdInfo
170
def fixCodeExamples(style='maya', force=False): """cycle through all examples from the maya docs, replacing maya.cmds with pymel and inserting pymel output. NOTE: this can only be run from gui mode WARNING: back up your preferences before running TODO: auto backup and restore of maya prefs """ manipOptions = cmds.manipOptions( q=1, handleSize=1, scale=1 ) animOptions = [] animOptions.append( cmds.animDisplay( q=1, timeCode=True ) ) animOptions.append( cmds.animDisplay( q=1, timeCodeOffset=True ) ) animOptions.append( cmds.animDisplay( q=1, modelUpdate=True ) ) openWindows = cmds.lsUI(windows=True) examples = CmdExamplesCache().read() processedExamples = CmdProcessedExamplesCache().read() processedExamples = {} if processedExamples is None else processedExamples allCmds = set(examples.keys()) # put commands that require manual interaction first manualCmds = ['fileBrowserDialog', 'fileDialog', 'fileDialog2', 'fontDialog'] skipCmds = ['colorEditor', 'emit', 'finder', 'doBlur', 'messageLine', 'renderWindowEditor', 'ogsRender', 'webBrowser', 'deleteAttrPattern'] allCmds.difference_update(manualCmds) sortedCmds = manualCmds + sorted(allCmds) for command in sortedCmds: example = examples[command] if not force and command in processedExamples: _logger.info("%s: already completed. skipping." % command) continue _logger.info("Starting command %s", command) # change from cmds to pymel reg = re.compile(r'\bcmds\.') example = reg.sub( 'pm.', example ) #example = example.replace( 'import maya.cmds as cmds', 'import pymel as pm\npm.newFile(f=1) #fresh scene' ) lines = example.split('\n') if len(lines)==1: _logger.info("removing empty example for command %s", command) examples.pop(command) processedExamples[command] = '' # write out after each success so that if we crash we don't have to start from scratch CmdProcessedExamplesCache().write(processedExamples) continue if style == 'doctest' : DOC_TEST_SKIP = ' #doctest: +SKIP' else: DOC_TEST_SKIP = '' lines[0] = 'import pymel.core as pm' + DOC_TEST_SKIP if command in skipCmds: example = '\n'.join( lines ) processedExamples[command] = example # write out after each success so that if we crash we don't have to start from scratch CmdProcessedExamplesCache().write(processedExamples) #lines.insert(1, 'pm.newFile(f=1) #fresh scene') # create a fresh scene. 
this does not need to be in the docstring unless we plan on using it in doctests, which is probably unrealistic cmds.file(new=1,f=1) newlines = [] statement = [] # narrowed down the commands that cause maya to crash to these prefixes if re.match( '(dis)|(dyn)|(poly)', command) : evaluate = False elif command in skipCmds: evaluate = False else: evaluate = True # gives a little leniency for where spaces are placed in the result line resultReg = re.compile('# Result:\s*(.*) #$') try: # funky things can happen when executing maya code: some exceptions somehow occur outside the eval/exec for i, line in enumerate(lines): res = None # replace with pymel results '# Result: 1 #' m = resultReg.match(line) if m: if evaluate is False: line = m.group(1) newlines.append(' ' + line) else: if evaluate: if line.strip().endswith(':') or line.startswith(' ') or line.startswith('\t'): statement.append(line) else: # evaluate the compiled statement using exec, which can do multi-line if statements and so on if statement: try: #_logger.debug("executing %s", statement) exec( '\n'.join(statement) ) # reset statement statement = [] except Exception, e: _logger.info("stopping evaluation %s", str(e))# of %s on line %r" % (command, line) evaluate = False try: _logger.debug("evaluating: %r" % line) res = eval( line ) #if res is not None: _logger.info("result", repr(repr(res))) #else: _logger.info("no result") except: #_logger.debug("failed evaluating:", str(e)) try: exec( line ) except (Exception, __HOLE__), e: _logger.info("stopping evaluation %s", str(e))# of %s on line %r" % (command, line) evaluate = False if style == 'doctest': if line.startswith(' ') or line.startswith('\t'): newlines.append(' ... ' + line ) else: newlines.append(' >>> ' + line + DOC_TEST_SKIP ) if res is not None: newlines.append( ' ' + repr(res) ) else: newlines.append(' ' + line ) if res is not None: newlines.append( ' # Result: %r #' % (res,) ) if evaluate: _logger.info("successful evaluation! %s", command) example = '\n'.join( newlines ) processedExamples[command] = example except Exception, e: raise #_logger.info("FAILED: %s: %s" % (command, e) ) else: # write out after each success so that if we crash we don't have to start from scratch CmdProcessedExamplesCache().write(processedExamples) # cleanup opened windows for ui in set(cmds.lsUI(windows=True)).difference(openWindows): try: cmds.deleteUI(ui, window=True) except:pass _logger.info("Done Fixing Examples") # restore manipulators and anim options print manipOptions cmds.manipOptions( handleSize=manipOptions[0], scale=manipOptions[1] ) cmds.animDisplay( e=1, timeCode=animOptions[0], timeCodeOffset=animOptions[1], modelUpdate=animOptions[2]) #CmdExamplesCache(examples)
TypeError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/internal/cmdcache.py/fixCodeExamples
171
def getCallbackFlags(cmdInfo): """used parsed data and naming convention to determine which flags are callbacks""" commandFlags = [] try: flagDocs = cmdInfo['flags'] except __HOLE__: pass else: for flag, data in flagDocs.items(): if data['args'] in ['script', callable] or 'command' in flag.lower(): commandFlags += [flag, data['shortname']] return commandFlags
KeyError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/internal/cmdcache.py/getCallbackFlags
172
def testNodeCmd( funcName, cmdInfo, nodeCmd=False, verbose=False ): _logger.info(funcName.center( 50, '=')) if funcName in [ 'character', 'lattice', 'boneLattice', 'sculpt', 'wire' ]: _logger.debug("skipping") return cmdInfo # These cause crashes... confirmed that pointOnPolyConstraint still # crashes in 2012 dangerousCmds = ['doBlur', 'pointOnPolyConstraint'] if funcName in dangerousCmds: _logger.debug("skipping 'dangerous command'") return cmdInfo def _formatCmd( cmd, args, kwargs ): args = [ x.__repr__() for x in args ] kwargs = [ '%s=%s' % (key, val.__repr__()) for key, val in kwargs.items() ] return '%s( %s )' % ( cmd, ', '.join( args+kwargs ) ) def _objectToType( result ): "convert a an instance or list of instances to a python type or list of types" if isinstance(result, list): return [ type(x) for x in result ] else: return type(result) _castList = [float, int, bool] # def _listIsCastable(resultType): # "ensure that all elements are the same type and that the types are castable" # try: # typ = resultType[0] # return typ in _castList and all([ x == typ for x in resultType ]) # except IndexError: # return False module = cmds try: func = getattr(module, funcName) except AttributeError: _logger.warning("could not find function %s in modules %s" % (funcName, module.__name__)) return cmdInfo # get the current list of objects in the scene so we can cleanup later, after we make nodes allObjsBegin = set( cmds.ls(l=1) ) try: # Attempt to create the node cmds.select(cl=1) # the arglist passed from creation to general testing args = [] constrObj = None if nodeCmd: #------------------ # CREATION #------------------ obj = nodeCreationCmd(func, funcName) if isinstance(obj, list): _logger.debug("Return %s", obj) if len(obj) == 1: _logger.info("%s: creation return values need unpacking" % funcName) cmdInfo['resultNeedsUnpacking'] = True elif not obj: raise ValueError, "returned object is an empty list" objTransform = obj[0] obj = obj[-1] if obj is None: #emptyFunctions.append( funcName ) raise ValueError, "Returned object is None" elif not cmds.objExists( obj ): raise ValueError, "Returned object %s is Invalid" % obj args = [obj] except (TypeError,RuntimeError, ValueError), msg: _logger.debug("failed creation: %s", msg) else: objType = cmds.objectType(obj) #------------------ # TESTING #------------------ #(func, args, data) = cmdList[funcName] #(usePyNode, baseClsName, nodeName) flags = cmdInfo['flags'] hasQueryFlag = flags.has_key( 'query' ) hasEditFlag = flags.has_key( 'edit' ) anyNumRe = re.compile('\d+') for flag in sorted(flags.keys()): flagInfo = flags[flag] if flag in ['query', 'edit']: continue assert flag != 'ype', "%s has bad flag" % funcName # special case for constraints if constrObj and flag in ['weight']: flagargs = [constrObj] + args else: flagargs = args try: modes = flagInfo['modes'] testModes = False except KeyError, msg: #raise KeyError, '%s: %s' % (flag, msg) #_logger.debug(flag, "Testing modes") flagInfo['modes'] = [] modes = [] testModes = True # QUERY val = None argtype = flagInfo['args'] if 'query' in modes or testModes == True: if hasQueryFlag: kwargs = {'query':True, flag:True} else: kwargs = { flag:True } cmd = _formatCmd(funcName, flagargs, kwargs) try: _logger.debug(cmd) val = func( *flagargs, **kwargs ) #_logger.debug(val) resultType = _objectToType(val) # ensure symmetry between edit and query commands: # if this flag is queryable and editable, then its queried value should be symmetric to its edit arguments if 'edit' in modes and argtype != resultType: # there are 
certain patterns of asymmetry which we can safely correct: singleItemList = (isinstance( resultType, list) and len(resultType) ==1 and 'multiuse' not in flagInfo.get('modes', [])) # [bool] --> bool if singleItemList and resultType[0] == argtype: _logger.info("%s, %s: query flag return values need unpacking" % (funcName, flag)) flagInfo['resultNeedsUnpacking'] = True val = val[0] # [int] --> bool elif singleItemList and argtype in _castList and resultType[0] in _castList: _logger.info("%s, %s: query flag return values need unpacking and casting" % (funcName, flag)) flagInfo['resultNeedsUnpacking'] = True flagInfo['resultNeedsCasting'] = True val = argtype(val[0]) # int --> bool elif argtype in _castList and resultType in _castList: _logger.info("%s, %s: query flag return values need casting" % (funcName, flag)) flagInfo['resultNeedsCasting'] = True val = argtype(val) else: # no valid corrctions found _logger.info(cmd) _logger.info("\treturn mismatch") _logger.info('\tresult: %s', val.__repr__()) _logger.info('\tpredicted type: %s', argtype) _logger.info('\tactual type: %s', resultType) # value is no good. reset to None, so that a default will be generated for edit val = None else: _logger.debug("\tsucceeded") _logger.debug('\tresult: %s', val.__repr__()) _logger.debug('\tresult type: %s', resultType) except __HOLE__, msg: # flag is no longer supported if str(msg).startswith( 'Invalid flag' ): #if verbose: _logger.info("removing flag %s %s %s", funcName, flag, msg) shortname = flagInfo['shortname'] flagInfo.pop(flag,None) flagInfo.pop(shortname,None) modes = [] # stop edit from running else: _logger.info(cmd) _logger.info("\t" + str(msg).rstrip('\n')) val = None except RuntimeError, msg: _logger.info(cmd) _logger.info("\t" + str(msg).rstrip('\n') ) val = None else: # some flags are only in mel help and not in maya docs, so we don't know their # supported per-flag modes. we fill that in here if 'query' not in flagInfo['modes']: flagInfo['modes'].append('query') # EDIT if 'edit' in modes or testModes == True: #_logger.debug("Args:", argtype) try: # we use the value returned from query above as defaults for putting back in as edit args # but if the return was empty we need to produce something to test on. # NOTE: this is just a guess if val is None: if isinstance(argtype, list): val = [] for typ in argtype: if type == unicode or isinstance(type,basestring): val.append('persp') else: if 'query' in modes: val.append( typ(0) ) # edit only, ensure that bool args are True else: val.append( typ(1) ) else: if argtype == unicode or isinstance(argtype,basestring): val = 'persp' elif 'query' in modes: val = argtype(0) else: # edit only, ensure that bool args are True val = argtype(1) kwargs = {'edit':True, flag:val} cmd = _formatCmd(funcName, args, kwargs) _logger.debug(cmd) # some commands will either delete or rename a node, ie: # spaceLocator(e=1, name=...) # container(e=1, removeContainer=True ) # ...which will then make subsequent cmds fail. # To get around this, we need to undo the cmd. try: cmds.undoInfo(openChunk=True) editResult = func( *args, **kwargs ) finally: cmds.undoInfo(closeChunk=True) if not cmds.objExists(obj): # cmds.camera(e=1, name=...) does weird stuff - it # actually renames the parent transform, even if you give # the name of the shape... which means the shape # then gets a second 'Shape1' tacked at the end... # ...and in addition, undo is broken as well. # So we need a special case for this, where we rename... 
if objType == 'camera' and flag == 'name': _logger.info('\t(Undoing camera rename)') renamePattern = anyNumRe.sub('*', obj) possibleRenames = cmds.ls(renamePattern, type=objType) possibleRenames = [x for x in possibleRenames if x not in allObjsBegin] # newName might not be the exact same as our original, # but as long as it's the same maya type, and isn't # one of the originals, it shouldn't matter... newName = possibleRenames[-1] cmds.rename(newName, obj) else: _logger.info('\t(Undoing cmd)') cmds.undo() _logger.debug("\tsucceeded") #_logger.debug('\t%s', editResult.__repr__()) #_logger.debug('\t%s %s', argtype, type(editResult)) #_logger.debug("SKIPPING %s: need arg of type %s" % (flag, flagInfo['argtype'])) except TypeError, msg: if str(msg).startswith( 'Invalid flag' ): #if verbose: # flag is no longer supported _logger.info("removing flag %s %s %s", funcName, flag, msg) shortname = flagInfo['shortname'] flagInfo.pop(flag,None) flagInfo.pop(shortname,None) else: _logger.info(funcName) _logger.info("\t" + str(msg).rstrip('\n')) _logger.info("\tpredicted arg: %s", argtype) if not 'query' in modes: _logger.info("\tedit only") except RuntimeError, msg: _logger.info(cmd) _logger.info("\t" + str(msg).rstrip('\n')) _logger.info("\tpredicted arg: %s", argtype) if not 'query' in modes: _logger.info("\tedit only") else: if 'edit' not in flagInfo['modes']: flagInfo['modes'].append('edit') # cleanup allObjsEnd = set( cmds.ls(l=1) ) newObjs = list(allObjsEnd.difference( allObjsBegin ) ) if newObjs: cmds.delete( newObjs ) return cmdInfo
TypeError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/internal/cmdcache.py/testNodeCmd
173
def rebuild(self) : """Build and save to disk the list of Maya Python commands and their arguments WARNING: will unload existing plugins, then (re)load all maya-installed plugins, without making an attempt to return the loaded plugins to the state they were at before this command is run. Also, the act of loading all the plugins may crash maya, especially if done from a non-GUI session """ # Put in a debug, because this can be crashy _logger.debug("Starting CmdCache.rebuild...") # With extension can't get docs on unix 64 # path is # /usr/autodesk/maya2008-x64/docs/Maya2008/en_US/Nodes/index_hierarchy.html # and not # /usr/autodesk/maya2008-x64/docs/Maya2008-x64/en_US/Nodes/index_hierarchy.html long_version = versions.installName() _logger.info("Rebuilding the maya node hierarchy...") # Load all plugins to get the nodeHierarchy / nodeFunctions import pymel.api.plugins as plugins # We don't want to add in plugin nodes / commands - let that be done # by the plugin callbacks. However, unloading mechanism is not 100% # ... sometimes functions get left in maya.cmds... and then trying # to use those left-behind functions can cause crashes (ie, # FBXExportQuaternion). So check which methods SHOULD be unloaded # first, so we know to skip those if we come across them even after # unloading the plugin pluginCommands = set() loadedPlugins = cmds.pluginInfo(q=True, listPlugins=True) if loadedPlugins: for plug in loadedPlugins: plugCmds = plugins.pluginCommands(plug) if plugCmds: pluginCommands.update(plugCmds) plugins.unloadAllPlugins() self.nodeHierarchy = _getNodeHierarchy(long_version) nodeFunctions = [ x[0] for x in self.nodeHierarchy ] nodeFunctions += nodeTypeToNodeCommand.values() _logger.info("Rebuilding the list of Maya commands...") #nodeHierarchyTree = trees.IndexedTree(self.nodeHierarchy) self.uiClassList = UI_COMMANDS self.nodeCommandList = [] tmpModuleCmds = {} for moduleName, longname in moduleNameShortToLong.items(): tmpModuleCmds[moduleName] = getModuleCommandList( longname, long_version ) tmpCmdlist = inspect.getmembers(cmds, callable) #self.moduleCmds = defaultdict(list) self.moduleCmds = dict( (k,[]) for k in moduleNameShortToLong.keys() ) self.moduleCmds.update( {'other':[], 'runtime': [], 'context': [], 'uiClass': [] } ) def addCommand(funcName): _logger.debug('adding command: %s' % funcName) module = getModule(funcName, tmpModuleCmds) cmdInfo = {} if module: self.moduleCmds[module].append(funcName) if module != 'runtime': cmdInfo = getCmdInfo(funcName, long_version) if module != 'windows': if funcName in nodeFunctions: self.nodeCommandList.append(funcName) cmdInfo = testNodeCmd( funcName, cmdInfo, nodeCmd=True, verbose=True ) #elif module != 'context': # cmdInfo = testNodeCmd( funcName, cmdInfo, nodeCmd=False, verbose=True ) cmdInfo['type'] = module flags = getCallbackFlags(cmdInfo) if flags: cmdInfo['callbackFlags'] = flags self.cmdlist[funcName] = cmdInfo # # func, args, (usePyNode, baseClsName, nodeName) # # args = dictionary of command flags and their data # # usePyNode = determines whether the class returns its 'nodeName' or uses PyNode to dynamically return # # baseClsName = for commands which should generate a class, this is the name of the superclass to inherit # # nodeName = most creation commands return a node of the same name, this option is provided for the exceptions # try: # self.cmdlist[funcName] = args, pymelCmdsList[funcName] ) # except KeyError: # # context commands generate a class based on unicode (which is triggered by passing 'None' to baseClsName) # if 
funcName.startswith('ctx') or funcName.endswith('Ctx') or funcName.endswith('Context'): # self.cmdlist[funcName] = (funcName, args, (False, None, None) ) # else: # self.cmdlist[funcName] = (funcName, args, () ) for funcName, _ in tmpCmdlist : if funcName in pluginCommands: _logger.debug("command %s was a plugin command that should have been unloaded - skipping" % funcName) continue addCommand(funcName) # split the cached data for lazy loading cmdDocList = {} examples = {} for cmdName, cmdInfo in self.cmdlist.iteritems(): try: examples[cmdName] = cmdInfo.pop('example') except __HOLE__: pass newCmdInfo = {} if 'description' in cmdInfo: newCmdInfo['description'] = cmdInfo.pop('description') newFlagInfo = {} if 'flags' in cmdInfo: for flag, flagInfo in cmdInfo['flags'].iteritems(): newFlagInfo[flag] = { 'docstring' : flagInfo.pop('docstring') } newCmdInfo['flags'] = newFlagInfo if newCmdInfo: cmdDocList[cmdName] = newCmdInfo CmdDocsCache().write(cmdDocList) CmdExamplesCache().write(examples)
KeyError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/internal/cmdcache.py/CmdCache.rebuild
174
def to_screen(self, group, new_screen): """Adjust offsets of clients within current screen""" for win in self.find_clients(group): if win.maximized: win.maximized = True elif win.fullscreen: win.fullscreen = True else: # catch if the client hasn't been configured try: # By default, place window at same offset from top corner new_x = new_screen.x + win.float_x new_y = new_screen.y + win.float_y except __HOLE__: # this will be handled in .configure() pass else: # make sure window isn't off screen left/right... new_x = min(new_x, new_screen.x + new_screen.width - win.width) new_x = max(new_x, new_screen.x) # and up/down new_y = min(new_y, new_screen.y + new_screen.height - win.height) new_y = max(new_y, new_screen.y) win.x = new_x win.y = new_y win.group = new_screen.group
AttributeError
dataset/ETHPy150Open qtile/qtile/libqtile/layout/floating.py/Floating.to_screen
175
def configure(self, client, screen): if client is self.focused: bc = client.group.qtile.colorPixel(self.border_focus) else: bc = client.group.qtile.colorPixel(self.border_normal) if client.maximized: bw = self.max_border_width elif client.fullscreen: bw = self.fullscreen_border_width else: bw = self.border_width # We definitely have a screen here, so let's be sure we'll float on screen try: client.float_x client.float_y except __HOLE__: # this window hasn't been placed before, let's put it in a sensible spot x = screen.x + client.x % screen.width # try to get right edge on screen (without moving the left edge off) x = min(x, screen.x - client.width) x = max(x, screen.x) # then update it's position (`.place()` will take care of `.float_x`) client.x = x y = screen.y + client.y % screen.height y = min(y, screen.y - client.height) y = max(y, screen.y) client.y = y client.place( client.x, client.y, client.width, client.height, bw, bc ) client.unhide()
AttributeError
dataset/ETHPy150Open qtile/qtile/libqtile/layout/floating.py/Floating.configure
176
def configure(self, win, screen): # force recalc if not self.last_screen or self.last_screen != screen: self.last_screen = screen self.dirty = True if self.last_size and not self.dirty: if screen.width != self.last_size[0] or \ screen.height != self.last_size[1]: self.dirty = True if self.dirty: gi = GridInfo( self.ratio, len(self.clients), screen.width, screen.height ) self.last_size = (screen.width, screen.height) if self.fancy: method = gi.get_sizes_advanced else: method = gi.get_sizes self.layout_info = method( screen.width, screen.height, screen.x, screen.y ) self.dirty = False try: idx = self.clients.index(win) except __HOLE__: win.hide() return x, y, w, h = self.layout_info[idx] if win is self.focused: bc = self.group.qtile.colorPixel(self.border_focus) else: bc = self.group.qtile.colorPixel(self.border_normal) win.place( x, y, w - self.border_width * 2, h - self.border_width * 2, self.border_width, bc, margin=self.margin, ) win.unhide()
ValueError
dataset/ETHPy150Open qtile/qtile/libqtile/layout/ratiotile.py/RatioTile.configure
177
def default_store(database, host, port):
    """Gets a default store for the given database, host and port. If the store does not
    exist, one is created and added to the shared default store pool."""

    key = (database, host, port)
    try:
        store = _default_stores[key]
    except __HOLE__:
        store = MongoDBStore(database, host=host, port=port)
        _default_stores[key] = store

    return store
KeyError
dataset/ETHPy150Open Stiivi/bubbles/bubbles/backends/mongo/objects.py/default_store
178
def new_transport_init(self, host, connect_timeout): import errno import re import socket import ssl # Jython does not have this attribute try: from socket import SOL_TCP except ImportError: # pragma: no cover from socket import IPPROTO_TCP as SOL_TCP # noqa try: from ssl import SSLError except ImportError: class SSLError(Exception): # noqa pass from struct import pack, unpack from amqp.exceptions import UnexpectedFrame from amqp.utils import get_errno, set_cloexec _UNAVAIL = errno.EAGAIN, errno.EINTR, errno.ENOENT AMQP_PORT = 5672 EMPTY_BUFFER = bytes() # Yes, Advanced Message Queuing Protocol Protocol is redundant AMQP_PROTOCOL_HEADER = 'AMQP\x01\x01\x00\x09'.encode('latin_1') # Match things like: [fe80::1]:5432, from RFC 2732 IPV6_LITERAL = re.compile(r'\[([\.0-9a-f:]+)\](?::(\d+))?') # -------------------------------------------------------------------------- # __init__ content: # -------------------------------------------------------------------------- self.connected = True msg = None port = AMQP_PORT m = IPV6_LITERAL.match(host) if m: host = m.group(1) if m.group(2): port = int(m.group(2)) else: if ':' in host: host, port = host.rsplit(':', 1) port = int(port) self.sock = None last_err = None for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, SOL_TCP): af, socktype, proto, canonname, sa = res try: self.sock = socket.socket(af, socktype, proto) try: set_cloexec(self.sock, True) except __HOLE__: pass self.sock.settimeout(connect_timeout) self.sock.connect(sa) except socket.error as exc: msg = exc self.sock.close() self.sock = None last_err = msg continue break if not self.sock: # Didn't connect, return the most recent error message raise socket.error(last_err) try: # self.sock.settimeout(None) self.sock.setsockopt(SOL_TCP, socket.TCP_NODELAY, 1) self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) self._setup_transport() self._write(AMQP_PROTOCOL_HEADER) except (OSError, IOError, socket.error) as exc: if get_errno(exc) not in _UNAVAIL: self.connected = False raise # -------------------------------------------------------------------------- # amqlib # --------------------------------------------------------------------------
NotImplementedError
dataset/ETHPy150Open cr0hn/enteletaor/enteletaor_lib/modules/brute/patch.py/new_transport_init
179
def to_internal_value(self, value): value = super(PatchItem, self).to_internal_value(value) if set(value.keys()) != set(['op', 'path', 'value']): raise ValidationError("Missing some of required parts: 'path', 'op', 'value'") if value['path'][0] != '/': raise ValidationError({'path': "Invalid path"}) value['path'] = tuple(value['path'].split('/')[1:]) if self.parent.serializer: try: field = get_field_for_path(self.parent.serializer, value['path']) except __HOLE__ as e: raise ValidationError({'path': "Missing elem: '%s'" % e.args[0]}) # import pdb; pdb.set_trace() if value['op'] in ('set', 'inc', 'dec'): if field is not None: value['value'] = field.to_internal_value(value['value']) elif value['op'] in ('push', 'add_to_set'): field = getattr(field, 'child') if field is not None: value['value'] = field.to_internal_value(value['value']) elif value['op'] in ('unset', 'pull', 'pull_all', 'min', 'max'): if value['value'] is not None: raise ValidationError({'value': "Value for '%s' expected to be null" % value['op']}) elif value['op'] in ('pop',): try: value['value'] = int(value['value']) except: raise ValidationError({'value': "Integer expected for '%s'" % value['op']}) return value
KeyError
dataset/ETHPy150Open umutbozkurt/django-rest-framework-mongoengine/rest_framework_mongoengine/contrib/patching.py/PatchItem.to_internal_value
180
def __call__(self, i, default=DEFAULT, cast=None, otherwise=None): """ request.args(0,default=0,cast=int,otherwise='http://error_url') request.args(0,default=0,cast=int,otherwise=lambda:...) """ n = len(self) if 0 <= i < n or -n <= i < 0: value = self[i] elif default is DEFAULT: value = None else: value, cast = default, False if cast: try: value = cast(value) except (__HOLE__, TypeError): from http import HTTP, redirect if otherwise is None: raise HTTP(404) elif isinstance(otherwise, str): redirect(otherwise) elif callable(otherwise): return otherwise() else: raise RuntimeError("invalid otherwise") return value
ValueError
dataset/ETHPy150Open uwdata/termite-data-server/web2py/gluon/storage.py/List.__call__
181
def _extract_aliases(self, page, id=None): dict_of_keylists = { 'url' : ['url'] } aliases_dict = provider._extract_from_xml(page, dict_of_keylists) try: doi = provider.doi_from_url_string(aliases_dict["url"]) if doi: aliases_dict["doi"] = doi except __HOLE__: pass if aliases_dict: aliases_list = [(namespace, nid) for (namespace, nid) in aliases_dict.iteritems()] else: aliases_list = [] return aliases_list
KeyError
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/providers/dataone.py/Dataone._extract_aliases
182
def print_json(obj): try: s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder) except __HOLE__: s = repr(obj) sys.stdout.write(s + "\n") sys.stdout.flush()
TypeError
dataset/ETHPy150Open bitxbay/BitXBay/electru/build/lib/electrum/util.py/print_json
183
@extensionclassmethod(Observable) def concat(cls, *args): """Concatenates all the observable sequences. 1 - res = Observable.concat(xs, ys, zs) 2 - res = Observable.concat([xs, ys, zs]) Returns an observable sequence that contains the elements of each given sequence, in sequential order. """ if isinstance(args[0], list) or isinstance(args[0], Enumerable): sources = args[0] else: sources = list(args) def subscribe(observer): enum = iter(sources) is_disposed = [False] subscription = SerialDisposable() def action(action1, state=None): if is_disposed[0]: return try: current = next(enum) except __HOLE__: observer.on_completed() except Exception as ex: observer.on_error(ex) else: d = SingleAssignmentDisposable() subscription.disposable = d d.disposable = current.subscribe( observer.on_next, observer.on_error, lambda: action1() ) cancelable = immediate_scheduler.schedule_recursive(action) def dispose(): is_disposed[0] = True return CompositeDisposable(subscription, cancelable, Disposable(dispose)) return AnonymousObservable(subscribe)
StopIteration
dataset/ETHPy150Open ReactiveX/RxPY/rx/linq/observable/concat.py/concat
184
def get_media_requests(self, item, info): try: img_elem = info.spider.scraper.get_image_elem() if img_elem.scraped_obj_attr.name in item and item[img_elem.scraped_obj_attr.name]: if not hasattr(self, 'conf'): self.conf = info.spider.conf return Request(item[img_elem.scraped_obj_attr.name]) except (ScraperElem.DoesNotExist, __HOLE__): pass
TypeError
dataset/ETHPy150Open holgerd77/django-dynamic-scraper/dynamic_scraper/pipelines.py/DjangoImagesPipeline.get_media_requests
185
def unregister(self, address): addr = ipaddress.ip_address(address) try: self._pool.remove(addr) except __HOLE__: pass
ValueError
dataset/ETHPy150Open sorz/sstp-server/sstpd/address.py/IPPool.unregister
186
@crossdomain @endpoint def get(self, **kwargs): current_app.logger.info("GETting record(s) from database") records = [] # generate meta information params = {'query': self.access_limits(**kwargs), 'projection': {}} if '_limit' in request.args: try: params['limit'] = int(request.args.get('_limit')) except __HOLE__: current_app.logger.debug("No record limit override") pass if '_fields' in request.args: params['projection'] = \ {r: True for r in request.args.get('_fields', '').split(',')} params['projection'].update(self.limit_fields(**kwargs)) if 'obj_id' in kwargs: records = self.db_query.get_instance( self.db_collection, kwargs['obj_id'], **params) if records in [{}, None]: return self._make_response(404) else: records = \ self.db_query.get_collection(self.db_collection, **params) return self._make_response(200, self.transform_payload(records))
ValueError
dataset/ETHPy150Open gevious/flask_slither/flask_slither/resources.py/BaseResource.get
187
def unquote(s): """unquote('abc%20def') -> 'abc def'.""" res = s.split('%') for i in xrange(1, len(res)): item = res[i] try: res[i] = _hextochr[item[:2]] + item[2:] except KeyError: res[i] = '%' + item except __HOLE__: res[i] = unichr(int(item[:2], 16)) + item[2:] return "".join(res)
UnicodeDecodeError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/urlparse.py/unquote
188
def test(): import sys base = '' if sys.argv[1:]: fn = sys.argv[1] if fn == '-': fp = sys.stdin else: fp = open(fn) else: try: from cStringIO import StringIO except __HOLE__: from StringIO import StringIO fp = StringIO(test_input) for line in fp: words = line.split() if not words: continue url = words[0] parts = urlparse(url) print '%-10s : %s' % (url, parts) abs = urljoin(base, url) if not base: base = abs wrapped = '<URL:%s>' % abs print '%-10s = %s' % (url, wrapped) if len(words) == 3 and words[1] == '=': if wrapped != words[2]: print 'EXPECTED', words[2], '!!!!!!!!!!'
ImportError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/urlparse.py/test
189
def reverse( self, query, exactly_one=False, timeout=None, ): """ Given a point, find an address. .. versionadded:: 1.2.0 :param string query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of (latitude, longitude), or string as "%(latitude)s, %(longitude)s" :param boolean exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. """ try: lat, lng = [ x.strip() for x in self._coerce_point_to_string(query).split(',') ] except __HOLE__: raise ValueError("Must be a coordinate pair or Point") params = { 'lat': lat, 'lng': lng, 'username': self.username } url = "?".join((self.api_reverse, urlencode(params))) logger.debug("%s.reverse: %s", self.__class__.__name__, url) return self._parse_json( self._call_geocoder(url, timeout=timeout), exactly_one )
ValueError
dataset/ETHPy150Open geopy/geopy/geopy/geocoders/geonames.py/GeoNames.reverse
190
def multi_service():
    """Start all the services in separate threads"""
    threads = []
    for service in [test_notification_service, console_monitor_service]:
        threads.append(threading.Thread(target=service))
    for thread in threads:
        thread.daemon = True
        thread.start()
    while threading.active_count() > 0:
        try:
            time.sleep(0.1)
        except __HOLE__:
            exit()
KeyboardInterrupt
dataset/ETHPy150Open datastax/cstar_perf/frontend/cstar_perf/frontend/server/notifications.py/multi_service
191
def coroutine(func):
    """ Decorator for coroutine functions that need to block for asynchronous operations. """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return_future = Future()

        def handle_future(future):
            # Chained futures!
            try:
                if future.exception() is not None:
                    result = gen.throw(future.exception())
                else:
                    result = gen.send(future.result())
                if isinstance(result, tuple):
                    result = parallel(*result)
                result.add_done_callback(handle_future)
            except __HOLE__ as e:
                return_future.set_result(getattr(e, 'value', None))
            except Exception as e:
                return_future.set_exception(e)

        try:
            # Handle initial value.
            gen = func(*args, **kwargs)
        except Exception as e:
            return_future.set_exception(e)
            return return_future
        else:
            # If this isn't a generator, then wrap the result with a future.
            if not isinstance(gen, types.GeneratorType):
                return_future.set_result(gen)
                return return_future

        try:
            result = next(gen)
            if isinstance(result, tuple):
                result = parallel(*result)
            result.add_done_callback(handle_future)
        except StopIteration as e:
            return_future.set_result(getattr(e, 'value', None))
        except Exception as e:
            return_future.set_exception(e)
        return return_future
    return wrapper
StopIteration
dataset/ETHPy150Open Shizmob/pydle/pydle/async.py/coroutine
192
def _importAndCheckStack(importName):
    """
    Import the given name as a module, then walk the stack to determine
    whether the failure was the module not existing, or some code in the
    module (for example a dependent import) failing. This can be helpful to
    determine whether any actual application code was run. For example, to
    distinguish administrative error (entering the wrong module name), from
    programmer error (writing buggy code in a module that fails to import).

    @raise Exception: if something bad happens. This can be any type of
        exception, since nobody knows what loading some arbitrary code
        might do.

    @raise _NoModuleFound: if no module was found.
    """
    try:
        try:
            return __import__(importName)
        except __HOLE__:
            excType, excValue, excTraceback = sys.exc_info()
            while excTraceback:
                execName = excTraceback.tb_frame.f_globals["__name__"]
                if (execName is None or  # python 2.4+, post-cleanup
                    execName == importName):  # python 2.3, no cleanup
                    raise excType, excValue, excTraceback
                excTraceback = excTraceback.tb_next
            raise _NoModuleFound()
    except:
        # Necessary for cleaning up modules in 2.3.
        sys.modules.pop(importName, None)
        raise
ImportError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/python/reflect.py/_importAndCheckStack
193
def resolve_function_type(self, func, args, kws):
    """
    Resolve function type *func* for argument types *args* and *kws*.
    A signature is returned.
    """
    if func not in self._functions:
        # It's not a known function type, perhaps it's a global?
        try:
            func = self._lookup_global(func)
        except __HOLE__:
            pass
    if func in self._functions:
        defns = self._functions[func]
        for defn in defns:
            res = defn.apply(args, kws)
            if res is not None:
                return res
    if isinstance(func, types.Type):
        # If it's a type, it may support a __call__ method
        func_type = self.resolve_getattr(func, "__call__")
        if func_type is not None:
            # The function has a __call__ method, type its call.
            return self.resolve_function_type(func_type, args, kws)
    if isinstance(func, types.Callable):
        # XXX fold this into the __call__ attribute logic?
        return func.get_call_type(self, args, kws)
KeyError
dataset/ETHPy150Open numba/numba/numba/typing/context.py/BaseContext.resolve_function_type
194
def resolve_value_type(self, val):
    """
    Return the numba type of a Python value that is being used as a
    runtime constant.  None is returned for unsupported types.
    """
    tp = typeof(val, Purpose.constant)
    if tp is not None:
        return tp
    if isinstance(val, (types.ExternalFunction, types.NumbaFunction)):
        return val
    if isinstance(val, type):
        if issubclass(val, BaseException):
            return types.ExceptionClass(val)
        if issubclass(val, tuple) and hasattr(val, "_asdict"):
            return types.NamedTupleClass(val)
    try:
        # Try to look up target specific typing information
        return self._get_global_type(val)
    except __HOLE__:
        pass
    return None
KeyError
dataset/ETHPy150Open numba/numba/numba/typing/context.py/BaseContext.resolve_value_type
195
def _get_global_type(self, gv):
    try:
        return self._lookup_global(gv)
    except __HOLE__:
        if isinstance(gv, pytypes.ModuleType):
            return types.Module(gv)
        else:
            raise
KeyError
dataset/ETHPy150Open numba/numba/numba/typing/context.py/BaseContext._get_global_type
196
def install_registry(self, registry):
    """
    Install a *registry* (a templates.Registry instance) of function,
    attribute and global declarations.
    """
    try:
        loader = self._registries[registry]
    except KeyError:
        loader = templates.RegistryLoader(registry)
        self._registries[registry] = loader
    for ftcls in loader.new_registrations('functions'):
        self.insert_function(ftcls(self))
    for ftcls in loader.new_registrations('attributes'):
        self.insert_attributes(ftcls(self))
    for gv, gty in loader.new_registrations('globals'):
        try:
            existing = self._lookup_global(gv)
        except __HOLE__:
            self.insert_global(gv, gty)
        else:
            # A type was already inserted, see if we can add to it
            newty = existing.augment(gty)
            if newty is None:
                raise TypeError("cannot augment %s with %s" % (existing, gty))
            self._remove_global(gv)
            self._insert_global(gv, newty)
KeyError
dataset/ETHPy150Open numba/numba/numba/typing/context.py/BaseContext.install_registry
197
def _lookup_global(self, gv):
    """
    Look up the registered type for global value *gv*.
    """
    try:
        gv = weakref.ref(gv)
    except __HOLE__:
        pass
    return self._globals[gv]
TypeError
dataset/ETHPy150Open numba/numba/numba/typing/context.py/BaseContext._lookup_global
198
def _insert_global(self, gv, gty):
    """
    Register type *gty* for value *gv*.  Only a weak reference to *gv*
    is kept, if possible.
    """
    def on_disposal(wr, pop=self._globals.pop):
        # pop() is pre-looked up to avoid a crash late at shutdown on 3.5
        # (https://bugs.python.org/issue25217)
        pop(wr)
    try:
        gv = weakref.ref(gv, on_disposal)
    except __HOLE__:
        pass
    self._globals[gv] = gty
TypeError
dataset/ETHPy150Open numba/numba/numba/typing/context.py/BaseContext._insert_global
199
def _remove_global(self, gv):
    """
    Remove the registered type for global value *gv*.
    """
    try:
        gv = weakref.ref(gv)
    except __HOLE__:
        pass
    del self._globals[gv]
TypeError
dataset/ETHPy150Open numba/numba/numba/typing/context.py/BaseContext._remove_global