Dataset columns (ETHPy150Open exception-type data). Each row lists an index, a Python function with the masked exception type written as __HOLE__, the label (the masked exception class), and an info path to the source file.

  Unnamed: 0 (int64): 0 to 10k
  function (string): lengths 79 to 138k
  label (string): 20 classes
  info (string): lengths 42 to 261
2,000
def logSaveToFile(self, *ignore):
    filename = self.uiFileDialog("save",
                                 title=_("arelle - Save Messages Log"),
                                 initialdir=".",
                                 filetypes=[(_("Txt file"), "*.txt")],
                                 defaultextension=".txt")
    if not filename:
        return False
    try:
        self.logView.saveToFile(filename)
    except (__HOLE__, EnvironmentError) as err:
        tkinter.messagebox.showwarning(_("arelle - Error"),
                                       _("Failed to save {0}:\n{1}").format(filename, err),
                                       parent=self.parent)
    return True;

# worker threads viewModelObject
IOError
dataset/ETHPy150Open Arelle/Arelle/arelle/CntlrWinMain.py/CntlrWinMain.logSaveToFile
2,001
def __call__(self, *args):
    """Apply first function SUBST to arguments, than FUNC."""
    try:
        if self.subst:
            args = self.subst(*args)
        return self.func(*args)
    except __HOLE__ as msg:
        raise SystemExit(msg)
    except Exception:
        # this was tkinter's standard coding: self.widget._report_exception()
        exc_type, exc_value, exc_traceback = sys.exc_info()
        msg = ''.join(traceback.format_exception_only(exc_type, exc_value))
        tracebk = ''.join(traceback.format_tb(exc_traceback, limit=7))
        tkinter.messagebox.showerror(_("Exception"),
                                     _("{0}\nCall trace\n{1}").format(msg, tracebk))
SystemExit
dataset/ETHPy150Open Arelle/Arelle/arelle/CntlrWinMain.py/TkinterCallWrapper.__call__
2,002
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argment, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised. If the encoding cookie is an
    invalid charset, raise a SyntaxError. If no encoding is specified, then
    the default of 'utf-8' will be returned.
    """
    bom_found = False
    encoding = None

    def read_or_stop():
        try:
            return readline()
        except __HOLE__:
            return ''

    def find_cookie(line):
        try:
            if PY3:
                line_string = line
            else:
                line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None

        matches = cookie_re.findall(line_string)
        if not matches:
            return None
        encoding = matches[0]
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)

        if bom_found and codec.name != 'utf-8':
            # This behaviour mimics the Python interpreter
            raise SyntaxError('encoding problem: utf-8')
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
    if not first:
        return 'utf-8', []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]

    second = read_or_stop()
    if not second:
        return 'utf-8', [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return 'utf-8', [first, second]

# }}}

# {{{ traceback formatting
StopIteration
dataset/ETHPy150Open inducer/pudb/pudb/lowlevel.py/detect_encoding
2,003
def list_post_default(self, request, **kwargs):
    data = self.get_request_data()
    serialiser = self.get_serialiser()
    serialiser_kwargs = self.get_serialiser_kwargs()
    try:
        with transaction.atomic():
            obj = serialiser.object_inflate(data, **serialiser_kwargs)
    except __HOLE__ as e:
        return http.BadRequest(str(e))
    return self.render_single_object(obj, serialiser)
ValueError
dataset/ETHPy150Open funkybob/django-nap/nap/rest/models.py/ModelPublisher.list_post_default
2,004
def clear_history(self):
    try:
        readline.clear_history()
    except __HOLE__:
        len = self.get_max_length()
        readline.set_history_length(0)
        readline.set_history_length(len)
AttributeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/grizzled/grizzled/history.py/ReadlineHistory.clear_history
2,005
def main():
    paths = sys.argv[1:] or ['.']

    print('Importing nltk...')
    try:
        import nltk
    except __HOLE__:
        print('Unable to import nltk -- check your PYTHONPATH.')
        sys.exit(-1)

    print('Finding definitions of deprecated funtions & classes in nltk...')
    find_deprecated_defs(nltk.__path__[0])

    print('Looking for possible uses of deprecated funcs & classes...')
    dep_names = print_deprecated_uses(paths)

    if not dep_names:
        print('No deprecated funcs or classes found!')
    else:
        print("\n" + term.BOLD + "What you should use instead:" + term.NORMAL)
        for name in sorted(dep_names):
            msgs = deprecated_funcs[name].union(
                deprecated_classes[name]).union(
                deprecated_methods[name])
            for msg, prefix, suffix in msgs:
                print(textwrap.fill(term.RED + prefix + name + suffix +
                                    term.NORMAL + ': ' + msg,
                                    width=75, initial_indent=' ' * 2,
                                    subsequent_indent=' ' * 6))
ImportError
dataset/ETHPy150Open nltk/nltk/tools/find_deprecated.py/main
2,006
def get_compute_capability(device_id=None, verbose=False):
    """
    Query compute capability through PyCuda and check it's 5.0 (Maxwell) or
    greater.
    5.0 (GTX750 Ti) only fp32 support
    5.2 (GTX9xx series) required for fp16

    By default, check all devices and return the highest compute capability.

    Arguments:
        device_id (int): CUDA device id. Default to None, will iterate over
                         all devices if None.
        verbose (bool): prints verbose logging if True, default False.

    Returns:
        float: Zero if no GPU is found, otherwise highest compute capability.
    """
    try:
        import pycuda
        import pycuda.driver as drv
    except __HOLE__:
        if verbose:
            print("PyCUDA module not found")
        return 0
    try:
        drv.init()
    except pycuda._driver.RuntimeError as e:
        print("PyCUDA Runtime error: {0}".format(str(e)))
        return 0

    major_string = pycuda._driver.device_attribute.COMPUTE_CAPABILITY_MAJOR
    minor_string = pycuda._driver.device_attribute.COMPUTE_CAPABILITY_MINOR

    full_version = []
    if device_id is None:
        device_id = range(drv.Device.count())
    elif isinstance(device_id, int):
        device_id = [device_id]

    for i in device_id:
        major = drv.Device(i).get_attribute(major_string)
        minor = drv.Device(i).get_attribute(minor_string)
        full_version += [major + minor / 10.]

    if verbose:
        print "Found GPU(s) with compute capability:", full_version

    return max(full_version)
ImportError
dataset/ETHPy150Open NervanaSystems/neon/neon/backends/util/check_gpu.py/get_compute_capability
2,007
def get_device_count(verbose=False):
    """
    Query device count through PyCuda.

    Arguments:
        verbose (bool): prints verbose logging if True, default False.

    Returns:
        int: Number of GPUs available.
    """
    try:
        import pycuda
        import pycuda.driver as drv
    except __HOLE__:
        if verbose:
            print("PyCUDA module not found")
        return 0
    try:
        drv.init()
    except pycuda._driver.RuntimeError as e:
        print("PyCUDA Runtime error: {0}".format(str(e)))
        return 0

    count = drv.Device.count()
    if verbose:
        print "Found %d GPU(s)", count

    return count
ImportError
dataset/ETHPy150Open NervanaSystems/neon/neon/backends/util/check_gpu.py/get_device_count
2,008
def read_rules(self):
    if not exists(self.rules_file):
        self.clear()
        return

    # Only read if the rules file has been modified
    try:
        mtime = getmtime(self.rules_file)
    except __HOLE__:
        log.err("Failed to get mtime of %s" % self.rules_file)
        return
    if mtime <= self.rules_last_read:
        return

    # Read new rules
    log.aggregator("reading new aggregation rules from %s" % self.rules_file)
    new_rules = []
    for line in open(self.rules_file):
        line = line.strip()
        if line.startswith('#') or not line:
            continue

        rule = self.parse_definition(line)
        new_rules.append(rule)

    log.aggregator("clearing aggregation buffers")
    BufferManager.clear()
    self.rules = new_rules
    self.rules_last_read = mtime
OSError
dataset/ETHPy150Open graphite-project/carbon/lib/carbon/aggregator/rules.py/RuleManager.read_rules
2,009
def parse_definition(self, line):
    try:
        left_side, right_side = line.split('=', 1)
        output_pattern, frequency = left_side.split()
        method, input_pattern = right_side.split()
        frequency = int( frequency.lstrip('(').rstrip(')') )
        return AggregationRule(input_pattern, output_pattern, method, frequency)
    except __HOLE__:
        log.err("Failed to parse line: %s" % line)
        raise
ValueError
dataset/ETHPy150Open graphite-project/carbon/lib/carbon/aggregator/rules.py/RuleManager.parse_definition
2,010
def get_aggregate_metric(self, metric_path):
    if metric_path in self.cache:
        return self.cache[metric_path]

    match = self.regex.match(metric_path)
    result = None

    if match:
        extracted_fields = match.groupdict()
        try:
            result = self.output_template % extracted_fields
        except __HOLE__:
            log.err("Failed to interpolate template %s with fields %s" %
                    (self.output_template, extracted_fields))

    if result:
        self.cache[metric_path] = result

    return result
TypeError
dataset/ETHPy150Open graphite-project/carbon/lib/carbon/aggregator/rules.py/AggregationRule.get_aggregate_metric
2,011
def refresh_cache(f):
    """Decorator to update the instance_info_cache

    Requires context and instance as function args
    """
    argspec = inspect.getargspec(f)

    @functools.wraps(f)
    def wrapper(self, context, *args, **kwargs):
        try:
            # get the instance from arguments (or raise ValueError)
            instance = kwargs.get('instance')
            if not instance:
                instance = args[argspec.args.index('instance') - 2]
        except __HOLE__:
            msg = _('instance is a required argument to use @refresh_cache')
            raise Exception(msg)

        with lockutils.lock('refresh_cache-%s' % instance.uuid):
            # We need to call the wrapped function with the lock held to ensure
            # that it can call _get_instance_nw_info safely.
            res = f(self, context, *args, **kwargs)
            update_instance_cache_with_nw_info(self, context, instance,
                                               nw_info=res)
        # return the original function's return value
        return res
    return wrapper
ValueError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/network/base_api.py/refresh_cache
2,012
def render(self, name, value, attrs=None):
    elements = []
    try:
        values = json.loads(value)
    except __HOLE__:
        values = {}

    # value sometimes come as unicode and we need to treat it
    if type(values) == unicode:
        values = json.loads(values)

    objs = self.model.objects.filter(
        application__contains=self.attrs.get('_model', None))

    for obj in objs:
        o = {}
        o['name'] = obj.name
        o['slug'] = obj.slug
        element_attr = {}
        element_attr['name'] = obj.name
        element_attr['slug'] = obj.slug
        """
        element_attr['value'] = '1'
        element_attr['obj_value'] = values.get(obj.slug, '')
        """
        element_attr['obj_value'] = values.get(obj.slug, '')
        if obj.type in ["checkbox", "radio"]:
            fo = FieldOption.objects.filter(field=obj)
            if obj.type == "checkbox":
                obj_value = []
                for i in fo:
                    key = "{0}_{1}".format(obj.slug, i.option.slug)
                    obj_value.append(values.get(key, ''))
                element_attr['list'] = zip(fo, obj_value)
                del element_attr['obj_value']
            else:
                element_attr['list'] = fo
        o['element'] = render_to_string(
            "admin/opps/fields/json_{0}.html".format(obj.type),
            dictionary=element_attr
        )
        elements.append(o)

    # OPPS Editor params
    # This is ugly as hell but for now it was the only way of getting this
    # working DRY
    js = CONFIG.get('js')[0]

    # must pass an string with commas on plugins
    plugins = ''
    for item in CONFIG.get('plugins'):
        line = ','.join(item.split())
        line += ','
        plugins += line

    language = CONFIG.get('language')
    theme = CONFIG.get('theme', 'modern')
    file_browser_callback = CONFIG.get('file_browser_callback')

    return render_to_string(
        "admin/opps/fields/json.html",
        {"elements": elements,
         "name": name,
         "value": value,
         "js": js,
         "theme": theme,
         "plugins": plugins,
         "language": language,
         "file_browser_callback": file_browser_callback,
         })
TypeError
dataset/ETHPy150Open opps/opps/opps/fields/widgets.py/JSONField.render
2,013
def update(self):
    '''
    Update the form in background
    '''
    # get the information
    try:
        disk_info = self.statistics['Disk']['text']['/']
        swap_info = self.statistics['Memory']['text']['swap_memory']
        memory_info = self.statistics['Memory']['text']['memory']
        processes_info = self.statistics['Process']['text']
        system_info = self.statistics['System']['text']
        cpu_info = self.statistics['CPU']['graph']

        # overview
        row1 = "Disk Usage (/) {4}{0: <6}/{1: >6} MB{4}{2: >2} %{5}Processes{4}{3: <8}".format(
            disk_info["used"], disk_info["total"], disk_info["percentage"],
            processes_info["running_processes"],
            " "*int(4*self.X_SCALING_FACTOR), " "*int(9*self.X_SCALING_FACTOR))

        row2 = "Swap Memory {4}{0: <6}/{1: >6} MB{4}{2: >2} %{5}Threads {4}{3: <8}".format(
            swap_info["active"], swap_info["total"], swap_info["percentage"],
            processes_info["running_threads"],
            " "*int(4*self.X_SCALING_FACTOR), " "*int(9*self.X_SCALING_FACTOR))

        row3 = "Main Memory {4}{0: <6}/{1: >6} MB{4}{2: >2} %{5}Boot Time{4}{3: <8}".format(
            memory_info["active"], memory_info["total"], memory_info["percentage"],
            system_info['running_time'],
            " "*int(4*self.X_SCALING_FACTOR), " "*int(9*self.X_SCALING_FACTOR))

        self.basic_stats.value = row1 + '\n' + row2 + '\n' + row3
        self.basic_stats.display()

        ### cpu_usage chart
        cpu_canvas = Canvas()
        next_peak_height = int(math.ceil((float(cpu_info['percentage'])/100)*self.CHART_HEIGHT))
        self.cpu_chart.value = (self.draw_chart(cpu_canvas,next_peak_height,'cpu'))
        self.cpu_chart.display()

        ### memory_usage chart
        memory_canvas = Canvas()
        next_peak_height = int(math.ceil((float(memory_info['percentage'])/100)*self.CHART_HEIGHT))
        self.memory_chart.value = self.draw_chart(memory_canvas,next_peak_height,'memory')
        self.memory_chart.display()

        ### processes_table
        processes_table = self.statistics['Process']['table']

        # check sorting flags
        if MEMORY_SORT:
            sorted_table = sorted(processes_table,key=lambda k:k['memory'],reverse=True)
        elif TIME_SORT:
            sorted_table = sorted(processes_table,key=lambda k:k['rawtime'],reverse=True)
        else:
            sorted_table = processes_table

        # to keep things pre computed
        temp_list = []
        for proc in sorted_table:
            if proc['user'] == system_info['user']:
                temp_list.append("{0: <30} {1: >5}{5}{2: <10}{5}{3}{5}{4: >6.2f} % \
                ".format((proc['name'][:25] + '...') if len(proc['name']) > 25 else proc['name'],
                         proc['id'],
                         proc['user'],
                         proc['time'],
                         proc['memory'],
                         " "*int(5*self.X_SCALING_FACTOR))
                )
        self.processes_table.entry_widget.values = temp_list
        self.processes_table.display()

    # catch the fucking KeyError caused to c
    # cumbersome point of reading the stats data structures
    except __HOLE__:
        pass
KeyError
dataset/ETHPy150Open black-perl/ptop/ptop/interfaces/GUI.py/PtopGUI.update
2,014
def _set_bind_addr(self, value):
    if value is None:
        self.socket_file = None
        self.socket_host = None
        self.socket_port = None
    elif isinstance(value, basestring):
        self.socket_file = value
        self.socket_host = None
        self.socket_port = None
    else:
        try:
            self.socket_host, self.socket_port = value
            self.socket_file = None
        except __HOLE__:
            raise ValueError("bind_addr must be a (host, port) tuple "
                             "(for TCP sockets) or a string (for Unix "
                             "domain sockets), not %r" % value)
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/cherrypy/cherrypy/_cpserver.py/Server._set_bind_addr
2,015
def _fore(self, color):
    def get(what):
        try:
            r = getattr(self._modifiers, what)
        except __HOLE__:
            r = getattr(self._forecolors, what)
        return r

    args = map(get, color.split(u"_"))
    return u"".join(args)
AttributeError
dataset/ETHPy150Open gabrielfalcao/couleur/couleur/__init__.py/Shell._fore
2,016
def parse_addr_spec(spec, defhost = None, defport = None):
    """Parse a host:port specification and return a 2-tuple ("host", port) as
    understood by the Python socket functions.

    >>> parse_addr_spec("192.168.0.1:9999")
    ('192.168.0.1', 9999)

    If defhost or defport are given and not None, the respective parts of the
    specification may be omitted, and will be filled in with the defaults.
    If defhost or defport are omitted or None, the respective parts of the
    specification must be given, or else a ValueError will be raised.

    >>> parse_addr_spec("192.168.0.2:8888", defhost="192.168.0.1", defport=9999)
    ('192.168.0.2', 8888)
    >>> parse_addr_spec(":8888", defhost="192.168.0.1", defport=9999)
    ('192.168.0.1', 8888)
    >>> parse_addr_spec("192.168.0.2", defhost="192.168.0.1", defport=9999)
    ('192.168.0.2', 9999)
    >>> parse_addr_spec("192.168.0.2:", defhost="192.168.0.1", defport=9999)
    ('192.168.0.2', 9999)
    >>> parse_addr_spec(":", defhost="192.168.0.1", defport=9999)
    ('192.168.0.1', 9999)
    >>> parse_addr_spec("", defhost="192.168.0.1", defport=9999)
    ('192.168.0.1', 9999)
    >>> parse_addr_spec(":")
    Traceback (most recent call last):
    [..]
    ValueError: Bad address specification ":"
    >>> parse_addr_spec(":", "", 0)
    ('', 0)

    IPv6 addresses must be enclosed in square brackets."""
    host = None
    port = None
    af = 0
    m = None
    # IPv6 syntax.
    if not m:
        m = re.match(ur'^\[(.+)\]:(\d*)$', spec)
        if m:
            host, port = m.groups()
            af = socket.AF_INET6
    if not m:
        m = re.match(ur'^\[(.+)\]$', spec)
        if m:
            host, = m.groups()
            af = socket.AF_INET6
    # IPv4/hostname/port-only syntax.
    if not m:
        try:
            host, port = spec.split(":", 1)
        except __HOLE__:
            host = spec
        if re.match(ur'^[\d.]+$', host):
            af = socket.AF_INET
        else:
            af = 0
    host = host or defhost
    port = port or defport
    if host is None or port is None:
        raise ValueError("Bad address specification \"%s\"" % spec)
    return host, int(port)
ValueError
dataset/ETHPy150Open arlolra/flashproxy/flashproxy/util.py/parse_addr_spec
2,017
def approximate_current_flow_betweenness_centrality(G, normalized=True,
                                                    weight='weight',
                                                    dtype=float, solver='full',
                                                    epsilon=0.5, kmax=10000):
    r"""Compute the approximate current-flow betweenness centrality for nodes.

    Approximates the current-flow betweenness centrality within absolute
    error of epsilon with high probability [1]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
      n is the number of nodes in G.

    weight : string or None, optional (default='weight')
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.

    dtype: data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
       Type of linear solver to use for computing the flow matrix.
       Options are "full" (uses most memory), "lu" (recommended), and
       "cg" (uses least memory).

    epsilon: float
        Absolute error tolerance.

    kmax: int
       Maximum number of sample node pairs to use for approximation.

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with betweenness centrality as the value.

    See Also
    --------
    current_flow_betweenness_centrality

    Notes
    -----
    The running time is `O((1/\epsilon^2)m{\sqrt k} \log n)`
    and the space required is `O(m)` for n nodes and m edges.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm.  Unspecified weights are set to 1.

    References
    ----------
    .. [1] Ulrik Brandes and Daniel Fleischer:
       Centrality Measures Based on Current Flow.
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
    """
    from networkx.utils import reverse_cuthill_mckee_ordering
    try:
        import numpy as np
    except __HOLE__:
        raise ImportError('current_flow_betweenness_centrality requires NumPy ',
                          'http://scipy.org/')
    try:
        from scipy import sparse
        from scipy.sparse import linalg
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires SciPy ',
                          'http://scipy.org/')
    if G.is_directed():
        raise nx.NetworkXError('current_flow_betweenness_centrality() ',
                               'not defined for digraphs.')
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    solvername = {"full": FullInverseLaplacian,
                  "lu": SuperLUInverseLaplacian,
                  "cg": CGInverseLaplacian}
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # make a copy with integer labels according to rcm ordering
    # this could be done without a copy if we really wanted to
    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
    L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight,
                                dtype=dtype, format='csc')
    C = solvername[solver](L, dtype=dtype)  # initialize solver
    betweenness = dict.fromkeys(H, 0.0)
    nb = (n-1.0)*(n-2.0)  # normalization factor
    cstar = n*(n-1)/nb
    l = 1  # parameter in approximation, adjustable
    k = l*int(np.ceil((cstar/epsilon)**2*np.log(n)))
    if k > kmax:
        raise nx.NetworkXError('Number random pairs k>kmax (%d>%d) ' % (k, kmax),
                               'Increase kmax or epsilon')
    cstar2k = cstar/(2*k)
    for i in range(k):
        s, t = random.sample(range(n), 2)
        b = np.zeros(n, dtype=dtype)
        b[s] = 1
        b[t] = -1
        p = C.solve(b)
        for v in H:
            if v == s or v == t:
                continue
            for nbr in H[v]:
                w = H[v][nbr].get(weight, 1.0)
                betweenness[v] += w*np.abs(p[v]-p[nbr])*cstar2k
    if normalized:
        factor = 1.0
    else:
        factor = nb/2.0
    # remap to original node names and "unnormalize" if required
    return dict((ordering[k], float(v*factor)) for k, v in betweenness.items())
ImportError
dataset/ETHPy150Open networkx/networkx/networkx/algorithms/centrality/current_flow_betweenness.py/approximate_current_flow_betweenness_centrality
2,018
def current_flow_betweenness_centrality(G, normalized=True, weight='weight',
                                        dtype=float, solver='full'):
    r"""Compute current-flow betweenness centrality for nodes.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths.

    Current-flow betweenness centrality is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
      n is the number of nodes in G.

    weight : string or None, optional (default='weight')
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.

    dtype: data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
       Type of linear solver to use for computing the flow matrix.
       Options are "full" (uses most memory), "lu" (recommended), and
       "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with betweenness centrality as the value.

    See Also
    --------
    approximate_current_flow_betweenness_centrality
    betweenness_centrality
    edge_betweenness_centrality
    edge_current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
    time [1]_, where `I(n-1)` is the time needed to compute the
    inverse Laplacian.  For a full matrix this is `O(n^3)` but using
    sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the
    Laplacian matrix condition number.

    The space required is `O(nw)` where `w` is the width of the sparse
    Laplacian matrix.  Worse case is `w=n` for `O(n^2)`.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm.  Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    from networkx.utils import reverse_cuthill_mckee_ordering
    try:
        import numpy as np
    except __HOLE__:
        raise ImportError('current_flow_betweenness_centrality requires NumPy ',
                          'http://scipy.org/')
    try:
        import scipy
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires SciPy ',
                          'http://scipy.org/')
    if G.is_directed():
        raise nx.NetworkXError('current_flow_betweenness_centrality() ',
                               'not defined for digraphs.')
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # make a copy with integer labels according to rcm ordering
    # this could be done without a copy if we really wanted to
    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
    betweenness = dict.fromkeys(H, 0.0)  # b[v]=0 for v in H
    for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype,
                                       solver=solver):
        pos = dict(zip(row.argsort()[::-1], range(n)))
        for i in range(n):
            betweenness[s] += (i-pos[i])*row[i]
            betweenness[t] += (n-i-1-pos[i])*row[i]
    if normalized:
        nb = (n-1.0)*(n-2.0)  # normalization factor
    else:
        nb = 2.0
    for i, v in enumerate(H):  # map integers to nodes
        betweenness[v] = float((betweenness[v]-i)*2.0/nb)
    return dict((ordering[k], v) for k, v in betweenness.items())
ImportError
dataset/ETHPy150Open networkx/networkx/networkx/algorithms/centrality/current_flow_betweenness.py/current_flow_betweenness_centrality
2,019
def edge_current_flow_betweenness_centrality(G, normalized=True,
                                             weight='weight',
                                             dtype=float, solver='full'):
    """Compute current-flow betweenness centrality for edges.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths.

    Current-flow betweenness centrality is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
      n is the number of nodes in G.

    weight : string or None, optional (default='weight')
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.

    dtype: data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
       Type of linear solver to use for computing the flow matrix.
       Options are "full" (uses most memory), "lu" (recommended), and
       "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
       Dictionary of edge tuples with betweenness centrality as the value.

    Raises
    ------
    NetworkXError
        The algorithm does not support DiGraphs.
        If the input graph is an instance of DiGraph class, NetworkXError
        is raised.

    See Also
    --------
    betweenness_centrality
    edge_betweenness_centrality
    current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
    time [1]_, where `I(n-1)` is the time needed to compute the
    inverse Laplacian.  For a full matrix this is `O(n^3)` but using
    sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the
    Laplacian matrix condition number.

    The space required is `O(nw)` where `w` is the width of the sparse
    Laplacian matrix.  Worse case is `w=n` for `O(n^2)`.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm.  Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    from networkx.utils import reverse_cuthill_mckee_ordering
    try:
        import numpy as np
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires NumPy ',
                          'http://scipy.org/')
    try:
        import scipy
    except __HOLE__:
        raise ImportError('current_flow_betweenness_centrality requires SciPy ',
                          'http://scipy.org/')
    if G.is_directed():
        raise nx.NetworkXError('edge_current_flow_betweenness_centrality ',
                               'not defined for digraphs.')
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # make a copy with integer labels according to rcm ordering
    # this could be done without a copy if we really wanted to
    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
    betweenness = (dict.fromkeys(H.edges(), 0.0))
    if normalized:
        nb = (n-1.0)*(n-2.0)  # normalization factor
    else:
        nb = 2.0
    for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype,
                                    solver=solver):
        pos = dict(zip(row.argsort()[::-1], range(1, n+1)))
        for i in range(n):
            betweenness[e] += (i+1-pos[i])*row[i]
            betweenness[e] += (n-i-pos[i])*row[i]
        betweenness[e] /= nb
    return dict(((ordering[s], ordering[t]), float(v))
                for (s, t), v in betweenness.items())

# fixture for nose tests
ImportError
dataset/ETHPy150Open networkx/networkx/networkx/algorithms/centrality/current_flow_betweenness.py/edge_current_flow_betweenness_centrality
2,020
def get_or_create_iexact(self, **kwargs):
    """
    Case insensitive title version of ``get_or_create``. Also allows for
    multiple existing results.
    """
    lookup = dict(**kwargs)
    try:
        lookup["title__iexact"] = lookup.pop("title")
    except __HOLE__:
        pass
    try:
        return self.filter(**lookup)[0], False
    except IndexError:
        return self.create(**kwargs), True
KeyError
dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/generic/managers.py/KeywordManager.get_or_create_iexact
2,021
def run_cl(argv=[]):
    logging.basicConfig(level=logging.INFO)
    try:
        app = MultioptParser(
            clsname='floyd',
            version=floyd.get_version(),
            desc_short="Static website generator",
            global_options=[
                make_option("-v", "--verbose", action="store_true", dest="verbose"),
                make_option("-d", "--debug", action="store_true", dest="debug"),
            ],
            command_set=commands,
            add_help=True,
            add_version=True)
        return app.run()
    except __HOLE__:
        cl_error('Interrupted.')
        return 1
KeyboardInterrupt
dataset/ETHPy150Open nikcub/floyd/floyd/core/command_utils.py/run_cl
2,022
def integer(x):
    try:
        int(x)
    except (ValueError, __HOLE__):
        raise ValueError("%r is not a valid integer" % x)
    else:
        return x
TypeError
dataset/ETHPy150Open cloudtools/troposphere/troposphere/validators.py/integer
2,023
def add_buffer(self, data=''):
    if data:
        self.__buffer += data

    packets = self.__buffer.split(DELIMITER)
    self.__buffer = b''

    for packet in packets:
        try:
            self.__process_packet(packet)
        except __HOLE__:
            self.__buffer = packet
ValueError
dataset/ETHPy150Open circuits/circuits/circuits/node/protocol.py/Protocol.add_buffer
2,024
def _kill(self, f):
    # close and remove file
    try:
        f.close()
    except (SystemExit, KeyboardInterrupt):
        raise
    except:
        pass
    try:
        os.unlink(f.name)
    except (SystemExit, __HOLE__):
        raise
    except:
        pass
KeyboardInterrupt
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_uu.py/UUFileTest._kill
2,025
def mutate_value(element, parts, rel_amount=None):
    i = random.randint(0, len(element.values)-1)
    val = element.values[i]
    name = element.spice_name
    if rel_amount == None:
        try:
            val[i] = log_dist(parts[name]['value'][0], parts[name]['value'][1])
        except:
            return element
    else:
        try:
            temp = val[i]*(2*random.random-1)*rel_amount
            if parts[name]['value'][0] <= temp <= parts[name]['value'][1]:
                val[i] = temp
        except:
            return element
    try:
        cost = parts[element.spice_name]['cost']
    except __HOLE__:
        cost = 0
    return Device(element.spice_name, element.nodes, cost, val)
KeyError
dataset/ETHPy150Open Ttl/evolutionary-circuits/evolutionary/chromosomes/netlist.py/mutate_value
2,026
def mutate(self):
    m = random.randint(0,7)
    i = random.randint(0,len(self.elements)-1)
    if m==0:
        #Change value of one component
        m = random.randint(0,1)
        if m==0:
            #New value
            self.elements[i] = mutate_value(self.elements[i],self.parts_list)
        else:
            #Slight change
            self.elements[i] = mutate_value(self.elements[i],self.parts_list,rel_amount=0.1)
    elif m==1:
        #Add one component if not already maximum number of components
        if len(self.elements)<self.max_parts:
            #self.elements.append(random_element(self.parts_list,self.nodes))
            self.elements.append(random_element(self.parts_list,self.nodes,fixed_node=self.get_connected_node()))
    elif m==2 and len(self.elements)>1:
        #Replace one component with open circuit
        del self.elements[i]
    elif m==3 and len(self.elements)>1:
        #Replace one component with open circuit
        nodes = self.elements[i].nodes
        random.shuffle(nodes)
        try:
            n1 = nodes[0]
            n2 = nodes[1]
        except __HOLE__:
            return None #Device doesn't have two nodes
        del self.elements[i]
        for element in self.elements:
            element.nodes = [(n1 if i==n2 else i) for i in element.nodes]
    elif m==4:
        #Replace one component keeping one node connected
        fixed_node = random.choice(self.elements[i].nodes)
        del self.elements[i]
        self.elements.append(random_element(self.parts_list,self.nodes,fixed_node=fixed_node))
    elif m==5:
        #Shuffle list of elements(better crossovers)
        random.shuffle(self.elements)
    elif m==6:
        #Change the extra_value
        if self.extra_range!=None:
            i = random.randint(0,len(self.extra_value)-1)
            self.extra_value[i] = random.uniform(*self.extra_range[i])
        else:
            self.mutate()
    elif m==7:
        #Relabel nodes
        l = len(self.elements)-1
        n1 = random.choice(self.elements[random.randint(0,l)].nodes)
        n2 = random.choice(self.elements[random.randint(0,l)].nodes)
        tries = 0
        while tries<10 or n1!=n2:
            n2 = random.choice(self.elements[random.randint(0,l)].nodes)
            tries+=1
        for element in self.elements:
            element.nodes = [(n1 if i==n2 else (n2 if i==n1 else i)) for i in element.nodes]
IndexError
dataset/ETHPy150Open Ttl/evolutionary-circuits/evolutionary/chromosomes/netlist.py/Chromosome.mutate
2,027
def parse_commits(self):
    import Commit
    events = []

    # if this is a cloned repository
    if not self.from_feed:
        repo_dir = os.path.join(settings.REPO_ROOT, self.project.url_path)

        # add the commits
        backend = get_backend(self.vcs if self.vcs != 'svn' else 'git')
        repository = backend.Repository(repo_dir)

        # inspect the last five days of commits
        for commit in repository.get_recent_commits(self.most_recent_date):
            date = commit.time
            try:
                date = (date - date.utcoffset()).replace(tzinfo=None)
            except:
                pass

            # process the diff of this commit
            try:
                diff, added, removed, changed = format_diff(commit.diff)
            except KeyError:
                diff, added, removed, changed = "", "Unknown", "Unknown", "Unknown"

            # extract the title of the commit
            try:
                commit_title = re.findall(r"^.*\.\s", commit.message)[0].strip()
            except __HOLE__:
                commit_title = commit.message.split("\n")[0]

            # format the commit message
            commit.message = re.sub(r"<p>\s+</p>", "",
                markdown(commit.message.replace(commit_title, "").decode('utf-8'),
                         safe_mode = True))

            append_unsanitized = ("<div class=\"light-bar\">{0} file{1} changed," +
                                  " {2} line{3} added, {4} line{5} removed</div>").format(
                changed, 's' if changed != 1 else '',
                added, 's' if added != 1 else '',
                removed, 's' if removed != 1 else ''
            )

            events.append(self.add_event(Commit.Commit,
                title = commit_title,
                summary = commit.message,
                date = date,
                author_name = commit.author,
                from_feed = False,
                append_unsanitized = append_unsanitized,
                extra_args = {
                    "diff": diff,
                    "repository_id": self.id,
                }))

    # this is a feed-driven repository
    else:
        for commit in feedparser.parse(self.repo_rss).entries:
            date = dateutil.parser.parse(commit.date)
            try:
                date = (date - date.utcoffset()).replace(tzinfo=None)
            except:
                pass

            events.append(self.add_event(Commit.Commit,
                title = commit.title,
                summary = commit.description,
                date = date,
                author_name = commit.author_detail['name'],
                from_feed = True,
                extra_args = {
                    "repository_id": self.id
                }))

    # find the new most recent date
    dates = [event.date for event in events if event is not None]
    dates.append(self.most_recent_date)
    self.most_recent_date = max(dates)
    self.save()
IndexError
dataset/ETHPy150Open rcos/Observatory/observatory/dashboard/models/Repository.py/Repository.parse_commits
2,028
def clone_or_fetch(self):
    if self.from_feed:
        return

    fresh_clone = True

    # ensure that REPO_ROOT already exists
    try:
        os.makedirs(settings.REPO_ROOT, 0770)
    except __HOLE__ as e:
        pass

    # construct the name of the directory into which to clone the repository
    dest_dir = os.path.join(settings.REPO_ROOT, self.project.url_path)

    # check if we've already cloned this project
    if os.path.isdir(dest_dir):
        fresh_clone = False

    # clone the repository, or update our copy
    try:
        clone_repo_function(self.vcs)(self.clone_url, dest_dir, fresh_clone)
    except Repository.CheckoutFailureException:
        # if we couldn't update the repository, remove anything we got and try once more
        remove_repo(dest_dir)
        fresh_clone = True
        clone_repo_function(self.vcs)(self.clone_url, dest_dir, fresh_clone)
OSError
dataset/ETHPy150Open rcos/Observatory/observatory/dashboard/models/Repository.py/Repository.clone_or_fetch
2,029
def clone_svn_repo(clone_url, destination_dir, fresh_clone = False):
    if fresh_clone:
        # make the repo's directory
        try:
            os.makedirs(destination_dir, 0770)
        except __HOLE__ as e:
            pass
        clone_cmdline = ["git", "svn", "clone", clone_url, destination_dir]
    else:
        clone_cmdline = ["git", "svn", "fetch"]

    if subprocess.call(clone_cmdline, cwd = destination_dir) != 0:
        raise Repository.CheckoutFailureException(" ".join(clone_cmdline))
OSError
dataset/ETHPy150Open rcos/Observatory/observatory/dashboard/models/Repository.py/clone_svn_repo
2,030
def getImage(self):
    if pil is None or self._pilimage == 'failed':
        return None
    if self._pilimage:
        return self._pilimage
    else:
        data = self.getData()
        if not data:
            self._pilimage = 'failed'
            return None
        try:
            self._pilimage = pil.open( BytesIO(data) )
            self._pilimage.load()
        except __HOLE__ as ex:
            self._pilimage = 'failed'
            return None
        return self._pilimage
IOError
dataset/ETHPy150Open pycollada/pycollada/collada/material.py/CImage.getImage
2,031
@staticmethod
def load(collada, localscope, node):
    localscope = {}  # we have our own scope, shadow it
    params = []
    id = node.get('id')
    profilenode = node.find( tag('profile_COMMON') )
    if profilenode is None:
        raise DaeUnsupportedError('Found effect with profile other than profile_COMMON')

    #<image> can be local to a material instead of global in <library_images>
    for imgnode in profilenode.findall( tag('image') ):
        local_image = CImage.load(collada, localscope, imgnode)
        localscope[local_image.id] = local_image

        global_image_id = local_image.id
        uniquenum = 2
        while global_image_id in collada.images:
            global_image_id = local_image.id + "-" + uniquenum
            uniquenum += 1
        collada.images.append(local_image)

    Effect.getEffectParameters(collada, profilenode, localscope, params)

    tecnode = profilenode.find( tag('technique') )
    Effect.getEffectParameters(collada, tecnode, localscope, params)

    shadnode = None
    for shad in Effect.shaders:
        shadnode = tecnode.find(tag(shad))
        shadingtype = shad
        if not shadnode is None:
            break

    if shadnode is None:
        raise DaeIncompleteError('No material properties found in effect')

    props = {}
    for key in Effect.supported:
        pnode = shadnode.find( tag(key) )
        if pnode is None:
            props[key] = None
        else:
            try:
                props[key] = Effect._loadShadingParam(collada, localscope, pnode)
            except DaeMissingSampler2D as ex:
                if ex.samplerid in collada.images:
                    #Whoever exported this collada file didn't include the proper references so we will create them
                    surf = Surface(ex.samplerid + '-surface', collada.images[ex.samplerid], 'A8R8G8B8')
                    sampler = Sampler2D(ex.samplerid, surf, None, None);
                    params.append(surf)
                    params.append(sampler)
                    localscope[surf.id] = surf
                    localscope[sampler.id] = sampler
                    try:
                        props[key] = Effect._loadShadingParam(
                                collada, localscope, pnode)
                    except DaeUnsupportedError as ex:
                        props[key] = None
                        collada.handleError(ex)
            except DaeUnsupportedError as ex:
                props[key] = None
                collada.handleError(ex)  # Give the chance to ignore error and load the rest

        if key == 'transparent' and key in props and props[key] is not None:
            opaque_mode = pnode.get('opaque')
            if opaque_mode is not None and opaque_mode == OPAQUE_MODE.RGB_ZERO:
                props['opaque_mode'] = OPAQUE_MODE.RGB_ZERO
    props['xmlnode'] = node

    bumpnode = node.find('.//%s//%s' % (tag('extra'), tag('texture')))
    if bumpnode is not None:
        bumpmap = Map.load(collada, localscope, bumpnode)
    else:
        bumpmap = None

    double_sided_node = node.find('.//%s//%s' % (tag('extra'), tag('double_sided')))
    double_sided = False
    if double_sided_node is not None and double_sided_node.text is not None:
        try:
            val = int(double_sided_node.text)
            if val == 1:
                double_sided = True
        except __HOLE__:
            pass
    return Effect(id, params, shadingtype, bumpmap, double_sided, **props)
ValueError
dataset/ETHPy150Open pycollada/pycollada/collada/material.py/Effect.load
2,032
@staticmethod
def _loadShadingParam( collada, localscope, node ):
    """Load from the node a definition for a material property."""
    children = node.getchildren()
    if not children:
        raise DaeIncompleteError('Incorrect effect shading parameter '+node.tag)
    vnode = children[0]
    if vnode.tag == tag('color'):
        try:
            value = tuple([ float(v) for v in vnode.text.split() ])
        except __HOLE__ as ex:
            raise DaeMalformedError('Corrupted color definition in effect '+id)
        except IndexError as ex:
            raise DaeMalformedError('Corrupted color definition in effect '+id)
    elif vnode.tag == tag('float'):
        try:
            value = float(vnode.text)
        except ValueError as ex:
            raise DaeMalformedError('Corrupted float definition in effect '+id)
    elif vnode.tag == tag('texture'):
        value = Map.load(collada, localscope, vnode)
    elif vnode.tag == tag('param'):
        refid = vnode.get('ref')
        if refid is not None and refid in localscope:
            value = localscope[refid]
        else:
            return None
    else:
        raise DaeUnsupportedError('Unknown shading param definition ' + \
              vnode.tag)
    return value
ValueError
dataset/ETHPy150Open pycollada/pycollada/collada/material.py/Effect._loadShadingParam
2,033
def get_current(self):
    """
    Returns the current ``Site`` based on the SITE_ID in the
    project's settings. The ``Site`` object is cached the first
    time it's retrieved from the database.
    """
    from django.conf import settings
    try:
        sid = settings.SITE_ID
    except __HOLE__:
        from django.core.exceptions import ImproperlyConfigured
        raise ImproperlyConfigured("You're using the Django \"sites framework\" without having set the SITE_ID setting. Create a site in your database and set the SITE_ID setting to fix this error.")
    try:
        current_site = SITE_CACHE[sid]
    except KeyError:
        current_site = self.get(pk=sid)
        SITE_CACHE[sid] = current_site
    return current_site
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/contrib/sites/models.py/SiteManager.get_current
2,034
def clear_site_cache(sender, **kwargs):
    """
    Clears the cache (if primed) each time a site is saved or deleted
    """
    instance = kwargs['instance']
    try:
        del SITE_CACHE[instance.pk]
    except __HOLE__:
        pass
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/contrib/sites/models.py/clear_site_cache
2,035
@register.filter
def date(value, arg=None):
    """Formats a date according to the given format."""
    if not value:
        return u''
    if arg is None:
        arg = settings.DATE_FORMAT
    try:
        return formats.date_format(value, arg)
    except __HOLE__:
        try:
            return format(value, arg)
        except AttributeError:
            return ''
AttributeError
dataset/ETHPy150Open crate-archive/crate-site/crateweb/apps/core/helpers.py/date
2,036
def ensure_oauth2(provider):
    """
    Decorator to ensure a user has been authenticated with the given oauth2 provider.

    Usage:

        from tethys_sdk.services import ensure_oauth2, get_dataset_engine

        @ensure_oauth2('hydroshare-oauth2')
        def controller(request):
            engine = get_dataset_engine('default_hydroshare', request=request)
            return render(request, 'my_template.html', {})

    Note that calling get_dataset_engine for a hydroshare dataset engine will
    throw an error if it is not called in a function that is decorated with
    the ensure_oauth2 decorator.
    """
    def decorator(function):
        @wraps(function)
        def wrapper(request, *args, **kwargs):
            user = request.user

            # Assemble redirect response
            redirect_url = reverse('social:begin', args=[provider]) + '?next={0}'.format(request.path)
            redirect_response = redirect(redirect_url)

            try:
                user.social_auth.get(provider=provider)
            except ObjectDoesNotExist:
                # User is not associated with that provider
                return redirect_response
            except __HOLE__:
                # Anonymous User needs to be logged in and associated with that provider
                # return redirect('/login/{0}/?next={1}'.format(provider, request.path))
                return redirect_response
            except AuthAlreadyAssociated:
                # Another user has already used the account to associate...
                raise
            except:
                raise

            return function(request, *args, **kwargs)
        return wrapper
    return decorator
AttributeError
dataset/ETHPy150Open tethysplatform/tethys/tethys_services/utilities.py/ensure_oauth2
2,037
def initialize_engine_object(engine, endpoint, apikey=None, username=None, password=None, request=None):
    """
    Initialize a DatasetEngine object from a string that points at the engine class.
    """
    # Constants
    HYDROSHARE_OAUTH_PROVIDER_NAME = 'hydroshare'

    # Derive import parts from engine string
    engine_split = engine.split('.')
    module_string = '.'.join(engine_split[:-1])
    engine_class_string = engine_split[-1]

    # Import
    module = __import__(module_string, fromlist=[engine_class_string])
    EngineClass = getattr(module, engine_class_string)

    # Get Token for HydroShare interactions
    if EngineClass is HydroShareDatasetEngine:
        user = request.user

        try:
            # social = user.social_auth.get(provider='google-oauth2')
            social = user.social_auth.get(provider=HYDROSHARE_OAUTH_PROVIDER_NAME)
            apikey = social.extra_data['access_token']
        except __HOLE__:
            # User is not associated with that provider
            # Need to prompt for association
            raise AuthException("HydroShare authentication required. To automate the authentication prompt decorate "
                                "your controller function with the @ensure_oauth('hydroshare') decorator.")
        except AttributeError:
            # Anonymous User...
            raise
        except AuthAlreadyAssociated:
            raise
        except:
            raise

    # Create Engine Object
    engine_instance = EngineClass(endpoint=endpoint, apikey=apikey, username=username, password=password)
    return engine_instance
ObjectDoesNotExist
dataset/ETHPy150Open tethysplatform/tethys/tethys_services/utilities.py/initialize_engine_object
2,038
def abstract_is_link(process):
    """
    Determine if the process abstract is a link.

    Args:
        process (owslib.wps.Process): WPS Process object.

    Returns:
        (bool): True if abstract is a link, False otherwise.
    """
    try:
        abstract = process.abstract
    except __HOLE__:
        return False

    if abstract[:4] == 'http':
        return True
    else:
        return False
AttributeError
dataset/ETHPy150Open tethysplatform/tethys/tethys_services/utilities.py/abstract_is_link
2,039
def activate_wps(wps, endpoint, name):
    """
    Activate a WebProcessingService object by calling getcapabilities() on it
    and handle errors appropriately.

    Args:
        wps (owslib.wps.WebProcessingService): A owslib.wps.WebProcessingService object.

    Returns:
        (owslib.wps.WebProcessingService): Returns an activated
        WebProcessingService object or None if it is invalid.
    """
    # Initialize the object with get capabilities call
    try:
        wps.getcapabilities()
    except __HOLE__ as e:
        if e.code == 404:
            e.msg = 'The WPS service could not be found at given endpoint "{0}" for site WPS service ' \
                    'named "{1}". Check the configuration of the WPS service in your ' \
                    'settings.py.'.format(endpoint, name)
            raise e
        else:
            raise e
    except URLError as e:
        return None
    except:
        raise

    return wps
HTTPError
dataset/ETHPy150Open tethysplatform/tethys/tethys_services/utilities.py/activate_wps
2,040
@register.tag
def settings(parser, token):
    var_name = None
    try:
        tag_name, arg = token.contents.split(None, 1)
    except __HOLE__:
        raise template.TemplateSyntaxError(
            "%r tag requires arguments" % token.contents.split()[0])

    m = re.search(r'(.*?) as (\w+)', arg)
    if m:
        arg, var_name = m.groups()

    if not (arg[0] == arg[-1] and arg[0] in ('"', "'")):
        raise template.TemplateSyntaxError(
            "%r tag's argument should be in quotes" % tag_name)

    return ContextNode(django_settings.get(arg[1:-1]), var_name)
ValueError
dataset/ETHPy150Open jqb/django-settings/django_settings/templatetags/settings_tags.py/settings
2,041
def _get_templated_url(self, template, input_url, method=None):
    url = None
    try:
        if method=="members":
            match = re.match("^https://publons.com/author/(\d+)/.+", input_url)
            user_id = match.group(1)
            url = template % user_id
        else:
            match = re.match("^https://publons.com/r.*/(\d+).*", input_url)
            review_id = match.group(1)
            url = template % review_id
    except __HOLE__:
        pass
    return(url)
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/providers/publons.py/Publons._get_templated_url
2,042
def _extract_biblio(self, page, id=None):
    dict_of_keylists = {
        'title' : ['title'],
        'authors' : ['author', 'last_name'],
        'journal' : ['source', 'provider'],
        'review_url' : ['source', 'url'],
        'review_type' : ['review_type'],
        'create_date' : ['datetime_reviewed'],
        'free_fulltext_url' : ['_id', 'url'],
        'source_provider' : ['source', 'provider'],
        'source_url' : ['source', 'url']
    }
    biblio_dict = provider._extract_from_json(page, dict_of_keylists)
    biblio_dict["genre"] = "peer review"
    biblio_dict["title"] = "Review of " + biblio_dict["title"]

    if "source_provider" in biblio_dict and biblio_dict["source_provider"]:
        biblio_dict["repository"] = biblio_dict["source_provider"]
    else:
        biblio_dict["repository"] = "Publons"

    if "source_url" in biblio_dict and biblio_dict["source_url"]:
        biblio_dict["free_fulltext_url"] = biblio_dict["source_url"]  # overwrite with original source

    try:
        biblio_dict["year"] = biblio_dict["create_date"][0:4]
    except __HOLE__:
        pass

    return biblio_dict
KeyError
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/providers/publons.py/Publons._extract_biblio
2,043
def serve_forever(host, port, childnum):
    # create, bind. listen
    listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # re-use the port
    listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # put listening socket into non-blocking mode
    listen_sock.setblocking(0)
    listen_sock.bind((host, port))
    listen_sock.listen(BACKLOG)
    print('Listening on port %d ...' % port)

    # read, write, exception lists with sockets to poll
    main_rlist, wlist, elist = [listen_sock], [], []

    # prefork children
    for index in range(childnum):
        create_child(index, listen_sock)
        # watch the socket
        main_rlist.append(CHILDREN[index]['pipe'])

    FREE_CHILD_COUNT = childnum

    while True:
        # read list with sockets to poll
        rlist = main_rlist.copy()

        # if we don't have a free child, stop accepting connections
        # (although the kernel will still be queueing up new connections
        # because of the BACKLOG)
        if FREE_CHILD_COUNT == 0:
            rlist.remove(listen_sock)

        # block in select
        readables, writables, exceptions = select.select(rlist, wlist, elist)

        if listen_sock in readables:
            # new client connection, we can accept now
            try:
                conn, client_address = listen_sock.accept()
            except __HOLE__ as e:
                code, msg = e.args
                if code == errno.EINTR:
                    continue
                else:
                    raise

            # find a free child to pass the connection to
            for child in CHILDREN:
                if child['status'] == FREE:  # free
                    # mark as busy
                    child['status'] = BUSY
                    # pass the connection's descriptor to the child
                    write_fd(child['pipe'], conn.fileno())
                    # server doesn't need this connection any more
                    conn.close()
                    FREE_CHILD_COUNT -= 1
                    break
            else:
                # this shouldn't happen
                raise Exception('No free child found')

        # find newly-available children
        for child in CHILDREN:
            child_pipe = child['pipe']
            if child_pipe in readables:
                data = child_pipe.recv(1)
                if not data:
                    # child terminated
                    raise Exception('Child terminated unexpectedly')
                child['status'] = FREE  # free
                FREE_CHILD_COUNT += 1
IOError
dataset/ETHPy150Open rspivak/csdesign/server04.py/serve_forever
2,044
@login_required
@transaction.atomic
def create_invoice(request):
    pk = request.GET.get('project', None)
    to_date = request.GET.get('to_date', None)
    if not (pk and to_date):
        raise Http404
    from_date = request.GET.get('from_date', None)
    if not request.user.has_perm('crm.generate_project_invoice'):
        return HttpResponseForbidden('Forbidden')
    try:
        to_date = utils.add_timezone(
            datetime.datetime.strptime(to_date, '%Y-%m-%d'))
        if from_date:
            from_date = utils.add_timezone(
                datetime.datetime.strptime(from_date, '%Y-%m-%d'))
    except (__HOLE__, OverflowError):
        raise Http404
    project = get_object_or_404(Project, pk=pk)
    initial = {
        'project': project,
        'user': request.user,
        'from_date': from_date,
        'to_date': to_date,
    }
    entries_query = {
        'status': Entry.APPROVED,
        'end_time__lt': to_date + relativedelta(days=1),
        'project__id': project.id
    }
    if from_date:
        entries_query.update({'end_time__gte': from_date})
    invoice_form = InvoiceForm(request.POST or None, initial=initial)
    if request.POST and invoice_form.is_valid():
        entries = Entry.no_join.filter(**entries_query)
        if entries.exists():
            # LOCK the entries until our transaction completes - nobody
            # else will be able to lock or change them - see
            # https://docs.djangoproject.com/en/1.4/ref/models/querysets/#select-for-update
            # (This feature requires Django 1.4.)
            # If more than one request is trying to create an invoice from
            # these same entries, then the second one to get to this line will
            # throw a DatabaseError. That can happen if someone double-clicks
            # the Create Invoice button.
            try:
                entries.select_for_update(nowait=True)
            except DatabaseError:
                # Whoops, we lost the race
                messages.add_message(request, messages.ERROR,
                                     "Lock error trying to get entries")
            else:
                # We got the lock, we can carry on
                invoice = invoice_form.save()
                Entry.no_join.filter(pk__in=entries).update(
                    status=invoice.status, entry_group=invoice)
                messages.add_message(request, messages.INFO,
                                     "Invoice created")
                return HttpResponseRedirect(reverse('view_invoice',
                                                    args=[invoice.pk]))
        else:
            messages.add_message(request, messages.ERROR,
                                 "No entries for invoice")
    else:
        entries = Entry.objects.filter(**entries_query)
        entries = entries.order_by('start_time')
        if not entries:
            raise Http404
    billable_entries = entries.filter(activity__billable=True) \
        .select_related()
    nonbillable_entries = entries.filter(activity__billable=False) \
        .select_related()
    return render(request, 'timepiece/invoice/create.html', {
        'invoice_form': invoice_form,
        'billable_entries': billable_entries,
        'nonbillable_entries': nonbillable_entries,
        'project': project,
        'billable_totals': HourGroup.objects.summaries(billable_entries),
        'nonbillable_totals': HourGroup.objects.summaries(nonbillable_entries),
        'from_date': from_date,
        'to_date': to_date,
    })
ValueError
dataset/ETHPy150Open caktus/django-timepiece/timepiece/contracts/views.py/create_invoice
2,045
def flush(self, n=4096):
    """
    Flush `n` bytes of data from the reader Stream to the writer Stream.

    Returns the number of bytes that were actually flushed. A return value
    of zero is not an error.

    If EOF has been reached, `None` is returned.
    """
    try:
        read = self.from_stream.read(n)

        if read is None or len(read) == 0:
            self.eof = True
            if self.propagate_close:
                self.to_stream.close()
            return None

        return self.to_stream.write(read)
    except __HOLE__ as e:
        if e.errno != errno.EPIPE:
            raise e
OSError
dataset/ETHPy150Open d11wtq/dockerpty/dockerpty/io.py/Pump.flush
2,046
def get_available_name(self, name):
    """
    Returns a filename that's free on the target storage system, and
    available for new content to be written to.
    """
    # If the filename already exists, keep adding an underscore to the name
    # of the file until the filename doesn't exist.
    while self.exists(name):
        try:
            dot_index = name.rindex('.')
        except __HOLE__:
            # filename has no dot
            name += '_'
        else:
            name = name[:dot_index] + '_' + name[dot_index:]
    return name
ValueError
dataset/ETHPy150Open dcramer/django-compositepks/django/core/files/storage.py/Storage.get_available_name
2,047
def _save(self, name, content):
    full_path = self.path(name)

    directory = os.path.dirname(full_path)
    if not os.path.exists(directory):
        os.makedirs(directory)
    elif not os.path.isdir(directory):
        raise IOError("%s exists and is not a directory." % directory)

    # There's a potential race condition between get_available_name and
    # saving the file; it's possible that two threads might return the
    # same name, at which point all sorts of fun happens. So we need to
    # try to create the file, but if it already exists we have to go back
    # to get_available_name() and try again.
    while True:
        try:
            # This file has a file path that we can move.
            if hasattr(content, 'temporary_file_path'):
                file_move_safe(content.temporary_file_path(), full_path)
                content.close()

            # This is a normal uploadedfile that we can stream.
            else:
                # This fun binary flag incantation makes os.open throw an
                # OSError if the file already exists before we open it.
                fd = os.open(full_path, os.O_WRONLY | os.O_CREAT | os.O_EXCL | getattr(os, 'O_BINARY', 0))
                try:
                    locks.lock(fd, locks.LOCK_EX)
                    for chunk in content.chunks():
                        os.write(fd, chunk)
                finally:
                    locks.unlock(fd)
                    os.close(fd)
        except __HOLE__, e:
            if e.errno == errno.EEXIST:
                # Ooops, the file exists. We need a new file name.
                name = self.get_available_name(name)
                full_path = self.path(name)
            else:
                raise
        else:
            # OK, the file save worked. Break out of the loop.
            break

    if settings.FILE_UPLOAD_PERMISSIONS is not None:
        os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS)

    return name
OSError
dataset/ETHPy150Open dcramer/django-compositepks/django/core/files/storage.py/FileSystemStorage._save
2,048
def path(self, name):
    try:
        path = safe_join(self.location, name)
    except __HOLE__:
        raise SuspiciousOperation("Attempted access to '%s' denied." % name)
    return os.path.normpath(path)
ValueError
dataset/ETHPy150Open dcramer/django-compositepks/django/core/files/storage.py/FileSystemStorage.path
2,049
def get_storage_class(import_path):
    try:
        dot = import_path.rindex('.')
    except ValueError:
        raise ImproperlyConfigured("%s isn't a storage module." % import_path)
    module, classname = import_path[:dot], import_path[dot+1:]
    try:
        mod = __import__(module, {}, {}, [''])
    except __HOLE__, e:
        raise ImproperlyConfigured('Error importing storage module %s: "%s"' % (module, e))
    try:
        return getattr(mod, classname)
    except AttributeError:
        raise ImproperlyConfigured('Storage module "%s" does not define a "%s" class.' % (module, classname))
ImportError
dataset/ETHPy150Open dcramer/django-compositepks/django/core/files/storage.py/get_storage_class
2,050
def replace_wikiwords(value): def replace_wikiword(m): slug = m.group(1) try: page = WikiPage.objects.get(slug=slug) kwargs = { 'slug': slug, } url = reverse('wakawaka_page', kwargs=kwargs) return r'<a href="%s">%s</a>' % (url, slug) except __HOLE__: kwargs = { 'slug': slug, } url = reverse('wakawaka_edit', kwargs=kwargs) return r'<a class="doesnotexist" href="%s">%s</a>' % (url, slug) return mark_safe(WIKI_WORDS_REGEX.sub(replace_wikiword, value))
ObjectDoesNotExist
dataset/ETHPy150Open bartTC/django-wakawaka/wakawaka/templatetags/wakawaka_tags.py/replace_wikiwords
2,051
def get_mapper(): """Returns the mapper.""" global _CACHED_MAPPER if not _CACHED_MAPPER: name = getattr(settings, 'MULTITENANT_MAPPER_CLASS', None) if not name: raise ImproperlyConfigured("You must specify MULTITENANT_MAPPER_CLASS in settings.") try: module_path, member_name = name.rsplit(".", 1) module = import_module(module_path) cls = getattr(module, member_name) except (ValueError, __HOLE__, AttributeError), e: raise ImportError("Could not import mapper: %s: %s" % (name, e)) if not issubclass(cls, TenantMapper): raise ImproperlyConfigured('%s does not subclass db_multitenant.mapper.TenantMapper', name) _CACHED_MAPPER = cls() return _CACHED_MAPPER
ImportError
dataset/ETHPy150Open mik3y/django-db-multitenant/db_multitenant/utils.py/get_mapper
2,052
def trigger_event(self, event, *args, **kwargs): """ Trigger that calls all of the specified events associated with this class. """ for event_class, event_callbacks in six.iteritems(self._event_callbacks): if not isinstance(self, event_class): continue for callback in event_callbacks.get(event, []): try: # callbacks are protected callback(*args, **kwargs) except __HOLE__: return except BaseException: logger.exception("Error in event callback for %r", event)
KeyboardInterrupt
dataset/ETHPy150Open spotify/luigi/luigi/task.py/Task.trigger_event
2,053
def getpaths(struct):
    """
    Maps all Tasks in a structured data object to their .output().
    """
    if isinstance(struct, Task):
        return struct.output()
    elif isinstance(struct, dict):
        r = {}
        for k, v in six.iteritems(struct):
            r[k] = getpaths(v)
        return r
    else:
        # Remaining case: assume struct is iterable...
        try:
            s = list(struct)
        except __HOLE__:
            raise Exception('Cannot map %s to Task/dict/list' % str(struct))
        return [getpaths(r) for r in s]
TypeError
dataset/ETHPy150Open spotify/luigi/luigi/task.py/getpaths
2,054
def flatten(struct):
    """
    Creates a flat list of all items in structured output (dicts, lists, items):

    .. code-block:: python

        >>> sorted(flatten({'a': 'foo', 'b': 'bar'}))
        ['bar', 'foo']
        >>> sorted(flatten(['foo', ['bar', 'troll']]))
        ['bar', 'foo', 'troll']
        >>> flatten('foo')
        ['foo']
        >>> flatten(42)
        [42]
    """
    if struct is None:
        return []
    flat = []
    if isinstance(struct, dict):
        for _, result in six.iteritems(struct):
            flat += flatten(result)
        return flat
    if isinstance(struct, six.string_types):
        return [struct]

    try:
        # if iterable
        iterator = iter(struct)
    except __HOLE__:
        return [struct]

    for result in iterator:
        flat += flatten(result)
    return flat
TypeError
dataset/ETHPy150Open spotify/luigi/luigi/task.py/flatten
2,055
@extension(EXT_D) def d_hook(self, node): # create the compilation task: cpp or cc task = self.create_task(self.generate_headers and 'd_with_header' or 'd') try: obj_ext = self.obj_ext except __HOLE__: obj_ext = '_%d.o' % self.idx task.inputs = [node] task.outputs = [node.change_ext(obj_ext)] self.compiled_tasks.append(task) if self.generate_headers: header_node = node.change_ext(self.env['DHEADER_ext']) task.outputs += [header_node]
AttributeError
dataset/ETHPy150Open appcelerator-archive/poc-nodejs-desktop/Resources/nodejs/builds/linux/node/lib/node/wafadmin/Tools/d.py/d_hook
2,056
def handle(self, *args, **options): try: build_path, version, build_number = args except __HOLE__: raise CommandError('Usage: %s\n%s' % (self.args, self.help)) try: build_number = int(build_number) except ValueError: raise CommandError("Build Number %r is not an integer" % build_number) try: CommCareBuild.create_from_zip(build_path, version, build_number) except Exception as e: raise CommandError("%s" % e) self.stdout.write('Build %s #%s created\n' % (version, build_number)) self.stdout.write('You can see a list of builds at [your-server]/builds/\n')
ValueError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/builds/management/commands/add_commcare_build.py/Command.handle
2,057
def receive_request(self, request): """ :type request: Request :param request: the incoming request :rtype : Transaction """ logger.debug("receive_request - " + str(request)) try: host, port = request.source except __HOLE__: return key_mid = hash(str(host) + str(port) + str(request.mid)) key_token = hash(str(host) + str(port) + str(request.token)) if key_mid in self._transactions.keys(): # Duplicated self._transactions[key_mid].request.duplicated = True transaction = self._transactions[key_mid] else: request.timestamp = time.time() transaction = Transaction(request=request, timestamp=request.timestamp) with transaction: self._transactions[key_mid] = transaction self._transactions_token[key_token] = transaction return transaction
AttributeError
dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/layers/messagelayer.py/MessageLayer.receive_request
2,058
def receive_response(self, response): """ :type response: Response :param response: :rtype : Transaction """ logger.debug("receive_response - " + str(response)) try: host, port = response.source except __HOLE__: return key_mid = hash(str(host) + str(port) + str(response.mid)) key_mid_multicast = hash(str(defines.ALL_COAP_NODES) + str(port) + str(response.mid)) key_token = hash(str(host) + str(port) + str(response.token)) key_token_multicast = hash(str(defines.ALL_COAP_NODES) + str(port) + str(response.token)) if key_mid in self._transactions.keys(): transaction = self._transactions[key_mid] elif key_token in self._transactions_token: transaction = self._transactions_token[key_token] elif key_mid_multicast in self._transactions.keys(): transaction = self._transactions[key_mid_multicast] elif key_token_multicast in self._transactions_token: transaction = self._transactions_token[key_token_multicast] else: logger.warning("Un-Matched incoming response message " + str(host) + ":" + str(port)) return None, False send_ack = False if response.type == defines.Types["CON"]: send_ack = True transaction.request.acknowledged = True transaction.completed = True transaction.response = response if transaction.retransmit_stop is not None: transaction.retransmit_stop.set() return transaction, send_ack
AttributeError
dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/layers/messagelayer.py/MessageLayer.receive_response
2,059
def receive_empty(self, message): """ :type message: Message :param message: :rtype : Transaction """ logger.debug("receive_empty - " + str(message)) try: host, port = message.source except __HOLE__: return key_mid = hash(str(host) + str(port) + str(message.mid)) key_mid_multicast = hash(str(defines.ALL_COAP_NODES) + str(port) + str(message.mid)) key_token = hash(str(host) + str(port) + str(message.token)) key_token_multicast = hash(str(defines.ALL_COAP_NODES) + str(port) + str(message.token)) if key_mid in self._transactions.keys(): transaction = self._transactions[key_mid] elif key_token in self._transactions_token: transaction = self._transactions_token[key_token] elif key_mid_multicast in self._transactions.keys(): transaction = self._transactions[key_mid_multicast] elif key_token_multicast in self._transactions_token: transaction = self._transactions_token[key_token_multicast] else: logger.warning("Un-Matched incoming empty message " + str(host) + ":" + str(port)) return None if message.type == defines.Types["ACK"]: if not transaction.request.acknowledged: transaction.request.acknowledged = True elif not transaction.response.acknowledged: transaction.response.acknowledged = True elif message.type == defines.Types["RST"]: if not transaction.request.acknowledged: transaction.request.rejected = True elif not transaction.response.acknowledged: transaction.response.rejected = True if transaction.retransmit_stop is not None: transaction.retransmit_stop.set() return transaction
AttributeError
dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/layers/messagelayer.py/MessageLayer.receive_empty
2,060
def send_request(self, request):
    """

    :type request: Request
    :param request:
    """
    logger.debug("send_request - " + str(request))
    assert isinstance(request, Request)
    try:
        host, port = request.destination
    except __HOLE__:
        return
    request.timestamp = time.time()
    transaction = Transaction(request=request, timestamp=request.timestamp)
    if transaction.request.type is None:
        transaction.request.type = defines.Types["CON"]
    if transaction.request.mid is None:
        transaction.request.mid = self._current_mid
        self._current_mid = (self._current_mid + 1) % 65535
    key_mid = hash(str(host) + str(port) + str(request.mid))
    self._transactions[key_mid] = transaction

    key_token = hash(str(host) + str(port) + str(request.token))
    self._transactions_token[key_token] = transaction

    return self._transactions[key_mid]
AttributeError
dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/layers/messagelayer.py/MessageLayer.send_request
2,061
def send_response(self, transaction):
    """

    :type transaction: Transaction
    :param transaction:
    """
    logger.debug("send_response - " + str(transaction.response))
    if transaction.response.type is None:
        if transaction.request.type == defines.Types["CON"] and not transaction.request.acknowledged:
            transaction.response.type = defines.Types["ACK"]
            transaction.response.mid = transaction.request.mid
            transaction.response.acknowledged = True
            transaction.completed = True
        elif transaction.request.type == defines.Types["NON"]:
            transaction.response.type = defines.Types["NON"]
        else:
            transaction.response.type = defines.Types["CON"]
    if transaction.response.mid is None:
        transaction.response.mid = self._current_mid
        self._current_mid = (self._current_mid + 1) % 65535
    try:
        host, port = transaction.response.destination
    except __HOLE__:
        return
    key_mid = hash(str(host) + str(port) + str(transaction.response.mid))
    self._transactions[key_mid] = transaction
    transaction.request.acknowledged = True
    return transaction
AttributeError
dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/layers/messagelayer.py/MessageLayer.send_response
2,062
def send_empty(self, transaction, related, message): """ :param transaction: :type message: Message :param message: """ logger.debug("send_empty - " + str(message)) if transaction is None: try: host, port = message.destination except __HOLE__: return key_mid = hash(str(host) + str(port) + str(message.mid)) key_token = hash(str(host) + str(port) + str(message.token)) if key_mid in self._transactions: transaction = self._transactions[key_mid] related = transaction.response elif key_token in self._transactions_token: transaction = self._transactions_token[key_token] related = transaction.response else: return message if message.type == defines.Types["ACK"]: if transaction.request == related: transaction.request.acknowledged = True transaction.completed = True message._mid = transaction.request.mid message.code = 0 message.token = transaction.request.token message.destination = transaction.request.source elif transaction.response == related: transaction.response.acknowledged = True transaction.completed = True message._mid = transaction.response.mid message.code = 0 message.token = transaction.response.token message.destination = transaction.response.source elif message.type == defines.Types["RST"]: if transaction.request == related: transaction.request.rejected = True message._mid = transaction.request.mid message.code = 0 message.token = transaction.request.token message.destination = transaction.request.source elif transaction.response == related: transaction.response.rejected = True transaction.completed = True message._mid = transaction.response.mid message.code = 0 message.token = transaction.response.token message.destination = transaction.response.source return message
AttributeError
dataset/ETHPy150Open Tanganelli/CoAPthon/coapthon/layers/messagelayer.py/MessageLayer.send_empty
2,063
def signal_name(signum): # Hackety-hack-hack: is there really no better way to reverse lookup the # signal name? If you read this and know a way: please provide a patch :) try: return _signames[signum] except __HOLE__: return 'SIG_UNKNOWN'
KeyError
dataset/ETHPy150Open bretth/django-pq/pq/worker.py/signal_name
2,064
def _install_signal_handlers(self): """Installs signal handlers for handling SIGINT and SIGTERM gracefully. """ def request_force_stop(signum, frame): """Terminates the application (cold shutdown). """ self.log.warning('Cold shut down.') # Take down the horse with the worker if self.horse_pid: msg = 'Taking down horse %d with me.' % self.horse_pid self.log.warning(msg) try: os.kill(self.horse_pid, signal.SIGKILL) except __HOLE__ as e: # ESRCH ("No such process") is fine with us if e.errno != errno.ESRCH: self.log.debug('Horse already down.') raise raise SystemExit() def request_stop(signum, frame): """Stops the current worker loop but waits for child processes to end gracefully (warm shutdown). """ self.log.debug('Got signal %s.' % signal_name(signum)) signal.signal(signal.SIGINT, request_force_stop) signal.signal(signal.SIGTERM, request_force_stop) msg = 'Warm shut down requested.' self.log.warning(msg) # If shutdown is requested in the middle of a job, wait until # finish before shutting down if self.state == 'busy': self._stopped = True self.log.debug('Stopping after current horse is finished. ' 'Press Ctrl+C again for a cold shutdown.') else: raise StopRequested() signal.signal(signal.SIGINT, request_stop) signal.signal(signal.SIGTERM, request_stop)
OSError
dataset/ETHPy150Open bretth/django-pq/pq/worker.py/Worker._install_signal_handlers
2,065
def fork_and_perform_job(self, job): """Spawns a work horse to perform the actual work and passes it a job. The worker will wait for the work horse and make sure it executes within the given timeout bounds, or will end the work horse with SIGALRM. """ child_pid = os.fork() if child_pid == 0: self.main_work_horse(job) else: self._horse_pid = child_pid self.procline('Forked %d at %d' % (child_pid, time.time())) while True: try: os.waitpid(child_pid, 0) break except __HOLE__ as e: # In case we encountered an OSError due to EINTR (which is # caused by a SIGINT or SIGTERM signal during # os.waitpid()), we simply ignore it and enter the next # iteration of the loop, waiting for the child to end. In # any other case, this is some other unexpected OS error, # which we don't want to catch, so we re-raise those ones. if e.errno != errno.EINTR: raise
OSError
dataset/ETHPy150Open bretth/django-pq/pq/worker.py/Worker.fork_and_perform_job
2,066
def perform_job(self, job):
    """Performs the actual work of a job.  Will/should only be called
    inside the work horse's process.
    """

    self.procline('Processing %s from %s since %s' % (
        job.func_name,
        job.origin, time.time()))

    # do it this way to avoid the extra sql call through job
    for q in self.queues:
        if q.name == job.queue_id:
            break
    try:
        with death_penalty_after(job.timeout or PQ_DEFAULT_JOB_TIMEOUT):
            rv = job.perform()

        # Pickle the result in the same try-except block since we need to
        # use the same exc handling when pickling fails
        job.result = rv
        job.status = Job.FINISHED
        job.ended_at = now()

        job.result_ttl = job.get_ttl(self.default_result_ttl)
        if job.result_ttl > 0:
            ttl = timedelta(seconds=job.result_ttl)
            job.expired_at = job.ended_at + ttl

        if job.result_ttl != 0:
            job.save()
        else:
            job.delete()

    except:
        job.status = Job.FAILED
        job.save()
        if job.flow_id:
            Flow.handle_failed(job, q)
        self.handle_exception(job, *sys.exc_info())
        return False

    if q.serial:
        q.release_lock()
    if rv is None:
        self.log.info('Job OK')
    else:
        # six's u() doesn't seem compatible
        # with converting an integer
        try:
            msg = unicode(rv)
        except __HOLE__:
            msg = str(rv)
        self.log.info('Job OK, result = %s' % (yellow(msg),))
    if job.flow_id:
        Flow.handle_result(job, q)

    if job.result_ttl == 0:
        self.log.info('Result discarded immediately.')
    elif job.result_ttl > 0:
        self.log.info('Result is kept for %d seconds.' % job.result_ttl)
    else:
        self.log.warning('Result will never expire, clean up result key manually.')

    return True
NameError
dataset/ETHPy150Open bretth/django-pq/pq/worker.py/Worker.perform_job
2,067
def get_all_coverage_modules(app_module): """ Returns all possible modules to report coverage on, even if they aren't loaded. """ # We start off with the imported models.py, so we need to import # the parent app package to find the path. app_path = app_module.__name__.split('.')[:-1] app_package = __import__('.'.join(app_path), {}, {}, app_path[-1]) app_dirpath = app_package.__path__[-1] mod_list = [] for root, dirs, files in os.walk(app_dirpath): root_path = app_path + root[len(app_dirpath):].split(os.path.sep)[1:] excludes = getattr(settings, 'EXCLUDE_FROM_COVERAGE', []) if app_path[0] not in excludes: for file in files: if file.lower().endswith('.py'): mod_name = file[:-3].lower() try: mod = __import__('.'.join(root_path + [mod_name]), {}, {}, mod_name) except __HOLE__: pass else: mod_list.append(mod) return mod_list
ImportError
dataset/ETHPy150Open garethr/django-test-extensions/src/test_extensions/testrunners/codecoverage.py/get_all_coverage_modules
2,068
def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[],
        nodatabase=False, xml_out=False, callgraph=False, html_only=False):
    """
    Test runner which displays a code coverage report at the end of the
    run.
    """
    cov = coverage.coverage()
    cov.erase()
    cov.use_cache(0)

    test_labels = test_labels or getattr(settings, "TEST_APPS", None)
    cover_branch = getattr(settings, "COVERAGE_BRANCH_COVERAGE", False)
    cov = coverage.coverage(branch=cover_branch, cover_pylib=False)
    cov.use_cache(0)

    coverage_modules = []
    if test_labels:
        for label in test_labels:
            # Don't report coverage if you're only running a single
            # test case.
            if '.' not in label:
                app = get_app(label)
                coverage_modules.extend(get_all_coverage_modules(app))
    else:
        for app in get_apps():
            coverage_modules.extend(get_all_coverage_modules(app))

    morfs = filter(is_wanted_module, coverage_modules)

    if callgraph:
        try:
            import pycallgraph
            #_include = [i.__name__ for i in coverage_modules]
            _included = getattr(settings, "COVERAGE_INCLUDE_MODULES", [])
            _excluded = getattr(settings, "COVERAGE_EXCLUDE_MODULES", [])

            _included = [i.strip('*')+'*' for i in _included]
            _excluded = [i.strip('*')+'*' for i in _excluded]

            _filter_func = pycallgraph.GlobbingFilter(
                include=_included or ['*'],
                #include=['lotericas.*'],
                #exclude=[],
                #max_depth=options.max_depth,
            )

            pycallgraph_enabled = True
        except __HOLE__:
            pycallgraph_enabled = False
    else:
        pycallgraph_enabled = False

    cov.start()

    if pycallgraph_enabled:
        pycallgraph.start_trace(filter_func=_filter_func)

    if nodatabase:
        results = nodatabase_run_tests(test_labels, verbosity, interactive,
            extra_tests)
    else:
        results = django_test_runner(test_labels, verbosity, interactive,
            extra_tests)

    if callgraph and pycallgraph_enabled:
        pycallgraph.stop_trace()

    cov.stop()

    if getattr(settings, "COVERAGE_HTML_REPORT", False) or \
            os.environ.get("COVERAGE_HTML_REPORT"):
        output_dir = getattr(settings, "COVERAGE_HTML_DIRECTORY", "covhtml")
        report_method = curry(cov.html_report, directory=output_dir)
        if callgraph and pycallgraph_enabled:
            callgraph_path = output_dir + '/' + 'callgraph.png'
            pycallgraph.make_dot_graph(callgraph_path)

        print >>sys.stdout
        print >>sys.stdout, "Coverage HTML reports were output to '%s'" %output_dir
        if callgraph:
            if pycallgraph_enabled:
                print >>sys.stdout, "Call graph was output to '%s'" %callgraph_path
            else:
                print >>sys.stdout, "Call graph was not generated: Install 'pycallgraph' module to do so"

    else:
        report_method = cov.report

    if coverage_modules:
        if xml_out:
            # using the same output directory as the --xml function uses for testing
            if not os.path.isdir(os.path.join("temp", "xml")):
                os.makedirs(os.path.join("temp", "xml"))
            output_filename = 'temp/xml/coverage_output.xml'
            cov.xml_report(morfs=coverage_modules, outfile=output_filename)

        if not html_only:
            cov.report(coverage_modules, show_missing=1)

    return results
ImportError
dataset/ETHPy150Open garethr/django-test-extensions/src/test_extensions/testrunners/codecoverage.py/run_tests
2,069
def __init__(self, config):
    self._configure(config)
    self._log_handlers = []

    # Parse command line args
    self.config['server_number'] += self.config['args']['server_number']
    self.config['procname'] += "_{}".format(self.config['server_number'])
    # setup all our log handlers
    for log_cfg in self.config['loggers']:
        if log_cfg['type'] == "StreamHandler":
            kwargs = dict(stream=sys.stdout)
        else:
            kwargs = dict()
        handler = getattr(logging, log_cfg['type'])(**kwargs)
        log_level = getattr(logging, log_cfg['level'].upper())
        handler.setLevel(log_level)
        fmt = log_cfg.get('format', '%(asctime)s [%(name)s] [%(levelname)s] %(message)s')
        formatter = logging.Formatter(fmt)
        handler.setFormatter(formatter)
        self._log_handlers.append((log_cfg.get('listen'), handler))
    self.logger = self.register_logger(self.__class__.__name__)

    setproctitle.setproctitle(self.config['procname'])
    self.version = powerpool.__version__
    self.version_info = powerpool.__version_info__
    self.sha = getattr(powerpool, '__sha__', "unknown")
    self.rev_date = getattr(powerpool, '__rev_date__', "unknown")
    if self.sha == "unknown":
        # try and fetch the git version information
        try:
            output = subprocess.check_output("git show -s --format='%ci %h'",
                                             shell=True).strip().rsplit(" ", 1)
            self.sha = output[1]
            self.rev_date = output[0]
        # celery won't work with this, so set some default
        except Exception as e:
            self.logger.info("Unable to fetch git hash info: {}".format(e))

    self.algos = {}
    self.server_start = datetime.datetime.utcnow()
    self.logger.info("=" * 80)
    self.logger.info("PowerPool stratum server ({}) starting up..."
                     .format(self.config['procname']))
    if __debug__:
        self.logger.warn(
            "Python not running in optimized mode. For better performance "
            "set environment variable PYTHONOPTIMIZE=2")
        # Only try to detect blocking if running in debug mode.
        # NOTE: BlockingDetector can cause (rare) PowerPool crashes
        gevent.spawn(BlockingDetector(raise_exc=False))

    # Detect and load all the hash functions we can find
    for name, algo_data in self.config['algorithms'].iteritems():
        self.algos[name] = algo_data.copy()
        self.algos[name]['name'] = name
        mod = algo_data['module']
        try:
            self.algos[name]['module'] = import_helper(mod)
        except __HOLE__:
            self.algos[name]['module'] = None
        else:
            self.logger.info("Enabling {} hashing algorithm from module {}"
                             .format(name, mod))

    self.event_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.events_enabled = self.config['events']['enabled']
    if self.events_enabled:
        self.logger.info("Transmitting statsd formatted stats to {}:{}".format(
            self.config['events']['host'], self.config['events']['port']))
    self.events_address = (self.config['events']['host'].encode('utf8'),
                           self.config['events']['port'])

    # Setup all our stat managers
    self._min_stat_counters = []
    self._sec_stat_counters = []

    if self.config['datagram']['enabled']:
        listener = (self.config['datagram']['host'],
                    self.config['datagram']['port'] +
                    self.config['server_number'])
        self.logger.info("Turning on UDP control server on {}"
                         .format(listener))
        DatagramServer.__init__(self, listener, spawn=None)
ImportError
dataset/ETHPy150Open simplecrypto/powerpool/powerpool/main.py/PowerPool.__init__
2,070
def handle(self, data, address):
    self.logger.info("Received new command {}".format(data))
    parts = data.split(" ")
    try:
        component = self.components[parts[0]]
        func = getattr(component, parts[1])
        kwargs = {}
        args = []
        for arg in parts[2:]:
            if "=" in arg:
                k, v = arg.split("=", 1)
                kwargs[k] = v
            else:
                args.append(arg)
        if kwargs.pop('__spawn', False):
            gevent.spawn(func, *args, **kwargs)
        else:
            func(*args, **kwargs)
    except __HOLE__:
        self.logger.warn("Component {} doesn't have a method {}"
                         .format(*parts))
    except KeyError:
        self.logger.warn("Component {} doesn't exist".format(*parts))
    except Exception:
        self.logger.warn("Error in called function {}!".format(data),
                         exc_info=True)
AttributeError
dataset/ETHPy150Open simplecrypto/powerpool/powerpool/main.py/PowerPool.handle
2,071
def select(self): e = xlib.XEvent() while xlib.XPending(self._display): xlib.XNextEvent(self._display, e) # Key events are filtered by the xlib window event # handler so they get a shot at the prefiltered event. if e.xany.type not in (xlib.KeyPress, xlib.KeyRelease): if xlib.XFilterEvent(e, e.xany.window): continue try: dispatch = self._window_map[e.xany.window] except __HOLE__: continue dispatch(e)
KeyError
dataset/ETHPy150Open ardekantur/pyglet/pyglet/canvas/xlib.py/XlibDisplay.select
2,072
def reverse(viewname, urlconf=None, args=None, kwargs=None, current_app=None): if urlconf is None: urlconf = get_urlconf() resolver = get_resolver(urlconf) args = args or [] kwargs = kwargs or {} prefix = get_script_prefix() if not isinstance(viewname, six.string_types): view = viewname else: parts = viewname.split(':') parts.reverse() view = parts[0] path = parts[1:] if current_app: current_path = current_app.split(':') current_path.reverse() else: current_path = None resolved_path = [] ns_pattern = '' while path: ns = path.pop() current_ns = current_path.pop() if current_path else None # Lookup the name to see if it could be an app identifier. try: app_list = resolver.app_dict[ns] # Yes! Path part matches an app in the current Resolver. if current_ns and current_ns in app_list: # If we are reversing for a particular app, use that # namespace. ns = current_ns elif ns not in app_list: # The name isn't shared by one of the instances (i.e., # the default) so pick the first instance as the default. ns = app_list[0] except KeyError: pass if ns != current_ns: current_path = None try: extra, resolver = resolver.namespace_dict[ns] resolved_path.append(ns) ns_pattern = ns_pattern + extra except __HOLE__ as key: if resolved_path: raise NoReverseMatch( "%s is not a registered namespace inside '%s'" % (key, ':'.join(resolved_path)) ) else: raise NoReverseMatch("%s is not a registered namespace" % key) if ns_pattern: resolver = get_ns_resolver(ns_pattern, resolver) return force_text(iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs)))
KeyError
dataset/ETHPy150Open django/django/django/urls/base.py/reverse
2,073
def clear_script_prefix(): """ Unset the script prefix for the current thread. """ try: del _prefixes.value except __HOLE__: pass
AttributeError
dataset/ETHPy150Open django/django/django/urls/base.py/clear_script_prefix
2,074
def ajax_csv_importer(request): save = True if request.POST.get('save', False) else False raw_csv_data = request.POST.get('csv-data', '') primary_attr = request.POST.get('primary-attr', 'hostname') @transaction.commit_manually def do_csv_import(data): try: return csv_import(data, primary_attr=primary_attr, save=save) except __HOLE__, e: transaction.rollback() return {'error': e.messages} except Exception, e: transaction.rollback() return {'error': ['Error: ' + e.message]} finally: transaction.commit() result = do_csv_import(raw_csv_data) attrs = [field.name for field in System._meta.fields] return render(request, 'csv/ajax_csv_importer.html', { 'attrs': attrs, 'result': result, 'getattr': getattr, 'save': save, 'len': len }, status=200 if 'error' not in result else 400)
ValidationError
dataset/ETHPy150Open mozilla/inventory/mcsv/views.py/ajax_csv_importer
2,075
def perm(accessing_obj, accessed_obj, *args, **kwargs):
    """
    The basic permission-checker. Ignores case.

    Usage:
       perm(<permission>)

    where <permission> is the permission accessing_obj must
    have in order to pass the lock. If the given permission
    is part of settings.PERMISSION_HIERARCHY, permission is
    also granted to all ranks higher up in the hierarchy.

    If accessing_obj is an Object controlled by a Player, the
    permissions of the Player are used unless the Attribute _quell
    is set to True on the Object. In this case however, the
    LOWEST hierarchy-permission of the Player/Object-pair will be used
    (this is in order to avoid Players potentially escalating their own
    permissions by use of a higher-level Object)

    """
    # this allows the perm_above lockfunc to make use of this function too
    gtmode = kwargs.pop("_greater_than", False)
    try:
        perm = args[0].lower()
        perms_object = [p.lower() for p in accessing_obj.permissions.all()]
    except (__HOLE__, IndexError):
        return False

    if utils.inherits_from(accessing_obj, "evennia.objects.objects.DefaultObject") and accessing_obj.player:
        player = accessing_obj.player
        perms_player = [p.lower() for p in player.permissions.all()]
        is_quell = player.attributes.get("_quell")

        if perm in _PERMISSION_HIERARCHY:
            # check hierarchy without allowing escalation obj->player
            hpos_target = _PERMISSION_HIERARCHY.index(perm)
            hpos_player = [hpos for hpos, hperm in enumerate(_PERMISSION_HIERARCHY)
                           if hperm in perms_player]
            hpos_player = hpos_player and hpos_player[-1] or -1
            if is_quell:
                hpos_object = [hpos for hpos, hperm in enumerate(_PERMISSION_HIERARCHY)
                               if hperm in perms_object]
                hpos_object = hpos_object and hpos_object[-1] or -1
                if gtmode:
                    return hpos_target < min(hpos_player, hpos_object)
                else:
                    return hpos_target <= min(hpos_player, hpos_object)
            elif gtmode:
                return hpos_target < hpos_player
            else:
                return hpos_target <= hpos_player
        elif not is_quell and perm in perms_player:
            # if we get here, check player perms first, otherwise
            # continue as normal
            return True

    if perm in perms_object:
        # simplest case - we have direct match
        return True

    if perm in _PERMISSION_HIERARCHY:
        # check if we have a higher hierarchy position
        hpos_target = _PERMISSION_HIERARCHY.index(perm)
        return any(1 for hpos, hperm in enumerate(_PERMISSION_HIERARCHY)
                   if hperm in perms_object and hpos_target < hpos)

    return False
AttributeError
dataset/ETHPy150Open evennia/evennia/evennia/locks/lockfuncs.py/perm
2,076
def dbref(accessing_obj, accessed_obj, *args, **kwargs): """ Usage: dbref(3) This lock type checks if the checking object has a particular dbref. Note that this only works for checking objects that are stored in the database (e.g. not for commands) """ if not args: return False try: dbref = int(args[0].strip().strip('#')) except __HOLE__: return False if hasattr(accessing_obj, 'dbid'): return dbref == accessing_obj.dbid return False
ValueError
dataset/ETHPy150Open evennia/evennia/evennia/locks/lockfuncs.py/dbref
2,077
def holds(accessing_obj, accessed_obj, *args, **kwargs):
    """
    Usage:
      holds()                checks if accessed_obj or accessed_obj.obj is held by accessing_obj
      holds(key/dbref)       checks if accessing_obj holds an object with given key/dbref
      holds(attrname, value) checks if accessing_obj holds an object with the given
                             attrname and value

    This is passed if accessed_obj is carried by accessing_obj (that is,
    accessed_obj.location == accessing_obj), or if accessing_obj itself holds an
    object matching the given key.
    """
    try:
        # commands and scripts don't have contents, so we are usually looking
        # for the contents of their .obj property instead (i.e. the object the
        # command/script is attached to).
        contents = accessing_obj.contents
    except AttributeError:
        try:
            contents = accessing_obj.obj.contents
        except __HOLE__:
            return False

    def check_holds(objid):
        # helper function. Compares both dbrefs and keys/aliases.
        objid = str(objid)
        dbref = utils.dbref(objid, reqhash=False)
        if dbref and any((True for obj in contents if obj.dbid == dbref)):
            return True
        objid = objid.lower()
        return any((True for obj in contents
                    if obj.key.lower() == objid or
                    objid in [al.lower() for al in obj.aliases.all()]))

    if not args:
        # holds() - check if accessed_obj or accessed_obj.obj is held by accessing_obj
        try:
            if check_holds(accessed_obj.dbid):
                return True
        except Exception:
            pass
        return hasattr(accessed_obj, "obj") and check_holds(accessed_obj.obj.dbid)
    if len(args) == 1:
        # command is holds(dbref/key) - check if given objname/dbref is held by accessing_obj
        return check_holds(args[0])
    elif len(args) == 2:
        # command is holds(attrname, value) - check if any held object has the given
        # attribute and value
        for obj in contents:
            if obj.attributes.get(args[0]) == args[1]:
                return True
AttributeError
dataset/ETHPy150Open evennia/evennia/evennia/locks/lockfuncs.py/holds
2,078
def add_dev(self, dev): """ Add a device to the ring. This device dict should have a minimum of the following keys: ====== =============================================================== id unique integer identifier amongst devices. Defaults to the next id if the 'id' key is not provided in the dict weight a float of the relative weight of this device as compared to others; this indicates how many partitions the builder will try to assign to this device region integer indicating which region the device is in zone integer indicating which zone the device is in; a given partition will not be assigned to multiple devices within the same (region, zone) pair if there is any alternative ip the ip address of the device port the tcp port of the device device the device's name on disk (sdb1, for example) meta general use 'extra' field; for example: the online date, the hardware description ====== =============================================================== .. note:: This will not rebalance the ring immediately as you may want to make multiple changes for a single rebalance. :param dev: device dict :returns: id of device (not used in the tree anymore, but unknown users may depend on it) """ if 'id' not in dev: dev['id'] = 0 if self.devs: try: dev['id'] = self.devs.index(None) except __HOLE__: dev['id'] = len(self.devs) if dev['id'] < len(self.devs) and self.devs[dev['id']] is not None: raise exceptions.DuplicateDeviceError( 'Duplicate device id: %d' % dev['id']) # Add holes to self.devs to ensure self.devs[dev['id']] will be the dev while dev['id'] >= len(self.devs): self.devs.append(None) dev['weight'] = float(dev['weight']) dev['parts'] = 0 self.devs[dev['id']] = dev self.devs_changed = True self.version += 1 return dev['id']
ValueError
dataset/ETHPy150Open openstack/swift/swift/common/ring/builder.py/RingBuilder.add_dev
2,079
def _build_dispersion_graph(self, old_replica2part2dev=None):
    """
    Build a dict of all tiers in the cluster to a list of the number of
    parts with a replica count at each index.  The values of the dict will
    be lists of length the maximum whole replica + 1 so that the
    graph[tier][3] is the number of parts within the tier with 3 replicas
    and graph[tier][0] is the number of parts not assigned in this tier.

    i.e.
    {
        <tier>: [
            <number_of_parts_with_0_replicas>,
            <number_of_parts_with_1_replicas>,
            ...
            <number_of_parts_with_n_replicas>,
        ],
        ...
    }

    :param old_replica2part2dev: if called from rebalance, the
        old_replica2part2dev can be used to count moved parts.

    :returns: number of parts with different assignments than
        old_replica2part2dev if provided
    """

    # Since we're going to loop over every replica of every part we'll
    # also count up changed_parts if old_replica2part2dev is passed in
    old_replica2part2dev = old_replica2part2dev or []
    # Compare the partition allocation before and after the rebalance
    # Only changed device ids are taken into account; devices might be
    # "touched" during the rebalance, but actually not really moved
    changed_parts = 0

    int_replicas = int(math.ceil(self.replicas))
    max_allowed_replicas = self._build_max_replicas_by_tier()
    parts_at_risk = 0

    dispersion_graph = {}
    # go over all the devices holding each replica part by part
    for part_id, dev_ids in enumerate(
            six.moves.zip(*self._replica2part2dev)):
        # count the number of replicas of this part for each tier of each
        # device, some devices may have overlapping tiers!
        replicas_at_tier = defaultdict(int)
        for rep_id, dev in enumerate(iter(
                self.devs[dev_id] for dev_id in dev_ids)):
            for tier in (dev.get('tiers') or tiers_for_dev(dev)):
                replicas_at_tier[tier] += 1
            # IndexErrors will be raised if the replicas are increased or
            # decreased, and that actually means the partition has changed
            try:
                old_device = old_replica2part2dev[rep_id][part_id]
            except __HOLE__:
                changed_parts += 1
                continue

            if old_device != dev['id']:
                changed_parts += 1
        part_at_risk = False
        # update running totals for each tiers' number of parts with a
        # given replica count
        for tier, replicas in replicas_at_tier.items():
            if tier not in dispersion_graph:
                dispersion_graph[tier] = [self.parts] + [0] * int_replicas
            dispersion_graph[tier][0] -= 1
            dispersion_graph[tier][replicas] += 1
            if replicas > max_allowed_replicas[tier]:
                part_at_risk = True
        # this part may be at risk in multiple tiers, but we only count it
        # as at_risk once
        if part_at_risk:
            parts_at_risk += 1
    self._dispersion_graph = dispersion_graph
    self.dispersion = 100.0 * parts_at_risk / self.parts
    return changed_parts
IndexError
dataset/ETHPy150Open openstack/swift/swift/common/ring/builder.py/RingBuilder._build_dispersion_graph
2,080
@classmethod def load(cls, builder_file, open=open): """ Obtain RingBuilder instance of the provided builder file :param builder_file: path to builder file to load :return: RingBuilder instance """ try: fp = open(builder_file, 'rb') except __HOLE__ as e: if e.errno == errno.ENOENT: raise exceptions.FileNotFoundError( 'Ring Builder file does not exist: %s' % builder_file) elif e.errno in [errno.EPERM, errno.EACCES]: raise exceptions.PermissionError( 'Ring Builder file cannot be accessed: %s' % builder_file) else: raise else: with fp: try: builder = pickle.load(fp) except Exception: # raise error during unpickling as UnPicklingError raise exceptions.UnPicklingError( 'Ring Builder file is invalid: %s' % builder_file) if not hasattr(builder, 'devs'): builder_dict = builder builder = RingBuilder(1, 1, 1) builder.copy_from(builder_dict) for dev in builder.devs: # really old rings didn't have meta keys if dev and 'meta' not in dev: dev['meta'] = '' # NOTE(akscram): An old ring builder file don't contain # replication parameters. if dev: if 'ip' in dev: dev.setdefault('replication_ip', dev['ip']) if 'port' in dev: dev.setdefault('replication_port', dev['port']) return builder
IOError
dataset/ETHPy150Open openstack/swift/swift/common/ring/builder.py/RingBuilder.load
2,081
def search_devs(self, search_values): """Search devices by parameters. :param search_values: a dictionary with search values to filter devices, supported parameters are id, region, zone, ip, port, replication_ip, replication_port, device, weight, meta :returns: list of device dicts """ matched_devs = [] for dev in self.devs: if not dev: continue matched = True for key in ('id', 'region', 'zone', 'ip', 'port', 'replication_ip', 'replication_port', 'device', 'weight', 'meta'): if key in search_values: value = search_values.get(key) if value is not None: if key == 'meta': if value not in dev.get(key): matched = False elif key == 'ip' or key == 'replication_ip': cdev = '' try: cdev = validate_and_normalize_address( dev.get(key, '')) except __HOLE__: pass if cdev != value: matched = False elif dev.get(key) != value: matched = False if matched: matched_devs.append(dev) return matched_devs
ValueError
dataset/ETHPy150Open openstack/swift/swift/common/ring/builder.py/RingBuilder.search_devs
2,082
def datetime_u(s):
    fmt = "%Y-%m-%dT%H:%M:%S"
    try:
        return _strptime(s, fmt)
    except __HOLE__:
        try:
            # strip utc offset
            if s[-3] == ":" and s[-6] in (' ', '-', '+'):
                warnings.warn('removing unsupported UTC offset', RuntimeWarning)
                s = s[:-6]
            # parse microseconds
            try:
                return _strptime(s, fmt + ".%f")
            except:
                return _strptime(s, fmt)
        except ValueError:
            # strip microseconds (not supported on this platform)
            if "." in s:
                warnings.warn('removing unsupported microseconds', RuntimeWarning)
                s = s[:s.index(".")]
            return _strptime(s, fmt)
ValueError
dataset/ETHPy150Open uwdata/termite-data-server/web2py/gluon/contrib/pysimplesoap/helpers.py/datetime_u
2,083
def symmetric_difference(self, other): """Return the symmetric difference of two sets as a new set. (I.e. all elements that are in exactly one of the sets.) """ result = self.__class__() data = result._data value = True selfdata = self._data try: otherdata = other._data except __HOLE__: otherdata = Set(other)._data for elt in ifilterfalse(otherdata.__contains__, selfdata): data[elt] = value for elt in ifilterfalse(selfdata.__contains__, otherdata): data[elt] = value return result
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/sets.py/BaseSet.symmetric_difference
2,084
def difference(self, other): """Return the difference of two sets as a new Set. (I.e. all elements that are in this set and not in the other.) """ result = self.__class__() data = result._data try: otherdata = other._data except __HOLE__: otherdata = Set(other)._data value = True for elt in ifilterfalse(otherdata.__contains__, self): data[elt] = value return result # Membership test
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/sets.py/BaseSet.difference
2,085
def __contains__(self, element): """Report whether an element is a member of a set. (Called in response to the expression `element in self'.) """ try: return element in self._data except __HOLE__: transform = getattr(element, "__as_temporarily_immutable__", None) if transform is None: raise # re-raise the TypeError exception we caught return transform() in self._data # Subset and superset test
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/sets.py/BaseSet.__contains__
2,086
def _update(self, iterable): # The main loop for update() and the subclass __init__() methods. data = self._data # Use the fast update() method when a dictionary is available. if isinstance(iterable, BaseSet): data.update(iterable._data) return value = True if type(iterable) in (list, tuple, xrange): # Optimized: we know that __iter__() and next() can't # raise TypeError, so we can move 'try:' out of the loop. it = iter(iterable) while True: try: for element in it: data[element] = value return except __HOLE__: transform = getattr(element, "__as_immutable__", None) if transform is None: raise # re-raise the TypeError exception we caught data[transform()] = value else: # Safe: only catch TypeError where intended for element in iterable: try: data[element] = value except TypeError: transform = getattr(element, "__as_immutable__", None) if transform is None: raise # re-raise the TypeError exception we caught data[transform()] = value
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/sets.py/BaseSet._update
2,087
def add(self, element): """Add an element to a set. This has no effect if the element is already present. """ try: self._data[element] = True except __HOLE__: transform = getattr(element, "__as_immutable__", None) if transform is None: raise # re-raise the TypeError exception we caught self._data[transform()] = True
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/sets.py/Set.add
2,088
def remove(self, element): """Remove an element from a set; it must be a member. If the element is not a member, raise a KeyError. """ try: del self._data[element] except __HOLE__: transform = getattr(element, "__as_temporarily_immutable__", None) if transform is None: raise # re-raise the TypeError exception we caught del self._data[transform()]
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/sets.py/Set.remove
2,089
def discard(self, element): """Remove an element from a set if it is a member. If the element is not a member, do nothing. """ try: self.remove(element) except __HOLE__: pass
KeyError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/sets.py/Set.discard
2,090
def add_edge(self, u, v, key=None, attr_dict=None, **attr): """Add an edge between u and v. The nodes u and v will be automatically added if they are not already in the graph. Edge attributes can be specified with keywords or by providing a dictionary with key/value pairs. See examples below. Parameters ---------- u,v : nodes Nodes can be, for example, strings or numbers. Nodes must be hashable (and not None) Python objects. key : hashable identifier, optional (default=lowest unused integer) Used to distinguish multiedges between a pair of nodes. attr_dict : dictionary, optional (default= no attributes) Dictionary of edge attributes. Key/value pairs will update existing data associated with the edge. attr : keyword arguments, optional Edge data (or labels or objects) can be assigned using keyword arguments. See Also -------- add_edges_from : add a collection of edges Notes ----- To replace/update edge data, use the optional key argument to identify a unique edge. Otherwise a new edge will be created. NetworkX algorithms designed for weighted graphs cannot use multigraphs directly because it is not clear how to handle multiedge weights. Convert to Graph using edge attribute 'weight' to enable weighted graph algorithms. Examples -------- The following all add the edge e=(1,2) to graph G: >>> G = nx.MultiDiGraph() >>> e = (1,2) >>> G.add_edge(1, 2) # explicit two-node form >>> G.add_edge(*e) # single edge as tuple of two nodes >>> G.add_edges_from( [(1,2)] ) # add edges from iterable container Associate data to edges using keywords: >>> G.add_edge(1, 2, weight=3) >>> G.add_edge(1, 2, key=0, weight=4) # update data for key=0 >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) """ # set up attribute dict if attr_dict is None: attr_dict=attr else: try: attr_dict.update(attr) except __HOLE__: raise NetworkXError(\ "The attr_dict argument must be a dictionary.") # add nodes if u not in self.succ: self.succ[u] = {} self.pred[u] = {} self.node[u] = {} if v not in self.succ: self.succ[v] = {} self.pred[v] = {} self.node[v] = {} if v in self.succ[u]: keydict=self.adj[u][v] if key is None: # find a unique integer key # other methods might be better here? key=len(keydict) while key in keydict: key+=1 datadict=keydict.get(key,{}) datadict.update(attr_dict) keydict[key]=datadict else: # selfloops work this way without special treatment if key is None: key=0 datadict={} datadict.update(attr_dict) keydict={key:datadict} self.succ[u][v] = keydict self.pred[v][u] = keydict
AttributeError
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/classes/multidigraph.py/MultiDiGraph.add_edge
2,091
def get_available_name(self, name):
    """
    In order to prevent files from overwriting one another, this will
    generate a new filename with the format
    `YYMMDD.uniquehash.filename.extension`
    """
    # Set the format of our filename
    filename_format = '{path}/{date}.{filename}'

    # If the storage engine is S3, call _clean_name() to clean the name
    try:
        clean_name = self._clean_name(name)
    except __HOLE__:
        clean_name = name

    # Generate the YYMMDD formatted date
    date = datetime.now().strftime('%y%m%d')

    # rsplit the filename on '/' so we have a 2 value list of
    # the path and filename
    splitname = clean_name.rsplit('/', 1)

    # Compile all the relevant strings to generate the full path/filename
    final_name = filename_format.format(
        path=splitname[0],
        date=date,
        filename=uniqify_filename(splitname[1])
    )
    return final_name
AttributeError
dataset/ETHPy150Open ofa/connect/open_connect/connect_core/utils/storages.py/AttachmentStorage.get_available_name
2,092
def _convert_colors(colors): """Convert either a list of colors or nested lists of colors to RGB.""" to_rgb = mpl.colors.colorConverter.to_rgb if isinstance(colors, pd.DataFrame): # Convert dataframe return pd.DataFrame({col: colors[col].map(to_rgb) for col in colors}) elif isinstance(colors, pd.Series): return colors.map(to_rgb) else: try: to_rgb(colors[0]) # If this works, there is only one level of colors return list(map(to_rgb, colors)) except __HOLE__: # If we get here, we have nested lists return [list(map(to_rgb, l)) for l in colors]
ValueError
dataset/ETHPy150Open mwaskom/seaborn/seaborn/matrix.py/_convert_colors
2,093
def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
             annot_kws, cbar, cbar_kws,
             xticklabels=True, yticklabels=True, mask=None):
    """Initialize the plotting object."""
    # We always want to have a DataFrame with semantic information
    # and an ndarray to pass to matplotlib
    if isinstance(data, pd.DataFrame):
        plot_data = data.values
    else:
        plot_data = np.asarray(data)
        data = pd.DataFrame(plot_data)

    # Validate the mask and convert it to a DataFrame
    mask = _matrix_mask(data, mask)

    # Reverse the rows so the plot looks like the matrix
    plot_data = plot_data[::-1]
    data = data.iloc[::-1]
    mask = mask.iloc[::-1]

    plot_data = np.ma.masked_where(np.asarray(mask), plot_data)

    # Get good names for the rows and columns
    xtickevery = 1
    if isinstance(xticklabels, int) and xticklabels > 1:
        xtickevery = xticklabels
        xticklabels = _index_to_ticklabels(data.columns)
    elif xticklabels is True:
        xticklabels = _index_to_ticklabels(data.columns)
    elif xticklabels is False:
        xticklabels = []

    ytickevery = 1
    if isinstance(yticklabels, int) and yticklabels > 1:
        ytickevery = yticklabels
        yticklabels = _index_to_ticklabels(data.index)
    elif yticklabels is True:
        yticklabels = _index_to_ticklabels(data.index)
    elif yticklabels is False:
        yticklabels = []
    else:
        yticklabels = yticklabels[::-1]

    # Get the positions and used label for the ticks
    nx, ny = data.T.shape

    if xticklabels == []:
        self.xticks = []
        self.xticklabels = []
    else:
        xstart, xend, xstep = 0, nx, xtickevery
        self.xticks = np.arange(xstart, xend, xstep) + .5
        self.xticklabels = xticklabels[xstart:xend:xstep]

    if yticklabels == []:
        self.yticks = []
        self.yticklabels = []
    else:
        ystart, yend, ystep = (ny - 1) % ytickevery, ny, ytickevery
        self.yticks = np.arange(ystart, yend, ystep) + .5
        self.yticklabels = yticklabels[ystart:yend:ystep]

    # Get good names for the axis labels
    xlabel = _index_to_label(data.columns)
    ylabel = _index_to_label(data.index)
    self.xlabel = xlabel if xlabel is not None else ""
    self.ylabel = ylabel if ylabel is not None else ""

    # Determine good default values for the colormapping
    self._determine_cmap_params(plot_data, vmin, vmax,
                                cmap, center, robust)

    # Sort out the annotations
    if annot is None:
        annot = False
        annot_data = None
    elif isinstance(annot, bool):
        if annot:
            annot_data = plot_data
        else:
            annot_data = None
    else:
        try:
            annot_data = annot.values[::-1]
        except __HOLE__:
            annot_data = annot[::-1]
        if annot.shape != plot_data.shape:
            raise ValueError('Data supplied to "annot" must be the same '
                             'shape as the data to plot.')
        annot = True

    # Save other attributes to the object
    self.data = data
    self.plot_data = plot_data

    self.annot = annot
    self.annot_data = annot_data

    self.fmt = fmt
    self.annot_kws = {} if annot_kws is None else annot_kws
    self.cbar = cbar
    self.cbar_kws = {} if cbar_kws is None else cbar_kws
    self.cbar_kws.setdefault('ticks', mpl.ticker.MaxNLocator(6))
AttributeError
dataset/ETHPy150Open mwaskom/seaborn/seaborn/matrix.py/_HeatMapper.__init__
2,094
@property def calculated_linkage(self): try: return self._calculate_linkage_fastcluster() except __HOLE__: return self._calculate_linkage_scipy()
ImportError
dataset/ETHPy150Open mwaskom/seaborn/seaborn/matrix.py/_DendrogramPlotter.calculated_linkage
2,095
def plot_matrix(self, colorbar_kws, xind, yind, **kws): self.data2d = self.data2d.iloc[yind, xind] self.mask = self.mask.iloc[yind, xind] # Try to reorganize specified tick labels, if provided xtl = kws.pop("xticklabels", True) try: xtl = np.asarray(xtl)[xind] except (TypeError, __HOLE__): pass ytl = kws.pop("yticklabels", True) try: ytl = np.asarray(ytl)[yind] except (TypeError, IndexError): pass heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.cax, cbar_kws=colorbar_kws, mask=self.mask, xticklabels=xtl, yticklabels=ytl, **kws) self.ax_heatmap.yaxis.set_ticks_position('right') self.ax_heatmap.yaxis.set_label_position('right')
IndexError
dataset/ETHPy150Open mwaskom/seaborn/seaborn/matrix.py/ClusterGrid.plot_matrix
2,096
def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster, row_linkage, col_linkage, **kws): colorbar_kws = {} if colorbar_kws is None else colorbar_kws self.plot_dendrograms(row_cluster, col_cluster, metric, method, row_linkage=row_linkage, col_linkage=col_linkage) try: xind = self.dendrogram_col.reordered_ind except AttributeError: xind = np.arange(self.data2d.shape[1]) try: yind = self.dendrogram_row.reordered_ind except __HOLE__: yind = np.arange(self.data2d.shape[0]) self.plot_colors(xind, yind, **kws) self.plot_matrix(colorbar_kws, xind, yind, **kws) return self
AttributeError
dataset/ETHPy150Open mwaskom/seaborn/seaborn/matrix.py/ClusterGrid.plot
2,097
def _to_node(self, element, groups=None): try: state = self.NODE_STATE_MAP[ self._findattr(element, "instanceState/name") ] except __HOLE__: state = NodeState.UNKNOWN n = Node( id=self._findtext(element, 'instanceId'), name=self._findtext(element, 'instanceId'), state=state, public_ip=[self._findtext(element, 'dnsName')], private_ip=[self._findtext(element, 'privateDnsName')], driver=self.connection.driver, extra={ 'dns_name': self._findattr(element, "dnsName"), 'instanceId': self._findattr(element, "instanceId"), 'imageId': self._findattr(element, "imageId"), 'private_dns': self._findattr(element, "privateDnsName"), 'status': self._findattr(element, "instanceState/name"), 'keyname': self._findattr(element, "keyName"), 'launchindex': self._findattr(element, "amiLaunchIndex"), 'productcode': [p.text for p in self._findall( element, "productCodesSet/item/productCode" )], 'instancetype': self._findattr(element, "instanceType"), 'launchdatetime': self._findattr(element, "launchTime"), 'availability': self._findattr(element, "placement/availabilityZone"), 'kernelid': self._findattr(element, "kernelId"), 'ramdiskid': self._findattr(element, "ramdiskId"), 'groups': groups } ) return n
KeyError
dataset/ETHPy150Open secondstory/dewpoint/libcloud/drivers/ec2.py/EC2NodeDriver._to_node
2,098
def _run_command(args, stdout=_PIPE, stderr=_PIPE, encoding=None, stream=0):
    #regarding the shell argument, see: http://bugs.python.org/issue8557
    try:
        proc = _Popen(args, stdout=stdout, stderr=stderr,
                      shell=(sys.platform == 'win32'))

        data = proc.communicate()[stream]
    except __HOLE__:
        return 1, ''

    #double-checked: communicate() calls wait()
    data = decode_as_string(data, encoding)
    return proc.returncode, data
OSError
dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/setuptools/svn_utils.py/_run_command
2,099
@classmethod def load(cls, dirname=''): normdir = os.path.normpath(dirname) code, data = _run_command(['svn', 'info', normdir]) # Must check for some contents, as some use empty directories # in testcases svn_dir = os.path.join(normdir, '.svn') has_svn = (os.path.isfile(os.path.join(svn_dir, 'entries')) or os.path.isfile(os.path.join(svn_dir, 'dir-props')) or os.path.isfile(os.path.join(svn_dir, 'dir-prop-base'))) svn_version = tuple(cls.get_svn_version().split('.')) try: base_svn_version = tuple(int(x) for x in svn_version[:2]) except __HOLE__: base_svn_version = tuple() if not has_svn: return SvnInfo(dirname) if code or not base_svn_version or base_svn_version < (1, 3): warnings.warn(("No SVN 1.3+ command found: falling back " "on pre 1.7 .svn parsing"), DeprecationWarning) return SvnFileInfo(dirname) if base_svn_version < (1, 5): return Svn13Info(dirname) return Svn15Info(dirname)
ValueError
dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/setuptools/svn_utils.py/SvnInfo.load