Dataset columns:
  Unnamed: 0  int64   values 0 - 10k
  function    string  lengths 79 - 138k
  label       string  20 classes
  info        string  lengths 42 - 261
8,400
def expln(x):
    """ This continuous function ensures that the values of the array are
    always positive. It is ln(x+1)+1 for x >= 0 and exp(x) for x < 0. """
    def f(val):
        if val < 0:
            # exponential function for x < 0
            return exp(val)
        else:
            # natural log function for x >= 0
            return log(val + 1.0) + 1
    try:
        result = array(list(map(f, x)))
    except __HOLE__:
        result = array(f(x))
    return result
TypeError
dataset/ETHPy150Open pybrain/pybrain/pybrain/tools/functions.py/expln
8,401
def explnPrime(x):
    """ This function is the first derivative of the expln function (above).
    It is needed for the backward pass of the module. """
    def f(val):
        if val < 0:
            # exponential function for x<0
            return exp(val)
        else:
            # linear function for x>=0
            return 1.0 / (val + 1.0)
    try:
        result = array(list(map(f, x)))
    except __HOLE__:
        result = array(f(x))
    return result
TypeError
dataset/ETHPy150Open pybrain/pybrain/pybrain/tools/functions.py/explnPrime
8,402
def __set__(self, instance, value):
    if value is not None and isinstance(value, str):
        try:
            value = W3CDTF_to_datetime(value)
        except __HOLE__:
            raise ValueError("Value must be W3C datetime format")
    super(W3CDateTime, self).__set__(instance, value)
ValueError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/workbook/properties.py/W3CDateTime.__set__
8,403
def fetch_posts(s, newsgroup, last, cid):
    """Fetch newsgroups posts, using the given nntp object s."""
    NUM_POSTS = 10  # we will fetch the last 10 posts (at most)
    try:
        start = str(int(last) - NUM_POSTS)
        _, items = s.xover(start, last)
        posts = []
        for (article_id, subject, author, _, _, _, _, _) in items:
            _, _, _, text = s.article(article_id)
            article = []
            for l in text:
                try:
                    article.append(l.decode('utf-8', errors='ignore'))
                except __HOLE__:
                    logging.exception('error on: %s', l)
            post_info = [article_id,
                         author.decode('utf-8', errors='ignore'),
                         subject.decode('utf-8', errors='ignore'),
                         len(text), article]
            posts.append(post_info)
        if posts:
            pd = PostData(posts=posts)
            pd.put()
            pid = pd.key.id()
            logging.debug('pid: %s, cid: %s', pid, cid)
            channel.send_message(cid, json.dumps(
                {'pid': pid, 'newsgroup': newsgroup}))
    except:
        logging.exception('Something went wrong...')
UnicodeDecodeError
dataset/ETHPy150Open GoogleCloudPlatform/appengine-sockets-python-java-go/python_socket_demo/main.py/fetch_posts
8,404
def _load_function(source):
    """
    Returns a function from a module, given a source string of the form:

        'module.submodule.subsubmodule.function_name'

    """
    module_string, function_string = source.rsplit('.', 1)
    modules = [i for i in sys.modules.keys() if 'calliope' in i]
    # Check if module already loaded, if so, don't re-import it
    if (module_string in modules):
        module = sys.modules[module_string]
    elif ('calliope.' + module_string) in modules:
        module = sys.modules['calliope.' + module_string]
    # Else load the module
    else:
        try:
            module = importlib.import_module(module_string)
        except __HOLE__:
            module = importlib.import_module('calliope.' + module_string)
    return getattr(module, function_string)
ImportError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/_load_function
8,405
def plugin_load(name, builtin_module):
    try:  # First try importing as a third-party module
        func = _load_function(name)
    except __HOLE__:
        # ValueError raised if we got a string without '.',
        # which implies a builtin function,
        # so we attempt to load from the given module
        func_string = builtin_module + '.' + name
        func = _load_function(func_string)
    return func
ValueError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/plugin_load
8,406
def prev(self, t):
    """Using the timesteps set of this model instance, return `t-1`,
    even if the set is not continuous.

    E.g., if t is [0, 1, 2, 6, 7, 8], model.prev(6) will return 2.

    """
    # Create an index to look up t, and save it for later use
    try:
        # Check if _t_index exists
        self._t_index.name
    except __HOLE__:
        self._t_index = pd.Index(self.data._dt.index)
    # Get the location of t in the index and use it to retrieve
    # the desired value, raising an error if it's <0
    loc = self._t_index.get_loc(t) - 1
    if loc >= 0:
        return self.data._dt.index[loc]
    else:
        e = exceptions.ModelError
        raise e('Attempted to get a timestep earlier than the first one.')
AttributeError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/Model.prev
8,407
def get_option(self, option, x=None, default=None, ignore_inheritance=False):
    """
    Retrieves options from model settings for the given tech,
    falling back to the default if the option is not defined for the tech.

    If ``x`` is given, will attempt to use location-specific override
    from the location matrix first before falling back to model-wide
    settings.

    If ``default`` is given, it is used as a fallback if no default value
    can be found in the regular inheritance chain. If ``default`` is None
    and the regular inheritance chain defines no default, an error is
    raised.

    If ``ignore_inheritance`` is True, the default is immediately used
    instead of a search through the inheritance chain if the option has
    not been set for the given tech.

    If the first segment of the option contains ':', it will be
    interpreted as implicit tech subsetting: e.g. asking for 'hvac:r1'
    implicitly uses 'hvac:r1' with the parent 'hvac', even if that has
    not been defined, to search the option inheritance chain.

    Examples:

    * ``model.get_option('ccgt.costs.om_var')``
    * ``model.get_option('csp.weight')``
    * ``model.get_option('csp.r', x='33')``
    * ``model.get_option('ccgt.costs.om_var',\
      default='defaults.costs.om_var')``

    """
    key = (option, x, default, ignore_inheritance)
    try:
        result = self.option_cache[key]
    except __HOLE__:
        # self._get_option is defined inside __init__
        result = self.option_cache[key] = self._get_option(*key)
    return result
KeyError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/Model.get_option
8,408
def initialize_parents(self):
    o = self.config_model
    try:
        self.parents = {i: o.techs[i].parent for i in o.techs.keys()
                        if i != 'defaults'}
    except __HOLE__:
        tech = inspect.trace()[-1][0].f_locals['i']
        if 'parent' not in list(o.techs[tech].keys()):
            e = exceptions.ModelError
            raise e('Technology `' + tech + '` defines no parent!')
    # Verify that no technologies apart from the default technologies
    # inherit from 'defaults'
    for k, v in self.parents.items():
        if k not in get_default_techs() and v == 'defaults':
            e = exceptions.ModelError
            raise e('Tech `' + k + '` inherits from `defaults` but ' +
                    'should inherit from a built-in default technology.')
    # Verify that all parents are themselves actually defined
    for k, v in self.parents.items():
        if v not in list(o.techs.keys()):
            e = exceptions.ModelError
            raise e('Parent `' + v + '` of technology `' +
                    k + '` is not defined.')
KeyError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/Model.initialize_parents
8,409
def initialize_sets(self):
    o = self.config_model
    d = self.data
    path = o.data_path

    #
    # t: Timesteps set
    #
    table_t = pd.read_csv(os.path.join(path, 'set_t.csv'), header=None,
                          index_col=1, parse_dates=[1])
    table_t.columns = ['t_int']
    if self.config_run.get_key('subset_t', default=False):
        table_t = table_t.loc[self.config_run.subset_t[0]:
                              self.config_run.subset_t[1]]
        self.slice = slice(table_t.iat[0, 0], table_t.iat[-1, 0] + 1)
    else:
        self.slice = slice(None)
    _t = pd.Series([int(t) for t in table_t['t_int'].tolist()])
    d._dt = pd.Series(table_t.index, index=_t.tolist())
    # First set time_res_data and time_res_static across all data
    # `time_res_data` never changes, so always reflects the spacing
    # of time step indices
    d.time_res_data = self.get_timeres()
    # `time_res_static` is updated after time resolution adjustments,
    # so does not reflect the spacing of time step indices
    d.time_res_static = d.time_res_data
    d.time_res_native = 1  # In the beginning, time_res is native
    # From time_res_data, initialize time_res_series
    d.time_res_series = pd.Series(d.time_res_data, index=d._dt.index)
    # Last index t for which model may still use startup exceptions
    d.startup_time_bounds = d._dt.index[int(o.startup_time /
                                            d.time_res_data)]

    #
    # x: Locations set
    #
    d._x = list(o.locations.keys())
    if self.config_run.get_key('subset_x', default=False):
        d._x = [x for x in d._x if x in self.config_run.subset_x]

    #
    # y: Technologies set
    #
    d._y = set()
    try:
        for k, v in o.locations.items():
            for y in v.techs:
                if y in o.techs:
                    d._y.add(y)
                else:
                    e = exceptions.ModelError
                    raise e('Location `{}` '
                            'uses undefined tech `{}`.'.format(k, y))
    except __HOLE__:
        e = exceptions.ModelError
        raise e('The region `' + k + '` does not allow '
                'any technologies via `techs`. Must give '
                'at least one technology per region.')
    d._y = list(d._y)
    if self.config_run.get_key('subset_y', default=False):
        d._y = [y for y in d._y if y in self.config_run.subset_y]
    # Subset of transmission technologies, if any defined
    # Used to initialize transmission techs further below
    # (not yet added to d._y here)
    if ('links' in o) and (o.links is not None):
        d._y_transmission = transmission.get_transmission_techs(o.links)
        d.transmission_y = list(set([list(v.keys())[0]
                                     for k, v in o.links.items()]))
    else:
        d._y_transmission = []
        d.transmission_y = []
    # Subset of conversion technologies
    d._y_conversion = [y for y in d._y if self.ischild(y, of='conversion')]
    # Subset of supply, demand, storage technologies
    d._y_pc = [y for y in d._y
               if not self.ischild(y, of='conversion')
               or self.ischild(y, of='transmission')]
    # Subset of technologies that define es_prod/es_con
    d._y_prod = ([y for y in d._y if not self.ischild(y, of='demand')]
                 + d._y_transmission)
    d._y_con = ([y for y in d._y if not self.ischild(y, of='supply')]
                + d._y_transmission)
    # Subset of technologies that allow rb
    d._y_rb = []
    for y in d._y:
        for x in d._x:
            if self.get_option(y + '.constraints.allow_rb', x=x) is True:
                d._y_rb.append(y)
                break  # No need to look at other x
    # Subset of technologies with parasitics (carrier efficiency != 1.0)
    d._y_p = []
    for y in d._y:
        for x in d._x:
            if self.get_option(y + '.constraints.c_eff', x=x) != 1.0:
                d._y_p.append(y)
                break  # No need to look at other x

    #
    # Locations settings matrix and transmission technologies
    #
    d.locations = locations.generate_location_matrix(o.locations,
                                                     techs=d._y)
    # For simplicity, only keep the locations that are actually in set `x`
    d.locations = d.locations.loc[d._x, :]

    #
    # c: Carriers set
    #
    d._c = set()
    for y in d._y:  # Only add carriers for allowed technologies
        d._c.update([self.get_option(y + '.carrier')])
        if self.get_option(y + '.source_carrier'):
            d._c.update([self.get_option(y + '.source_carrier')])
    d._c = list(d._c)

    #
    # Initialize transmission technologies
    #
    self._initialize_transmission()

    #
    # self.data._y is now complete, ensure that all techs conform to the
    # rule that only "head" techs can be used in the model
    #
    for y in self.data._y:
        if self.get_option(y + '.parent') in self.data._y:
            e = exceptions.ModelError
            raise e('Only technologies without children can be used '
                    'in the model definition '
                    '({}, {}).'.format(y, self.get_option(y + '.parent')))

    #
    # k: Cost classes set
    #
    classes = [list(o.techs[k].costs.keys()) for k in o.techs
               if k != 'defaults'  # Prevent 'defaults' from entering set
               if 'costs' in o.techs[k]]
    # Flatten list and make sure 'monetary' is in it
    classes = ([i for i in itertools.chain.from_iterable(classes)]
               + ['monetary'])
    d._k = list(set(classes))  # Remove any duplicates with a set roundtrip
KeyError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/Model.initialize_sets
8,410
def read_data(self):
    """
    Read parameter data from CSV files, if needed.

    Data that may be defined in CSV files is read before generate_model()
    so that it only has to be read from disk once, even if
    generate_model() is repeatedly called.

    Note on indexing: if subset_t is set, a subset of the data is
    selected, and all data including their indices are subsetted.
    d._dt.index maps a simple [0, data length] index to the actual t
    index used.

    Data is stored in `self.data` for each `param` and technology `y`:
    ``self.data[param][y]``

    """
    @utils.memoize
    def _get_option_from_csv(filename):
        """Read CSV time series"""
        d_path = os.path.join(self.config_model.data_path, filename)
        # [self.slice] to do time subset if needed
        # Results in e.g. d.r_eff['csp'] being a dataframe
        # of efficiencies for each time step t at location x
        df = pd.read_csv(d_path, index_col=0)[self.slice]
        # Fill columns that weren't defined with NaN
        # missing_cols = list(set(self.data._x) - set(df.columns))
        # for c in missing_cols:
        #     df[c] = np.nan
        # Ensure that the read file's index matches the data's timesteps
        mismatch = df.index.difference(d._dt.index)
        if len(mismatch) > 0:
            e = exceptions.ModelError
            entries = mismatch.tolist()
            raise e('File has invalid index. Ensure that it has the same '
                    'date range and resolution as set_t.csv: {}.\n\n'
                    'Invalid entries: {}'.format(filename, entries))
        return df

    d = self.data
    self.debug.data_sources = utils.AttrDict()
    # Data for storage initialization parameter
    d.s_init = pd.DataFrame(index=d._x, columns=d._y_pc)
    for y in d.s_init.columns:
        for x in d.s_init.index:
            d.s_init.at[x, y] = self.get_option(y + '.constraints.s_init',
                                                x=x)
    # Parameters that may be defined over (x, t) for a given technology y
    d.params = ['r', 'e_eff']
    d._y_def_r = set()
    d._y_def_e_eff = set()
    # TODO could allow params in d.params to be defined only over
    # x instead of either static or over (x, t) via CSV!
    for param in d.params:
        d[param] = utils.AttrDict()
        for y in d._y:
            d[param][y] = pd.DataFrame(np.nan, index=d._dt.index,
                                       columns=d._x)
            # TODO this whole process could be refactored for efficiency
            # to read files only once,
            # create a dict of files: {'f1.csv': ['x1', 'x2'],
            #                          'f2.csv': ['x3'],
            #                          'model_config': ['x4, x5']}
            for x in d._x:
                # If this y is actually not defined at this x,
                # and is also not a transmission tech,
                # continue (but set the param to 0 first)
                # TODO this is a bit of a hack -- e.g. the extra check
                # for transmission tech is necessary because we set
                # e_eff to 0 for all transmission (as transmission techs
                # don't show up in the config_model.locations[x].techs)
                # Keep an eye out in case this causes other problems
                if (y not in self.config_model.locations[x].techs
                        and y not in d._y_transmission):
                    d[param][y].loc[:, x] = 0
                    continue
                option = self.get_option(y + '.constraints.' + param, x=x)
                if (isinstance(option, str)
                        and not option.startswith('file')):
                    e = exceptions.ModelError
                    raise e('Invalid value for `{}.{}.{}`:'
                            ' `{}`'.format(param, y, x, option))
                k = param + '.' + y + '.' + x
                if isinstance(option, str) and option.startswith('file'):
                    if param == 'r':
                        d._y_def_r.add(y)
                    elif param == 'e_eff':
                        d._y_def_e_eff.add(y)
                    try:
                        # Parse 'file=filename' option
                        f = option.split('=')[1]
                    except IndexError:
                        # If set to just 'file', set filename with y and
                        # param, e.g. 'csp_r_eff.csv'
                        f = y + '_' + param + '.csv'
                    df = _get_option_from_csv(f)
                    # Set up x_map if that option has been set
                    try:
                        x_map = self.get_option(y + '.x_map', x=x)
                    except exceptions.OptionNotSetError:
                        x_map = None
                    # If x_map is available, remap the current col
                    if x_map:
                        # TODO this is a hack and will take up a lot
                        # of memory due to data duplication in case
                        # of a lot of mappings pointing to the same
                        # column in the data
                        # Format is <name in model config>:<name in data>
                        x_map_dict = {i.split(':')[0].strip():
                                      i.split(':')[1].strip()
                                      for i in x_map.split(',')}
                        x_map_str = 'x_map: \'{}\''.format(x_map)
                        # Get the mapping for this x from x_map
                        # NB not removing old columns in case
                        # those are also used somewhere!
                        try:
                            x_m = x_map_dict[x]
                        except KeyError:
                            e = exceptions.ModelError
                            raise e('x_map defined but does not map '
                                    'location defined in model config: '
                                    '{}, with {}'.format(x, x_map_str))
                        if x_m not in df.columns:
                            e = exceptions.ModelError
                            raise e('Trying to map to a column not '
                                    'contained in data: {}, for region '
                                    '{}, with {}'
                                    .format(x_m, x, x_map_str))
                        df[x] = df[x_m]
                    try:
                        d[param][y].loc[:, x] = df[x]
                        self.debug.data_sources.set_key(k, 'file:' + f)
                    except __HOLE__:
                        # If could not be read from file, set it to zero
                        d[param][y].loc[:, x] = 0
                        # Depending on whether or not the tech is allowed
                        # at this location, set _NA_ for the data source,
                        # or raise an error
                        if self.data.locations.at[x, y] == 0:
                            self.debug.data_sources.set_key(k, '_NA_')
                        else:
                            w = exceptions.ModelWarning
                            message = ('Could not load data for {}, '
                                       'with given option: '
                                       '{}'.format(k, option))
                            warnings.warn(message, w)
                            v = 'file:_NOT_FOUND_'
                            self.debug.data_sources.set_key(k, v)
                else:
                    d[param][y].loc[:, x] = option
                    self.debug.data_sources.set_key(k, 'model_config')
                    if (param == 'r' and option != float('inf')):
                        d._y_def_r.add(y)
                # Convert power to energy for r, if necessary
                if param == 'r':
                    r_unit = self.get_option(y + '.constraints.r_unit',
                                             x=x)
                    if r_unit == 'power':
                        r_scale = d.time_res_data
                        d[param][y].loc[:, x] = (d[param][y].loc[:, x]
                                                 * r_scale)
                # Scale r to a given maximum if necessary
                scale = self.get_option(y + '.constraints.r_scale_to_peak',
                                        x=x)
                if param == 'r' and scale:
                    scaled = self.scale_to_peak(d[param][y][x], scale)
                    d[param][y].loc[:, x] = scaled

    ds = self.debug.data_sources
    missing_data = [kk for kk in ds.keys_nested()
                    if ds.get_key(kk) == 'file:_NOT_FOUND_']
    if len(missing_data) > 0:
        message = ('The following parameter values could not be read '
                   'from file. They were automatically set to `0`: '
                   + ', '.join(missing_data))
        warnings.warn(message, exceptions.ModelWarning)

    # Finally, check data consistency. For now, demand must be <= 0,
    # and supply >= 0, at all times.
    # FIXME update these checks on implementing conditional param updates.
    for y in d._y_def_r:
        base_tech = self.get_parent(y)
        # Check each column:
        for c in d.r[y].columns:
            series = d.r[y][c]
            err_suffix = 'for tech: {}, at location: {}'.format(y, c)
            if base_tech == 'demand':
                err = 'Demand resource must be <=0, ' + err_suffix
                assert (series <= 0).all(), err
            elif base_tech == 'supply':
                err = 'Supply resource must be >=0, ' + err_suffix
                assert (series >= 0).all(), err
KeyError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/Model.read_data
8,411
def add_constraint(self, constraint, *args, **kwargs):
    try:
        constraint(self, *args, **kwargs)
    # If there is an error in a constraint, make sure to also get
    # the index where the error happened and pass that along
    except __HOLE__ as e:
        index = inspect.trace()[-1][0].f_locals['index']
        index_string = ', at index: {}'.format(index)
        if not e.args:
            e.args = ('',)
        e.args = (e.args[0] + index_string,) + e.args[1:]
        # Also log it because that is what Pyomo does, and want to ensure
        # that the log entry contains the info we added
        logging.error('Error generating constraint' + index_string)
        raise
ValueError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/Model.add_constraint
8,412
def _set_t_end(self):
    # t_end is the timestep previous to t_start + horizon,
    # because the .loc[start:end] slice includes the end
    try:
        self.t_end = self.prev(int(self.t_start +
                                   self.config_model.opmode.horizon /
                                   self.data.time_res_data))
    except __HOLE__:
        # If t_end is beyond last timestep, cap it to last one, and
        # log the occurrence
        t_bound = self.data._dt.index[-1]
        msg = 'Capping t_end to {}'.format(t_bound)
        logging.debug(msg)
        self.t_end = t_bound
    # print('\n\n***\n{}-{}\n***\n\n'.format(self.t_start, self.t_end))
KeyError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/Model._set_t_end
8,413
def solve(self, warmstart=False):
    """
    Args:
        warmstart : (default False) re-solve an updated model instance

    Returns: None

    """
    m = self.m
    cr = self.config_run
    solver_kwargs = {}
    if not warmstart:
        solver_io = cr.get_key('solver_io', default=False)
        if solver_io:
            self.opt = popt.SolverFactory(cr.solver, solver_io=solver_io)
        else:
            self.opt = popt.SolverFactory(cr.solver)
        # Set solver options from run_settings file, if it exists
        try:
            for k in cr.solver_options.keys_nested():
                self.opt.options[k] = cr.solver_options.get_key(k)
        except KeyError:
            pass
        if cr.get_key('debug.symbolic_solver_labels', default=False):
            solver_kwargs['symbolic_solver_labels'] = True
        if cr.get_key('debug.keep_temp_files', default=False):
            solver_kwargs['keepfiles'] = True
            if self.mode == 'plan':
                logdir = os.path.join('Logs', self.run_id)
            elif self.mode == 'operate':
                logdir = os.path.join('Logs',
                                      self.run_id + '_' + str(self.t_start))
            if (cr.get_key('debug.overwrite_temp_files', default=False)
                    and os.path.exists(logdir)):
                shutil.rmtree(logdir)
            os.makedirs(logdir)
            TempfileManager.tempdir = logdir

    def _solve(warmstart, solver_kwargs):
        warning = None
        if warmstart:
            try:
                results = self.opt.solve(self.m, warmstart=True,
                                         tee=True, **solver_kwargs)
            except __HOLE__ as e:
                if 'warmstart' in e.args[0]:
                    warning = ('The chosen solver, {}, '
                               'does not support warmstart, '
                               'which may impact '
                               'performance.').format(cr.get_key('solver'))
                    results = self.opt.solve(self.m, tee=True,
                                             **solver_kwargs)
        else:
            results = self.opt.solve(self.m, tee=True, **solver_kwargs)
        return results, warning

    if self.verbose:
        t = datetime.datetime.now().strftime(self.time_format)
        print('\nModel preprocessing complete at {}\n'.format(t))

    if cr.get_key('debug.echo_solver_log', default=False):
        self.results, warnmsg = _solve(warmstart, solver_kwargs)
    else:
        # Silencing output by redirecting stdout and stderr
        with utils.capture_output() as self.pyomo_output:
            self.results, warnmsg = _solve(warmstart, solver_kwargs)
    if warnmsg:
        warnings.warn(warnmsg, exceptions.ModelWarning)
    self.load_results()
ValueError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/Model.solve
8,414
def get_var(self, var, dims=None, standardize_coords=True):
    """
    Return output for variable `var` as a pandas.Series (1d),
    pandas.Dataframe (2d), or xarray.DataArray (3d and higher).

    Args:
        var : variable name as string, e.g. 'es_prod'
        dims : list of indices as strings, e.g. ('y', 'x', 't');
               if not given, they are auto-detected

    """
    m = self.m
    try:
        var_container = getattr(m, var)
    except __HOLE__:
        raise exceptions.ModelError('Variable {} inexistent.'.format(var))
    # Get dims
    if not dims:
        dims = [i.name for i in var_container.index_set().set_tuple]
    # Make sure standard coordinate names are used
    if standardize_coords:
        dims = [i.split('_')[0] for i in dims]
    result = pd.DataFrame.from_dict(var_container.get_values(),
                                    orient='index')
    if result.empty:
        raise exceptions.ModelError('Variable {} has no data.'.format(var))
    result.index = pd.MultiIndex.from_tuples(result.index, names=dims)
    result = result[0]  # Get the only column in the dataframe
    # Unstack and sort by time axis
    if len(dims) == 1:
        result = result.sort_index()
    elif len(dims) == 2:
        # if len(dims) is 2, we already have a well-formed DataFrame
        result = result.unstack(level=0)
        result = result.sort_index()
    else:  # len(dims) >= 3
        result = xr.DataArray.from_series(result)
    # Nicify time axis
    if 't' in dims:
        t = getattr(m, 't')
        if self.t_start is None:
            new_index = self.data._dt.loc[t.first():t.last()].tolist()
        else:
            new_index = self.data._dt.loc[self.t_start:self.t_end].tolist()
        if len(dims) <= 2:  # pandas
            result.index = new_index
        else:  # xarray
            result.coords['t'] = new_index
    return result
AttributeError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/Model.get_var
8,415
def _get_time_res_sum(self):
    m = self.m
    time_res = self.data.time_res_series
    try:
        # Try loading time_res_sum from operational mode
        time_res_sum = self.data.time_res_sum
    except __HOLE__:
        time_res_sum = sum(time_res.at[t] for t in m.t)
    return time_res_sum
KeyError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/Model._get_time_res_sum
8,416
def save_solution(self, how):
    """Save model solution. ``how`` can be 'netcdf' or 'csv'"""
    if 'path' not in self.config_run.output:
        self.config_run.output['path'] = 'Output'
        logging.warning('`config_run.output.path` not set, '
                        'using default: `Output`')
    # Create output dir, but ignore if it already exists
    try:
        os.makedirs(self.config_run.output.path)
    except __HOLE__:  # Hoping this isn't raised for more serious stuff
        pass
    # except KeyError:  # likely because `path` or `output` not defined
    #     raise exceptions.ModelError('`config_run.output.path` '
    #                                 'not configured.')
    # Add r and e_eff time series alongside the solution
    additional_params = ['r', 'e_eff']
    for param in additional_params:
        subset_name = '_y_def_' + param
        if len(self.data[subset_name]) > 0:
            self.solution[param] = self._get_data_array(param, subset_name)
    if how == 'netcdf':
        self._save_netcdf4()
    elif how == 'csv':
        self._save_csv()
    else:
        raise ValueError('Unsupported value for `how`: {}'.format(how))
    # Remove r and e_eff from solution again
    for param in additional_params:
        if param in self.solution:
            del self.solution[param]
    return None
OSError
dataset/ETHPy150Open calliope-project/calliope/calliope/core.py/Model.save_solution
8,417
def main(self, request_id, path_to_file):
    self.repository_info, self.tool = self.initialize_scm_tool(
        client_name=self.options.repository_type)
    server_url = self.get_server_url(self.repository_info, self.tool)
    api_client, api_root = self.get_api(server_url)
    request = get_review_request(request_id, api_root)

    try:
        with open(path_to_file, 'rb') as f:
            content = f.read()
    except __HOLE__:
        raise CommandError('%s is not a valid file.' % path_to_file)

    # Check if the user specified a custom filename, otherwise
    # use the original filename.
    filename = self.options.filename or os.path.basename(path_to_file)

    try:
        request.get_file_attachments().upload_attachment(
            filename, content, self.options.caption)
    except APIError as e:
        raise CommandError('Error uploading file: %s' % e)

    print('Uploaded %s to review request %s.' % (path_to_file, request_id))
IOError
dataset/ETHPy150Open reviewboard/rbtools/rbtools/commands/attach.py/Attach.main
8,418
def write(self, *args, **kwargs):
    if kwargs.get('level', SIPLOG_INFO) < self.level:
        return
    ltime = kwargs.get('ltime', None)
    if ltime == None:
        ltime = time()
    call_id = kwargs.get('call_id', self.call_id)
    obuf = '%s.%.3d/%s/%s: %s\n' % (strftime('%d %b %H:%M:%S', localtime(ltime)),
                                    (ltime % 1) * 1000, call_id, self.app,
                                    reduce(lambda x, y: x + y, [str(x) for x in args]))
    try:
        self.flock(self.log, LOCK_EX)
    except IOError, e:
        # Catch ENOTSUP
        if e.args[0] != 45:
            raise e
        self.flock = lambda x, y: None
    try:
        self.log.write(obuf)
    except __HOLE__, e:
        if e.args[0] != EINTR:
            raise e
    self.log.flush()
    self.flock(self.log, LOCK_UN)
IOError
dataset/ETHPy150Open hgascon/pulsar/pulsar/core/sippy/SipLogger.py/SipLogger.write
8,419
def get_user_info(self):
    fields = providers.registry \
        .by_id(LinkedInProvider.id) \
        .get_profile_fields()
    url = self.url + ':(%s)' % ','.join(fields)
    raw_xml = self.query(url)
    if not six.PY3:
        raw_xml = raw_xml.encode('utf8')
    try:
        return self.to_dict(ElementTree.fromstring(raw_xml))
    except (ExpatError, __HOLE__, IndexError):
        return None
KeyError
dataset/ETHPy150Open pennersr/django-allauth/allauth/socialaccount/providers/linkedin/views.py/LinkedInAPI.get_user_info
8,420
def pairwise(iterable):
    """
    Yield pairs of consecutive elements in iterable.

    >>> list(pairwise('abcd'))
    [('a', 'b'), ('b', 'c'), ('c', 'd')]
    """
    iterator = iter(iterable)
    try:
        a = iterator.next()
    except __HOLE__:
        return
    for b in iterator:
        yield a, b
        a = b
StopIteration
dataset/ETHPy150Open SimonSapin/snippets/markov_passwords.py/pairwise
8,421
def parse_encoding(fp):
    """Deduce the encoding of a source file from magic comment.

    It does this in the same way as the `Python interpreter`__

    .. __: https://docs.python.org/3.4/reference/lexical_analysis.html#encoding-declarations

    The ``fp`` argument should be a seekable file object.

    (From Jeff Dairiki)
    """
    pos = fp.tell()
    fp.seek(0)
    try:
        line1 = fp.readline()
        has_bom = line1.startswith(codecs.BOM_UTF8)
        if has_bom:
            line1 = line1[len(codecs.BOM_UTF8):]

        m = PYTHON_MAGIC_COMMENT_re.match(line1)
        if not m:
            try:
                import parser
                parser.suite(line1.decode('latin-1'))
            except (__HOLE__, SyntaxError, UnicodeEncodeError):
                # Either it's a real syntax error, in which case the source
                # is not valid python source, or line2 is a continuation of
                # line1, in which case we don't want to scan line2 for a
                # magic comment.
                pass
            else:
                line2 = fp.readline()
                m = PYTHON_MAGIC_COMMENT_re.match(line2)

        if has_bom:
            if m:
                magic_comment_encoding = m.group(1).decode('latin-1')
                if magic_comment_encoding != 'utf-8':
                    raise SyntaxError(
                        'encoding problem: {0} with BOM'.format(
                            magic_comment_encoding))
            return 'utf-8'
        elif m:
            return m.group(1).decode('latin-1')
        else:
            return None
    finally:
        fp.seek(pos)
ImportError
dataset/ETHPy150Open python-babel/babel/babel/util.py/parse_encoding
8,422
def pop(self, key, default=missing):
    try:
        value = dict.pop(self, key)
        self._keys.remove(key)
        return value
    except __HOLE__ as e:
        if default == missing:
            raise e
        else:
            return default
KeyError
dataset/ETHPy150Open python-babel/babel/babel/util.py/odict.pop
8,423
def _GetBenchmarkSpec(benchmark_config, benchmark_name, benchmark_uid):
    """Creates a BenchmarkSpec or loads one from a file.

    During the provision stage, creates a BenchmarkSpec from the provided
    configuration. During any later stage, loads the BenchmarkSpec that was
    created during the provision stage from a file.

    Args:
        benchmark_config: BenchmarkConfigSpec. The benchmark configuration
            to use while running the current stage.
        benchmark_name: string. Name of the benchmark.
        benchmark_uid: string. Identifies a specific run of a benchmark.

    Returns:
        The created or loaded BenchmarkSpec.
    """
    if stages.PROVISION in FLAGS.run_stage:
        return benchmark_spec.BenchmarkSpec(benchmark_config, benchmark_name,
                                            benchmark_uid)
    else:
        try:
            return benchmark_spec.BenchmarkSpec.GetSpecFromFile(
                benchmark_uid, benchmark_config)
        except __HOLE__:
            if FLAGS.run_stage == [stages.PREPARE]:
                logging.error(
                    'We were unable to load the BenchmarkSpec. This may be related '
                    'to two additional run stages which have recently been added. '
                    'Please make sure to run the stage "provision" before "prepare". '
                    'Similarly, make sure to run "teardown" after "cleanup".')
            raise
IOError
dataset/ETHPy150Open GoogleCloudPlatform/PerfKitBenchmarker/perfkitbenchmarker/pkb.py/_GetBenchmarkSpec
8,424
def test_instance(self):
    '''
    Test creating an instance on ProfitBricks
    '''
    # check if instance with salt installed returned
    try:
        self.assertIn(
            INSTANCE_NAME,
            [i.strip() for i in self.run_cloud(
                '-p profitbricks-test {0}'.format(INSTANCE_NAME)
            )]
        )
    except __HOLE__:
        self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME))
        raise

    # delete the instance
    try:
        self.assertIn(
            INSTANCE_NAME + ':',
            [i.strip() for i in self.run_cloud(
                '-d {0} --assume-yes'.format(INSTANCE_NAME)
            )]
        )
    except AssertionError:
        raise
AssertionError
dataset/ETHPy150Open saltstack/salt/tests/integration/cloud/providers/profitbricks.py/ProfitBricksTest.test_instance
8,425
def action_logging(user, object_list, action_type, message=None,
                   context=None):
    """
    Add ActionLog using a set of parameters.

    user: The user that did the action.
    object_list: A list of objects that the actionlog should be created for.
    action_type: Label of a type of action from the NoticeType model.
    message: A message to be included at the actionlog. If no message is
        passed it will try to render a message using the notice.html from
        the notification application.
    context: To render the message using the notification files, sometimes
        it is necessary to pass some vars by using a context.

    Usage::

        al = 'project_added'
        context = {'project': object}
        action_logging(request.user, [object], al, context=context)
    """
    if not getattr(settings, 'ACTIONLOG_ENABLED', None):
        return

    if context is None:
        context = {}

    if message is None:
        message = _get_formatted_message(action_type, context)

    action_type_obj = NoticeType.objects.get(label=action_type)

    time = datetime.datetime.now()

    try:
        for object in object_list:
            l = LogEntry(
                user_id=user.pk,
                content_type=ContentType.objects.get_for_model(object),
                object_id=object.pk,
                object_name=force_unicode(object)[:200],
                action_type=action_type_obj,
                action_time=time,
                message=message)
            l.save()
            if USE_REDIS:
                _log_to_queues(object, user.pk, time, message)
    except __HOLE__:
        raise TypeError("The 'object_list' parameter must be iterable")
TypeError
dataset/ETHPy150Open rvanlaar/easy-transifex/src/transifex/transifex/actionlog/models.py/action_logging
8,426
def main(argv=None):
    if argv is None:
        argv = sys.argv

    parser = E.OptionParser(
        version="%prog version: $Id$",
        usage=globals()["__doc__"])

    parser.add_option("-g", "--genome-file", dest="genome_file",
                      type="string",
                      help="filename with genome [default=%default].")
    parser.add_option("-p", "--output-filename-pattern",
                      dest="output_filename_pattern", type="string",
                      help="OUTPUT filename with histogram information on "
                      "aggregate coverages [%default].")
    parser.add_option("--read-length-mean", dest="read_length_mean",
                      type="float",
                      help="simulation parameter [default=%default].")
    parser.add_option("--read-length-std", dest="read_length_stddev",
                      type="float",
                      help="simulation parameter [default=%default].")
    parser.add_option("--coverage-mean", dest="coverage_mean", type="float",
                      help="simulation parameter [default=%default].")
    parser.add_option("--coverage-std", dest="coverage_stddev", type="float",
                      help="simulation parameter [default=%default].")
    parser.add_option("--ds-mean", dest="ds_mean", type="float",
                      help="simulation parameter [default=%default].")
    parser.add_option("--ds-std", dest="ds_stddev", type="float",
                      help="simulation parameter [default=%default].")
    parser.add_option("--error-mean", dest="error_mean", type="float",
                      help="simulation parameter [default=%default].")
    parser.add_option("--error-std", dest="error_stddev", type="float",
                      help="simulation parameter [default=%default].")
    parser.add_option("--min-read-length", dest="min_read_length",
                      type="int",
                      help="minimum read length [default=%default].")
    parser.add_option("--sample-size", dest="sample_size", type="int",
                      help="randomly sample from selected transcripts "
                      "[default=%default].")
    parser.add_option("--test", dest="test", type="int",
                      help="test with # first entries [default=%default].")
    parser.add_option("--mode", dest="mode", type="choice",
                      choices=("genes", "transcripts"),
                      help="use genes or transcripts [default=%default].")

    parser.set_defaults(
        genome_file=None,
        read_length_mean=200.0,
        read_length_stddev=20.0,
        coverage_mean=2.0,
        coverage_stddev=1.0,
        ds_mean=None,
        ds_stddev=None,
        error_mean=None,
        error_stddev=None,
        min_read_length=50,
        test=None,
        mode="transcripts",
        output_filename_pattern=None,
        output_format_id="%010i",
        sample_size=0,
    )

    (options, args) = E.Start(parser, argv)

    assert options.genome_file, "please supply an indexed genome."

    if options.output_filename_pattern:
        outfile_stats = open(options.output_filename_pattern % "stats", "w")
        outfile_stats.write(
            "id\tlen\tnreads\tlen_mean\tlen_std\tcov_mean\tcov_std\n")
        outfile_map = open(options.output_filename_pattern % "map", "w")
        outfile_map.write("id\ttranscript\n")
    else:
        outfile_stats = None
        outfile_map = None

    genome = IndexedFasta.IndexedFasta(options.genome_file)

    ninput, noutput, nskipped = 0, 0, 0

    total_counts, total_read_lengths, total_len = [], [], 0
    total_pids = []
    total_error_pids = []

    if options.mode == "transcripts":
        iterator = GTF.transcript_iterator(
            GTF.iterator_filtered(GTF.iterator(options.stdin),
                                  feature="exon"))
        getId = lambda x: x.transcript_id
    elif options.mode == "genes":
        iterator = GTF.flat_gene_iterator(
            GTF.iterator_filtered(GTF.iterator(options.stdin),
                                  feature="exon"))
        getId = lambda x: x.gene_id

    if options.sample_size:
        iterator = Iterators.sample(iterator)

    if options.ds_mean:
        do_mutate = True
        pid_calc = SequencePairProperties.SequencePairPropertiesPID()
    else:
        do_mutate = False

    if options.error_mean:
        do_error = True
        pid_calc = SequencePairProperties.SequencePairPropertiesPID()
    else:
        do_error = False

    for gtfs in iterator:

        id = getId(gtfs[0])

        try:
            sequence = GTF.toSequence(gtfs, genome)
        except __HOLE__, msg:
            if options.loglevel >= 2:
                options.stdlog.write("# skipping %s: %s\n" % (id, msg))
            nskipped += 1
            continue

        lsequence = len(sequence)
        if lsequence <= options.min_read_length * 2:
            if options.loglevel >= 2:
                options.stdlog.write(
                    "# skipping %s - sequence is too short: %i\n" %
                    (id, lsequence))
            nskipped += 1
            continue

        ninput += 1

        if do_mutate:
            new_sequence = getMutatedSequence(sequence, options.ds_mean)
            pid_calc.loadPair(sequence, new_sequence)
            pid = pid_calc.mPID
            total_pids.append(pid)
            sequence = new_sequence
        else:
            pid = 100.0

        if options.loglevel >= 2:
            options.stdlog.write(
                "# processing %s - len=%i\n" % (id, lsequence))
            options.stdlog.flush()

        total_len += lsequence
        lvsequence = lsequence * \
            random.gauss(options.coverage_mean, options.coverage_stddev)

        covered = 0
        counts = numpy.zeros(lsequence)
        nreads = 0

        error_pids, read_lengths = [], []

        while covered < lvsequence:

            read_length = int(
                random.gauss(options.read_length_mean,
                             options.read_length_stddev))
            positive = random.randint(0, 1)

            if positive:
                start = random.randint(0, lsequence)
                end = min(lsequence, start + read_length)
            else:
                end = random.randint(0, lsequence)
                start = max(0, end - read_length)

            read_length = end - start
            if read_length < options.min_read_length:
                continue

            segment = sequence[start:end]

            if not positive:
                segment = Genomics.complement(segment)

            noutput += 1

            if do_error:
                new_segment = getMutatedSequence(segment, options.error_mean)
                pid_calc.loadPair(segment, new_segment)
                pid = pid_calc.mPID
                error_pids.append(pid)
                segment = new_segment
            else:
                pid = 100.0

            options.stdout.write(
                ">%s\n%s\n" % (options.output_format_id % noutput, segment))

            if outfile_map:
                outfile_map.write(
                    "%s\t%s\n" % (id, options.output_format_id % noutput))

            for x in range(start, end):
                counts[x] += 1

            nreads += 1

            covered += read_length
            read_lengths.append(read_length)

        if options.loglevel >= 2:
            options.stdout.write(
                "# transcript %s: len=%i, nreads=%i, len_mean=%.2f, "
                "len_std=%.2f, cov_mean=%.2f, cov_stddev=%.2f\n" % (
                    id, lsequence, nreads,
                    numpy.mean(read_lengths), numpy.std(read_lengths),
                    numpy.mean(counts), numpy.std(counts)))

        if outfile_stats:
            outfile_stats.write(
                "%s\t%i\t%i\t%.2f\t%.2f\t%.2f\t%.2f\n" % (
                    id, lsequence, nreads,
                    numpy.mean(read_lengths), numpy.std(read_lengths),
                    numpy.mean(counts), numpy.std(counts)))

        total_counts += list(counts)
        total_read_lengths += read_lengths
        total_error_pids += error_pids

        if options.test and ninput >= options.test:
            break

        if options.sample_size and ninput >= options.sample_size:
            break

    if options.loglevel >= 1:
        output = ["len=%i, nreads=%i" % (total_len, noutput)]
        output.append(
            "len_mean=%.2f, len_std=%.2f, cov_mean=%.2f, cov_stddev=%.2f" % (
                numpy.mean(total_read_lengths),
                numpy.std(total_read_lengths),
                numpy.mean(total_counts),
                numpy.std(total_counts)))

        no_uncovered = [x for x in total_counts if x > 0]

        output.append("cov0_mean=%.2f, cov0_stddev=%.2f" % (
            numpy.mean(no_uncovered),
            numpy.std(no_uncovered)))

        if do_mutate:
            output.append("pid_mean=%.2f, pid_std=%.2f" % (
                numpy.mean(total_pids), numpy.std(total_pids)))

        if do_error:
            output.append("pid_error_mean=%.2f, pid_error_std=%.2f" % (
                numpy.mean(total_error_pids), numpy.std(total_error_pids)))

        options.stdlog.write("# effective: %s\n" % ", ".join(output))

    if options.loglevel >= 1:
        options.stdlog.write(
            "# ninput=%i, noutput=%i, nskipped=%i\n" %
            (ninput, noutput, nskipped))

    E.Stop()
KeyError
dataset/ETHPy150Open CGATOxford/cgat/scripts/gtf2reads.py/main
8,427
@register.tag
def render_form_field(parser, token):
    """
    Usage is {% render_form_field form.field_name optional_help_text optional_css_classes %}

    - optional_help_text and optional_css_classes are strings
    - if optional_help_text is not given, then it is taken from form
      field object
    """
    try:
        help_text = None
        css_classes = None
        token_split = token.split_contents()
        if len(token_split) == 4:
            tag_name, form_field, help_text, css_classes = \
                token.split_contents()
        elif len(token_split) == 3:
            tag_name, form_field, help_text = token.split_contents()
        else:
            tag_name, form_field = token.split_contents()
    except __HOLE__:
        raise template.TemplateSyntaxError(
            "Unable to parse arguments for {0}".format(
                repr(token.contents.split()[0])))
    return FormFieldNode(form_field, help_text=help_text,
                         css_classes=css_classes)
ValueError
dataset/ETHPy150Open Tivix/django-common/django_common/templatetags/custom_tags.py/render_form_field
8,428
def test_run__requires_result(self):
    suite = unittest.TestSuite()

    try:
        suite.run()
    except __HOLE__:
        pass
    else:
        self.fail("Failed to raise TypeError")

# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
TypeError
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/unittest/test/test_suite.py/Test_TestSuite.test_run__requires_result
8,429
def test_addTest__noniterable(self):
    suite = unittest.TestSuite()

    try:
        suite.addTests(5)
    except __HOLE__:
        pass
    else:
        self.fail("Failed to raise TypeError")
TypeError
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/unittest/test/test_suite.py/Test_TestSuite.test_addTest__noniterable
8,430
def all(user, groupby='week', summary='default', network=False,
        split_week=False, split_day=False, attributes=True, flatten=False):
    """
    Returns a dictionary containing all bandicoot indicators for the user,
    as well as reporting variables.

    Relevant indicators are defined in the 'individual', and 'spatial'
    modules.

    =================================== =======================================================================
    Reporting variables                 Description
    =================================== =======================================================================
    antennas_path                       path of the CSV file containing antennas locations
    attributes_path                     directory where attributes were loaded
    version                             bandicoot version
    groupby                             grouping method ('week' or None)
    split_week                          whether or not indicators are also computed for weekday and weekend
    split_day                           whether or not indicators are also computed for day and night
    start_time                          time of the first record
    end_time                            time of the last record
    night_start, night_end              start and end time to define nights
    weekend                             days used to define the weekend (``[6, 7]`` by default, where 1 is Monday)
    bins                                number of weeks if the records are grouped
    has_call                            whether or not records include calls
    has_text                            whether or not records include texts
    has_home                            whether or not a :meth:`home location <bandicoot.core.User.recompute_home>` has been found
    has_network                         whether or not correspondents were loaded
    percent_records_missing_location    percentage of records without location
    antennas_missing_locations          number of antennas missing a location
    percent_outofnetwork_calls          percentage of calls, received or emitted, made with a correspondent not loaded in the network
    percent_outofnetwork_texts          percentage of texts with contacts not loaded in the network
    percent_outofnetwork_contacts       percentage of contacts not loaded in the network
    percent_outofnetwork_call_durations percentage of minutes of calls where the contact was not loaded in the network
    number_of_records                   total number of records
    =================================== =======================================================================

    We also include a last set of reporting variables, for the records
    ignored at load-time. Values can be ignored due to missing or
    inconsistent fields (e.g., not including a valid 'datetime' value).

    .. code-block:: python

        {
            'all': 0,
            'interaction': 0,
            'direction': 0,
            'correspondent_id': 0,
            'datetime': 0,
            'call_duration': 0
        }

    with the total number of records ignored (key ``'all'``), as well as
    the number of records with faulty values for each column.
    """
    # Warn the user if they are selecting weekly and there's only one week
    if groupby is not None:
        if len(set(DATE_GROUPERS[groupby](r.datetime)
                   for r in user.records)) <= 1:
            print warning_str('Grouping by week, but all data is from the same week!')

    scalar_type = 'distribution_scalar' if groupby == 'week' else 'scalar'
    summary_type = ('distribution_summarystats' if groupby == 'week'
                    else 'summarystats')

    number_of_interactions_in = partial(bc.individual.number_of_interactions,
                                        direction='in')
    number_of_interactions_in.__name__ = 'number_of_interaction_in'
    number_of_interactions_out = partial(
        bc.individual.number_of_interactions, direction='out')
    number_of_interactions_out.__name__ = 'number_of_interaction_out'

    functions = [
        (bc.individual.active_days, scalar_type),
        (bc.individual.number_of_contacts, scalar_type),
        (bc.individual.call_duration, summary_type),
        (bc.individual.percent_nocturnal, scalar_type),
        (bc.individual.percent_initiated_conversations, scalar_type),
        (bc.individual.percent_initiated_interactions, scalar_type),
        (bc.individual.response_delay_text, summary_type),
        (bc.individual.response_rate_text, scalar_type),
        (bc.individual.entropy_of_contacts, scalar_type),
        (bc.individual.balance_of_contacts, summary_type),
        (bc.individual.interactions_per_contact, summary_type),
        (bc.individual.interevent_time, summary_type),
        (bc.individual.percent_pareto_interactions, scalar_type),
        (bc.individual.percent_pareto_durations, scalar_type),
        (bc.individual.number_of_interactions, scalar_type),
        (number_of_interactions_in, scalar_type),
        (number_of_interactions_out, scalar_type),
        (bc.spatial.number_of_antennas, scalar_type),
        (bc.spatial.entropy_of_antennas, scalar_type),
        (bc.spatial.percent_at_home, scalar_type),
        (bc.spatial.radius_of_gyration, scalar_type),
        (bc.spatial.frequent_antennas, scalar_type),
        (bc.spatial.churn_rate, scalar_type)
    ]

    network_functions = [
        bc.network.clustering_coefficient_unweighted,
        bc.network.clustering_coefficient_weighted,
        bc.network.assortativity_attributes,
        bc.network.assortativity_indicators
    ]

    groups = [[r for r in g] for g in group_records(user, groupby=groupby)]

    reporting = OrderedDict([
        ('antennas_path', user.antennas_path),
        ('attributes_path', user.attributes_path),
        ('version', bc.__version__),
        ('groupby', groupby),
        ('split_week', split_week),
        ('split_day', split_day),
        ('start_time', user.start_time and str(user.start_time)),
        ('end_time', user.end_time and str(user.end_time)),
        ('night_start', str(user.night_start)),
        ('night_end', str(user.night_end)),
        ('weekend', user.weekend),
        ('bins', len(groups)),
        ('has_call', user.has_call),
        ('has_text', user.has_text),
        ('has_home', user.has_home),
        ('has_network', user.has_network),
        ('percent_records_missing_location',
         bc.helper.tools.percent_records_missing_location(user)),
        ('antennas_missing_locations',
         bc.helper.tools.antennas_missing_locations(user)),
        ('percent_outofnetwork_calls', user.percent_outofnetwork_calls),
        ('percent_outofnetwork_texts', user.percent_outofnetwork_texts),
        ('percent_outofnetwork_contacts',
         user.percent_outofnetwork_contacts),
        ('percent_outofnetwork_call_durations',
         user.percent_outofnetwork_call_durations),
    ])

    if user.records is not None:
        reporting['number_of_records'] = len(user.records)
    else:
        reporting['number_of_records'] = 0.

    if user.ignored_records is not None:
        reporting['ignored_records'] = user.ignored_records

    returned = OrderedDict([
        ('name', user.name),
        ('reporting', reporting)
    ])

    for fun, datatype in functions:
        try:
            metric = fun(user, groupby=groupby, summary=summary,
                         datatype=datatype, split_week=split_week,
                         split_day=split_day)
        except __HOLE__:
            metric = fun(user, groupby=groupby, datatype=datatype,
                         split_week=split_week, split_day=split_day)

        returned[fun.__name__] = metric

    if network and user.has_network:
        for fun in network_functions:
            returned[fun.__name__] = fun(user)

    if attributes and user.attributes != {}:
        returned['attributes'] = user.attributes

    if flatten is True:
        return globals()['flatten'](returned)

    return returned
ValueError
dataset/ETHPy150Open yvesalexandre/bandicoot/bandicoot/utils.py/all
8,431
def sequential_join(left_rows, right_rows, header=True):
    """
    Join two tables by aligning them horizontally without performing any
    filtering.
    """
    len_left_headers = len(left_rows[0])
    len_right_headers = len(right_rows[0])

    if header:
        output = [left_rows[0] + right_rows[0]]
        left_rows = left_rows[1:]
        right_rows = iter(right_rows[1:])
    else:
        output = []

    for left_row in left_rows:
        try:
            right_row = next(right_rows)
            output.append(left_row + right_row)
        except __HOLE__:
            output.append(left_row + [u''] * len_right_headers)

    for right_row in right_rows:
        output.append([u''] * len_left_headers + right_row)

    return output
StopIteration
dataset/ETHPy150Open wireservice/csvkit/csvkit/join.py/sequential_join
8,432
def _to_link_header(self, link):
    """
    Convert the link tuple to a link header string. Used internally.
    """
    try:
        bucket, key, tag = link
    except __HOLE__:
        raise RiakError("Invalid link tuple %s" % link)
    tag = tag if tag is not None else bucket
    url = self.object_path(bucket, key)
    header = '<%s>; riaktag="%s"' % (url, tag)
    return header
ValueError
dataset/ETHPy150Open basho/riak-python-client/riak/codecs/http.py/HttpCodec._to_link_header
8,433
@staticmethod
def displayRow(pRow, pDescription, pDepth=0):
    '''
    Print content row for a particular table description and data.
    '''
    line = ""
    for column in pDescription.columns:
        if column['visible'] == True:
            try:
                if type(column['field']) is tuple:
                    # The field is a formula
                    formula = column['field'][0]
                    args = [pRow[curField]
                            for curField in column['field'][1:]]
                    try:
                        data = str(formula(*args))
                    except Exception, e:
                        print "Invalid formula execution for %r --> %r" % (column['field'], e)
                        sys.exit()
                    # Apply transformation if specified; this process takes
                    # a data value and calls a method to retrieve a string
                    if 'transform' in column.keys():
                        try:
                            data = column['transform'](data)
                        except Exception, e:
                            print "Invalid transformation for column %s = %r --> %r" % (column['label'], data, e)
                            sys.exit()
                elif column['field'] not in pRow or pRow[column['field']] is None:
                    # The field is None or does not exist in row,
                    # use replacement text
                    data = "-"
                else:
                    data = pRow[column['field']]
                    # Apply transformation if specified; this process takes
                    # a data value and calls a method to retrieve a string
                    if 'transform' in column.keys():
                        try:
                            data = column['transform'](data)
                        except Exception, e:
                            print "Invalid transformation for column %r --> %r" % (column['field'], e)
                            sys.exit()
                # Limit length of the data if specified
                if 'truncate' in column.keys() and len(data) > column['truncate']:
                    data = data[:(column['truncate'] - 3)] + "..."
                line += column['dataFormat'] % data
            except __HOLE__, e:
                print "Error displaying column %r --> %r" % (column, e)
                raise e
    # Once all columns are processed, display the full line
    if pDepth == 0:
        print line
    else:
        print (" " * pDepth) + "`" + line
    if 'items' in pRow:
        pDepth += 1
        for child in pRow['items']:
            CustomTable.displayRow(child, pDescription, pDepth)
KeyError
dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/pulitools/puliquery/common.py/CustomTable.displayRow
8,434
def render(self, data, accepted_media_type=None, renderer_context=None):
    self.set_filename(self.basename(data), renderer_context)
    fp = data['image']
    try:
        fp.seek(0, 2)
    except __HOLE__:
        size = os.path.getsize(fp)
        fp = open(fp)
    else:
        size = fp.tell()
        fp.seek(0)
    self.set_response_length(size, renderer_context)
    return fp
AttributeError
dataset/ETHPy150Open bkg/django-spillway/spillway/renderers/gdal.py/BaseGDALRenderer.render
8,435
def set_filename(self, name, renderer_context):
    type_name = 'attachment; filename=%s' % name
    try:
        renderer_context['response']['Content-Disposition'] = type_name
    except (KeyError, __HOLE__):
        pass
TypeError
dataset/ETHPy150Open bkg/django-spillway/spillway/renderers/gdal.py/BaseGDALRenderer.set_filename
8,436
def set_response_length(self, length, renderer_context):
    try:
        renderer_context['response']['Content-Length'] = length
    except (__HOLE__, TypeError):
        pass
KeyError
dataset/ETHPy150Open bkg/django-spillway/spillway/renderers/gdal.py/BaseGDALRenderer.set_response_length
8,437
def render(self, data, accepted_media_type=None, renderer_context=None):
    if isinstance(data, dict):
        data = [data]
    zipname = '%s.%s' % (self.arcdirname, self.format)
    self.set_filename(zipname, renderer_context)
    fp = tempfile.TemporaryFile(suffix='.%s' % self.format)
    with zipfile.ZipFile(fp, mode='w') as zf:
        for item in data:
            arcname = os.path.join(self.arcdirname, self.basename(item))
            io = item['image']
            try:
                zf.writestr(arcname, io.read())
            except __HOLE__:
                zf.write(io, arcname=arcname)
            else:
                io.close()
    self.set_response_length(fp.tell(), renderer_context)
    fp.seek(0)
    return fp
AttributeError
dataset/ETHPy150Open bkg/django-spillway/spillway/renderers/gdal.py/GeoTIFFZipRenderer.render
8,438
@classmethod
def split_input(cls, mapper_spec):
    shard_count = mapper_spec.shard_count
    # Grab the input parameters for the split
    params = input_readers._get_params(mapper_spec)
    logging.info("Params: %r", params)
    db = params['db']
    # Unpickle the query
    app, model = params['model'].split('.')
    model = apps.get_model(app, model)
    # Grab the lowest pk
    query = model.objects.using(db).all()
    query = query.order_by('pk')
    try:
        first_id = query.values_list('pk', flat=True)[:1][0]
        query = query.order_by('-pk')
        last_id = query.values_list('pk', flat=True)[:1][0]
    except __HOLE__:
        return [DjangoInputReader(0, 0, params['model'], db=db)]

    pk_range = last_id - first_id
    logging.info("Query range: %s - %s = %s", first_id, last_id, pk_range)
    if pk_range < shard_count or shard_count == 1:
        return [DjangoInputReader(first_id - 1, last_id, params['model'],
                                  db=db)]

    readers = []
    max_shard_size = int(float(pk_range) / float(shard_count))
    if pk_range % shard_count:
        max_shard_size += 1
    shard_id = 1
    # Splitting could be much smarter by taking a __scatter__ sample and
    # clustering, which is how the DatastoreInputWriter from the mapreduce
    # splits on pks
    for i in itertools.count(first_id - 1, max_shard_size):
        if i >= last_id:
            break
        shard_start_id = i
        shard_end_id = i + max_shard_size
        if shard_end_id > last_id:
            shard_end_id = last_id
        logging.info("Creating shard: %s - %s", shard_start_id,
                     shard_end_id)
        reader = DjangoInputReader(shard_start_id, shard_end_id,
                                   params['model'], db=db)
        reader.shard_id = shard_id
        readers.append(reader)
        shard_id += 1
    return readers
IndexError
dataset/ETHPy150Open potatolondon/djangae/djangae/contrib/mappers/readers.py/DjangoInputReader.split_input
8,439
def __init__(self, **kwargs):
    View.__init__(self, **kwargs)

    # Allow this view to easily switch between feed formats.
    format = kwargs.get('format', self.format)
    try:
        self.feed_type = _FEED_FORMATS[format]
    except __HOLE__:
        raise ValueError(
            "Unsupported feed format: {0}. Supported are: {1}".format(
                self.format, ', '.join(sorted(_FEED_FORMATS.iterkeys()))
            ))
KeyError
dataset/ETHPy150Open edoburu/django-fluent-blogs/fluent_blogs/views/feeds.py/FeedView.__init__
8,440
def decode(arg, delimiter=None, encodeseq=None):
    '''Decode a single argument from the file-system'''
    arg = coerce_unicode(arg, _c.FSQ_CHARSET)
    new_arg = sep = u''
    delimiter, encodeseq = delimiter_encodeseq(
        _c.FSQ_DELIMITER if delimiter is None else delimiter,
        _c.FSQ_ENCODE if encodeseq is None else encodeseq,
        _c.FSQ_CHARSET)
    # char-wise decode walk -- minimally stateful
    encoding_trg = sep
    for c in arg:
        if len(encoding_trg):
            encoding_trg = sep.join([encoding_trg, c])
            if 4 != len(encoding_trg):
                continue
            try:
                c = chr(int(encoding_trg, 16))
            except __HOLE__:
                raise FSQEncodeError(errno.EINVAL, u'invalid decode'
                                     u' target: {0}'.format(encoding_trg))
            c = coerce_unicode(c, _c.FSQ_CHARSET)
            encoding_trg = sep
        elif c == encodeseq:
            encoding_trg = u'0x'
            continue
        new_arg = sep.join([new_arg, c])
    # edge case, incomplete encoding at end of string
    if len(encoding_trg):
        raise FSQEncodeError(errno.EINVAL, u'truncated encoding at end of'
                             u' argument: {0}'.format(encoding_trg))
    return new_arg
ValueError
dataset/ETHPy150Open axialmarket/fsq/fsq/encode.py/decode
8,441
def pathsplit(path):
    """Split a /-delimited path into a directory part and a basename.

    :param path: The path to split.
    :return: Tuple with directory name and basename
    """
    try:
        (dirname, basename) = path.rsplit("/", 1)
    except __HOLE__:
        return ("", path)
    else:
        return (dirname, basename)
ValueError
dataset/ETHPy150Open natestedman/Observatory/observatory/lib/dulwich/index.py/pathsplit
8,442
def lastfm_get_tree(self, method, **kwargs):
    kwargs.update({
        'autocorrect': 1,
        self.get_type(): unicode(self).encode('utf-8'),
    })

    if hasattr(self, 'artist'):
        kwargs['artist'] = unicode(self.artist).encode('utf-8')
    if hasattr(self, 'album'):
        kwargs['album'] = unicode(self.album).encode('utf-8')

    url = 'http://ws.audioscrobbler.com/2.0/?api_key=%s&method=%s&%s' % (
        settings.LASTFM_API_KEY,
        method,
        urllib.urlencode(kwargs)
    )
    logger.info(url)

    try:
        tree = etree.parse(url)
        return tree
    except __HOLE__:
        print "Did not work: " + url
        return None
IOError
dataset/ETHPy150Open jpic/playlistnow.fm/apps/music/models.py/MusicalEntity.lastfm_get_tree
8,443
def test_is_true_failure(self):
    try:
        assert_that(False).is_true()
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('Expected <True>, but was not.')
AssertionError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_bool.py/TestBool.test_is_true_failure
8,444
def test_is_false_failure(self):
    try:
        assert_that(True).is_false()
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('Expected <False>, but was not.')
AssertionError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_bool.py/TestBool.test_is_false_failure
8,445
def _parse_version(version_string):
    version = []
    for x in version_string.split('.'):
        try:
            version.append(int(x))
        except __HOLE__:
            # x may be of the form dev-1ea1592
            version.append(x)
    return tuple(version)
ValueError
dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/utils/fixes.py/_parse_version
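A short behavior sketch for _parse_version above; the version strings used here are illustrative only:

    assert _parse_version("0.17.1") == (0, 17, 1)
    # non-numeric components (e.g. "dev-1ea1592") trip int() and are kept as strings:
    assert _parse_version("0.18.dev-1ea1592") == (0, 18, "dev-1ea1592")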
8,446
def makedirs(name, mode=0o777, exist_ok=False):
    """makedirs(name [, mode=0o777][, exist_ok=False])

    Super-mkdir; create a leaf directory and all intermediate ones.
    Works like mkdir, except that any intermediate path segment (not
    just the rightmost) will be created if it does not exist. If the
    target directory already exists, raise an OSError if exist_ok is
    False. Otherwise no exception is raised. This is recursive.
    """
    try:
        os.makedirs(name, mode=mode)
    except __HOLE__ as e:
        if (not exist_ok or e.errno != errno.EEXIST
                or not os.path.isdir(name)):
            raise
OSError
dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/utils/fixes.py/makedirs
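A usage sketch for the makedirs backport above (the path is hypothetical); with exist_ok=True, the EEXIST error from a repeated call on an existing directory is swallowed:

    makedirs("/tmp/demo/nested", exist_ok=True)  # creates intermediate directories
    makedirs("/tmp/demo/nested", exist_ok=True)  # no error: EEXIST on an existing dir is ignored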
8,447
def _validate_multicast_ip_range(self, network_profile):
    """
    Validate multicast ip range values.

    :param network_profile: network profile object
    """
    try:
        min_ip, max_ip = (network_profile
                          ['multicast_ip_range'].split('-', 1))
    except __HOLE__:
        msg = _LE("Invalid multicast ip address range. "
                  "example range: 224.1.1.1-224.1.1.10")
        LOG.error(msg)
        raise n_exc.InvalidInput(error_message=msg)
    for ip in [min_ip, max_ip]:
        try:
            if not netaddr.IPAddress(ip).is_multicast():
                msg = _LE("%s is not a valid multicast ip address") % ip
                LOG.error(msg)
                raise n_exc.InvalidInput(error_message=msg)
            if netaddr.IPAddress(ip) <= netaddr.IPAddress('224.0.0.255'):
                msg = _LE("%s is reserved multicast ip address") % ip
                LOG.error(msg)
                raise n_exc.InvalidInput(error_message=msg)
        except netaddr.AddrFormatError:
            msg = _LE("%s is not a valid ip address") % ip
            LOG.error(msg)
            raise n_exc.InvalidInput(error_message=msg)
    if netaddr.IPAddress(min_ip) > netaddr.IPAddress(max_ip):
        msg = (_LE("Invalid multicast IP range '%(min_ip)s-%(max_ip)s':"
                   " Range should be from low address to high address")
               % {'min_ip': min_ip, 'max_ip': max_ip})
        LOG.error(msg)
        raise n_exc.InvalidInput(error_message=msg)
ValueError
dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/plugins/ml2/drivers/cisco/n1kv/network_profile_service.py/NetworkProfile_db_mixin._validate_multicast_ip_range
8,448
def get_decompressed_message():
    """
    For upload formats that support it, detect gzip Content-Encoding headers
    and de-compress on the fly.

    :rtype: str
    :returns: The de-compressed request body.
    """
    content_encoding = request.headers.get('Content-Encoding', '')

    if content_encoding in ['gzip', 'deflate']:
        # Compressed request. We have to decompress the body, then figure out
        # if it's form-encoded.
        try:
            # Auto header checking.
            message_body = zlib.decompress(request.body.read(), 15 + 32)
        except zlib.error:
            # Negative wbits suppresses adler32 checksumming.
            message_body = zlib.decompress(request.body.read(), -15)

        # At this point, we're not sure whether we're dealing with a straight
        # un-encoded POST body, or a form-encoded POST. Attempt to parse the
        # body. If it's not form-encoded, this will return an empty dict.
        form_enc_parsed = urlparse.parse_qs(message_body)
        if form_enc_parsed:
            # This is a form-encoded POST. The value of the data attrib will
            # be the body we're looking for.
            try:
                message_body = form_enc_parsed['data'][0]
            except (KeyError, __HOLE__):
                raise MalformedUploadError(
                    "No 'data' POST key/value found. Check your POST key "
                    "name for spelling, and make sure you're passing a value."
                )
    else:
        # Uncompressed request. Bottle handles all of the parsing of the
        # POST key/vals, or un-encoded body.
        data_key = request.forms.get('data')
        if data_key:
            # This is a form-encoded POST. Support the silly people.
            message_body = data_key
        else:
            # This is a non form-encoded POST body.
            message_body = request.body.read()

    return message_body
IndexError
dataset/ETHPy150Open gtaylor/EVE-Market-Data-Relay/emdr/daemons/gateway/wsgi.py/get_decompressed_message
8,449
def parse_and_error_handle(parser, data, upload_format):
    """
    Standardized parsing and error handling for parsing. Returns the
    final HTTP body to send back to the uploader after parsing,
    or error messages.

    :param callable parser: The parser function to use to parse ``data``.
    :param object data: A dict or str of parser-specific data to parse
        using the callable specified in ``parser``.
    :param str upload_format: Upload format identifier for the logs.
    :rtype: str
    :returns: The HTTP body to return.
    """
    try:
        parsed_message = parser(data)
    except (
        EMDSError, MalformedUploadError, TypeError, __HOLE__
    ) as exc:
        # Something bad happened. We know this will return at least a
        # semi-useful error message, so do so.
        response.status = 400
        logger.error("Error to %s: %s" % (get_remote_address(), exc.message))
        return exc.message

    ip_hash_salt = settings.GATEWAY_IP_KEY_SALT
    if ip_hash_salt:
        # If an IP hash is set, salt+hash the uploader's IP address and set
        # it as the EMDR upload key value.
        ip_hash = hashlib.sha1(ip_hash_salt + get_remote_address()).hexdigest()
        parsed_message.upload_keys.append({'name': 'EMDR', 'key': ip_hash})

    # Sends the parsed MarketOrderList or MarketHistoryList to the Announcers
    # as compressed JSON.
    gevent.spawn(order_pusher.push_message, parsed_message)
    logger.info("Accepted %s %s upload from %s" % (
        upload_format, parsed_message.list_type, get_remote_address()
    ))
    # Goofy, but apparently expected by EVE Market Data Uploader.
    return '1'
ValueError
dataset/ETHPy150Open gtaylor/EVE-Market-Data-Relay/emdr/daemons/gateway/wsgi.py/parse_and_error_handle
8,450
def load_starting_page(config_data):
    """
    Load starting page into the CMS

    :param config_data: configuration data
    """
    with chdir(config_data.project_directory):
        env = deepcopy(dict(os.environ))
        env[str('DJANGO_SETTINGS_MODULE')] = str('{0}.settings'.format(config_data.project_name))
        env[str('PYTHONPATH')] = str(os.pathsep.join(map(shlex_quote, sys.path)))
        subprocess.check_call([sys.executable, 'starting_page.py'], env=env)
        for ext in ['py', 'pyc', 'json']:
            try:
                os.remove('starting_page.{0}'.format(ext))
            except __HOLE__:
                pass
OSError
dataset/ETHPy150Open nephila/djangocms-installer/djangocms_installer/django/__init__.py/load_starting_page
8,451
def trap_exit_fail(f):
    def test_wrapper(*args):
        try:
            f(*args)
        except __HOLE__:
            import traceback
            print (traceback.format_exc())
            assert False
    test_wrapper.__name__ = f.__name__
    return test_wrapper
SystemExit
dataset/ETHPy150Open hyde/commando/commando/tests/test_commando.py/trap_exit_fail
8,452
def trap_exit_pass(f):
    def test_wrapper(*args):
        try:
            print (f.__name__)
            f(*args)
        except __HOLE__:
            pass
    test_wrapper.__name__ = f.__name__
    return test_wrapper
SystemExit
dataset/ETHPy150Open hyde/commando/commando/tests/test_commando.py/trap_exit_pass
8,453
def test_command_version_param():
    with patch.object(BasicCommandLine, '_main') as _main:
        c = BasicCommandLine()
        exception = False
        try:
            c.parse(['--version'])
            assert False
        except __HOLE__:
            exception = True
        assert exception
        assert not _main.called
SystemExit
dataset/ETHPy150Open hyde/commando/commando/tests/test_commando.py/test_command_version_param
8,454
def test_command_version():
    class VersionCommandLine(Application):
        @command(description='test', prog='Basic')
        @param('--force', action='store_true', dest='force1')
        @param('--force2', action='store', dest='force2')
        @version('--version', version='%(prog)s 1.0')
        def main(self, params):
            assert params.force1 == eval(params.force2)
            self._main()

        def _main(self):
            pass

    with patch.object(VersionCommandLine, '_main') as _main:
        c = VersionCommandLine()
        exception = False
        try:
            c.parse(['--version'])
            assert False
        except __HOLE__:
            exception = True
        assert exception
        assert not _main.called
SystemExit
dataset/ETHPy150Open hyde/commando/commando/tests/test_commando.py/test_command_version
8,455
def __new__(self, *args):
    obj = super(Matcher, self).__new__(self)
    try:
        argspec = inspect.getargspec(type(obj).__init__)
    except __HOLE__:
        return obj
    else:
        if argspec.varargs or argspec.keywords:
            return obj
        nargs = len(argspec.args) - 1
        if len(args) <= nargs:
            # <= because for example (at least) copy.copy causes us to be called with no arguments
            return obj
        else:
            obj.__init__(*args[:nargs])
            return obj.__eq__(*args[nargs:])
TypeError
dataset/ETHPy150Open eallik/spinoff/spinoff/util/pattern_matching.py/Matcher.__new__
8,456
def remove_empty_bridges():
    try:
        interface_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing physical_interface_mappings failed: %s."), e)
        sys.exit(1)
    LOG.info(_LI("Interface mappings: %s."), interface_mappings)

    try:
        bridge_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.bridge_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing bridge_mappings failed: %s."), e)
        sys.exit(1)
    LOG.info(_LI("Bridge mappings: %s."), bridge_mappings)

    lb_manager = linuxbridge_neutron_agent.LinuxBridgeManager(
        bridge_mappings, interface_mappings)

    bridge_names = lb_manager.get_deletable_bridges()
    for bridge_name in bridge_names:
        if lb_manager.get_tap_devices_count(bridge_name):
            continue
        try:
            lb_manager.delete_bridge(bridge_name)
            LOG.info(_LI("Linux bridge %s deleted"), bridge_name)
        except __HOLE__:
            LOG.exception(_LE("Linux bridge %s delete failed"), bridge_name)
    LOG.info(_LI("Linux bridge cleanup completed successfully"))
RuntimeError
dataset/ETHPy150Open openstack/neutron/neutron/cmd/linuxbridge_cleanup.py/remove_empty_bridges
8,457
def check_gitignore():
    # checks .gitignore for .gitver inclusion
    try:
        gifile = os.path.join(GITIGNOREFILE)
        with open(gifile, 'r') as f:
            if CFGDIRNAME in f.read():
                return True
    except __HOLE__:
        pass
    return False
IOError
dataset/ETHPy150Open manuelbua/gitver/gitver/sanity.py/check_gitignore
8,458
def __enter__(self):
    try:
        from thrift.transport import TSocket
        from thrift.transport import TTransport
        from thrift.protocol import TBinaryProtocol
        # Note that this will only work with a CDH release.
        # This uses the thrift bindings generated by the ThriftHiveMetastore service in Beeswax.
        # If using the Apache release of Hive this import will fail.
        from hive_metastore import ThriftHiveMetastore
        config = luigi.configuration.get_config()
        host = config.get('hive', 'metastore_host')
        port = config.getint('hive', 'metastore_port')
        transport = TSocket.TSocket(host, port)
        transport = TTransport.TBufferedTransport(transport)
        protocol = TBinaryProtocol.TBinaryProtocol(transport)
        transport.open()
        self.transport = transport
        return ThriftHiveMetastore.Client(protocol)
    except __HOLE__ as e:
        raise Exception('Could not import Hive thrift library:' + str(e))
ImportError
dataset/ETHPy150Open spotify/luigi/luigi/contrib/hive.py/HiveThriftContext.__enter__
8,459
def reorder_data(data, shape, torder):
    """
    Reorder raw data from file.

    Parameters
    ----------
    data : 2D ndarray
        Raw data as ordered in binary file.
    shape : tuple of ints
        Shape of the NMR data.
    torder : {'f', 'r', 'o'} or Python function
        Trace ordering. See :py:func:`read` for details.

    Returns
    -------
    tdata : ndarray
        Array of NMR data.

    Notes
    -----
    Minimal error checking is done to see if data and shape contain the same
    number of values. This should be done before calling this function.
    """
    # take care of flat files...
    if torder == 'flat' or torder == 'f':
        try:
            data = data.reshape(shape)
        except __HOLE__:
            warn(str(data.shape) + "cannot be shaped into" + str(shape))
        return data
    # all other cases
    # make an empty array to hold reordered data
    ndata = np.empty(shape, dtype=data.dtype)
    # index2tuple converter
    i2t = torder2i2t(torder)
    # loop over all non-direct dimension indices
    for tup in np.ndindex(shape[:-1]):
        # determine the corresponding trace
        ntrace = i2t(shape[:-1], tup)
        # write the trace to the index
        ndata[tup] = data[ntrace]
    return ndata
ValueError
dataset/ETHPy150Open jjhelmus/nmrglue/nmrglue/fileio/varian.py/reorder_data
8,460
def read_fid(filename, shape=None, torder='flat', as_2d=False,
             read_blockhead=False):
    """
    Read an Agilent/Varian binary (fid) file.

    Parameters
    ----------
    filename : str
        Filename of Agilent/Varian binary file (fid) to read.
    shape : tuple of ints, optional
        Shape of the binary data. If not provided data is returned as a 2D
        array. Required if more than one trace per block (non-standard).
    torder : {'f', 'n', 'o'}
        Trace order. See :py:func:`read` for details.
    as_2d : bool, optional
        True to return the data as a 2D array, ignoring the shape and torder
        parameters.
    read_blockhead : bool, optional
        True to read the Agilent/Varian blockheaders(s) into the returned
        dictionary. False ignores them.

    Returns
    -------
    dic : dict
        Dictionary of Agilent/Varian binary file parameters.
    data : ndarray
        Array of NMR data.

    See Also
    --------
    read_fid_lowmem : Read an Agilent/Varian binary file using minimal
        amounts of memory.
    read : Read Agilent/Varian files from a directory.
    """
    # open the file
    f = open(filename, 'rb')

    # read the fileheader
    dic = fileheader2dic(get_fileheader(f))

    # if ntraces is not 1 use _ntraces version
    if dic["ntraces"] != 1:
        return read_fid_ntraces(filename, shape, torder, as_2d,
                                read_blockhead)

    # data parameters
    dt = find_dtype(dic)
    nblocks = dic["nblocks"]
    pts = dic["np"]
    nbheaders = dic["nbheaders"]

    # read the data
    if read_blockhead:
        bdic, data = get_nblocks(f, nblocks, pts, nbheaders, dt,
                                 read_blockhead)
        dic["blockheader"] = bdic
    else:
        data = get_nblocks(f, nblocks, pts, nbheaders, dt, read_blockhead)
    f.close()

    # uninterleave the real and imaginary data
    data = uninterleave_data(data)

    # return as 2D if requested
    if as_2d:
        return dic, data

    # return raw data if no shape provided.
    if shape is None:
        warn("unknown shape, returning unshaped data")
        return dic, data

    # check for 1D
    if data.shape[0] == 1:
        return dic, np.squeeze(data)

    # try to reshape
    # reorder 3D/4D data
    if len(shape) >= 3:
        try:
            return dic, reorder_data(data, shape, torder)
        except:
            warn("data cannot be re-ordered, returning raw 2D data\n" +
                 "Provided shape: " + str(shape) + " torder: " + str(torder))
            return dic, data
    try:
        data = data.reshape(shape)
    except __HOLE__:
        warn(str(data.shape) + "cannot be shaped into" + str(shape))
        return dic, data
    return dic, data
ValueError
dataset/ETHPy150Open jjhelmus/nmrglue/nmrglue/fileio/varian.py/read_fid
8,461
def read_fid_ntraces(filename, shape=None, torder='flat', as_2d=False,
                     read_blockhead=False):
    """
    Read an Agilent/Varian binary (fid) file possibly having multiple
    traces per block.

    Parameters
    ----------
    filename : str
        Filename of Agilent/Varian binary file (fid) to read.
    shape : tuple of ints, optional
        Shape of the binary data. If not provided data is returned as a 2D
        array. Required if more than one trace per block (non-standard).
    torder : {'f', 'n', 'o'}
        Trace order. See :py:func:`read` for details.
    as_2d : bool, optional
        True to return the data as a 2D array, ignoring the shape and torder
        parameters.
    read_blockhead : bool, optional
        True to read the Agilent/Varian blockheaders(s) into the returned
        dictionary. False ignores them.

    Returns
    -------
    dic : dict
        Dictionary of Agilent/Varian binary file parameters.
    data : array_like
        Low memory object which can access NMR data on demand.

    See Also
    --------
    read_fid : Read an Agilent/Varian binary file with one trace per block.
    read_fid_lowmem : Read an Agilent/Varian binary file with one trace per
        block using minimal amounts of memory.
    """
    # open the file
    f = open(filename, 'rb')

    # read the fileheader
    dic = fileheader2dic(get_fileheader(f))

    # data parameters
    dt = find_dtype(dic)
    nblocks = dic["nblocks"]
    pts = dic["np"]
    nbheaders = dic["nbheaders"]
    ntraces = dic["ntraces"]

    # read the data
    if read_blockhead:
        bdic, data = get_nblocks_ntraces(f, nblocks, ntraces, pts,
                                         nbheaders, dt, read_blockhead)
        dic["blockheader"] = bdic
    else:
        data = get_nblocks_ntraces(f, nblocks, ntraces, pts, nbheaders,
                                   dt, read_blockhead)
    f.close()

    # uninterleave the real and imaginary data
    data = uninterleave_data(data)

    # if 2D array requested, return unshaped
    if as_2d:
        return dic, data

    # check for 1D
    if data.shape[0] == 1:
        return dic, np.squeeze(data)

    # try to reshape
    if shape is None:
        warn("unknown shape, returning unshaped data")
        return dic, data

    # reorder 3D/4D data
    if len(shape) >= 3:
        return dic, reorder_data(data, shape, torder)

    try:
        data = data.reshape(shape)
    except __HOLE__:
        warn(str(data.shape) + "cannot be shaped into" + str(shape))
        return dic, data
    return dic, data
ValueError
dataset/ETHPy150Open jjhelmus/nmrglue/nmrglue/fileio/varian.py/read_fid_ntraces
8,462
def init(config, benchmark):
    benchmark.executable = benchmark.tool.executable()
    benchmark.tool_version = benchmark.tool.version(benchmark.executable)

    try:
        processes = subprocess.Popen(['ps', '-eo', 'cmd'],
                                     stdout=subprocess.PIPE).communicate()[0]
        if len(re.findall("python.*benchmark\.py",
                          util.decode_to_string(processes))) > 1:
            logging.warning("Already running instance of this script detected. "
                            "Please make sure to not interfere with somebody else's benchmarks.")
    except __HOLE__:
        pass  # this does not work on Windows
OSError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/localexecution.py/init
8,463
def execute_benchmark(benchmark, output_handler):
    run_sets_executed = 0
    logging.debug("I will use %s threads.", benchmark.num_of_threads)

    if benchmark.requirements.cpu_model \
            or benchmark.requirements.cpu_cores != benchmark.rlimits.get(CORELIMIT, None) \
            or benchmark.requirements.memory != benchmark.rlimits.get(MEMLIMIT, None):
        logging.warning("Ignoring specified resource requirements in local-execution mode, "
                        "only resource limits are used.")

    my_cgroups = cgroups.find_my_cgroups()

    coreAssignment = None  # cores per run
    memoryAssignment = None  # memory banks per run
    if CORELIMIT in benchmark.rlimits:
        if not my_cgroups.require_subsystem(cgroups.CPUSET):
            sys.exit("Cgroup subsystem cpuset is required for limiting the number of CPU cores/memory nodes.")
        coreAssignment = get_cpu_cores_per_run(benchmark.rlimits[CORELIMIT], benchmark.num_of_threads, my_cgroups)
        memoryAssignment = get_memory_banks_per_run(coreAssignment, my_cgroups)

    if MEMLIMIT in benchmark.rlimits:
        # check whether we have enough memory in the used memory banks for all runs
        check_memory_size(benchmark.rlimits[MEMLIMIT], benchmark.num_of_threads,
                          memoryAssignment, my_cgroups)

    if benchmark.num_of_threads > 1 and systeminfo.is_turbo_boost_enabled():
        logging.warning("Turbo boost of CPU is enabled. "
                        "Starting more than one benchmark in parallel affects the CPU frequency "
                        "and thus makes the performance unreliable.")

    if benchmark.num_of_threads > 1 and benchmark.config.users:
        if len(benchmark.config.users) == 1:
            logging.warning(
                'Executing multiple parallel benchmarks under same user account. '
                'Consider specifying multiple user accounts for increased separation of runs.')
            benchmark.config.users = [benchmark.config.users[0] for i in range(benchmark.num_of_threads)]
        elif len(benchmark.config.users) < benchmark.num_of_threads:
            sys.exit('Distributing parallel runs to different user accounts was requested, '
                     'but not enough accounts were given. Please specify {} user accounts, '
                     'or only one account.'.format(benchmark.num_of_threads))
        elif len(benchmark.config.users) != len(set(benchmark.config.users)):
            sys.exit('Same user account was specified multiple times, '
                     'please specify {} separate accounts, or only one account.'.format(benchmark.num_of_threads))

    throttle_check = systeminfo.CPUThrottleCheck()
    swap_check = systeminfo.SwapCheck()

    # iterate over run sets
    for runSet in benchmark.run_sets:
        if STOPPED_BY_INTERRUPT:
            break

        if not runSet.should_be_executed():
            output_handler.output_for_skipping_run_set(runSet)
        elif not runSet.runs:
            output_handler.output_for_skipping_run_set(runSet, "because it has no files")
        else:
            run_sets_executed += 1
            # get times before runSet
            ruBefore = resource.getrusage(resource.RUSAGE_CHILDREN)
            walltime_before = util.read_monotonic_time()
            energyBefore = util.measure_energy()

            output_handler.output_before_run_set(runSet)

            # put all runs into a queue
            for run in runSet.runs:
                _Worker.working_queue.put(run)

            # create some workers
            for i in range(benchmark.num_of_threads):
                cores = coreAssignment[i] if coreAssignment else None
                memBanks = memoryAssignment[i] if memoryAssignment else None
                user = benchmark.config.users[i] if benchmark.config.users else None
                WORKER_THREADS.append(_Worker(benchmark, cores, memBanks, user, output_handler))

            # wait until all tasks are done,
            # instead of queue.join(), we use a loop and sleep(1) to handle KeyboardInterrupt
            finished = False
            while not finished and not STOPPED_BY_INTERRUPT:
                try:
                    _Worker.working_queue.all_tasks_done.acquire()
                    finished = (_Worker.working_queue.unfinished_tasks == 0)
                finally:
                    _Worker.working_queue.all_tasks_done.release()

                try:
                    time.sleep(0.1)  # sleep some time
                except __HOLE__:
                    stop()

            # get times after runSet
            walltime_after = util.read_monotonic_time()
            energy = util.measure_energy(energyBefore)
            usedWallTime = walltime_after - walltime_before
            ruAfter = resource.getrusage(resource.RUSAGE_CHILDREN)
            usedCpuTime = (ruAfter.ru_utime + ruAfter.ru_stime) \
                - (ruBefore.ru_utime + ruBefore.ru_stime)

            if STOPPED_BY_INTERRUPT:
                output_handler.set_error('interrupted', runSet)
            output_handler.output_after_run_set(runSet, cputime=usedCpuTime,
                                                walltime=usedWallTime, energy=energy)

    for worker in WORKER_THREADS:
        worker.cleanup()

    if throttle_check.has_throttled():
        logging.warning('CPU throttled itself during benchmarking due to overheating. '
                        'Benchmark results are unreliable!')
    if swap_check.has_swapped():
        logging.warning('System has swapped during benchmarking. '
                        'Benchmark results are unreliable!')

    output_handler.output_after_benchmark(STOPPED_BY_INTERRUPT)
    return 0
KeyboardInterrupt
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/localexecution.py/execute_benchmark
8,464
def run(self):
    while not _Worker.working_queue.empty() and not STOPPED_BY_INTERRUPT:
        currentRun = _Worker.working_queue.get_nowait()
        try:
            logging.debug('Executing run "%s"', currentRun.identifier)
            self.execute(currentRun)
            logging.debug('Finished run "%s"', currentRun.identifier)
        except __HOLE__ as e:
            logging.critical(e)
        except BaseException as e:
            logging.exception('Exception during run execution')
        _Worker.working_queue.task_done()
SystemExit
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/localexecution.py/_Worker.run
8,465
def execute(self, run):
    """
    This function executes the tool with a sourcefile with options.
    It also calls functions for output before and after the run.
    """
    self.output_handler.output_before_run(run)
    benchmark = self.benchmark

    memlimit = benchmark.rlimits.get(MEMLIMIT)

    args = run.cmdline()
    logging.debug('Command line of run is %s', args)
    run_result = \
        self.run_executor.execute_run(
            args, run.log_file,
            hardtimelimit=benchmark.rlimits.get(TIMELIMIT),
            softtimelimit=benchmark.rlimits.get(SOFTTIMELIMIT),
            cores=self.my_cpus,
            memory_nodes=self.my_memory_nodes,
            memlimit=memlimit,
            environments=benchmark.environment(),
            workingDir=benchmark.working_directory(),
            maxLogfileSize=benchmark.config.maxLogfileSize)

    if self.run_executor.PROCESS_KILLED:
        # If the run was interrupted, we ignore the result and cleanup.
        try:
            if benchmark.config.debug:
                os.rename(run.log_file, run.log_file + ".killed")
            else:
                os.remove(run.log_file)
        except __HOLE__:
            pass
        return 1

    if self.my_cpus:
        run_result['cpuCores'] = self.my_cpus
    if self.my_memory_nodes:
        run_result['memoryNodes'] = self.my_memory_nodes

    run.set_result(run_result)
    self.output_handler.output_after_run(run)
OSError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/localexecution.py/_Worker.execute
8,466
def check_numeric(self, value):
    """Cast value to int or float if necessary"""
    if not isinstance(value, (int, float)):
        try:
            value = int(value)
        except __HOLE__:
            value = float(value)
    return value
ValueError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/cell.py/Cell.check_numeric
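A sketch of the int-then-float fallback in check_numeric above; cell stands in for a hypothetical openpyxl Cell instance:

    assert cell.check_numeric("42") == 42       # int() succeeds
    assert cell.check_numeric("3.14") == 3.14   # int() raises ValueError, float() is tried next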
8,467
def set_value_explicit(self, value=None, data_type=TYPE_STRING):
    """Coerce values according to their explicit type"""
    type_coercion_map = {
        self.TYPE_INLINE: self.check_string,
        self.TYPE_STRING: self.check_string,
        self.TYPE_FORMULA: unicode,
        self.TYPE_NUMERIC: self.check_numeric,
        self.TYPE_BOOL: bool,
    }
    try:
        self._value = type_coercion_map[data_type](value)
    except __HOLE__:
        if data_type not in self.VALID_TYPES:
            msg = 'Invalid data type: %s' % data_type
            raise DataTypeException(msg)
    self._data_type = data_type
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/cell.py/Cell.set_value_explicit
8,468
@staticmethod
def _gen_tuples(list_object):
    while True:
        try:
            key = list_object.pop()
            value = list_object.pop()
        except __HOLE__:
            raise StopIteration
        else:
            yield key, value
IndexError
dataset/ETHPy150Open saltstack/salt/salt/modules/ini_manage.py/_Ini._gen_tuples
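A sketch of how _gen_tuples above drains a flat list (Python 2 semantics; the list value is illustrative). pop() takes from the end, so the caller is expected to pass the list in reverse order:

    flat = ['v2', 'k2', 'v1', 'k1']
    assert dict(_Ini._gen_tuples(flat)) == {'k1': 'v1', 'k2': 'v2'}  # IndexError on the emptied list ends iteration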
8,469
def itersubclasses(cls, _seen=None):
    """
    itersubclasses(cls)

    Generator over all subclasses of a given class, in depth first order.

    >>> list(itersubclasses(int)) == [bool]
    True
    >>> class A(object): pass
    >>> class B(A): pass
    >>> class C(A): pass
    >>> class D(B,C): pass
    >>> class E(D): pass
    >>>
    >>> for cls in itersubclasses(A):
    ...     print(cls.__name__)
    B
    D
    E
    C
    >>> # get ALL (new-style) classes currently defined
    >>> [cls.__name__ for cls in itersubclasses(object)] #doctest: +ELLIPSIS
    ['type', ...'tuple', ...]
    """
    if not isinstance(cls, type):
        raise TypeError('itersubclasses must be called with '
                        'new-style classes, not %.100r' % cls)
    if _seen is None:
        _seen = set()
    try:
        subs = cls.__subclasses__()
    except __HOLE__:  # fails only when cls is type
        subs = cls.__subclasses__(cls)
    for sub in subs:
        if sub not in _seen:
            _seen.add(sub)
            yield sub
            for sub in itersubclasses(sub, _seen):
                yield sub
TypeError
dataset/ETHPy150Open fp7-ofelia/ocf/expedient/src/python/expedient/common/extendable/inheritance.py/itersubclasses
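A sketch mirroring the doctest of itersubclasses above: a depth-first walk over new-style subclasses, here on a simple linear hierarchy:

    class A(object): pass
    class B(A): pass
    class C(B): pass

    assert [cls.__name__ for cls in itersubclasses(A)] == ['B', 'C']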
8,470
def test_builtin(self):
    for name in ('str', 'str.translate', '__builtin__.str',
                 '__builtin__.str.translate'):
        # test low-level function
        self.assertIsNotNone(pydoc.locate(name))
        # test high-level function
        try:
            pydoc.render_doc(name)
        except __HOLE__:
            self.fail('finding the doc of {!r} failed'.format(name))

    for name in ('not__builtin__', 'strrr', 'strr.translate',
                 'str.trrrranslate', '__builtin__.strrr',
                 '__builtin__.str.trrranslate'):
        self.assertIsNone(pydoc.locate(name))
        self.assertRaises(ImportError, pydoc.render_doc, name)
ImportError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_pydoc.py/TestHelper.test_builtin
8,471
def parse_date(self):
    # Try as best we can to parse the date into a datetime object. Note:
    # this assumes that we never see a timestamp, just the date, in any
    # QIF date.
    if self.date != "UNKNOWN":
        try:
            return dateutil.parser.parse(self.date, dayfirst=self.dayfirst)
        except __HOLE__:
            # dateutil.parser doesn't recognize dates of the
            # format "MMDDYYYY", though it does recognize
            # "MM/DD/YYYY". So, if parsing has failed above,
            # try shoving in some slashes and see if that
            # parses.
            try:
                if len(self.date) == 8:
                    # The int() cast will only succeed if all 8
                    # characters of the date are numbers. If
                    # it fails, it will throw an exception we
                    # can catch below.
                    date_int = int(self.date)
                    # No exception? Great, keep parsing the
                    # string (dateutil wants a string
                    # argument).
                    slashified = "%s/%s/%s" % (self.date[0:2],
                                               self.date[2:4],
                                               self.date[4:])
                    return dateutil.parser.parse(slashified,
                                                 dayfirst=self.dayfirst)
            except:
                pass

            # If we've made it this far, our guesses have failed.
            raise ValueError("Unrecognized date format: '%s'." % self.date)
    else:
        return "UNKNOWN"
ValueError
dataset/ETHPy150Open wesabe/fixofx/lib/ofxtools/ofx_statement.py/OfxTransaction.parse_date
8,472
def write_input(self, SelPackList=False, check=False):
    """
    Write the input.

    Parameters
    ----------
    SelPackList : False or list of packages
    """
    if check:
        # run check prior to writing input
        self.check(f='{}.chk'.format(self.name), verbose=self.verbose, level=1)

    # org_dir = os.getcwd()
    # os.chdir(self.model_ws)
    if self.verbose:
        print('\nWriting packages:')
    if SelPackList == False:
        for p in self.packagelist:
            if self.verbose:
                print('    Package: ', p.name[0])
            # prevent individual package checks from running after
            # model-level package check above; otherwise checks are run
            # twice, or the model-level check procedure would have to be
            # split up, or each package would need a check argument, or
            # the default for the package-level check would have to be False
            try:
                p.write_file(check=False)
            except TypeError:
                p.write_file()
    else:
        for pon in SelPackList:
            for i, p in enumerate(self.packagelist):
                if pon in p.name:
                    if self.verbose:
                        print('    Package: ', p.name[0])
                    try:
                        p.write_file(check=False)
                    except __HOLE__:
                        p.write_file()
                    break
    if self.verbose:
        print(' ')
    # write name file
    self.write_name_file()
    # os.chdir(org_dir)
    return
TypeError
dataset/ETHPy150Open modflowpy/flopy/flopy/mbase.py/BaseModel.write_input
8,473
@property
def api_version(self):
    if not hasattr(self, "_api_version"):
        _api_version = None

        metas = [x for x in self.parsed.findall(".//meta")
                 if x.get("name", "").lower() == "api-version"]
        if metas:
            try:
                _api_version = int(metas[0].get("value", None))
            except (__HOLE__, ValueError):
                _api_version = None

        self._api_version = _api_version
    return self._api_version
TypeError
dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/index.py/HTMLPage.api_version
8,474
@property
def verifiable(self):
    """
    Returns True if this link can be verified after download, False if it
    cannot, and None if we cannot determine.
    """
    trusted = self.trusted or getattr(self.comes_from, "trusted", None)

    if trusted is not None and trusted:
        # This link came from a trusted source. It *may* be verifiable but
        # first we need to see if this page is operating under the new
        # API version.
        try:
            api_version = getattr(self.comes_from, "api_version", None)
            api_version = int(api_version)
        except (ValueError, __HOLE__):
            api_version = None

        if api_version is None or api_version <= 1:
            # This link is either trusted, or it came from a trusted,
            # however it is not operating under the API version 2 so
            # we can't make any claims about if it's safe or not
            return

        if self.hash:
            # This link came from a trusted source and it has a hash, so we
            # can consider it safe.
            return True
        else:
            # This link came from a trusted source, using the new API
            # version, and it does not have a hash. It is NOT verifiable
            return False
    elif trusted is not None:
        # This link came from an untrusted source and we cannot trust it
        return False


# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
TypeError
dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/index.py/Link.verifiable
8,475
def gds_validate_integer_list(self, input_data, node, input_name=''):
    values = input_data.split()
    for value in values:
        try:
            fvalue = float(value)
        except (__HOLE__, ValueError), exp:
            raise_parse_error(node, 'Requires sequence of integers')
    return input_data
TypeError
dataset/ETHPy150Open lsaffre/lino/lino/sandbox/bcss/SSDNRequest.py/GeneratedsSuper.gds_validate_integer_list
8,476
def gds_validate_float_list(self, input_data, node, input_name=''):
    values = input_data.split()
    for value in values:
        try:
            fvalue = float(value)
        except (TypeError, __HOLE__), exp:
            raise_parse_error(node, 'Requires sequence of floats')
    return input_data
ValueError
dataset/ETHPy150Open lsaffre/lino/lino/sandbox/bcss/SSDNRequest.py/GeneratedsSuper.gds_validate_float_list
8,477
def gds_validate_double_list(self, input_data, node, input_name=''):
    values = input_data.split()
    for value in values:
        try:
            fvalue = float(value)
        except (TypeError, __HOLE__), exp:
            raise_parse_error(node, 'Requires sequence of doubles')
    return input_data
ValueError
dataset/ETHPy150Open lsaffre/lino/lino/sandbox/bcss/SSDNRequest.py/GeneratedsSuper.gds_validate_double_list
8,478
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
    if nodeName_ == 'UserID':
        UserID_ = child_.text
        UserID_ = self.gds_validate_string(UserID_, node, 'UserID')
        self.UserID = UserID_
        self.validate_t_SSIN(self.UserID)  # validate type t_SSIN
    elif nodeName_ == 'Email':
        Email_ = child_.text
        Email_ = self.gds_validate_string(Email_, node, 'Email')
        self.Email = Email_
        # validate type t_EmailAddress
        self.validate_t_EmailAddress(self.Email)
    elif nodeName_ == 'OrgUnit':
        OrgUnit_ = child_.text
        OrgUnit_ = self.gds_validate_string(OrgUnit_, node, 'OrgUnit')
        self.OrgUnit = OrgUnit_
    elif nodeName_ == 'MatrixID':
        sval_ = child_.text
        try:
            ival_ = int(sval_)
        except (TypeError, __HOLE__), exp:
            raise_parse_error(child_, 'requires integer: %s' % exp)
        ival_ = self.gds_validate_integer(ival_, node, 'MatrixID')
        self.MatrixID = ival_
    elif nodeName_ == 'MatrixSubID':
        sval_ = child_.text
        try:
            ival_ = int(sval_)
        except (TypeError, ValueError), exp:
            raise_parse_error(child_, 'requires integer: %s' % exp)
        ival_ = self.gds_validate_integer(ival_, node, 'MatrixSubID')
        self.MatrixSubID = ival_
# end class AuthorizedUserType
ValueError
dataset/ETHPy150Open lsaffre/lino/lino/sandbox/bcss/SSDNRequest.py/AuthorizedUserType.buildChildren
8,479
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
    if nodeName_ == 'ReturnCode':
        sval_ = child_.text
        try:
            ival_ = int(sval_)
        except (TypeError, __HOLE__), exp:
            raise_parse_error(child_, 'requires integer: %s' % exp)
        ival_ = self.gds_validate_integer(ival_, node, 'ReturnCode')
        self.ReturnCode = ival_
    elif nodeName_ == 'Detail':
        obj_ = DetailMessageType.factory()
        obj_.build(child_)
        self.Detail.append(obj_)
# end class ResultSummary
ValueError
dataset/ETHPy150Open lsaffre/lino/lino/sandbox/bcss/SSDNRequest.py/ResultSummary.buildChildren
8,480
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
    if nodeName_ == 'SSIN':
        SSIN_ = child_.text
        SSIN_ = self.gds_validate_string(SSIN_, node, 'SSIN')
        self.SSIN = SSIN_
        self.validate_t_SSIN(self.SSIN)  # validate type t_SSIN
    elif nodeName_ == 'OrgUnit':
        OrgUnit_ = child_.text
        OrgUnit_ = self.gds_validate_string(OrgUnit_, node, 'OrgUnit')
        self.OrgUnit = OrgUnit_
    elif nodeName_ == 'Purpose':
        sval_ = child_.text
        try:
            ival_ = int(sval_)
        except (TypeError, ValueError), exp:
            raise_parse_error(child_, 'requires integer: %s' % exp)
        ival_ = self.gds_validate_integer(ival_, node, 'Purpose')
        self.Purpose = ival_
    elif nodeName_ == 'Period':
        obj_ = PeriodType.factory()
        obj_.build(child_)
        self.set_Period(obj_)
    elif nodeName_ == 'InscriptionCode':
        sval_ = child_.text
        try:
            ival_ = int(sval_)
        except (TypeError, ValueError), exp:
            raise_parse_error(child_, 'requires integer: %s' % exp)
        ival_ = self.gds_validate_integer(ival_, node, 'InscriptionCode')
        self.InscriptionCode = ival_
    elif nodeName_ == 'PhaseCode':
        sval_ = child_.text
        try:
            ival_ = int(sval_)
        except (__HOLE__, ValueError), exp:
            raise_parse_error(child_, 'requires integer: %s' % exp)
        ival_ = self.gds_validate_integer(ival_, node, 'PhaseCode')
        self.PhaseCode = ival_
# end class InscriptionType
TypeError
dataset/ETHPy150Open lsaffre/lino/lino/sandbox/bcss/SSDNRequest.py/InscriptionType.buildChildren
8,481
def fetch(self, url):
    socket.setdefaulttimeout(self.socket_timeout)
    req = Request(url, headers={'User-Agent': self.user_agent})
    try:
        resp = fetch(req)
    except URLError:
        return False
    except __HOLE__:
        return False
    except socket.timeout:
        return False
    except ssl.SSLError:
        return False
    return resp
HTTPError
dataset/ETHPy150Open coleifer/micawber/micawber/providers.py/Provider.fetch
8,482
def handle_response(self, response, url):
    try:
        json_data = json.loads(response)
    except InvalidJson as exc:
        try:
            msg = exc.message
        except __HOLE__:
            msg = exc.args[0]
        raise InvalidResponseException(msg)

    if 'url' not in json_data:
        json_data['url'] = url
    if 'title' not in json_data:
        json_data['title'] = json_data['url']
    return json_data
AttributeError
dataset/ETHPy150Open coleifer/micawber/micawber/providers.py/Provider.handle_response
8,483
def get_db_version(self):
    ''' Obtain the database schema version.
    Return: (negative, text) if error or version 0.0 where schema_version table is missing
            (version_int, version_text) if ok
    '''
    cmd = "SELECT version_int,version,openmano_ver FROM schema_version"
    for retry_ in range(0, 2):
        try:
            with self.con:
                self.cur = self.con.cursor()
                #print cmd
                self.cur.execute(cmd)
                rows = self.cur.fetchall()
                highest_version_int = 0
                highest_version = ""
                #print rows
                for row in rows:
                    #look for the latest version
                    if row[0] > highest_version_int:
                        highest_version_int, highest_version = row[0:2]
                return highest_version_int, highest_version
        except (mdb.Error, __HOLE__), e:
            #print cmd
            print "get_db_version DB Exception %d: %s" % (e.args[0], e.args[1])
            r, c = self.format_error(e)
            if r != -HTTP_Request_Timeout or retry_ == 1:
                return r, c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.get_db_version
8,484
def disconnect(self):
    '''disconnect from specific data base'''
    try:
        self.con.close()
        del self.con
    except mdb.Error, e:
        print "Error disconnecting from DB: Error %d: %s" % (e.args[0], e.args[1])
        return -1
    except __HOLE__, e:  # self.con not defined
        if e[0][-5:] == "'con'":
            return -1, "Database internal error, no connection."
        else:
            raise
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.disconnect
8,485
def new_row(self, table, INSERT, tenant_id=None, add_uuid=False, log=False, created_time=0):
    ''' Add one row into a table.
    Attribute
        INSERT: dictionary with the key: value to insert
        table: table where to insert
        tenant_id: only useful for logs. If provided, logs will use this tenant_id
        add_uuid: if True, it will create an uuid key entry at INSERT if not provided
    It checks presence of uuid and add one automatically otherwise
    Return: (result, uuid) where result can be 0 if error, or 1 if ok
    '''
    if table in tables_with_created_field and created_time == 0:
        created_time = time.time()
    for retry_ in range(0, 2):
        try:
            with self.con:
                self.cur = self.con.cursor()
                return self._new_row_internal(table, INSERT, tenant_id, add_uuid, None, log, created_time)
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.new_row DB Exception %d: %s" % (e.args[0], e.args[1])
            r, c = self.format_error(e)
            if r != -HTTP_Request_Timeout or retry_ == 1:
                return r, c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.new_row
8,486
def update_rows(self, table, UPDATE, WHERE, log=False, modified_time=0):
    ''' Update one or several rows into a table.
    Attributes
        UPDATE: dictionary with the key: value to change
        table: table where to update
        WHERE: dictionary of elements to update
    Return: (result, descriptive text) where result indicates the number of updated files
    '''
    if table in tables_with_created_field and modified_time == 0:
        modified_time = time.time()
    for retry_ in range(0, 2):
        try:
            with self.con:
                self.cur = self.con.cursor()
                return self.__update_rows(table, UPDATE, WHERE, log)
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.update_rows DB Exception %d: %s" % (e.args[0], e.args[1])
            r, c = self.format_error(e)
            if r != -HTTP_Request_Timeout or retry_ == 1:
                return r, c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.update_rows
8,487
def delete_row(self, table, uuid, tenant_id=None, log=True):
    for retry_ in range(0, 2):
        try:
            with self.con:
                #delete host
                self.cur = self.con.cursor()
                cmd = "DELETE FROM %s WHERE uuid = '%s'" % (table, uuid)
                print cmd
                self.cur.execute(cmd)
                deleted = self.cur.rowcount
                if deleted == 1:
                    #delete uuid
                    if table == 'tenants':
                        tenant_str = uuid
                    elif tenant_id:
                        tenant_str = tenant_id
                    else:
                        tenant_str = 'Null'
                    self.cur = self.con.cursor()
                    cmd = "DELETE FROM uuids WHERE root_uuid = '%s'" % uuid
                    print cmd
                    self.cur.execute(cmd)
                    #inserting new log
                    if log:
                        cmd = "INSERT INTO logs (related,level,uuid,nfvo_tenant_id,description) VALUES ('%s','debug','%s','%s','delete %s')" % (table, uuid, tenant_str, table[:-1])
                        print cmd
                        self.cur.execute(cmd)
            return deleted, table[:-1] + " '%s' %s" % (uuid, "deleted" if deleted == 1 else "not found")
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.delete_row DB Exception %d: %s" % (e.args[0], e.args[1])
            r, c = self.format_error(e, "delete", 'instances' if table == 'hosts' or table == 'tenants' else 'dependencies')
            if r != -HTTP_Request_Timeout or retry_ == 1:
                return r, c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.delete_row
8,488
def delete_row_by_dict(self, **sql_dict):
    ''' Deletes rows from a table.
    Attribute sql_dir: dictionary with the following key: value
        'FROM':      string of table name (Mandatory)
        'WHERE':     dict of key:values, translated to key=value AND ... (Optional)
        'WHERE_NOT': dict of key:values, translated to key<>value AND ... (Optional)
        'LIMIT':     limit of number of rows (Optional)
    Return: the (number of items deleted, descriptive text) if ok; (negative, descriptive text) if error
    '''
    #print sql_dict
    from_ = "FROM " + str(sql_dict['FROM'])
    #print 'from_', from_
    if 'WHERE' in sql_dict and len(sql_dict['WHERE']) > 0:
        w = sql_dict['WHERE']
        where_ = "WHERE " + " AND ".join(map(self.__tuple2db_format_where, w.iteritems()))
    else:
        where_ = ""
    if 'WHERE_NOT' in sql_dict and len(sql_dict['WHERE_NOT']) > 0:
        w = sql_dict['WHERE_NOT']
        where_2 = " AND ".join(map(self.__tuple2db_format_where_not, w.iteritems()))
        if len(where_) == 0:
            where_ = "WHERE " + where_2
        else:
            where_ = where_ + " AND " + where_2
    #print 'where_', where_
    limit_ = "LIMIT " + str(sql_dict['LIMIT']) if 'LIMIT' in sql_dict else ""
    #print 'limit_', limit_
    cmd = " ".join(("DELETE", from_, where_, limit_))
    print cmd
    for retry_ in range(0, 2):
        try:
            with self.con:
                #delete host
                self.cur = self.con.cursor()
                self.cur.execute(cmd)
                deleted = self.cur.rowcount
            return deleted, "%d deleted from %s" % (deleted, sql_dict['FROM'][:-1])
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.delete_row DB Exception %d: %s" % (e.args[0], e.args[1])
            r, c = self.format_error(e, "delete", 'dependencies')
            if r != -HTTP_Request_Timeout or retry_ == 1:
                return r, c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.delete_row_by_dict
8,489
def get_rows(self, table, uuid):
    '''get row from a table based on uuid'''
    for retry_ in range(0, 2):
        try:
            with self.con:
                self.cur = self.con.cursor(mdb.cursors.DictCursor)
                self.cur.execute("SELECT * FROM " + str(table) + " where uuid='" + str(uuid) + "'")
                rows = self.cur.fetchall()
                return self.cur.rowcount, rows
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.get_rows DB Exception %d: %s" % (e.args[0], e.args[1])
            r, c = self.format_error(e)
            if r != -HTTP_Request_Timeout or retry_ == 1:
                return r, c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.get_rows
8,490
def get_table(self, **sql_dict):
    ''' Obtain rows from a table.
    Attribute sql_dir: dictionary with the following key: value
        'SELECT':       list or tuple of fields to retrieve (by default all)
        'FROM':         string of table name (Mandatory)
        'WHERE':        dict of key:values, translated to key=value (key is null) AND ... (Optional)
        'WHERE_NOT':    dict of key:values, translated to key<>value (key is not null) AND ... (Optional)
        'WHERE_OR':     dict of key:values, translated to key=value OR ... (Optional)
        'WHERE_AND_OR': str 'AND' or 'OR' (by default) mark the priority to 'WHERE AND (WHERE_OR)' or '(WHERE) OR WHERE_OR' (Optional)
        'LIMIT':        limit of number of rows (Optional)
        'ORDER_BY':     list or tuple of fields to order
    Return: a list with dictionaries at each row
    '''
    #print sql_dict
    select_ = "SELECT " + ("*" if 'SELECT' not in sql_dict else ",".join(map(str, sql_dict['SELECT'])))
    #print 'select_', select_
    from_ = "FROM " + str(sql_dict['FROM'])
    #print 'from_', from_
    where_and = ""
    where_or = ""
    w = sql_dict.get('WHERE')
    if w:
        where_and = " AND ".join(map(self.__tuple2db_format_where, w.iteritems()))
    w = sql_dict.get('WHERE_NOT')
    if w:
        if where_and:
            where_and += " AND "
        where_and += " AND ".join(map(self.__tuple2db_format_where_not, w.iteritems()))
    w = sql_dict.get('WHERE_OR')
    if w:
        where_or = " OR ".join(map(self.__tuple2db_format_where, w.iteritems()))
    if where_and and where_or:
        if sql_dict.get("WHERE_AND_OR") == "AND":
            where_ = "WHERE " + where_and + " AND (" + where_or + ")"
        else:
            where_ = "WHERE (" + where_and + ") OR " + where_or
    elif where_and and not where_or:
        where_ = "WHERE " + where_and
    elif not where_and and where_or:
        where_ = "WHERE " + where_or
    else:
        where_ = ""
    #print 'where_', where_
    limit_ = "LIMIT " + str(sql_dict['LIMIT']) if 'LIMIT' in sql_dict else ""
    order_ = "ORDER BY " + ",".join(map(str, sql_dict['SELECT'])) if 'ORDER_BY' in sql_dict else ""
    #print 'limit_', limit_
    cmd = " ".join((select_, from_, where_, limit_, order_))
    for retry_ in range(0, 2):
        try:
            with self.con:
                self.cur = self.con.cursor(mdb.cursors.DictCursor)
                print cmd
                self.cur.execute(cmd)
                rows = self.cur.fetchall()
                return self.cur.rowcount, rows
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.get_table DB Exception %d: %s" % (e.args[0], e.args[1])
            r, c = self.format_error(e)
            if r != -HTTP_Request_Timeout or retry_ == 1:
                return r, c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.get_table
8,491
def get_table_by_uuid_name(self, table, uuid_name, error_item_text=None, allow_serveral=False, WHERE_OR={}, WHERE_AND_OR="OR"):
    ''' Obtain one row from a table based on name or uuid.
    Attribute:
        table: string of table name
        uuid_name: name or uuid. If no uuid format is found, it is considered a name
        allow_serveral: if False return ERROR if more than one row is found
        error_item_text: in case of error it identifies the 'item' name for a proper output text
        'WHERE_OR': dict of key:values, translated to key=value OR ... (Optional)
        'WHERE_AND_OR': str 'AND' or 'OR' (by default) mark the priority to 'WHERE AND (WHERE_OR)' or '(WHERE) OR WHERE_OR' (Optional)
    Return: if allow_serveral==False, a dictionary with this row, or error if no item is found or more than one is found
            if allow_serveral==True, a list of dictionaries with the row or rows, error if no item is found
    '''
    if error_item_text == None:
        error_item_text = table
    what = 'uuid' if af.check_valid_uuid(uuid_name) else 'name'
    cmd = " SELECT * FROM %s WHERE %s='%s'" % (table, what, uuid_name)
    if WHERE_OR:
        where_or = " OR ".join(map(self.__tuple2db_format_where, WHERE_OR.iteritems()))
        if WHERE_AND_OR == "AND":
            cmd += " AND (" + where_or + ")"
        else:
            cmd += " OR " + where_or
    for retry_ in range(0, 2):
        try:
            with self.con:
                self.cur = self.con.cursor(mdb.cursors.DictCursor)
                print cmd
                self.cur.execute(cmd)
                number = self.cur.rowcount
                if number == 0:
                    return -HTTP_Not_Found, "No %s found with %s '%s'" % (error_item_text, what, uuid_name)
                elif number > 1 and not allow_serveral:
                    return -HTTP_Bad_Request, "More than one %s found with %s '%s'" % (error_item_text, what, uuid_name)
                if allow_serveral:
                    rows = self.cur.fetchall()
                else:
                    rows = self.cur.fetchone()
                return number, rows
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.get_table_by_uuid_name DB Exception %d: %s" % (e.args[0], e.args[1])
            r, c = self.format_error(e)
            if r != -HTTP_Request_Timeout or retry_ == 1:
                return r, c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.get_table_by_uuid_name
8,492
def get_uuid(self, uuid):
    '''check in the database if this uuid is already present'''
    for retry_ in range(0, 2):
        try:
            with self.con:
                self.cur = self.con.cursor(mdb.cursors.DictCursor)
                self.cur.execute("SELECT * FROM uuids where uuid='" + str(uuid) + "'")
                rows = self.cur.fetchall()
                return self.cur.rowcount, rows
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.get_uuid DB Exception %d: %s" % (e.args[0], e.args[1])
            r, c = self.format_error(e)
            if r != -HTTP_Request_Timeout or retry_ == 1:
                return r, c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.get_uuid
8,493
def new_vnf_as_a_whole(self, nfvo_tenant, vnf_name, vnf_descriptor, VNFCDict):
    print "Adding new vnf to the NFVO database"
    for retry_ in range(0, 2):
        created_time = time.time()
        try:
            with self.con:
                myVNFDict = {}
                myVNFDict["name"] = vnf_name
                myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
                myVNFDict["public"] = vnf_descriptor['vnf'].get('public', "false")
                myVNFDict["description"] = vnf_descriptor['vnf']['description']
                myVNFDict["class"] = vnf_descriptor['vnf'].get('class', "MISC")
                myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")

                result, vnf_id = self._new_vnf(myVNFDict, nfvo_tenant, created_time)
                if result < 0:
                    return result, "Error creating vnf in NFVO database: %s" % vnf_id

                print "VNF %s added to NFVO DB. VNF id: %s" % (vnf_name, vnf_id)

                print "Adding new vms to the NFVO database"
                #For each vm, we must create the appropriate vm in the NFVO database.
                vmDict = {}
                for _, vm in VNFCDict.iteritems():
                    #This code could make the name of the vms grow and grow.
                    #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
                    #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
                    print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
                    vm["vnf_id"] = vnf_id
                    created_time += 0.00001
                    result, vm_id = self._new_vm(vm, nfvo_tenant, vnf_id, created_time)
                    if result < 0:
                        return result, "Error creating vm in NFVO database: %s" % vm_id

                    print "Internal vm id in NFVO DB: %s" % vm_id
                    vmDict[vm['name']] = vm_id

                #Collect the data interfaces of each VM/VNFC under the 'numas' field
                dataifacesDict = {}
                for vm in vnf_descriptor['vnf']['VNFC']:
                    dataifacesDict[vm['name']] = {}
                    for numa in vm.get('numas', []):
                        for dataiface in numa.get('interfaces', []):
                            af.convert_bandwidth(dataiface)
                            dataifacesDict[vm['name']][dataiface['name']] = {}
                            dataifacesDict[vm['name']][dataiface['name']]['vpci'] = dataiface['vpci']
                            dataifacesDict[vm['name']][dataiface['name']]['bw'] = dataiface['bandwidth']
                            dataifacesDict[vm['name']][dataiface['name']]['model'] = "PF" if dataiface['dedicated'] == "yes" else ("VF" if dataiface['dedicated'] == "no" else "VFnotShared")

                #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
                bridgeInterfacesDict = {}
                for vm in vnf_descriptor['vnf']['VNFC']:
                    if 'bridge-ifaces' in vm:
                        bridgeInterfacesDict[vm['name']] = {}
                        for bridgeiface in vm['bridge-ifaces']:
                            af.convert_bandwidth(bridgeiface)
                            bridgeInterfacesDict[vm['name']][bridgeiface['name']] = {}
                            bridgeInterfacesDict[vm['name']][bridgeiface['name']]['vpci'] = bridgeiface.get('vpci', None)
                            bridgeInterfacesDict[vm['name']][bridgeiface['name']]['mac'] = bridgeiface.get('mac_address', None)
                            bridgeInterfacesDict[vm['name']][bridgeiface['name']]['bw'] = bridgeiface.get('bandwidth', None)
                            bridgeInterfacesDict[vm['name']][bridgeiface['name']]['model'] = bridgeiface.get('model', None)

                #For each internal connection, we add it to the interfaceDict and we create the appropriate net in the NFVO database.
                print "Adding new nets (VNF internal nets) to the NFVO database (if any)"
                internalconnList = []
                if 'internal-connections' in vnf_descriptor['vnf']:
                    for net in vnf_descriptor['vnf']['internal-connections']:
                        print "CODE TO BE CHECKED"
                        print "Net name: %s. Description: %s" % (net['name'], net['description'])
                        myNetDict = {}
                        myNetDict["name"] = net['name']
                        myNetDict["description"] = net['description']
                        myNetDict["type"] = net['type']
                        myNetDict["vnf_id"] = vnf_id
                        created_time += 0.00001
                        result, net_id = self._new_net(myNetDict, nfvo_tenant, vnf_id, created_time)
                        if result < 0:
                            return result, "Error creating net in NFVO database: %s" % net_id

                        for element in net['elements']:
                            ifaceItem = {}
                            #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
                            ifaceItem["internal_name"] = element['local_iface_name']
                            #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
                            ifaceItem["vm_id"] = vmDict[element['VNFC']]
                            ifaceItem["net_id"] = net_id
                            ifaceItem["type"] = net['type']
                            if ifaceItem["type"] == "data":
                                ifaceItem["vpci"] = dataifacesDict[element['VNFC']][element['local_iface_name']]['vpci']
                                ifaceItem["bw"] = dataifacesDict[element['VNFC']][element['local_iface_name']]['bw']
                                ifaceItem["model"] = dataifacesDict[element['VNFC']][element['local_iface_name']]['model']
                            else:
                                ifaceItem["vpci"] = bridgeInterfacesDict[element['VNFC']][element['local_iface_name']]['vpci']
                                ifaceItem["mac"] = bridgeInterfacesDict[element['VNFC']][element['local_iface_name']]['mac_address']
                                ifaceItem["bw"] = bridgeInterfacesDict[element['VNFC']][element['local_iface_name']]['bw']
                                ifaceItem["model"] = bridgeInterfacesDict[element['VNFC']][element['local_iface_name']]['model']
                            internalconnList.append(ifaceItem)

                        print "Internal net id in NFVO DB: %s" % net_id

                print "Adding internal interfaces to the NFVO database (if any)"
                for iface in internalconnList:
                    print "Iface name: %s" % iface['internal_name']
                    created_time += 0.00001
                    result, iface_id = self._new_interface(iface, nfvo_tenant, vnf_id, created_time)
                    if result < 0:
                        return result, "Error creating iface in NFVO database: %s" % iface_id
                    print "Iface id in NFVO DB: %s" % iface_id

                print "Adding external interfaces to the NFVO database"
                for iface in vnf_descriptor['vnf']['external-connections']:
                    myIfaceDict = {}
                    #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
                    myIfaceDict["internal_name"] = iface['local_iface_name']
                    #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
                    myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
                    myIfaceDict["external_name"] = iface['name']
                    myIfaceDict["type"] = iface['type']
                    if iface["type"] == "data":
                        myIfaceDict["vpci"] = dataifacesDict[iface['VNFC']][iface['local_iface_name']]['vpci']
                        myIfaceDict["bw"] = dataifacesDict[iface['VNFC']][iface['local_iface_name']]['bw']
                        myIfaceDict["model"] = dataifacesDict[iface['VNFC']][iface['local_iface_name']]['model']
                    else:
                        myIfaceDict["vpci"] = bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['vpci']
                        myIfaceDict["bw"] = bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['bw']
                        myIfaceDict["model"] = bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['model']
                        myIfaceDict["mac"] = bridgeInterfacesDict[iface['VNFC']][iface['local_iface_name']]['mac']
                    print "Iface name: %s" % iface['name']
                    created_time += 0.00001
                    result, iface_id = self._new_interface(myIfaceDict, nfvo_tenant, vnf_id, created_time)
                    if result < 0:
                        return result, "Error creating iface in NFVO database: %s" % iface_id
                    print "Iface id in NFVO DB: %s" % iface_id

                return 1, vnf_id
        except (mdb.Error, __HOLE__), e:
            print "new_vnf_as_a_whole DB Exception %d: %s" % (e.args[0], e.args[1])
            r, c = self.format_error(e)
            if r != -HTTP_Request_Timeout or retry_ == 1:
                return r, c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.new_vnf_as_a_whole
8,494
def new_scenario(self, scenario_dict):
    for retry_ in range(0, 2):
        created_time = time.time()
        try:
            with self.con:
                self.cur = self.con.cursor()
                tenant_id = scenario_dict.get('tenant_id')
                #scenario
                INSERT_ = {'tenant_id': tenant_id,
                           'name': scenario_dict['name'],
                           'description': scenario_dict['description'],
                           'public': scenario_dict.get('public', "false")}
                r, scenario_uuid = self._new_row_internal('scenarios', INSERT_, tenant_id, True, None, True, created_time)
                if r < 0:
                    print 'nfvo_db.new_scenario Error inserting at table scenarios: ' + scenario_uuid
                    return r, scenario_uuid
                #sce_nets
                for net in scenario_dict['nets'].values():
                    net_dict = {'scenario_id': scenario_uuid}
                    net_dict["name"] = net["name"]
                    net_dict["type"] = net["type"]
                    net_dict["description"] = net.get("description")
                    net_dict["external"] = net.get("external", False)
                    if "graph" in net:
                        #net["graph"]=yaml.safe_dump(net["graph"],default_flow_style=True,width=256)
                        #TODO, must be json because of the GUI, change to yaml
                        net_dict["graph"] = json.dumps(net["graph"])
                    created_time += 0.00001
                    r, net_uuid = self._new_row_internal('sce_nets', net_dict, tenant_id, True, None, True, created_time)
                    if r < 0:
                        print 'nfvo_db.new_scenario Error inserting at table sce_vnfs: ' + net_uuid
                        return r, net_uuid
                    net['uuid'] = net_uuid
                #sce_vnfs
                for k, vnf in scenario_dict['vnfs'].items():
                    INSERT_ = {'scenario_id': scenario_uuid,
                               'name': k,
                               'vnf_id': vnf['uuid'],
                               #'description': scenario_dict['name']
                               'description': vnf['description']}
                    if "graph" in vnf:
                        #INSERT_["graph"]=yaml.safe_dump(vnf["graph"],default_flow_style=True,width=256)
                        #TODO, must be json because of the GUI, change to yaml
                        INSERT_["graph"] = json.dumps(vnf["graph"])
                    created_time += 0.00001
                    r, scn_vnf_uuid = self._new_row_internal('sce_vnfs', INSERT_, tenant_id, True, scenario_uuid, True, created_time)
                    if r < 0:
                        print 'nfvo_db.new_scenario Error inserting at table sce_vnfs: ' + scn_vnf_uuid
                        return r, scn_vnf_uuid
                    vnf['scn_vnf_uuid'] = scn_vnf_uuid
                    #sce_interfaces
                    for iface in vnf['ifaces'].values():
                        print 'iface', iface
                        if 'net_key' not in iface:
                            continue
                        iface['net_id'] = scenario_dict['nets'][iface['net_key']]['uuid']
                        INSERT_ = {'sce_vnf_id': scn_vnf_uuid,
                                   'sce_net_id': iface['net_id'],
                                   'interface_id': iface['uuid']}
                        created_time += 0.00001
                        r, iface_uuid = self._new_row_internal('sce_interfaces', INSERT_, tenant_id, True, scenario_uuid, True, created_time)
                        if r < 0:
                            print 'nfvo_db.new_scenario Error inserting at table sce_vnfs: ' + iface_uuid
                            return r, iface_uuid
                return 1, scenario_uuid
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.new_scenario DB Exception %d: %s" % (e.args[0], e.args[1])
            r, c = self.format_error(e)
            if r != -HTTP_Request_Timeout or retry_ == 1:
                return r, c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.new_scenario
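new_scenario stamps every child row with created_time += 0.00001 before calling _new_row_internal, so rows created inside a single call get strictly increasing created_at values and later ORDER BY created_at queries reproduce insertion order. A small sketch of the idea, with a plain list standing in for the database:

import time

def insert_rows(names):
    # rows inserted in one call share one wall-clock timestamp, so each
    # insert bumps it by 10 microseconds to keep ordering deterministic
    created_time = time.time()
    rows = []
    for name in names:
        created_time += 0.00001  # strictly increasing per row
        rows.append({"name": name, "created_at": created_time})
    return rows

rows = insert_rows(["mgmt", "data", "storage"])
assert [r["name"] for r in sorted(rows, key=lambda r: r["created_at"])] == ["mgmt", "data", "storage"]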
8,495
def edit_scenario(self, scenario_dict):
    for retry_ in range(0,2):
        modified_time = time.time()
        try:
            with self.con:
                self.cur = self.con.cursor()
                #check that the scenario exists
                tenant_id = scenario_dict.get('tenant_id')
                scenario_uuid = scenario_dict['uuid']
                where_text = "uuid='%s'" % scenario_uuid
                if not tenant_id and tenant_id != "any":
                    where_text += " AND (tenant_id='%s' OR public='True')" % (tenant_id)
                self.cur.execute("SELECT * FROM scenarios WHERE "+ where_text)
                self.cur.fetchall()
                if self.cur.rowcount==0:
                    return -HTTP_Bad_Request, "No scenario found with this criteria " + where_text
                elif self.cur.rowcount>1:
                    return -HTTP_Bad_Request, "More than one scenario found with this criteria " + where_text
                #scenario
                nodes = {}
                topology = scenario_dict.pop("topology", None)
                if topology != None and "nodes" in topology:
                    nodes = topology.get("nodes",{})
                UPDATE_ = {}
                if "name" in scenario_dict:
                    UPDATE_["name"] = scenario_dict["name"]
                if "description" in scenario_dict:
                    UPDATE_["description"] = scenario_dict["description"]
                if len(UPDATE_)>0:
                    WHERE_={'tenant_id': tenant_id, 'uuid': scenario_uuid}
                    r,c = self.__update_rows('scenarios', UPDATE_, WHERE_, modified_time=modified_time)
                    if r<0:
                        print 'nfvo_db.edit_scenario Error ' + c + ' updating table scenarios: ' + scenario_uuid
                        return r, scenario_uuid
                #sce_nets / sce_vnfs
                for node_id, node in nodes.items():
                    if "graph" in node:
                        #node["graph"] = yaml.safe_dump(node["graph"],default_flow_style=True,width=256)
                        #TODO: must be json because of the GUI; change to yaml
                        node["graph"] = json.dumps(node["graph"])
                    WHERE_={'scenario_id': scenario_uuid, 'uuid': node_id}
                    r,c = self.__update_rows('sce_nets', node, WHERE_)
                    if r<=0:
                        r,c = self.__update_rows('sce_vnfs', node, WHERE_, modified_time=modified_time)
                        if r<0:
                            print 'nfvo_db.edit_scenario Error updating tables sce_nets,sce_vnfs: ' + scenario_uuid
                            return r, scenario_uuid
                return 1, scenario_uuid
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.edit_scenario DB Exception %d: %s" % (e.args[0], e.args[1])
            r,c = self.format_error(e)
            if r!=-HTTP_Request_Timeout or retry_==1: return r,c

#     def get_instance_scenario(self, instance_scenario_id, tenant_id=None):
#         '''Obtain the scenario instance information, filtering by one or several of the tenant, uuid or name
#         instance_scenario_id is the uuid or the name if it is not a valid uuid format
#         Only one scenario instance must match the filtering or an error is returned
#         '''
#         print "1******************************************************************"
#         try:
#             with self.con:
#                 self.cur = self.con.cursor(mdb.cursors.DictCursor)
#                 #scenario table
#                 where_list=[]
#                 if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
#                 if af.check_valid_uuid(instance_scenario_id):
#                     where_list.append( "uuid='" + instance_scenario_id +"'" )
#                 else:
#                     where_list.append( "name='" + instance_scenario_id +"'" )
#                 where_text = " AND ".join(where_list)
#                 self.cur.execute("SELECT * FROM instance_scenarios WHERE "+ where_text)
#                 rows = self.cur.fetchall()
#                 if self.cur.rowcount==0:
#                     return -HTTP_Bad_Request, "No scenario instance found with this criteria " + where_text
#                 elif self.cur.rowcount>1:
#                     return -HTTP_Bad_Request, "More than one scenario instance found with this criteria " + where_text
#                 instance_scenario_dict = rows[0]
#
#                 #instance_vnfs
#                 self.cur.execute("SELECT uuid,vnf_id FROM instance_vnfs WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
#                 instance_scenario_dict['instance_vnfs'] = self.cur.fetchall()
#                 for vnf in instance_scenario_dict['instance_vnfs']:
#                     #instance_vms
#                     self.cur.execute("SELECT uuid, vim_vm_id "+
#                                 "FROM instance_vms "+
#                                 "WHERE instance_vnf_id='" + vnf['uuid'] +"'"
#                                 )
#                     vnf['instance_vms'] = self.cur.fetchall()
#                 #instance_nets
#                 self.cur.execute("SELECT uuid, vim_net_id FROM instance_nets WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
#                 instance_scenario_dict['instance_nets'] = self.cur.fetchall()
#
#                 #instance_interfaces
#                 self.cur.execute("SELECT uuid, vim_interface_id, instance_vm_id, instance_net_id FROM instance_interfaces WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
#                 instance_scenario_dict['instance_interfaces'] = self.cur.fetchall()
#
#                 af.convert_datetime2str(instance_scenario_dict)
#                 af.convert_str2boolean(instance_scenario_dict, ('public','shared','external') )
#                 print "2******************************************************************"
#                 return 1, instance_scenario_dict
#         except (mdb.Error, AttributeError), e:
#             print "nfvo_db.get_instance_scenario DB Exception %d: %s" % (e.args[0], e.args[1])
#             return self.format_error(e)
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.edit_scenario
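edit_scenario copies only name and description from the caller's dict into UPDATE_, an allow-list that keeps other columns from being overwritten through this entry point. A hedged sketch of that pattern (the names here are illustrative, not the module's API):

# Only explicitly allowed columns make it into the UPDATE dict; anything
# else the caller sends is silently dropped.
ALLOWED = ("name", "description")

def build_update(payload):
    return {k: payload[k] for k in ALLOWED if k in payload}

print(build_update({"name": "demo", "uuid": "123", "public": True}))
# {'name': 'demo'} -- uuid and public are ignored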
8,496
def get_scenario(self, scenario_id, tenant_id=None, datacenter_id=None):
    '''Obtain the scenario information, filtering by one or several of the tenant, uuid or name
    scenario_id is the uuid or the name if it is not a valid uuid format
    if datacenter_id is provided, it supplies additional vim_id fields with the matching vim uuid
    Only one scenario must match the filtering or an error is returned
    '''
    for retry_ in range(0,2):
        try:
            with self.con:
                self.cur = self.con.cursor(mdb.cursors.DictCursor)
                #scenario table
                if af.check_valid_uuid(scenario_id):
                    where_text = "uuid='%s'" % scenario_id
                else:
                    where_text = "name='%s'" % scenario_id
                if not tenant_id and tenant_id != "any":
                    where_text += " AND (tenant_id='%s' OR public='True')" % (tenant_id)
                cmd = "SELECT * FROM scenarios WHERE "+ where_text
                print cmd
                self.cur.execute(cmd)
                rows = self.cur.fetchall()
                if self.cur.rowcount==0:
                    return -HTTP_Bad_Request, "No scenario found with this criteria " + where_text
                elif self.cur.rowcount>1:
                    return -HTTP_Bad_Request, "More than one scenario found with this criteria " + where_text
                scenario_dict = rows[0]
                #sce_vnfs
                cmd = "SELECT uuid,name,vnf_id,description FROM sce_vnfs WHERE scenario_id='%s' ORDER BY created_at" % scenario_dict['uuid']
                self.cur.execute(cmd)
                scenario_dict['vnfs'] = self.cur.fetchall()
                for vnf in scenario_dict['vnfs']:
                    #sce_interfaces
                    cmd = "SELECT uuid,sce_net_id,interface_id FROM sce_interfaces WHERE sce_vnf_id='%s' ORDER BY created_at" % vnf['uuid']
                    self.cur.execute(cmd)
                    vnf['interfaces'] = self.cur.fetchall()
                    #vms
                    cmd = "SELECT vms.uuid as uuid, flavor_id, image_id, vms.name as name, vms.description as description " \
                          " FROM vnfs join vms on vnfs.uuid=vms.vnf_id " \
                          " WHERE vnfs.uuid='" + vnf['vnf_id'] + "'" \
                          " ORDER BY vms.created_at"
                    self.cur.execute(cmd)
                    vnf['vms'] = self.cur.fetchall()
                    for vm in vnf['vms']:
                        if datacenter_id!=None:
                            self.cur.execute("SELECT vim_id FROM datacenters_images WHERE image_id='%s' AND datacenter_id='%s'" %(vm['image_id'],datacenter_id))
                            if self.cur.rowcount==1:
                                vim_image_dict = self.cur.fetchone()
                                vm['vim_image_id']=vim_image_dict['vim_id']
                            self.cur.execute("SELECT vim_id FROM datacenters_flavors WHERE flavor_id='%s' AND datacenter_id='%s'" %(vm['flavor_id'],datacenter_id))
                            if self.cur.rowcount==1:
                                vim_flavor_dict = self.cur.fetchone()
                                vm['vim_flavor_id']=vim_flavor_dict['vim_id']
                        #interfaces
                        cmd = "SELECT uuid,internal_name,external_name,net_id,type,vpci,mac,bw,model" \
                              " FROM interfaces" \
                              " WHERE vm_id='%s'" \
                              " ORDER BY created_at" % vm['uuid']
                        self.cur.execute(cmd)
                        vm['interfaces'] = self.cur.fetchall()
                    #nets: every net of the vnf
                    self.cur.execute("SELECT uuid,name,type,description FROM nets WHERE vnf_id='" + vnf['vnf_id'] + "'")
                    vnf['nets'] = self.cur.fetchall()
                #sce_nets
                cmd = "SELECT uuid,name,type,external,description" \
                      " FROM sce_nets WHERE scenario_id='%s'" \
                      " ORDER BY created_at " % scenario_dict['uuid']
                self.cur.execute(cmd)
                scenario_dict['nets'] = self.cur.fetchall()
                #datacenter_nets
                for net in scenario_dict['nets']:
                    if str(net['external']) == 'false':
                        continue
                    WHERE_=" WHERE name='%s'" % net['name']
                    if datacenter_id!=None:
                        WHERE_ += " AND datacenter_id='%s'" % datacenter_id
                    self.cur.execute("SELECT vim_net_id FROM datacenter_nets" + WHERE_)
                    d_net = self.cur.fetchone()
                    if d_net==None or datacenter_id==None:
                        #print "nfvo_db.get_scenario() WARNING external net %s not found" % net['name']
                        net['vim_id']=None
                    else:
                        net['vim_id']=d_net['vim_net_id']
                af.convert_datetime2str(scenario_dict)
                af.convert_str2boolean(scenario_dict, ('public','shared','external') )
                return 1, scenario_dict
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.get_scenario DB Exception %d: %s" % (e.args[0], e.args[1])
            r,c = self.format_error(e)
            if r!=-HTTP_Request_Timeout or retry_==1: return r,c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.get_scenario
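get_scenario, like its siblings, splices scenario_id and tenant_id into the WHERE clause with % formatting, which breaks if a name contains a quote. A hedged alternative is a parameterized query; the sketch below uses the stdlib sqlite3 module as a stand-in for MySQLdb (which takes %s placeholders rather than ?), with an assumed toy schema:

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE scenarios (uuid TEXT, name TEXT, tenant_id TEXT, public TEXT)")
con.execute("INSERT INTO scenarios VALUES ('u1', 'demo', 't1', 'false')")

def find_scenario(con, scenario_id, tenant_id=None):
    # the driver quotes the values; no string splicing of user input
    sql = "SELECT uuid, name FROM scenarios WHERE (uuid = ? OR name = ?)"
    args = [scenario_id, scenario_id]
    if tenant_id and tenant_id != "any":
        sql += " AND (tenant_id = ? OR public = 'True')"
        args.append(tenant_id)
    return con.execute(sql, args).fetchall()

print(find_scenario(con, "demo", "t1"))  # [('u1', 'demo')]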
8,497
def get_uuid_from_name(self, table, name):
    '''Searches the given table for a row with this name and returns its uuid
    '''
    for retry_ in range(0,2):
        try:
            with self.con:
                self.cur = self.con.cursor(mdb.cursors.DictCursor)
                where_text = "name='" + name +"'"
                self.cur.execute("SELECT * FROM " + table + " WHERE "+ where_text)
                rows = self.cur.fetchall()
                if self.cur.rowcount==0:
                    return 0, "Name %s not found in table %s" %(name, table)
                elif self.cur.rowcount>1:
                    return self.cur.rowcount, "More than one entry with name %s found in table %s" %(name, table)
                return self.cur.rowcount, rows[0]["uuid"]
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.get_uuid_from_name DB Exception %d: %s" % (e.args[0], e.args[1])
            r,c = self.format_error(e)
            if r!=-HTTP_Request_Timeout or retry_==1: return r,c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.get_uuid_from_name
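get_uuid_from_name exposes a three-way contract: (0, error) when nothing matches, (n, error) when the name is ambiguous, and (1, uuid) otherwise. A minimal sketch of that contract, with plain dicts standing in for database rows:

def uuid_from_name(rows, table, name):
    # mirror the 0 / 1 / many outcomes of the method above
    matches = [r["uuid"] for r in rows if r["name"] == name]
    if len(matches) == 0:
        return 0, "Name %s not found in table %s" % (name, table)
    if len(matches) > 1:
        return len(matches), "More than one entry with name %s found in table %s" % (name, table)
    return 1, matches[0]

rows = [{"name": "vnfA", "uuid": "u1"}, {"name": "vnfB", "uuid": "u2"}]
print(uuid_from_name(rows, "vnfs", "vnfA"))  # (1, 'u1')
print(uuid_from_name(rows, "vnfs", "vnfC"))  # (0, 'Name vnfC not found in table vnfs')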
8,498
def delete_scenario(self, scenario_id, tenant_id=None):
    '''Deletes a scenario, filtering by one or several of the tenant, uuid or name
    scenario_id is the uuid or the name if it is not a valid uuid format
    Only one scenario must match the filtering or an error is returned
    '''
    for retry_ in range(0,2):
        try:
            with self.con:
                self.cur = self.con.cursor(mdb.cursors.DictCursor)
                #scenario table
                if af.check_valid_uuid(scenario_id):
                    where_text = "uuid='%s'" % scenario_id
                else:
                    where_text = "name='%s'" % scenario_id
                if not tenant_id and tenant_id != "any":
                    where_text += " AND (tenant_id='%s' OR public='True')" % tenant_id
                self.cur.execute("SELECT * FROM scenarios WHERE "+ where_text)
                rows = self.cur.fetchall()
                if self.cur.rowcount==0:
                    return -HTTP_Bad_Request, "No scenario found with this criteria " + where_text
                elif self.cur.rowcount>1:
                    return -HTTP_Bad_Request, "More than one scenario found with this criteria " + where_text
                scenario_uuid = rows[0]["uuid"]
                scenario_name = rows[0]["name"]
                #delete scenario
                self.cur.execute("DELETE FROM scenarios WHERE uuid='" + scenario_uuid + "'")
                return 1, scenario_uuid + " " + scenario_name
        except (mdb.Error, __HOLE__), e:
            print "nfvo_db.delete_scenario DB Exception %d: %s" % (e.args[0], e.args[1])
            r,c = self.format_error(e, "delete", "instances running")
            if r!=-HTTP_Request_Timeout or retry_==1: return r,c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.delete_scenario
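delete_scenario issues a single DELETE against scenarios; the "instances running" hint passed to format_error suggests child tables hang off foreign keys, so sce_vnfs/sce_nets rows would presumably be removed by ON DELETE CASCADE. The schema below is an assumption for illustration, not openmano's real DDL, and sqlite3 stands in for MySQL:

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("PRAGMA foreign_keys = ON")  # sqlite needs this pragma for FK enforcement
con.execute("CREATE TABLE scenarios (uuid TEXT PRIMARY KEY)")
con.execute("""CREATE TABLE sce_vnfs (uuid TEXT PRIMARY KEY,
               scenario_id TEXT REFERENCES scenarios(uuid) ON DELETE CASCADE)""")
con.execute("INSERT INTO scenarios VALUES ('s1')")
con.execute("INSERT INTO sce_vnfs VALUES ('v1', 's1')")
# deleting the parent row removes the child row through the cascade
con.execute("DELETE FROM scenarios WHERE uuid = 's1'")
print(con.execute("SELECT count(*) FROM sce_vnfs").fetchone())  # (0,)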
8,499
def new_instance_scenario_as_a_whole(self,tenant_id,instance_scenario_name,instance_scenario_description,scenarioDict):
    print "Adding new instance scenario to the NFVO database"
    for retry_ in range(0,2):
        created_time = time.time()
        try:
            with self.con:
                self.cur = self.con.cursor()
                #instance_scenarios
                datacenter_tenant_id = scenarioDict['datacenter_tenant_id']
                datacenter_id = scenarioDict['datacenter_id']
                INSERT_={'tenant_id': tenant_id,
                         'datacenter_tenant_id': datacenter_tenant_id,
                         'name': instance_scenario_name,
                         'description': instance_scenario_description,
                         'scenario_id': scenarioDict['uuid'],
                         'datacenter_id': datacenter_id
                         }
                r,instance_uuid = self._new_row_internal('instance_scenarios', INSERT_, tenant_id, True, None, True, created_time)
                if r<0:
                    print 'nfvo_db.new_instance_scenario_as_a_whole() Error inserting at table instance_scenarios: ' + instance_uuid
                    return r, instance_uuid
                net_scene2instance={}
                #instance_nets (nets interVNF)
                for net in scenarioDict['nets']:
                    INSERT_={'vim_net_id': net['vim_id'], 'external': net['external'], 'instance_scenario_id': instance_uuid} #, 'type': net['type']
                    INSERT_['datacenter_id'] = net.get('datacenter_id', datacenter_id)
                    INSERT_['datacenter_tenant_id'] = net.get('datacenter_tenant_id', datacenter_tenant_id)
                    if net.get("uuid"):
                        INSERT_['sce_net_id'] = net['uuid']
                    created_time += 0.00001
                    r,instance_net_uuid = self._new_row_internal('instance_nets', INSERT_, tenant_id, True, instance_uuid, True, created_time)
                    if r<0:
                        print 'nfvo_db.new_instance_scenario_as_a_whole() Error inserting at table instance_nets: ' + instance_net_uuid
                        return r, instance_net_uuid
                    net_scene2instance[ net['uuid'] ] = instance_net_uuid
                    net['uuid'] = instance_net_uuid  #overwrite scenario uuid by instance uuid
                #instance_vnfs
                for vnf in scenarioDict['vnfs']:
                    INSERT_={'instance_scenario_id': instance_uuid, 'vnf_id': vnf['vnf_id']}
                    INSERT_['datacenter_id'] = vnf.get('datacenter_id', datacenter_id)
                    INSERT_['datacenter_tenant_id'] = vnf.get('datacenter_tenant_id', datacenter_tenant_id)
                    if vnf.get("uuid"):
                        INSERT_['sce_vnf_id'] = vnf['uuid']
                    created_time += 0.00001
                    r,instance_vnf_uuid = self._new_row_internal('instance_vnfs', INSERT_, tenant_id, True, instance_uuid, True, created_time)
                    if r<0:
                        print 'nfvo_db.new_instance_scenario_as_a_whole() Error inserting at table instance_vnfs: ' + instance_vnf_uuid
                        return r, instance_vnf_uuid
                    vnf['uuid'] = instance_vnf_uuid  #overwrite scenario uuid by instance uuid
                    #instance_nets (nets intraVNF)
                    for net in vnf['nets']:
                        INSERT_={'vim_net_id': net['vim_id'], 'external': 'false', 'instance_scenario_id': instance_uuid} #, 'type': net['type']
                        INSERT_['datacenter_id'] = net.get('datacenter_id', datacenter_id)
                        INSERT_['datacenter_tenant_id'] = net.get('datacenter_tenant_id', datacenter_tenant_id)
                        if net.get("uuid"):
                            INSERT_['net_id'] = net['uuid']
                        created_time += 0.00001
                        r,instance_net_uuid = self._new_row_internal('instance_nets', INSERT_, tenant_id, True, instance_uuid, True, created_time)
                        if r<0:
                            print 'nfvo_db.new_instance_scenario_as_a_whole() Error inserting at table instance_nets: ' + instance_net_uuid
                            return r, instance_net_uuid
                        net_scene2instance[ net['uuid'] ] = instance_net_uuid
                        net['uuid'] = instance_net_uuid  #overwrite scenario uuid by instance uuid
                    #instance_vms
                    for vm in vnf['vms']:
                        INSERT_={'instance_vnf_id': instance_vnf_uuid, 'vm_id': vm['uuid'], 'vim_vm_id': vm['vim_id']}
                        created_time += 0.00001
                        r,instance_vm_uuid = self._new_row_internal('instance_vms', INSERT_, tenant_id, True, instance_uuid, True, created_time)
                        if r<0:
                            print 'nfvo_db.new_instance_scenario_as_a_whole() Error inserting at table instance_vms: ' + instance_vm_uuid
                            return r, instance_vm_uuid
                        vm['uuid'] = instance_vm_uuid  #overwrite scenario uuid by instance uuid
                        #instance_interfaces
                        for interface in vm['interfaces']:
                            net_id = interface.get('net_id', None)
                            if net_id is None:
                                #check if it is connected to an inter-VNF net
                                for iface in vnf['interfaces']:
                                    if iface['interface_id'] == interface['uuid']:
                                        net_id = iface.get('sce_net_id', None)
                                        break
                            if net_id is None:
                                continue
                            interface_type = 'external' if interface['external_name'] is not None else 'internal'
                            INSERT_={'instance_vm_id': instance_vm_uuid, 'instance_net_id': net_scene2instance[net_id],
                                     'interface_id': interface['uuid'], 'vim_interface_id': interface.get('vim_id'), 'type': interface_type}
                            #created_time += 0.00001
                            r,interface_uuid = self._new_row_internal('instance_interfaces', INSERT_, tenant_id, True, instance_uuid, True) #, created_time
                            if r<0:
                                print 'nfvo_db.new_instance_scenario_as_a_whole() Error inserting at table instance_interfaces: ' + interface_uuid
                                return r, interface_uuid
                            interface['uuid'] = interface_uuid  #overwrite scenario uuid by instance uuid
                return 1, instance_uuid
        except (mdb.Error, __HOLE__), e:
            print "new_instance_scenario_as_a_whole DB Exception %d: %s" % (e.args[0], e.args[1])
            r,c = self.format_error(e)
            if r!=-HTTP_Request_Timeout or retry_==1: return r,c
AttributeError
dataset/ETHPy150Open nfvlabs/openmano/openmano/nfvo_db.py/nfvo_db.new_instance_scenario_as_a_whole
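new_instance_scenario_as_a_whole records each scenario-level net uuid in net_scene2instance before overwriting net['uuid'] with the instance uuid; interfaces declared against scenario nets are later resolved through that map. A small sketch of the bookkeeping, where uuid4 stands in for the DB helper that mints row uuids:

import uuid

def instantiate_nets(scenario_nets):
    net_scene2instance = {}
    for net in scenario_nets:
        instance_net_uuid = str(uuid.uuid4())
        # remember the scenario uuid -> instance uuid mapping first...
        net_scene2instance[net["uuid"]] = instance_net_uuid
        # ...then overwrite the scenario uuid with the instance uuid
        net["uuid"] = instance_net_uuid
    return net_scene2instance

nets = [{"uuid": "sce-net-1"}, {"uuid": "sce-net-2"}]
mapping = instantiate_nets(nets)
print(mapping["sce-net-1"] == nets[0]["uuid"])  # True: old key still resolves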