text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def _collect_variables(names, expressions=None):
    """
    Map labels and expressions to registered variables.

    Handles argument matching.

    Example:

        _collect_variables(names=['zones', 'zone_id'],
                           expressions=['parcels.zone_id'])

    Would return a dict representing::

        {'zones': <DataFrameWrapper for zones>,
         'zone_id': <pandas.Series for parcels.zone_id>}

    Parameters
    ----------
    names : list of str
        List of registered variable names and/or labels.
        If mixing names and labels, labels must come at the end.
    expressions : list of str, optional
        List of registered variable expressions for labels defined
        at end of `names`. Length must match the number of labels.

    Returns
    -------
    variables : dict
        Keys match `names`. Values correspond to registered variables,
        which may be wrappers or evaluated functions if appropriate.

    """
    # Map registered variable labels to expressions.
    if not expressions:
        expressions = []
    # Names without a matching expression are "plain" names: they map to
    # themselves. Only the trailing len(expressions) names are labels.
    offset = len(names) - len(expressions)
    labels_map = dict(tz.concatv(
        tz.compatibility.zip(names[:offset], names[:offset]),
        tz.compatibility.zip(names[offset:], expressions)))

    # Injectables and tables share one lookup namespace; a table wins over
    # an injectable of the same name (dict merge order).
    all_variables = tz.merge(_INJECTABLES, _TABLES)
    variables = {}
    for label, expression in labels_map.items():
        # In the future, more registered variable expressions could be
        # supported. Currently supports names of registered variables
        # and references to table columns.
        if '.' in expression:
            # Registered variable expression refers to a table column.
            table_name, column_name = expression.split('.')
            table = get_table(table_name)
            variables[label] = table.get_column(column_name)
        else:
            thing = all_variables[expression]
            if isinstance(thing, (_InjectableFuncWrapper, TableFuncWrapper)):
                # Registered variable object is a function wrapper:
                # evaluate it so the caller receives the result.
                variables[label] = thing()
            else:
                variables[label] = thing
    return variables
def add_table(
        table_name, table, cache=False, cache_scope=_CS_FOREVER,
        copy_col=True):
    """
    Register a table with Orca.

    Parameters
    ----------
    table_name : str
        Should be globally unique to this table.
    table : pandas.DataFrame or function
        If a function, it should return a DataFrame. Its argument names
        and keyword argument values are matched to registered variables
        when Orca evaluates it.
    cache : bool, optional
        Whether to cache the results of a provided callable. Does not
        apply if `table` is a DataFrame.
    cache_scope : {'step', 'iteration', 'forever'}, optional
        Scope for which to cache data. Default is forever.
    copy_col : bool, optional
        Whether to return copies when evaluating columns.

    Returns
    -------
    wrapped : `DataFrameWrapper` or `TableFuncWrapper`

    """
    if isinstance(table, Callable):
        wrapped = TableFuncWrapper(table_name, table, cache=cache,
                                   cache_scope=cache_scope,
                                   copy_col=copy_col)
    else:
        wrapped = DataFrameWrapper(table_name, table, copy_col=copy_col)

    # drop any cached data left over from a previously registered table
    # under the same name
    wrapped.clear_cached()

    logger.debug('registering table {!r}'.format(table_name))
    _TABLES[table_name] = wrapped

    return wrapped
def table(
        table_name=None, cache=False, cache_scope=_CS_FOREVER,
        copy_col=True):
    """
    Decorator that registers a DataFrame-returning function as a table.

    Decorator version of `add_table`. The table name defaults to the
    function's name. The function's argument names and keyword argument
    values are matched to registered variables at evaluation time; the
    argument name "iter_var" receives the current iteration variable.

    """
    def decorator(func):
        # Fall back to the function's own name when none was given.
        name = table_name or func.__name__
        add_table(
            name, func, cache=cache, cache_scope=cache_scope,
            copy_col=copy_col)
        return func
    return decorator
def get_table(table_name):
    """
    Get a registered table.

    Tables registered as functions are evaluated and returned as
    `DataFrameWrapper`.

    Parameters
    ----------
    table_name : str

    Returns
    -------
    table : `DataFrameWrapper`

    """
    wrapper = get_raw_table(table_name)
    # A TableFuncWrapper is callable; calling it produces the wrapped frame.
    return wrapper() if isinstance(wrapper, TableFuncWrapper) else wrapper
def table_type(table_name):
    """
    Returns the type of a registered table.

    Parameters
    ----------
    table_name : str

    Returns
    -------
    table_type : {'dataframe', 'function'}

    """
    wrapper = get_raw_table(table_name)
    # Map each wrapper class to its reported kind; anything else
    # (not expected in practice) falls through and returns None.
    for cls, kind in ((DataFrameWrapper, 'dataframe'),
                      (TableFuncWrapper, 'function')):
        if isinstance(wrapper, cls):
            return kind
def add_column(
        table_name, column_name, column, cache=False,
        cache_scope=_CS_FOREVER):
    """
    Add a new column to a table from a Series or callable.

    Parameters
    ----------
    table_name : str
        Table with which the column will be associated.
    column_name : str
        Name for the column.
    column : pandas.Series or callable
        A Series must have an index matching the target table. A
        callable's argument names and keyword argument values are
        matched to registered variables; it must return a Series.
    cache : bool, optional
        Whether to cache the results of a provided callable. Does not
        apply if `column` is a Series.
    cache_scope : {'step', 'iteration', 'forever'}, optional
        Scope for which to cache data. Default is forever.

    """
    if isinstance(column, Callable):
        wrapped = _ColumnFuncWrapper(
            table_name, column_name, column,
            cache=cache, cache_scope=cache_scope)
    else:
        wrapped = _SeriesWrapper(table_name, column_name, column)

    # drop stale cached data from any column previously registered
    # under this (table, column) key
    wrapped.clear_cached()

    logger.debug('registering column {!r} on table {!r}'.format(
        column_name, table_name))
    _COLUMNS[(table_name, column_name)] = wrapped

    return wrapped
def column(table_name, column_name=None, cache=False,
           cache_scope=_CS_FOREVER):
    """
    Decorator that registers a Series-returning function as a column.

    Decorator version of `add_column`. The column name defaults to the
    function's name. The returned Series' index must match the named
    table. The argument name "iter_var" receives the current iteration
    variable.

    """
    def decorator(func):
        # Fall back to the function's own name when none was given.
        name = column_name or func.__name__
        add_column(
            table_name, name, func, cache=cache, cache_scope=cache_scope)
        return func
    return decorator
def _columns_for_table(table_name):
    """
    Return all of the columns registered for a given table.

    Parameters
    ----------
    table_name : str

    Returns
    -------
    columns : dict of column wrappers
        Keys will be column names.

    """
    result = {}
    # _COLUMNS is keyed by (table_name, column_name) tuples.
    for (tab, col_name), wrapper in _COLUMNS.items():
        if tab == table_name:
            result[col_name] = wrapper
    return result
def get_raw_column(table_name, column_name):
    """
    Get a wrapped, registered column.

    Only returns columns registered directly through Orca, not columns
    that are part of wrapped DataFrames.

    Parameters
    ----------
    table_name : str
    column_name : str

    Returns
    -------
    wrapped : _SeriesWrapper or _ColumnFuncWrapper

    """
    key = (table_name, column_name)
    if key not in _COLUMNS:
        raise KeyError('column {!r} not found for table {!r}'.format(
            column_name, table_name))
    return _COLUMNS[key]
def _memoize_function(f, name, cache_scope=_CS_FOREVER):
    """
    Wraps a function for memoization and ties its cache into the
    Orca cacheing system.

    Parameters
    ----------
    f : function
    name : str
        Name of injectable.
    cache_scope : {'step', 'iteration', 'forever'}, optional
        Scope for which to cache data. Default is to cache forever
        (or until manually cleared). 'iteration' caches data for each
        complete iteration of the pipeline, 'step' caches data for
        a single step of the pipeline.

    """
    cache = {}

    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            # Normalize empty args/kwargs to None so that f() always maps
            # to the same key; kwargs become a frozenset to be hashable.
            cache_key = (
                args or None, frozenset(kwargs.items()) if kwargs else None)
            in_cache = cache_key in cache
        except TypeError:
            raise TypeError(
                'function arguments must be hashable for memoization')

        # Cached values are only *served* while global caching is enabled,
        # but results are always *stored* so they're available later.
        if _CACHING and in_cache:
            return cache[cache_key]
        else:
            result = f(*args, **kwargs)
            cache[cache_key] = result
            return result

    # Expose internals so callers (and the cache-clearing machinery)
    # can reach the original function and flush the cache.
    wrapper.__wrapped__ = f
    wrapper.cache = cache
    wrapper.clear_cached = lambda: cache.clear()

    # Register with the global memoization registry so scope-based
    # cache clearing can find this wrapper.
    _MEMOIZED[name] = CacheItem(name, wrapper, cache_scope)

    return wrapper
def add_injectable(
        name, value, autocall=True, cache=False, cache_scope=_CS_FOREVER,
        memoize=False):
    """
    Add a value that will be injected into other functions.

    Parameters
    ----------
    name : str
    value
        If a callable and `autocall` is True then the function's
        argument names and keyword argument values will be matched
        to registered variables when the function needs to be
        evaluated by Orca. The return value will
        be passed to any functions using this injectable. In all other
        cases, `value` will be passed through untouched.
    autocall : bool, optional
        Set to True to have injectable functions automatically called
        (with argument matching) and the result injected instead of
        the function itself.
    cache : bool, optional
        Whether to cache the return value of an injectable function.
        Only applies when `value` is a callable and `autocall` is True.
    cache_scope : {'step', 'iteration', 'forever'}, optional
        Scope for which to cache data. Default is to cache forever
        (or until manually cleared). 'iteration' caches data for each
        complete iteration of the pipeline, 'step' caches data for
        a single step of the pipeline.
    memoize : bool, optional
        If autocall is False it is still possible to cache function results
        by setting this flag to True. Cached values are stored in a dictionary
        keyed by argument values, so the argument values must be hashable.
        Memoized functions have their caches cleared according to the same
        rules as universal caching.

    """
    if isinstance(value, Callable):
        if autocall:
            value = _InjectableFuncWrapper(
                name, value, cache=cache, cache_scope=cache_scope)
            # clear any cached data from a previously registered value
            value.clear_cached()
        elif not autocall and memoize:
            # `not autocall` is always true in this branch; kept for
            # explicitness. The raw function is wrapped for memoization
            # but will be injected as a callable, not evaluated.
            value = _memoize_function(value, name, cache_scope=cache_scope)

    logger.debug('registering injectable {!r}'.format(name))

    _INJECTABLES[name] = value
def injectable(
        name=None, autocall=True, cache=False, cache_scope=_CS_FOREVER,
        memoize=False):
    """
    Decorator that registers a function as an injectable.

    Decorator version of `add_injectable`. The injectable name defaults
    to the function's name. The argument name "iter_var" receives the
    current iteration variable.

    """
    def decorator(func):
        # Fall back to the function's own name when none was given.
        injectable_name = name or func.__name__
        add_injectable(
            injectable_name, func, autocall=autocall, cache=cache,
            cache_scope=cache_scope, memoize=memoize)
        return func
    return decorator
def get_injectable_func_source_data(name):
    """
    Return data about an injectable function's source, including file name,
    line number, and source code.

    Parameters
    ----------
    name : str

    Returns
    -------
    filename : str
    lineno : int
        The line number on which the function starts.
    source : str

    """
    if injectable_type(name) != 'function':
        raise ValueError('injectable {!r} is not a function'.format(name))

    inj = get_raw_injectable(name)

    # Unwrap to the underlying function before inspecting its source.
    if isinstance(inj, _InjectableFuncWrapper):
        target = inj._func
    elif hasattr(inj, '__wrapped__'):
        target = inj.__wrapped__
    else:
        target = inj

    return utils.func_source_data(target)
def add_step(step_name, func):
    """
    Add a step function to Orca.

    The function's argument names and keyword argument values are
    matched to registered variables at evaluation time. The argument
    name "iter_var" receives the current iteration variable.

    Parameters
    ----------
    step_name : str
    func : callable

    """
    # Guard clause: reject non-callables up front.
    if not isinstance(func, Callable):
        raise TypeError('func must be a callable')
    logger.debug('registering step {!r}'.format(step_name))
    _STEPS[step_name] = _StepFuncWrapper(step_name, func)
def step(step_name=None):
    """
    Decorator that registers a function as a pipeline step run by `run`.

    Decorator version of `add_step`. The step name defaults to the
    function's name. The argument name "iter_var" receives the current
    iteration variable.

    """
    def decorator(func):
        # Fall back to the function's own name when none was given.
        add_step(step_name or func.__name__, func)
        return func
    return decorator
def broadcast(cast, onto, cast_on=None, onto_on=None,
              cast_index=False, onto_index=False):
    """
    Register a rule for merging two tables by broadcasting one onto
    the other.

    Parameters
    ----------
    cast, onto : str
        Names of registered tables.
    cast_on, onto_on : str, optional
        Column names used for merge, equivalent of ``left_on``/``right_on``
        parameters of pandas.merge.
    cast_index, onto_index : bool, optional
        Whether to use table indexes for merge. Equivalent of
        ``left_index``/``right_index`` parameters of pandas.merge.

    """
    logger.debug(
        'registering broadcast of table {!r} onto {!r}'.format(cast, onto))
    # Broadcasts are keyed by the (cast, onto) table-name pair.
    rule = Broadcast(cast, onto, cast_on, onto_on, cast_index, onto_index)
    _BROADCASTS[(cast, onto)] = rule
def _get_broadcasts(tables):
    """
    Get the broadcasts associated with a set of tables.

    Parameters
    ----------
    tables : sequence of str
        Table names for which broadcasts have been registered.

    Returns
    -------
    casts : dict of `Broadcast`
        Keys are tuples of strings like (cast_name, onto_name).

    Raises
    ------
    ValueError
        If any requested table appears in no broadcast rule, meaning the
        tables cannot all be merged.

    """
    tables = set(tables)
    # Keep only rules where both endpoints are among the requested tables.
    casts = {
        (cast_name, onto_name): rule
        for (cast_name, onto_name), rule in _BROADCASTS.items()
        if cast_name in tables and onto_name in tables}

    # Every requested table must appear in at least one kept rule.
    linked = set()
    for cast_name, onto_name in casts:
        linked.add(cast_name)
        linked.add(onto_name)
    if tables - linked:
        raise ValueError('Not enough links to merge all tables.')

    return casts
def get_broadcast(cast_name, onto_name):
    """
    Get a single broadcast.

    A Broadcast is a namedtuple describing a pandas join: cast, onto,
    cast_on, onto_on, cast_index, onto_index.

    Parameters
    ----------
    cast_name : str
        The name of the table being broadcast.
    onto_name : str
        The name of the table onto which `cast_name` is broadcast.

    Returns
    -------
    broadcast : Broadcast

    """
    # Guard clause: fail loudly when no rule has been registered.
    if not is_broadcast(cast_name, onto_name):
        raise KeyError(
            'no rule found for broadcasting {!r} onto {!r}'.format(
                cast_name, onto_name))
    return _BROADCASTS[(cast_name, onto_name)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _all_reachable_tables(t): """ A generator that provides all the names of tables that can be reached via merges starting at the given target table. """
for k, v in t.items(): for tname in _all_reachable_tables(v): yield tname yield k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _recursive_getitem(d, key): """ Descend into a dict of dicts to return the one that contains a given key. Every value in the dict must be another dict. """
if key in d: return d else: for v in d.values(): return _recursive_getitem(v, key) else: raise KeyError('Key not found: {}'.format(key))
def _next_merge(merge_node):
    """
    Gets a node that has only leaf nodes below it. This table and
    the ones below are ready to be merged to make a new leaf node.

    """
    if all(_is_leaf_node(d) for d in _dict_value_to_pairs(merge_node)):
        # Every child is a leaf: this node is ready to merge.
        return merge_node
    else:
        # Recurse into the first non-leaf child. The for/else is subtle:
        # the `else` only runs when tz.remove yields nothing, i.e. when
        # no non-leaf child exists despite the all() check failing —
        # an inconsistent tree.
        for d in tz.remove(_is_leaf_node, _dict_value_to_pairs(merge_node)):
            return _next_merge(d)
        else:
            raise OrcaError('No node found for next merge.')
def get_step_table_names(steps):
    """
    Returns a list of table names injected into the provided steps.

    Parameters
    ----------
    steps : list of str
        Steps to gather table inputs from.

    Returns
    -------
    list of str

    """
    names = set()
    for step_name in steps:
        # Union the tables referenced by each step's signature.
        names.update(get_step(step_name)._tables_used())
    return list(names)
def write_tables(fname, table_names=None, prefix=None, compress=False,
                 local=False):
    """
    Writes tables to a pandas.HDFStore file.

    Parameters
    ----------
    fname : str
        File name for HDFStore. Will be opened in append mode and closed
        at the end of this function.
    table_names : list of str, optional, default None
        List of tables to write. If None, all registered tables will
        be written.
    prefix : str
        If not None, used to prefix the output table names so that
        multiple iterations can go in the same file.
    compress : boolean
        Whether to compress output file using standard HDF5-readable
        zlib compression, default False.
    local : boolean, optional, default False
        If True, write only each table's local columns, skipping
        registered/computed columns.

    """
    if table_names is None:
        table_names = list_tables()

    # Generator: each table is only evaluated when it is written.
    tables = (get_table(t) for t in table_names)
    key_template = '{}/{{}}'.format(prefix) if prefix is not None else '{}'

    # set compression options to zlib level-1 if compress arg is True
    complib = compress and 'zlib' or None
    complevel = compress and 1 or 0

    with pd.HDFStore(fname, mode='a', complib=complib,
                     complevel=complevel) as store:
        for t in tables:
            # if local arg is True, store only local columns
            columns = None
            if local is True:
                columns = t.local_columns
            store[key_template.format(t.name)] = t.to_frame(columns=columns)
def run(steps, iter_vars=None, data_out=None, out_interval=1,
        out_base_tables=None, out_run_tables=None, compress=False,
        out_base_local=True, out_run_local=True):
    """
    Run steps in series, optionally repeatedly over some sequence.
    The current iteration variable is set as a global injectable
    called ``iter_var``.

    Parameters
    ----------
    steps : list of str
        List of steps to run identified by their name.
    iter_vars : iterable, optional
        The values of `iter_vars` will be made available as an injectable
        called ``iter_var`` when repeatedly running `steps`.
    data_out : str, optional
        An optional filename to which all tables injected into any step
        in `steps` will be saved every `out_interval` iterations.
        File will be a pandas HDF data store.
    out_interval : int, optional
        Iteration interval on which to save data to `data_out`. For example,
        2 will save out every 2 iterations, 5 every 5 iterations.
        Default is every iteration.
        The results of the first and last iterations are always included.
        The input (base) tables are also included and prefixed with `base/`,
        these represent the state of the system before any steps have been
        executed.
        The interval is defined relative to the first iteration. For example,
        a run beginning in 2015 with an out_interval of 2, will write out
        results for 2015, 2017, etc.
    out_base_tables : list of str, optional, default None
        List of base tables to write. If not provided, tables injected
        into 'steps' will be written.
    out_run_tables : list of str, optional, default None
        List of run tables to write. If not provided, tables injected
        into 'steps' will be written.
    compress : boolean, optional, default False
        Whether to compress output file using standard HDF5 zlib compression.
        Compression yields much smaller files using slightly more CPU.
    out_base_local : boolean, optional, default True
        For tables in out_base_tables, whether to store only local columns
        (True) or both, local and computed columns (False).
    out_run_local : boolean, optional, default True
        For tables in out_run_tables, whether to store only local columns
        (True) or both, local and computed columns (False).

    """
    # A run with no iteration variables is a single pass with iter_var=None.
    iter_vars = iter_vars or [None]
    max_i = len(iter_vars)

    # get the tables to write out
    if out_base_tables is None or out_run_tables is None:
        step_tables = get_step_table_names(steps)

        if out_base_tables is None:
            out_base_tables = step_tables

        if out_run_tables is None:
            out_run_tables = step_tables

    # write out the base (inputs) before any step has run
    if data_out:
        add_injectable('iter_var', iter_vars[0])
        write_tables(data_out, out_base_tables, 'base', compress=compress,
                     local=out_base_local)

    # run the steps
    for i, var in enumerate(iter_vars, start=1):
        add_injectable('iter_var', var)

        if var is not None:
            print('Running iteration {} with iteration value {!r}'.format(
                i, var))
            logger.debug(
                'running iteration {} with iteration value {!r}'.format(
                    i, var))

        t1 = time.time()
        for j, step_name in enumerate(steps):
            # expose the (index, name) of the current step as an injectable
            add_injectable('iter_step', iter_step(j, step_name))
            print('Running step {!r}'.format(step_name))
            with log_start_finish(
                    'run step {!r}'.format(step_name), logger,
                    logging.INFO):
                step = get_step(step_name)
                t2 = time.time()
                step()
                print("Time to execute step '{}': {:.2f} s".format(
                    step_name, time.time() - t2))
            # step-scoped cached data is invalidated after every step
            clear_cache(scope=_CS_STEP)

        print(
            ('Total time to execute iteration {} '
             'with iteration value {!r}: '
             '{:.2f} s').format(i, var, time.time() - t1))

        # write out the results for the current iteration; the first and
        # last iterations are always written regardless of out_interval
        if data_out:
            if (i - 1) % out_interval == 0 or i == max_i:
                write_tables(data_out, out_run_tables, var,
                             compress=compress, local=out_run_local)

        # iteration-scoped cached data is invalidated after every iteration
        clear_cache(scope=_CS_ITER)
def injectables(**kwargs):
    """
    Temporarily add injectables to the pipeline environment.
    Takes only keyword arguments.

    Injectables will be returned to their original state when the
    context manager exits — including when the managed block raises.

    """
    global _INJECTABLES

    original = _INJECTABLES.copy()
    _INJECTABLES.update(kwargs)
    try:
        yield
    finally:
        # Restore even on exception so a failing block cannot leak
        # temporary injectables into the global environment.
        _INJECTABLES = original
def temporary_tables(**kwargs):
    """
    Temporarily set DataFrames as registered tables.

    Tables will be returned to their original state when the context
    manager exits — including when the managed block raises. Caching
    is not enabled for tables registered via this function.

    Raises
    ------
    ValueError
        If any keyword value is not a pandas.DataFrame. Validation runs
        before any table is registered, so a bad value leaves the
        registry untouched.

    """
    global _TABLES

    # Validate everything up front: the original interleaved validation
    # with registration, so a bad value mid-loop could leave earlier
    # temporary tables permanently registered.
    for k, v in kwargs.items():
        if not isinstance(v, pd.DataFrame):
            raise ValueError('tables only accepts DataFrames')

    original = _TABLES.copy()
    for k, v in kwargs.items():
        add_table(k, v)

    try:
        yield
    finally:
        # Restore even on exception so temporary tables cannot leak.
        _TABLES = original
def eval_variable(name, **kwargs):
    """
    Execute a single variable function registered with Orca and return
    the result. Any keyword arguments are temporarily set as injectables.

    Parameters
    ----------
    name : str
        Name of variable to evaluate. Use variable expressions to
        specify columns.

    Returns
    -------
    object
        For injectables and columns this directly returns whatever object
        is returned by the registered function. For tables this returns
        a DataFrameWrapper as if the table had been injected into a
        function.

    """
    with injectables(**kwargs):
        # Passing `name` as both label and expression lets
        # _collect_variables resolve dotted column expressions too.
        return _collect_variables([name], [name])[name]
def to_frame(self, columns=None):
    """
    Make a DataFrame with the given columns.

    Will always return a copy of the underlying table.

    Parameters
    ----------
    columns : sequence or string, optional
        Sequence of the column names desired in the DataFrame. A string
        can also be passed if only one column is desired.
        If None all columns are returned, including registered columns.

    Returns
    -------
    frame : pandas.DataFrame

    """
    # Registered (computed) columns attached to this table.
    extra_cols = _columns_for_table(self.name)

    if columns is not None:
        # Normalize a single column name to a one-element list.
        columns = [columns] if isinstance(columns, str) else columns
        columns = set(columns)
        set_extra_cols = set(extra_cols)
        # `&` and `-` evaluate left-to-right: requested columns that
        # exist locally, minus any that are shadowed by registered
        # columns (registered columns take precedence below).
        local_cols = set(self.local.columns) & columns - set_extra_cols
        df = self.local[list(local_cols)].copy()
        # Keep only the registered columns that were requested.
        extra_cols = {k: extra_cols[k] for k in (columns & set_extra_cols)}
    else:
        df = self.local.copy()

    with log_start_finish(
            'computing {!r} columns for table {!r}'.format(
                len(extra_cols), self.name), logger):
        for name, col in extra_cols.items():
            with log_start_finish(
                    'computing column {!r} for table {!r}'.format(
                        name, self.name), logger):
                # Evaluate the column wrapper and attach the Series.
                df[name] = col()

    return df
def update_col(self, column_name, series):
    """
    Add or replace a column in the underlying DataFrame.

    Parameters
    ----------
    column_name : str
        Column to add or replace.
    series : pandas.Series or sequence
        Column data.

    """
    message = 'updating column {!r} in table {!r}'.format(
        column_name, self.name)
    logger.debug(message)
    self.local[column_name] = series
def column_type(self, column_name):
    """
    Report column type as one of 'local', 'series', or 'function'.

    Parameters
    ----------
    column_name : str

    Returns
    -------
    col_type : {'local', 'series', 'function'}
        'local' means that the column is part of the registered table,
        'series' means the column is a registered Pandas Series,
        and 'function' means the column is a registered function providing
        a Pandas Series.

    """
    if column_name in list_columns_for_table(self.name):
        # Registered columns take precedence over local ones.
        wrapper = _COLUMNS[(self.name, column_name)]
        if isinstance(wrapper, _SeriesWrapper):
            return 'series'
        if isinstance(wrapper, _ColumnFuncWrapper):
            return 'function'
    elif column_name in self.local_columns:
        return 'local'

    raise KeyError('column {!r} not found'.format(column_name))
def update_col_from_series(self, column_name, series, cast=False):
    """
    Update existing values in a column from another series.
    Index values must match in both column and series.
    Optionally casts data type to match the existing column.

    Parameters
    ----------
    column_name : str
    series : pandas.Series
    cast : bool, optional, default False
        When True, `series` is cast to the existing column's dtype
        instead of raising on a dtype mismatch.

    """
    logger.debug('updating column {!r} in table {!r}'.format(
        column_name, self.name))

    col_dtype = self.local[column_name].dtype
    if series.dtype != col_dtype:
        if not cast:
            raise ValueError(
                "Data type mismatch, existing:{}, update:{}".format(
                    col_dtype, series.dtype))
        series = series.astype(col_dtype)

    # Only rows present in series.index are updated.
    self.local.loc[series.index, column_name] = series
def clear_cached(self):
    """
    Remove cached results from this table's computed columns.

    Drops this table's entry from the table cache and then clears the
    cache of every column registered on it.

    """
    _TABLE_CACHE.pop(self.name, None)
    for wrapper in _columns_for_table(self.name).values():
        wrapper.clear_cached()
    logger.debug('cleared cached columns for table {!r}'.format(self.name))
def _call_func(self):
    """
    Call the wrapped function and return the result wrapped by
    DataFrameWrapper.
    Also updates attributes like columns, index, and length.

    """
    # Serve from the cache only when global caching is on AND this
    # table opted into caching AND a cached value exists.
    if _CACHING and self.cache and self.name in _TABLE_CACHE:
        logger.debug('returning table {!r} from cache'.format(self.name))
        return _TABLE_CACHE[self.name].value

    with log_start_finish(
            'call function to get frame for table {!r}'.format(
                self.name), logger):
        # Resolve the function's arguments against registered variables
        # (argspec defaults hold variable expressions).
        kwargs = _collect_variables(names=self._argspec.args,
                                    expressions=self._argspec.defaults)
        frame = self._func(**kwargs)

    # Keep lightweight metadata in sync with the freshly computed frame.
    self._columns = list(frame.columns)
    self._index = frame.index
    self._len = len(frame)

    wrapped = DataFrameWrapper(self.name, frame, copy_col=self.copy_col)

    if self.cache:
        _TABLE_CACHE[self.name] = CacheItem(
            self.name, wrapped, self.cache_scope)

    return wrapped
def clear_cached(self):
    """
    Remove any cached result of this column.

    Logs only when there actually was a cached value to drop.

    """
    removed = _COLUMN_CACHE.pop((self.table_name, self.name), None)
    if removed is not None:
        logger.debug(
            'cleared cached value for column {!r} in table {!r}'.format(
                self.name, self.table_name))
def clear_cached(self):
    """
    Clear a cached result for this injectable.

    Logs only when there actually was a cached value to drop.

    """
    x = _INJECTABLE_CACHE.pop(self.name, None)
    # Test against None explicitly (not truthiness) so a falsy cache
    # entry is still logged — consistent with the column wrapper's
    # clear_cached.
    if x is not None:
        logger.debug(
            'injectable {!r} removed from cache'.format(self.name))
def _tables_used(self):
    """
    Tables injected into the step.

    Returns
    -------
    tables : set of str

    """
    spec = self._argspec
    defaults = list(spec.defaults) if spec.defaults else []
    # Positional argument names come first; trailing arguments are
    # replaced by their default values (variable expressions).
    positional = list(spec.args)[:len(spec.args) - len(defaults)]

    used = set()
    for candidate in positional + defaults:
        # For dotted expressions like 'parcels.zone_id' the table is
        # the part before the first dot.
        root = candidate.split('.')[0]
        if is_table(root):
            used.add(root)
    return used
def qbe_tree(graph, nodes, root=None):
    """
    Given a graph, nodes to explore and an optional root, traverse the
    graph to build a tree connecting the requested nodes.

    Returns a (tree, complete) pair where `complete` is True when every
    requested node was reached.

    """
    if root:
        start = root
    else:
        # No root given: start from a randomly chosen requested node.
        index = random.randint(0, len(nodes) - 1)
        start = nodes[index]
    # A queue to BFS instead DFS
    # NOTE(review): to_visit.pop() below pops the RIGHT end of the deque,
    # which makes this LIFO (depth-first) despite the comment above;
    # popleft() would give true BFS — confirm intended behavior.
    to_visit = deque()
    cnodes = copy(nodes)
    visited = set()
    # Format is (parent, parent_edge, neighbor, neighbor_field)
    to_visit.append((None, None, start, None))
    tree = {}
    while len(to_visit) != 0 and nodes:
        parent, parent_edge, v, v_edge = to_visit.pop()
        # Prune: stop tracking nodes we have already reached.
        if v in nodes:
            nodes.remove(v)
        node = graph[v]
        if v not in visited and len(node) > 1:
            visited.add(v)
            # Preorder process: record the edge in both directions,
            # but only when every tuple element is truthy (i.e. not the
            # synthetic start entry).
            if all((parent, parent_edge, v, v_edge)):
                if parent not in tree:
                    tree[parent] = []
                if (parent_edge, v, v_edge) not in tree[parent]:
                    tree[parent].append((parent_edge, v, v_edge))
                if v not in tree:
                    tree[v] = []
                if (v_edge, parent, parent_edge) not in tree[v]:
                    tree[v].append((v_edge, parent, parent_edge))
            # Iteration: enqueue every neighbor of the current node.
            for node_edge, neighbor, neighbor_edge in node:
                value = (v, node_edge, neighbor, neighbor_edge)
                to_visit.append(value)
    # Trim branches that do not lead to any requested node.
    remove_leafs(tree, cnodes)
    return tree, (len(nodes) == 0)
def combine(items, k=None):
    """
    Create a matrix in which each row is a tuple containing one of
    solutions or solution k-esima.

    With `k` None, returns the full cartesian product of `items` as a
    list of tuples. With `k` given, returns only the k-th combination
    (k is taken modulo the total number of combinations).

    """
    length_items = len(items)
    lengths = [len(i) for i in items]
    length = reduce(lambda x, y: x * y, lengths)
    # repeats[i]: how many consecutive product rows share items[i]'s value.
    repeats = [reduce(lambda x, y: x * y, lengths[i:])
               for i in range(1, length_items)] + [1]
    if k is not None:
        k = k % length
        # All operands here are ints, so floor division (//) reproduces
        # the old past.utils.old_div py2-compat helper exactly while
        # removing the dependency on it.
        indices = [(k % (lengths[i] * repeats[i])) // repeats[i]
                   for i in range(length_items)]
        return [items[i][indices[i]] for i in range(length_items)]
    else:
        matrix = []
        for i, item in enumerate(items):
            row = []
            for subset in item:
                row.extend([subset] * repeats[i])
            times = length // len(row)
            matrix.append(row * times)
        # Transpose the matrix to return rows instead of columns.
        return list(zip(*matrix))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def pickle_encode(session_dict):
    """Return the given session dictionary pickled, signed with its query
    hash, and base64-encoded (MIME style, newline-wrapped bytes)."""
    pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
    # base64.encodestring was deprecated and removed in Python 3.9;
    # encodebytes is the drop-in replacement with identical output.
    return base64.encodebytes(pickled + get_query_hash(pickled).encode())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def func_source_data(func): """ Return data about a function source, including file name, line number, and source code. Parameters func : object May be anything support by the inspect module, such as a function, method, or class. Returns ------- filename : str lineno : int The line number on which the function starts. source : str """
def func_source_data(func):
    """Look up where *func* is defined.

    Parameters
    ----------
    func : object
        Anything the :mod:`inspect` module understands (function, method,
        class, ...).

    Returns
    -------
    filename : str
    lineno : int
        The line number on which the definition starts.
    source : str
        The full source text of the definition.
    """
    lines, start_line = inspect.getsourcelines(func)
    return inspect.getsourcefile(func), start_line, ''.join(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
        def clean(self):
    """ Checks that at least one field is selected """
def clean(self):
    """Validate the formset: at least one row must be checked."""
    if any(self.errors):
        # Don't bother validating the formset unless each form is valid
        # on its own.
        return
    (selects, aliases, froms, wheres,
     sorts, groups_by, params) = self.get_query_parts()
    if not selects:
        validation_message = _(u"At least you must check a row to get.")
        raise forms.ValidationError(validation_message)
    # Stash the parsed query parts for later SQL construction.
    self._selects = selects
    self._aliases = aliases
    self._froms = froms
    self._wheres = wheres
    self._sorts = sorts
    self._groups_by = groups_by
    self._params = params
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
        def get_results(self, limit=None, offset=None, query=None, admin_name=None, row_number=False):
    """ Fetch all results after performing the SQL query. """
def get_results(self, limit=None, offset=None, query=None, admin_name=None, row_number=False):
    """Execute the query (or the given raw SQL) and return its rows.

    When *admin_name* is given and the query has no GROUP BY, every cell
    is returned as a ``(value, admin_change_url)`` pair; otherwise plain
    rows are returned, optionally prefixed with a 1-based row number.
    """
    add_extra_ids = (admin_name is not None)
    sql = query if query else self.get_raw_query(limit=limit, offset=offset,
                                                 add_extra_ids=add_extra_ids)
    if settings.DEBUG:
        print(sql)
    cursor = self._db_connection.cursor()
    cursor.execute(sql, tuple(self._params))
    query_results = cursor.fetchall()
    if admin_name and not self._groups_by:
        selects = self._get_selects_with_extra_ids()
        results = []
        try:
            offset = int(offset)
        except ValueError:
            offset = 0
        for row_idx, row in enumerate(query_results):
            row_len = len(row)
            if row_number:
                result = [(row_idx + offset + 1, u"#row%s" % (row_idx + offset + 1))]
            else:
                result = []
            # Cells alternate (value, object id); walk them in pairs.
            for i in range(0, row_len, 2):
                appmodel, field = selects[i].split(".")
                appmodel = self._unquote_name(appmodel)
                field = self._unquote_name(field)
                try:
                    if appmodel in self._models:
                        _model = self._models[appmodel]
                        _appmodel = u"%s_%s" % (_model._meta.app_label,
                                                _model._meta.model_name)
                    else:
                        _appmodel = appmodel
                    admin_url = reverse("%s:%s_change" % (admin_name, _appmodel),
                                        args=[row[i + 1]])
                except NoReverseMatch:
                    admin_url = None
                result.append((row[i], admin_url))
            results.append(result)
        return results
    if row_number:
        results = []
        for row_idx, row in enumerate(query_results):
            result = [row_idx + 1]
            for cell in row:
                result.append(cell)
            results.append(result)
        return results
    return query_results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_content_type(content_type): """ Return a tuple of content type and charset. :param content_type: A string describing a content type. """
def parse_content_type(content_type):
    """Split a Content-Type string into a (content type, charset) tuple.

    :param content_type: A string describing a content type.
    """
    if '; charset=' in content_type:
        return tuple(content_type.split('; charset='))
    # No explicit charset given: fall back on the format's default
    # encoding, or ISO-8859-1 (the historical HTTP default) otherwise.
    if 'text' in content_type:
        encoding = 'ISO-8859-1'
    else:
        try:
            format = formats.find_by_content_type(content_type)
        except formats.UnknownFormat:
            encoding = 'ISO-8859-1'
        else:
            encoding = format.default_encoding or 'ISO-8859-1'
    return (content_type, encoding)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_http_accept_header(header): """ Return a list of content types listed in the HTTP Accept header ordered by quality. :param header: A string describing the contents of the HTTP Accept header. """
def parse_http_accept_header(header):
    """Return the content types listed in an HTTP Accept header, ordered
    by descending quality.

    :param header: A string describing the contents of the HTTP Accept
        header.

    Fixes over the previous implementation: quality values are compared
    numerically rather than as strings, and only an actual ``q=`` parameter
    is treated as a quality value (other parameters such as ``level=1``
    were previously misparsed).
    """
    weighted = []
    for component in header.split(','):
        parts = [item.strip() for item in component.split(';')]
        content_type = parts[0]  # e.g. 'text/html'
        quality = 1.0  # RFC 7231 default when no q parameter is present
        for param in parts[1:]:
            if param.startswith('q='):
                try:
                    quality = float(param[2:])
                except ValueError:
                    pass
                break
        weighted.append((content_type, quality))
    # sort() is stable, so types with equal quality keep header order.
    weighted.sort(key=lambda item: item[1], reverse=True)
    return [content_type for content_type, _ in weighted]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_multipart_data(request): """ Parse a request with multipart data. :param request: A HttpRequest instance. """
def parse_multipart_data(request):
    """Parse a request carrying multipart form data.

    :param request: A HttpRequest instance.
    """
    from io import BytesIO
    # request.body is bytes under Python 3 / modern Django, so the stream
    # handed to MultiPartParser must be a binary buffer; the previous
    # StringIO(request.body) raised TypeError.
    return MultiPartParser(
        META=request.META,
        input_data=BytesIO(request.body),
        upload_handlers=request.upload_handlers,
        encoding=request.encoding
    ).parse()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def override_supported_formats(formats): """ Override the views class' supported formats for the decorated function. Arguments: formats -- A list of strings describing formats, e.g. ``['html', 'json']``. """
def override_supported_formats(formats):
    """Replace the view class' supported formats for the decorated view.

    Arguments:
    formats -- A list of strings describing formats, e.g. ``['html', 'json']``.
    """
    def decorator(view):
        @wraps(view)
        def wrapper(self, *args, **kwargs):
            # Swap the supported formats in just before delegating.
            self.supported_formats = formats
            return view(self, *args, **kwargs)
        return wrapper
    return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def route(regex, method, name): """ Route the decorated view. :param regex: A string describing a regular expression to which the request path will be matched. :param method: A string describing the HTTP method that this view accepts. :param name: A string describing the name of the URL pattern. ``regex`` may also be a lambda that accepts the parent resource's ``prefix`` argument and returns a string describing a regular expression to which the request path will be matched. ``name`` may also be a lambda that accepts the parent resource's ``views`` argument and returns a string describing the name of the URL pattern. """
def route(regex, method, name):
    """Attach routing metadata to the decorated view.

    :param regex: A string (or a lambda of the parent resource's
        ``prefix``) describing the URL regular expression.
    :param method: A string describing the accepted HTTP method.
    :param name: A string (or a lambda of the parent resource's ``views``)
        describing the URL pattern name.
    """
    def decorator(function):
        # Set the attribute before wrapping: functools.wraps copies the
        # function's __dict__ (including `route`) onto the wrapper.
        function.route = routes.route(
            regex = regex,
            view = function.__name__,
            method = method,
            name = name
        )
        @wraps(function)
        def wrapper(self, *args, **kwargs):
            return function(self, *args, **kwargs)
        return wrapper
    return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def before(method_name): """ Run the given method prior to the decorated view. If you return anything besides ``None`` from the given method, its return values will replace the arguments of the decorated view. If you return an instance of ``HttpResponse`` from the given method, Respite will return it immediately without delegating the request to the decorated view. Example usage:: class ArticleViews(Views): @before('_load') def show(self, request, article): return self._render( request = request, template = 'show', context = { 'article': article } ) def _load(self, request, id): try: return request, Article.objects.get(id=id) except Article.DoesNotExist: return self._error(request, 404, message='The article could not be found.') :param method: A string describing a class method. """
def before(method_name):
    """Run the named method prior to the decorated view.

    If the hook returns ``None`` the view is called with its original
    arguments. If it returns an ``HttpResponse`` that response is returned
    immediately. Any other return value replaces the view's positional
    arguments.

    :param method_name: A string naming a method on the same class.
    """
    def decorator(function):
        @wraps(function)
        def wrapper(self, *args, **kwargs):
            result = getattr(self, method_name)(*args, **kwargs)
            if result is None:
                # The hook had nothing to say; run the view unchanged.
                return function(self, *args, **kwargs)
            if isinstance(result, HttpResponse):
                # Short-circuit: the hook produced a full response.
                return result
            # The hook rewrote the view's arguments.
            return function(self, *result)
        return wrapper
    return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def index(self, request): """Render a list of objects."""
def index(self, request):
    """Render a list of all objects of this view's model."""
    # Context key is the underscored plural of the model name,
    # e.g. Article -> 'articles'.
    context_key = cc2us(pluralize(self.model.__name__))
    return self._render(
        request = request,
        template = 'index',
        context = {context_key: self.model.objects.all()},
        status = 200
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def new(self, request): """Render a form to create a new object."""
def new(self, request):
    """Render a form to create a new object."""
    # Use the configured form class, or generate one from the model.
    form_class = self.form or generate_form(self.model)
    return self._render(
        request = request,
        template = 'new',
        context = {'form': form_class()},
        status = 200
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def edit(self, request, id): """Render a form to edit an object."""
def edit(self, request, id):
    """Render a form to edit the object with the given id."""
    try:
        instance = self.model.objects.get(id=id)
    except self.model.DoesNotExist:
        return self._render(
            request = request,
            template = '404',
            context = {
                'error': 'The %s could not be found.' % self.model.__name__.lower()
            },
            status = 404,
            prefix_template_path = False
        )
    form = (self.form or generate_form(self.model))(instance=instance)
    # Add a hidden "_method" field so the HTML form (which can only
    # submit GET/POST) can override the request method to PUT.
    form.fields['_method'] = CharField(required=True, initial='PUT', widget=HiddenInput)
    return self._render(
        request = request,
        template = 'edit',
        context = {
            cc2us(self.model.__name__): instance,
            'form': form
        },
        status = 200
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self, request, id): """Update an object."""
def update(self, request, id):
    """Partially update an object from the fields given in request.PATCH."""
    try:
        instance = self.model.objects.get(id=id)
    except self.model.DoesNotExist:
        return self._render(
            request = request,
            template = '404',
            context = {
                'error': 'The %s could not be found.' % self.model.__name__.lower()
            },
            status = 404,
            prefix_template_path = False
        )
    # Keep only PATCH keys that name actual model fields.
    fields = []
    for field in request.PATCH:
        try:
            self.model._meta.get_field_by_name(field)
        except FieldDoesNotExist:
            continue
        else:
            fields.append(field)
    Form = generate_form(
        model = self.model,
        form = self.form,
        fields = fields
    )
    form = Form(request.PATCH, instance=instance)
    if form.is_valid():
        instance = form.save()
        return self.show(request, id)
    return self._render(
        request = request,
        template = 'edit',
        context = {'form': form},
        status = 400
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def replace(self, request, id): """Replace an object."""
def replace(self, request, id):
    """Replace an object wholesale from the data in request.PUT."""
    try:
        instance = self.model.objects.get(id=id)
    except self.model.DoesNotExist:
        return self._render(
            request = request,
            template = '404',
            context = {
                'error': 'The %s could not be found.' % self.model.__name__.lower()
            },
            status = 404,
            prefix_template_path = False
        )
    form = (self.form or generate_form(self.model))(request.PUT, instance=instance)
    if form.is_valid():
        instance = form.save()
        return self.show(request, id)
    return self._render(
        request = request,
        template = 'edit',
        context = {'form': form},
        status = 400
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_q(fields_dict, params_dict, request=None): """ Returns a Q object from filters config and actual parmeters. """
def build_q(fields_dict, params_dict, request=None):
    """Return a Q object built from the filters config and the parameters.

    Queries generated by different search fields are ANDed together; when
    one search field is defined over several model fields, the per-field
    queries are ORed.
    """
    and_query = Q()
    for fieldname in fields_dict:
        search_field = fields_dict[fieldname]
        # Skip fields that were not submitted or are empty.
        if fieldname not in params_dict or params_dict[fieldname] == '' or params_dict[fieldname] == []:
            continue
        or_query = None
        if type(search_field) is list:
            # Shorthand config: a plain list of model fields.
            field_list = search_field
            search_operator = "__icontains"
            fixed_filters = None
            multiple_values = False
            custom_query_method = None
            value_mapper = None
        else:
            # Full dictionary of field definitions.
            if search_field.get('ignore', False):
                continue
            field_list = search_field['fields']
            search_operator = search_field.get('operator', None)
            fixed_filters = search_field.get('fixed_filters', None)
            multiple_values = search_field.get('multiple', False)
            custom_query_method = search_field.get('custom_query', None)
            value_mapper = search_field.get('value_mapper', None)
        for model_field in field_list:
            if multiple_values:
                if hasattr(params_dict, "getlist"):
                    request_field_value = params_dict.getlist(fieldname)
                elif type(params_dict[fieldname]) is list:
                    request_field_value = params_dict[fieldname]
                else:
                    request_field_value = [params_dict[fieldname]]
                if value_mapper:
                    request_field_value = [value_mapper(value) for value in request_field_value]
            else:
                request_field_value = params_dict[fieldname] if not value_mapper else value_mapper(params_dict[fieldname])
            if not custom_query_method:
                # NOTE(review): `search_operator` may be None when a dict
                # definition omits 'operator' — presumably such configs
                # always supply 'custom_query'; confirm.
                filter_dict = {model_field + search_operator: request_field_value}
                cf = Q(**filter_dict)
            else:
                # TODO: this is a hack for using request data in
                # custom_query; it would be better to ALSO pass the
                # request to custom_query_method.
                third_arg = request if request else params_dict
                cf = custom_query_method(model_field, request_field_value, third_arg)
            if not or_query:
                or_query = cf
            else:
                or_query = or_query | cf
        # fixed_filters must return a Q object or None.
        fixed_filters_q = Q()
        if fixed_filters:
            if callable(fixed_filters):
                fixed_filters_q = fixed_filters(params_dict)
            elif type(fixed_filters) is dict:
                fixed_filters_q = Q(**fixed_filters)
        and_query = and_query & or_query
        and_query = and_query & fixed_filters_q
    return and_query
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_search_fields(cls): """ Returns search fields in sfdict """
def get_search_fields(cls):
    """Collect the ``search_fields`` dicts declared along *cls*'s MRO.

    Walks the full method-resolution order from the most distant ancestor
    down to *cls* itself, so fields declared on a subclass override
    same-named fields from any ancestor. (The previous implementation
    looked only at direct bases, silently dropping grandparents' fields.)

    Returns
    -------
    dict
        Merged mapping of field name to search-field definition.
    """
    sfdict = {}
    for klass in reversed(cls.__mro__):
        # vars() picks up only classes that define search_fields
        # themselves, not ones merely inheriting it.
        if 'search_fields' in vars(klass):
            sfdict.update(klass.search_fields)
    return sfdict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(identifier): """ Find and return a format by name, acronym or extension. :param identifier: A string describing the format. """
def find(identifier):
    """Return the format whose name, acronym or extension is *identifier*.

    :param identifier: A string describing the format.
    :raises UnknownFormat: If no registered format matches.
    """
    for candidate in FORMATS:
        if identifier in (candidate.name, candidate.acronym, candidate.extension):
            return candidate
    raise UnknownFormat('No format found with name, acronym or extension "%s"' % identifier)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_by_name(name): """ Find and return a format by name. :param name: A string describing the name of the format. """
def find_by_name(name):
    """Return the format with the given name.

    :param name: A string describing the name of the format.
    :raises UnknownFormat: If no registered format matches.
    """
    for candidate in FORMATS:
        if candidate.name == name:
            return candidate
    raise UnknownFormat('No format found with name "%s"' % name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_by_extension(extension): """ Find and return a format by extension. :param extension: A string describing the extension of the format. """
def find_by_extension(extension):
    """Return the format registered for the given extension.

    :param extension: A string describing the extension of the format.
    :raises UnknownFormat: If no registered format matches.
    """
    for candidate in FORMATS:
        if extension in candidate.extensions:
            return candidate
    raise UnknownFormat('No format found with extension "%s"' % extension)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_by_content_type(content_type): """ Find and return a format by content type. :param content_type: A string describing the internet media type of the format. """
def find_by_content_type(content_type):
    """Return the format registered for the given internet media type.

    :param content_type: A string describing the content type.
    :raises UnknownFormat: If no registered format matches.
    """
    for candidate in FORMATS:
        if content_type in candidate.content_types:
            return candidate
    raise UnknownFormat('No format found with content type "%s"' % content_type)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def options(self, request, map, *args, **kwargs): """List communication options."""
def options(self, request, map, *args, **kwargs):
    """List communication options."""
    # Map each supported HTTP method to its view's docstring.
    allowed = {}
    for method, function in map.items():
        allowed[method] = function.__doc__
    return self._render(
        request = request,
        template = 'options',
        context = {'options': allowed},
        status = 200,
        headers = {'Allow': ', '.join(allowed.keys())}
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_format(self, request): """ Determine and return a 'formats.Format' instance describing the most desired response format that is supported by these views. :param request: A django.http.HttpRequest instance. Formats specified by extension (e.g. '/articles/index.html') take precedence over formats given in the HTTP Accept header, even if it's a format that isn't known by Respite. If the request doesn't specify a format by extension (e.g. '/articles/' or '/articles/new') and none of the formats in the HTTP Accept header are supported, Respite will fall back on the format given in DEFAULT_FORMAT. """
def _get_format(self, request):
    """Determine the most desired response format supported by this view.

    Returns a ``formats.Format`` instance or ``None``. An extension in
    the request path (e.g. '/articles/index.html') takes precedence over
    the HTTP Accept header; if neither yields a supported format, fall
    back on the format named by DEFAULT_FORMAT.
    """
    # The Format instances corresponding to this view's supported formats.
    supported_formats = [formats.find(format) for format in self.supported_formats]
    # 1) Determine format by extension...
    if '.' in request.path:
        extension = request.path.split('.')[-1]
        try:
            format = formats.find_by_extension(extension)
        except formats.UnknownFormat:
            return None
        return format if format in supported_formats else None
    # 2) Determine format by HTTP Accept header...
    if 'HTTP_ACCEPT' in request.META:
        content_types = parse_http_accept_header(request.META['HTTP_ACCEPT'])
        # Only consider 'accept' headers with a single format, in an
        # attempt to play nice with browsers that ask for formats they
        # really should not want.
        if len(content_types) == 1:
            content_type = content_types[0]
            # No preference: prefer the first of the supported formats.
            if content_type == '*/*':
                return supported_formats[0]
            try:
                format = formats.find_by_content_type(content_type)
            except formats.UnknownFormat:
                return None
            return format if format in supported_formats else None
    # 3) Fall back on DEFAULT_FORMAT, if supported by this view.
    if DEFAULT_FORMAT:
        format = formats.find(DEFAULT_FORMAT)
        return format if format in supported_formats else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _render(self, request, template=None, status=200, context={}, headers={}, prefix_template_path=True): """ Render a HTTP response. :param request: A django.http.HttpRequest instance. :param template: A string describing the path to a template. :param status: An integer describing the HTTP status code to respond with. :param context: A dictionary describing variables to populate the template with. :param headers: A dictionary describing HTTP headers. :param prefix_template_path: A boolean describing whether to prefix the template with the view's template path. Please note that ``template`` must not specify an extension, as one will be appended according to the request format. For example, a value of ``blog/posts/index`` would populate ``blog/posts/index.html`` for requests that query the resource's HTML representation. If no template that matches the request format exists at the given location, or if ``template`` is ``None``, Respite will attempt to serialize the template context automatically. You can change the way your models are serialized by defining ``serialize`` methods that return a dictionary:: class NuclearMissile(models.Model): serial_number = models.IntegerField() is_armed = models.BooleanField() launch_code = models.IntegerField() def serialize(self): return { 'serial_number': self.serial_number, 'is_armed': self.is_armed } If the request format is not supported by the view (as determined by the ``supported_formats`` property or a specific view's ``override_supported_formats`` decorator), this function will yield HTTP 406 Not Acceptable. """
def _render(self, request, template=None, status=200, context={}, headers={}, prefix_template_path=True):
    """Render a HTTP response in the format the request asks for.

    *template* is given without extension; one is appended according to
    the negotiated format. If no matching template exists (or *template*
    is None) the context is serialized instead. Yields HTTP 406 when the
    requested format is not supported by the view.
    """
    format = self._get_format(request)
    if not format:
        # The requested format isn't supported by this view.
        return HttpResponse(status=406)
    content_type = '%s; charset=%s' % (format.content_type, settings.DEFAULT_CHARSET)
    if template:
        if prefix_template_path:
            template_path = '%s.%s' % (self.template_path + template, format.extension)
        else:
            template_path = '%s.%s' % (template, format.extension)
        try:
            response = render(
                request = request,
                template_name = template_path,
                dictionary = context,
                status = status,
                content_type = content_type
            )
        except TemplateDoesNotExist:
            # No template for this format: fall back on serializing the
            # context directly.
            try:
                response = HttpResponse(
                    content = serializers.find(format)(context).serialize(request),
                    content_type = content_type,
                    status = status
                )
            except serializers.UnknownSerializer:
                raise self.Error(
                    'No template exists at %(template_path)s, and no serializer found for %(format)s' % {
                        'template_path': template_path,
                        'format': format
                    }
                )
    else:
        response = HttpResponse(
            content = serializers.find(format)(context).serialize(request),
            content_type = content_type,
            status = status
        )
    for header, value in headers.items():
        response[header] = value
    return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _error(self, request, status, headers={}, prefix_template_path=False, **kwargs): """ Convenience method to render an error response. The template is inferred from the status code. :param request: A django.http.HttpRequest instance. :param status: An integer describing the HTTP status code to respond with. :param headers: A dictionary describing HTTP headers. :param prefix_template_path: A boolean describing whether to prefix the template with the view's template path. :param kwargs: Any additional keyword arguments to inject. These are wrapped under ``error`` for convenience. For implementation details, see ``render`` """
def _error(self, request, status, headers={}, prefix_template_path=False, **kwargs):
    """Render an error response whose template is named after *status*.

    Any extra keyword arguments are wrapped under the ``error`` context
    key for convenience. See ``_render`` for implementation details.
    """
    context = {'error': kwargs}
    return self._render(
        request = request,
        template = str(status),
        status = status,
        context = context,
        headers = headers,
        prefix_template_path = prefix_template_path
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(format): """ Find and return a serializer for the given format. Arguments: format -- A Format instance. """
def find(format):
    """Return the serializer registered for the given format.

    Arguments:
    format -- A Format instance.
    """
    if format not in SERIALIZERS:
        raise UnknownSerializer('No serializer found for %s' % format.acronym)
    return SERIALIZERS[format]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_form_kwargs(self): """ Returns the keyword arguments for instantiating the search form. """
def get_form_kwargs(self):
    """Return the keyword arguments for instantiating the search form."""
    update_data = {}
    sfdict = self.filter_class.get_search_fields()
    for fieldname in sfdict:
        try:
            has_multiple = sfdict[fieldname].get('multiple', False)
        except:  # list-style definitions have no .get(); treat as single
            has_multiple = False
        if has_multiple:
            update_data[fieldname] = self.request.GET.getlist(fieldname, [])
        else:
            update_data[fieldname] = self.request.GET.get(fieldname, None)
    if self.order_field:
        update_data[self.order_field] = self.request.GET.get(self.order_field, None)
    initial = self.get_initial()
    initial.update(update_data)
    kwargs = {'initial': initial}
    if self.groups_for_userlist is not None:
        # Build the user choices, excluding the current user and
        # optionally restricting to the configured groups.
        pot_users = User.objects.exclude(id=self.request.user.id)
        if len(self.groups_for_userlist):
            pot_users = pot_users.filter(groups__name__in=self.groups_for_userlist)
        pot_users = pot_users.distinct().order_by('username')
        kwargs['user_choices'] = tuple((user.id, str(user)) for user in pot_users)
    return kwargs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pluralize(word) : """Pluralize an English noun."""
def pluralize(word):
    """Pluralize an English noun.

    Handles uncountable words ('sheep', ...), irregular plurals
    ('person' -> 'people', ...) and a cascade of regex rules ported from
    the Rails inflector. Matching is case-insensitive.
    """
    # NOTE: the ox rule originally read '^(?i)(ox)$'. Since Python 3.11,
    # inline flags such as (?i) must appear at the very start of the
    # pattern, so that spelling raises re.error for any word reaching
    # rule 2; it is now written '(?i)^(ox)$'.
    rules = [
        ['(?i)(quiz)$', '\\1zes'],
        ['(?i)^(ox)$', '\\1en'],
        ['(?i)([m|l])ouse$', '\\1ice'],
        ['(?i)(matr|vert|ind)ix|ex$', '\\1ices'],
        ['(?i)(x|ch|ss|sh)$', '\\1es'],
        ['(?i)([^aeiouy]|qu)ies$', '\\1y'],
        ['(?i)([^aeiouy]|qu)y$', '\\1ies'],
        ['(?i)(hive)$', '\\1s'],
        ['(?i)(?:([^f])fe|([lr])f)$', '\\1\\2ves'],
        ['(?i)sis$', 'ses'],
        ['(?i)([ti])um$', '\\1a'],
        ['(?i)(buffal|tomat)o$', '\\1oes'],
        ['(?i)(bu)s$', '\\1ses'],
        ['(?i)(alias|status)', '\\1es'],
        ['(?i)(octop|vir)us$', '\\1i'],
        ['(?i)(ax|test)is$', '\\1es'],
        ['(?i)s$', 's'],
        ['(?i)$', 's']
    ]
    uncountable_words = ['equipment', 'information', 'rice', 'money',
                         'species', 'series', 'fish', 'sheep']
    irregular_words = {
        'person': 'people',
        'man': 'men',
        'child': 'children',
        'sex': 'sexes',
        'move': 'moves'
    }
    lower_cased_word = word.lower()
    # Uncountable nouns pass through unchanged.
    for uncountable_word in uncountable_words:
        if lower_cased_word[-1 * len(uncountable_word):] == uncountable_word:
            return word
    # Irregular plurals, preserving the case of the first letter.
    for irregular in irregular_words.keys():
        match = re.search('(' + irregular + ')$', word, re.IGNORECASE)
        if match:
            return re.sub('(?i)' + irregular + '$',
                          match.expand('\\1')[0] + irregular_words[irregular][1:],
                          word)
    for rule in range(len(rules)):
        match = re.search(rules[rule][0], word, re.IGNORECASE)
        if match:
            groups = match.groups()
            # Drop backreferences to groups that did not participate, so
            # re.sub does not fail on them.
            for k in range(0, len(groups)):
                if groups[k] is None:
                    rules[rule][1] = rules[rule][1].replace('\\' + str(k + 1), '')
            return re.sub(rules[rule][0], rules[rule][1], word)
    return word
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def us2mc(string): """Transform an underscore_case string to a mixedCase string"""
def us2mc(string):
    """Transform an underscore_case string to a mixedCase string."""
    def _uppercase_group(match):
        # Replace '_x' with 'X'.
        return match.group(1).upper()
    return re.sub(r'_([a-z])', _uppercase_group, string)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_form(model, form=None, fields=False, exclude=False): """ Generate a form from a model. :param model: A Django model. :param form: A Django form. :param fields: A list of fields to include in this form. :param exclude: A list of fields to exclude in this form. """
def generate_form(model, form=None, fields=False, exclude=False):
    """Generate a ModelForm class for *model*.

    :param model: A Django model.
    :param form: A Django form class to use as the base, if any.
    :param fields: A list of fields to include, or False for the default.
    :param exclude: A list of fields to exclude, or False for none.
    """
    # Bind to fresh names so the Meta class body below does not shadow
    # the outer arguments.
    _model, _fields, _exclude = model, fields, exclude
    class Form(form or forms.ModelForm):
        class Meta:
            model = _model
            if _fields is not False:
                fields = _fields
            if _exclude is not False:
                exclude = _exclude
    return Form
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sample_double_norm(mean, std_upper, std_lower, size): """Note that this function requires Scipy."""
def sample_double_norm(mean, std_upper, std_lower, size):
    """Draw samples from a two-sided ("double") normal distribution.

    The distribution peaks at *mean*, with standard deviation *std_upper*
    above the peak and *std_lower* below it. Requires SciPy.
    """
    from scipy.special import erfinv
    # Draw percentiles uniformly in [0, 1) and push them through the
    # Gaussian quantile function. Because of the 1/sigma normalization of
    # the Gaussian PDF, a fraction `cutoff` of the probability mass lies
    # below the peak, so percentiles in [0, cutoff] are remapped to
    # [0, 0.5] (lower half) and those in [cutoff, 1] to [0.5, 1] (upper
    # half) before inverting with the appropriate sigma.
    samples = np.empty(size)
    percentiles = np.random.uniform(0., 1., size)
    cutoff = std_lower / (std_lower + std_upper)
    below = (percentiles < cutoff)
    percentiles[below] *= 0.5 / cutoff
    samples[below] = mean + np.sqrt(2) * std_lower * erfinv(2 * percentiles[below] - 1)
    above = ~below
    percentiles[above] = 1 - (1 - percentiles[above]) * 0.5 / (1 - cutoff)
    samples[above] = mean + np.sqrt(2) * std_upper * erfinv(2 * percentiles[above] - 1)
    return samples
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_gamma_params(mode, std): """Given a modal value and a standard deviation, compute corresponding parameters for the gamma distribution. Intended to be used to replace normal distributions when the value must be positive and the uncertainty is comparable to the best value. Conversion equations determined from the relations given in the sample_gamma() docs. """
def find_gamma_params(mode, std):
    """Compute gamma-distribution parameters from a mode and std. dev.

    Intended to replace normal distributions when the value must be
    positive and the uncertainty is comparable to the best value.
    Conversion equations determined from the relations given in the
    sample_gamma() docs.

    Returns (alpha, beta). Raises ValueError when *mode* is not strictly
    positive (the previous `mode < 0` check let mode == 0 through, which
    then crashed with ZeroDivisionError) or when no self-consistent
    parameters exist.
    """
    if mode <= 0:
        raise ValueError('input mode must be positive for gamma; got %e' % mode)
    var = std**2
    beta = (mode + np.sqrt(mode**2 + 4 * var)) / (2 * var)
    j = 2 * var / mode**2
    alpha = (j + 1 + np.sqrt(2 * j + 1)) / j
    if alpha <= 1:
        raise ValueError('couldn\'t compute self-consistent gamma parameters: '
                         'mode=%e std=%e alpha=%e beta=%e' % (mode, std, alpha, beta))
    return alpha, beta
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _lval_add_towards_polarity(x, polarity): """Compute the appropriate Lval "kind" for the limit of value `x` towards `polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and the infinity direction of polarity. """
def _lval_add_towards_polarity(x, polarity):
    """Compute the appropriate Lval "kind" for the limit of value *x*
    towards *polarity*: 'toinf' when the limit points away from zero in
    x's own direction, otherwise 'pastzero'.
    """
    if x < 0:
        kind = 'toinf' if polarity < 0 else 'pastzero'
    else:
        kind = 'toinf' if polarity > 0 else 'pastzero'
    return Lval(kind, x)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def limtype(msmt): """Return -1 if this value is some kind of upper limit, 1 if this value is some kind of lower limit, 0 otherwise."""
def limtype(msmt):
    """Classify *msmt*: -1 for an upper limit, 1 for a lower limit,
    0 otherwise."""
    if np.isscalar(msmt) or isinstance(msmt, Uval):
        return 0
    if isinstance(msmt, Lval):
        if msmt.kind == 'undef':
            raise ValueError('no simple limit type for Lval %r' % msmt)
        # Quasi-hack here: limits of ('tozero', [positive number]) are
        # reported as upper limits. In a plot full of fluxes this is what
        # makes sense, but note it would be misleading if the quantity in
        # question could go negative.
        polarity = msmt._polarity()
        if polarity in (-2, 1):
            return -1
        if polarity in (2, -1):
            return 1
        return 0
    if isinstance(msmt, Textual):
        return msmt.limtype()
    raise ValueError('don\'t know how to treat %r as a measurement' % msmt)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_pcount(nevents): """We assume a Poisson process. nevents is the number of events in some interval. The distribution of values is the distribution of the Poisson rate parameter given this observed number of events, where the "rate" is in units of events per interval of the same duration. The max-likelihood value is nevents, but the mean value is nevents + 1. The gamma distribution is obtained by assuming an improper, uniform prior for the rate between 0 and infinity."""
def from_pcount(nevents):
    """Build a Uval for a Poisson rate given an observed event count.

    Assumes a Poisson process: *nevents* is the number of events seen in
    some interval, and the sampled distribution is that of the rate
    parameter (events per interval) given that count, with an improper
    uniform prior on [0, inf). The max-likelihood value is nevents; the
    mean is nevents + 1.
    """
    if nevents < 0:
        raise ValueError('Poisson parameter `nevents` must be nonnegative')
    # The rate posterior under a uniform prior is Gamma(nevents + 1).
    return Uval(np.random.gamma(nevents + 1, size=uval_nsamples))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def repvals(self, method): """Compute representative statistical values for this Uval. `method` may be either 'pct' or 'gauss'. Returns (best, plus_one_sigma, minus_one_sigma), where `best` is the "best" value in some sense, and the others correspond to values at the ~84 and 16 percentile limits, respectively. Because of the sampled nature of the Uval system, there is no single method to compute these numbers. The "pct" method returns the 50th, 15.866th, and 84.134th percentile values. The "gauss" method computes the mean μ and standard deviation σ of the samples and returns [μ, μ+σ, μ-σ]. """
def repvals(self, method):
    """Compute representative statistical values for this Uval.

    *method* is 'pct' (50th / 84.134th / 15.866th percentiles) or
    'gauss' (mean, mean+std, mean-std). Returns an array of
    (best, plus_one_sigma, minus_one_sigma).
    """
    if method == 'pct':
        return pk_scoreatpercentile(self.d, [50., 84.134, 15.866])
    if method == 'gauss':
        mean, std = self.d.mean(), self.d.std()
        return np.asarray([mean, mean + std, mean - std])
    raise ValueError('unknown representative-value method "%s"' % method)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def repval(self, limitsok=False): """Get a best-effort representative value as a float. This can be DANGEROUS because it discards limit information, which is rarely wise."""
def repval(self, limitsok=False):
    """Get a best-effort representative value as a float.

    This can be DANGEROUS because it discards limit information, which
    is rarely wise; pass limitsok=True to permit limit kinds.
    """
    if not limitsok and self.dkind in ('lower', 'upper'):
        raise LimitError()
    if self.dkind == 'unif':
        # Midpoint of the uniform interval.
        lower, upper = map(float, self.data)
        value = 0.5 * (lower + upper)
    elif self.dkind in _noextra_dkinds:
        value = float(self.data)
    elif self.dkind in _yesextra_dkinds:
        value = float(self.data[0])
    else:
        raise RuntimeError('can\'t happen')
    # Undo the log10 transform when the value is stored that way.
    return 10**value if self.tkind == 'log10' else value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def moreland_adjusthue (msh, m_unsat): """Moreland's AdjustHue procedure to adjust the hue value of an Msh color *msh* should be of of shape (3, ). *m_unsat* is a scalar. Return value is the adjusted h (hue) value. """
def moreland_adjusthue(msh, m_unsat):
    """Moreland's AdjustHue procedure for an Msh color.

    *msh* should be of shape (3, ); *m_unsat* is a scalar. Returns the
    adjusted h (hue) value.
    """
    if msh[M] >= m_unsat:
        # Already at least as saturated as the target: best we can do.
        return msh[H]
    hspin = (msh[S] * np.sqrt(m_unsat**2 - msh[M]**2)
             / (msh[M] * np.sin(msh[S])))
    # Spin away from purple.
    if msh[H] > -np.pi / 3:
        return msh[H] + hspin
    return msh[H] - hspin
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_datasets_in_nodes(): """ Get the node associated with each dataset. Some datasets will have an ambiguous node since they exists in more than one node. """
def get_datasets_in_nodes():
    """
    Get the node associated with each dataset and write the mapping to
    datasets.json. Some datasets have an ambiguous node since they exist
    in more than one node.
    """
    data_dir = os.path.join(scriptdir, "..", "usgs", "data")
    # Materialize as lists: on Python 3, map() returns a one-shot
    # iterator, but each of these sequences is iterated more than once
    # below (once to build `datasets`, again for the ambiguity checks),
    # so the original code silently produced empty ambiguity lists.
    cwic = [d["datasetName"] for d in api.datasets(None, CWIC_LSI_EXPLORER_CATALOG_NODE)['data']]
    ee = [d["datasetName"] for d in api.datasets(None, EARTH_EXPLORER_CATALOG_NODE)['data']]
    hdds = [d["datasetName"] for d in api.datasets(None, HDDS_EXPLORER_CATALOG_NODE)['data']]
    lpcs = [d["datasetName"] for d in api.datasets(None, LPCS_EXPLORER_CATALOG_NODE)['data']]
    # Create mapping from dataset to node. Later updates win, so a
    # dataset present in several nodes gets the last node listed here.
    datasets = {}
    datasets.update({ds: "CWIC" for ds in cwic})
    datasets.update({ds: "EE" for ds in ee})
    datasets.update({ds: "HDDS" for ds in hdds})
    datasets.update({ds: "LPCS" for ds in lpcs})
    datasets_path = os.path.join(data_dir, "datasets.json")
    with open(datasets_path, "w") as f:
        f.write(json.dumps(datasets))
    # Find the datasets with ambiguous nodes. Sets make the membership
    # tests O(1) per lookup instead of O(n).
    ee_set, hdds_set, lpcs_set = set(ee), set(hdds), set(lpcs)
    cwic_ee = [ds for ds in cwic if ds in ee_set]
    cwic_hdds = [ds for ds in cwic if ds in hdds_set]
    cwic_lpcs = [ds for ds in cwic if ds in lpcs_set]
    ee_hdds = [ds for ds in ee if ds in hdds_set]
    ee_lpcs = [ds for ds in ee if ds in lpcs_set]
    hdds_lpcs = [ds for ds in hdds if ds in lpcs_set]
    # NOTE(review): these ambiguity lists are computed but never used or
    # returned — confirm whether they should be reported somewhere.
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pivot_wavelength_ee(bpass): """Compute pivot wavelength assuming equal-energy convention. `bpass` should have two properties, `resp` and `wlen`. The units of `wlen` can be anything, and `resp` need not be normalized in any particular way. """
# Equal-energy pivot wavelength:
#   lambda_pivot = sqrt( integral(R dl) / integral(R / l^2 dl) )
# evaluated with Simpson's rule over the tabulated response curve.
from scipy.integrate import simps
return np.sqrt(simps(bpass.resp, bpass.wlen) / simps(bpass.resp / bpass.wlen**2, bpass.wlen))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_std_registry(): """Get a Registry object pre-filled with information for standard telescopes. """
from six import itervalues
# Start from an empty Registry and let every builtin registrar populate it.
reg = Registry()
for fn in itervalues(builtin_registrars):
    fn(reg)
return reg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pivot_wavelength(self): """Get the bandpass' pivot wavelength. Unlike calc_pivot_wavelength(), this function will use a cached value if available. """
# Check the registry's cache first; on a miss, compute and memoize so the
# (potentially expensive) integration runs at most once per band.
wl = self.registry._pivot_wavelengths.get((self.telescope, self.band))
if wl is not None:
    return wl
wl = self.calc_pivot_wavelength()
self.registry.register_pivot_wavelength(self.telescope, self.band, wl)
return wl
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calc_halfmax_points(self): """Calculate the wavelengths of the filter half-maximum values. """
# Lazily load the response data, then delegate to the shared
# interpolation helper to find the half-maximum crossing wavelengths.
d = self._ensure_data()
return interpolated_halfmax_points(d.wlen, d.resp)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def halfmax_points(self): """Get the bandpass' half-maximum wavelengths. These can be used to compute a representative bandwidth, or for display purposes. Unlike calc_halfmax_points(), this function will use a cached value if available. """
# Cached lookup of the (lower, upper) half-maximum wavelength pair;
# compute and memoize in the registry on a cache miss.
t = self.registry._halfmaxes.get((self.telescope, self.band))
if t is not None:
    return t
t = self.calc_halfmax_points()
self.registry.register_halfmaxes(self.telescope, self.band, t[0], t[1])
return t
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bands(self, telescope): """Return a list of bands associated with the specified telescope."""
# Unknown telescopes yield an empty list rather than raising.
q = self._seen_bands.get(telescope)
if q is None:
    return []
# Return a fresh list so callers cannot mutate our internal state.
return list(q)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_pivot_wavelength(self, telescope, band, wlen): """Register precomputed pivot wavelengths."""
# Refuse duplicate registrations so precomputed values stay consistent.
if (telescope, band) in self._pivot_wavelengths:
    raise AlreadyDefinedError('pivot wavelength for %s/%s already ' 'defined', telescope, band)
self._note(telescope, band)
self._pivot_wavelengths[telescope,band] = wlen
# Return self to allow call chaining during registry setup.
return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_halfmaxes(self, telescope, band, lower, upper): """Register precomputed half-max points."""
# Refuse duplicate registrations so precomputed values stay consistent.
if (telescope, band) in self._halfmaxes:
    raise AlreadyDefinedError('half-max points for %s/%s already ' 'defined', telescope, band)
self._note(telescope, band)
# Stored as a (lower, upper) wavelength tuple.
self._halfmaxes[telescope,band] = (lower, upper)
# Return self to allow call chaining during registry setup.
return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_bpass(self, telescope, klass): """Register a Bandpass class."""
# One Bandpass class per telescope; duplicates are a setup error.
if telescope in self._bpass_classes:
    raise AlreadyDefinedError('bandpass class for %s already ' 'defined', telescope)
self._note(telescope, None)
self._bpass_classes[telescope] = klass
# Return self to allow call chaining during registry setup.
return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, telescope, band): """Get a Bandpass object for a known telescope and filter."""
klass = self._bpass_classes.get(telescope)
if klass is None:
    raise NotDefinedError('bandpass data for %s not defined', telescope)
# Instantiate the telescope-specific class and wire the instance back to
# this registry so it can use the shared caches.
bp = klass()
bp.registry = self
bp.telescope = telescope
bp.band = band
return bp
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load_data(self, band): """From the WISE All-Sky Explanatory Supplement, IV.4.h.i.1, and Jarrett+ 2011. These are relative response per erg and so can be integrated directly against F_nu spectra. Wavelengths are in micron, uncertainties are in parts per thousand. """
# `band` should be 1, 2, 3, or 4. df = bandpass_data_frame('filter_wise_' + str(band) + '.dat', 'wlen resp uncert') df.wlen *= 1e4 # micron to Angstrom df.uncert *= df.resp / 1000. # parts per thou. to absolute values. lo, hi = self._filter_subsets[band] df = df[lo:hi] # clip zero parts of response. return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_comment_body(body): """Returns given comment HTML as plaintext. Converts all HTML tags and entities within 4chan comments into human-readable text equivalents. """
# Decode HTML entities (&amp;, &gt;, ...) into plain characters first.
body = _parser.unescape(body)
# Strip anchor tags but keep their visible link text.
body = re.sub(r'<a [^>]+>(.+?)</a>', r'\1', body)
# <br> becomes a real newline before the catch-all tag removal below.
body = body.replace('<br>', '\n')
# Drop any remaining tags wholesale.
body = re.sub(r'<.+?>', '', body)
return body
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_wcs (fitsheader): """For compatibility between astropy and pywcs."""
wcsmodule = _load_wcs_module ()
# pywcs exposes UnitConverter; astropy.wcs does not -- use it as a probe.
# NOTE(review): is_pywcs is computed but never used below.
is_pywcs = hasattr (wcsmodule, 'UnitConverter')
wcs = wcsmodule.WCS (fitsheader)
wcs.wcs.set ()
wcs.wcs.fix () # I'm interested in MJD computation via datfix()
# pywcs names its transforms wcs_pix2sky/wcs_sky2pix; alias them to the
# astropy-style wcs_pix2world/wcs_world2pix so callers can be uniform.
if hasattr (wcs, 'wcs_pix2sky'):
    wcs.wcs_pix2world = wcs.wcs_pix2sky
    wcs.wcs_world2pix = wcs.wcs_sky2pix
return wcs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sanitize_unicode(item): """Safely pass string values to the CASA tools. item A value to be passed to a CASA tool. In Python 2, the bindings to CASA tasks expect to receive all string values as binary data (:class:`str`) and not Unicode. But :mod:`pwkit` often uses the ``from __future__ import unicode_literals`` statement to prepare for Python 3 compatibility, and other Python modules are getting better about using Unicode consistently, so more and more module code ends up using Unicode strings in cases where they might get exposed to CASA. Doing so will lead to errors. This helper function converts Unicode into UTF-8 encoded bytes for arguments that you might pass to a CASA tool. It will leave non-strings unchanged and recursively transform collections, so you can safely use it just about anywhere. I usually import this as just ``b`` and write ``tool.method(b(arg))``, in analogy with the ``b''`` byte string syntax. This leads to code such as:: from pwkit.environments.casa.util import tools, sanitize_unicode as b tb = tools.table() path = u'data.ms' tb.open(path) # => raises exception tb.open(b(path)) # => works """
# Unicode text -> UTF-8 bytes; containers are rebuilt recursively;
# pwkit Path objects -> plain strings; everything else passes through.
if isinstance(item, text_type):
    return item.encode('utf8')
if isinstance(item, dict):
    # Sanitize both keys and values.
    return dict((sanitize_unicode(k), sanitize_unicode(v)) for k, v in six.iteritems(item))
if isinstance(item,(list, tuple)):
    # item.__class__ preserves the concrete sequence type (list vs tuple).
    return item.__class__(sanitize_unicode(x) for x in item)
from ...io import Path
if isinstance(item, Path):
    return str(item)
return item
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def datadir(*subdirs): """Get a path within the CASA data directory. subdirs Extra elements to append to the returned path. This function locates the directory where CASA resource data files (tables of time offsets, calibrator models, etc.) are stored. If called with no arguments, it simply returns that path. If arguments are provided, they are appended to the returned path using :func:`os.path.join`, making it easy to construct the names of specific data files. For instance:: from pwkit.environments.casa import util cal_image_path = util.datadir('nrao', 'VLA', 'CalModels', '3C286_C.im') tb = util.tools.image() tb.open(cal_image_path) """
import os.path
data = None
# Strategy 1: the CASAPATH environment variable, whose first
# whitespace-separated token is the CASA installation root.
if 'CASAPATH' in os.environ:
    data = os.path.join(os.environ['CASAPATH'].split()[0], 'data')
if data is None:
    # The Conda CASA directory layout:
    try:
        import casadef
    except ImportError:
        pass
    else:
        data = os.path.join(os.path.dirname(casadef.task_directory), 'data')
        if not os.path.isdir(data):
            # Sigh, hack for CASA 4.7 + Conda; should be straightened out:
            dn = os.path.dirname
            data = os.path.join(dn(dn(dn(casadef.task_directory))), 'lib', 'casa', 'data')
            if not os.path.isdir(data):
                data = None
if data is None:
    # Strategy 3: walk up from the casac module looking for a 'data' dir.
    import casac
    prevp = None
    p = os.path.dirname(casac.__file__)
    while len(p) and p != prevp:
        data = os.path.join(p, 'data')
        if os.path.isdir(data):
            break
        prevp = p
        p = os.path.dirname(p)
if not os.path.isdir(data):
    raise RuntimeError('cannot identify CASA data directory')
return os.path.join(data, *subdirs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def logger(filter='WARN'):
    """Set up CASA to write log messages to standard output. filter The log level filter: less urgent messages will not be shown. Valid values are strings such as "DEBUG1", "INFO5", "INFO", "WARN", and "SEVERE". This function creates and returns a CASA ”log sink” object that is configured to write to standard output. The default CASA implementation would *always* create a file named ``casapy.log`` in the current directory; this function safely prevents such a file from being left around. This is particularly important if you don’t have write permissions to the current directory. """
import os, shutil, tempfile
cwd = os.getcwd()
tempdir = None
try:
    # Create the logsink from inside a scratch directory so the
    # 'casapy.log' file CASA insists on creating lands somewhere we can
    # delete, not in the caller's working directory.
    tempdir = tempfile.mkdtemp(prefix='casautil')
    try:
        os.chdir(tempdir)
        sink = tools.logsink()
        # Redirect the sink's file output to the null device.
        sink.setlogfile(sanitize_unicode(os.devnull))
        try:
            os.unlink('casapy.log')
        except OSError as e:
            if e.errno != 2:
                raise
            # otherwise, it's a ENOENT, in which case, no worries.
    finally:
        os.chdir(cwd)
finally:
    if tempdir is not None:
        shutil.rmtree(tempdir, onerror=_rmtree_error)
# Echo messages to the console, make this the global sink, and apply the
# requested priority filter.
sink.showconsole(True)
sink.setglobal(True)
sink.filter(sanitize_unicode(filter.upper()))
return sink
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def forkandlog(function, filter='INFO5', debug=False):
    """Fork a child process and read its CASA log output. function A function to run in the child process filter The CASA log level filter to apply in the child process: less urgent messages will not be shown. Valid values are strings such as "DEBUG1", "INFO5", "INFO", "WARN", and "SEVERE". debug If true, the standard output and error of the child process are *not* redirected to /dev/null. Some CASA tools produce important results that are *only* provided via log messages. This is a problem for automation, since there’s no way for Python code to intercept those log messages and extract the results of interest. This function provides a framework for working around this limitation: by forking a child process and sending its log output to a pipe, the parent process can capture the log messages. This function is a generator. It yields lines from the child process’ CASA log output. Because the child process is a fork of the parent, it inherits a complete clone of the parent’s state at the time of forking. That means that the *function* argument you pass it can do just about anything you’d do in a regular program. The child process’ standard output and error streams are redirected to ``/dev/null`` unless the *debug* argument is true. Note that the CASA log output is redirected to a pipe that is neither of these streams. So, if the function raises an unhandled Python exception, the Python traceback will not pollute the CASA log output. But, by the same token, the calling program will not be able to detect that the exception occurred except by its impact on the expected log output. """
import sys, os
# Pipe over which the child's CASA log output reaches the parent.
readfd, writefd = os.pipe()
pid = os.fork()
if pid == 0:
    # Child process. We never leave this branch.
    #
    # Log messages of priority >WARN are sent to stderr regardless of the
    # status of log.showconsole(). The idea is for this subprocess to be
    # something super lightweight and constrained, so it seems best to
    # nullify stderr, and stdout, to not pollute the output of the calling
    # process.
    #
    # I thought of using the default logger() setup and dup2'ing stderr to
    # the pipe fd, but then if anything else gets printed to stderr (e.g.
    # Python exception info), it'll get sent along the pipe too. The
    # caller would have to be much more complex to be able to detect and
    # handle such output.
    os.close(readfd)
    if not debug:
        f = open(os.devnull, 'w')
        os.dup2(f.fileno(), 1)
        os.dup2(f.fileno(), 2)
    sink = logger(filter=filter)
    # Point the CASA log at the write end of the pipe via /dev/fd.
    sink.setlogfile(b'/dev/fd/%d' % writefd)
    function(sink)
    sys.exit(0)
# Original process.
os.close(writefd)
# Yield the child's log lines until it closes its end of the pipe.
with os.fdopen(readfd) as readhandle:
    for line in readhandle:
        yield line
info = os.waitpid(pid, 0)
if info[1]:
    # Because we're a generator, this is the only way for us to signal if
    # the process died. We could be rewritten as a context manager.
    e = RuntimeError('logging child process PID %d exited '
                     'with error code %d' % tuple(info))
    e.pid, e.exitcode = info
    raise e
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_extended(scene, resp): """ Parse metadata returned from the metadataUrl of a USGS scene. :param scene: Dictionary representation of a USGS scene :param resp: Response object from requests/grequests """
# Parse the metadataUrl XML response and attach the name -> value fields
# to the scene dict under the 'extended' key.
root = ElementTree.fromstring(resp.text)
items = root.findall("eemetadata:metadataFields/eemetadata:metadataField", NAMESPACES)
# NOTE: mutates `scene` in place (and also returns it for convenience).
scene['extended'] = {item.attrib.get('name').strip(): xsi.get(item[0]) for item in items}
return scene
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _async_requests(urls): """ Sends multiple non-blocking requests. Returns a list of responses. :param urls: List of urls """
# Fire all requests concurrently, then block on each future in input
# order so the returned responses line up with `urls`.
session = FuturesSession(max_workers=30)
futures = [ session.get(url) for url in urls ]
return [ future.result() for future in futures ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def metadata(dataset, node, entityids, extended=False, api_key=None): """ Request metadata for a given scene in a USGS dataset. :param dataset: :param node: :param entityids: :param extended: Send a second request to the metadata url to get extended metadata on the scene. :param api_key: """
api_key = _get_api_key(api_key)
url = '{}/metadata'.format(USGS_API)
payload = {
    "jsonRequest": payloads.metadata(dataset, node, entityids, api_key=api_key)
}
r = requests.post(url, payload)
response = r.json()
_check_for_usgs_error(response)
if extended:
    # Fetch each scene's metadata page concurrently and merge the parsed
    # fields into response['data'] via _get_extended's in-place mutation.
    # NOTE(review): relies on Python 2's eager `map`; under Python 3 these
    # maps are lazy iterators, so _get_extended's side effects would never
    # run -- confirm target interpreter.
    metadata_urls = map(_get_metadata_url, response['data'])
    results = _async_requests(metadata_urls)
    data = map(lambda idx: _get_extended(response['data'][idx], results[idx]), range(len(response['data'])))
return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reraise_context(fmt, *args): """Reraise an exception with its message modified to specify additional context. This function tries to help provide context when a piece of code encounters an exception while trying to get something done, and it wishes to propagate contextual information farther up the call stack. It only makes sense in Python 2, which does not provide Python 3’s `exception chaining <https://www.python.org/dev/peps/pep-3134/>`_ functionality. Instead of that more sophisticated infrastructure, this function just modifies the textual message associated with the exception being raised. If only a single argument is supplied, the exception text prepended with the stringification of that argument. If multiple arguments are supplied, the first argument is treated as an old-fashioned ``printf``-type (``%``-based) format string, and the remaining arguments are the formatted values. Example usage:: from pwkit import reraise_context from pwkit.io import Path filename = 'my-filename.txt' try: f = Path(filename).open('rt') for line in f.readlines(): except Exception as e: reraise_context('while reading "%r"', filename) # The exception is reraised and so control leaves this function. If an exception with text ``"bad value"`` were to be raised inside the ``try`` block in the above example, its text would be modified to read ``"while reading \"my-filename.txt\": bad value"``. """
import sys
# Build the context string: printf-style formatting when extra args are
# given, otherwise just stringify `fmt`.
if len(args):
    cstr = fmt % args
else:
    cstr = text_type(fmt)
# Grab the exception currently being handled.
ex = sys.exc_info()[1]
if isinstance(ex, EnvironmentError):
    # EnvironmentError stores its message in strerror; keep args in the
    # (errno, strerror) shape that callers expect.
    ex.strerror = '%s: %s' % (cstr, ex.strerror)
    ex.args = (ex.errno, ex.strerror)
else:
    if len(ex.args):
        cstr = '%s: %s' % (cstr, ex.args[0])
    ex.args = (cstr, ) + ex.args[1:]
# Bare raise re-raises the active exception with its modified message.
raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy(self): """Return a shallow copy of this object. """
# Shallow copy: the attribute dict is duplicated, but the attribute
# values themselves are shared with the original instance.
new = self.__class__()
new.__dict__ = dict(self.__dict__)
return new
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_boards(*args, **kwargs): """Returns every board on 4chan. Returns: dict of :class:`basc_py4chan.Board`: All boards. """
# Use https based on how the Board class instances are to be instantiated https = kwargs.get('https', args[1] if len(args) > 1 else False) # Dummy URL generator, only used to generate the board list which doesn't # require a valid board name url_generator = Url(None, https) _fetch_boards_metadata(url_generator) return get_boards(_metadata.keys(), *args, **kwargs)