text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def lookupfile(filename):
    """Return (positions, geo_cache) for a GPS logger file.

    Parses the Android GPS logger csv with parsePositionFile() and
    reverse-geocodes the positions, caching the lookup result as JSON in
    a hidden ``.<name>.gpscache`` file next to the input so repeated
    runs skip the network round-trip.

    Args:
        filename: path to the GPS logger csv file.

    Returns:
        tuple: (list of position dicts, geo lookup cache dict)
    """
    logger.info('Looking up %s' % (filename))
    # First check the cache file to see if we already looked this up.
    dirname = os.path.dirname(filename)
    basefilename = os.path.basename(filename)
    CACHE_FILE = os.path.join(dirname, '.' + basefilename + '.gpscache')
    cache = loadCacheFile(CACHE_FILE)
    # Get the input file
    positions = parsePositionFile(filename)
    # If the cache load didn't work, do the (slow) geo lookup and save it.
    if not cache:
        logger.info("%s - No cache file found, looking up location"
                    % (basefilename))
        cache = lookupGeoInfo(positions)
        # BUGFIX: json.dump(cache, open(...)) leaked the file handle;
        # a context manager guarantees the cache is flushed and closed.
        with open(CACHE_FILE, 'w') as fp:
            json.dump(cache, fp)
    else:
        logger.info("%s - Found cache file for locations"
                    % (basefilename))
    return positions, cache
def parsePositionFile(filename):
    """Parse an Android GPS logger csv file.

    Returns:
        list of dict: one dict per csv row; each row gains a 'strtime'
        key with a human readable UTC rendering of the 'time' column.
    """
    rows = []
    # BUGFIX: csv.DictReader requires a text-mode file object on
    # Python 3; the original opened the file in binary ("rb") mode.
    # newline='' is the documented way to open csv input files.
    with open(filename, "r", newline='') as theFile:
        reader = csv.DictReader(theFile)
        for line in reader:
            # Convert the time string to something a bit more
            # human readable.
            mytime = dateparser.parse(line['time'])
            line['strtime'] = mytime.strftime("%d %b %Y, %H:%M UTC")
            rows.append(line)
    return rows
def changed(dirname, filename='.md5', args=None, glob=None):
    """Has `glob` changed in `dirname`?

    Compares the current digest of the directory with the digest cached
    in ``dirname/filename`` and rewrites the cache when they differ.

    Args:
        dirname: directory to measure
        filename: filename to store checksum
        args: optional namespace; when args.verbose, print the digest
        glob: optional glob restricting which files are hashed

    Returns:
        bool: True if the digest differs from the cached value.
    """
    root = Path(dirname)
    if not root.exists():
        # if dirname doesn't exist it is changed (by definition)
        return True
    cachefile = root / filename
    # BUGFIX: cachefile.open().read() left the handle open;
    # read_text() reads and closes it.
    current_digest = cachefile.read_text() if cachefile.exists() else ""
    _digest = digest(dirname, glob=glob)
    if args and args.verbose:  # pragma: nocover
        print("md5:", _digest)
    has_changed = current_digest != _digest
    if has_changed:
        # Write through the same Path object used for reading instead
        # of rebuilding the path with os.path.join (consistency).
        cachefile.write_text(_digest)
    return has_changed
def main():  # pragma: nocover
    """Exit with status zero iff the directory has not changed."""
    import sys
    p = argparse.ArgumentParser()
    p.add_argument(
        'directory',
        help="Directory to check"
    )
    p.add_argument(
        '--verbose', '-v',
        action='store_true',
        help="increase verbosity"
    )
    args = p.parse_args()
    # BUGFIX: the original passed raw sys.argv[1], which breaks when an
    # option precedes the positional (e.g. `prog -v dir` would check
    # the literal string '-v'). Use the parsed argument instead.
    _changed = changed(args.directory, args=args)
    sys.exit(_changed)
def changed(self, filename='.md5', glob=None):
    """Are any of the files matched by ``glob`` changed?

    Delegates to the module-level ``changed()`` helper; when a glob is
    supplied, a glob-specific suffix is appended to the cache filename
    so different globs use separate checksum files.
    """
    if glob is not None:
        suffix = ''.join(ch.lower() for ch in glob if ch.isalpha())
        filename = filename + '.glob-' + suffix
    return changed(self, filename, glob=glob)
def create_order(self, debtor, is_vat_included=True, due_date=None,
                 heading='', text_line1='', text_line2='',
                 debtor_data=None, delivery_data=None, products=None,
                 project=None, other_reference='',
                 model=models.Order, **extra):
    """Create a new Order.

    Args:
        debtor (Debtor): the debtor of the order
        debtor_data (mapping): map of debtor data
            {'postal_code': .., 'city': .., 'ean': ..};
            defaults to values on the debtor instance for missing values
        delivery_data (mapping): map of delivery data;
            defaults to values on the debtor instance for missing values
        due_date (datetime): due date
        heading (string): heading to be displayed in the order pdf
        text_line1 (string): first order description line
        text_line2 (string): second order description line
        other_reference (string): custom string to be used for identification
        extra (mapping): mapping of extra values to be passed in to the
            server call; also used to override the computed defaults below

    Returns:
        Order instance
    """
    debtor_data = debtor_data or {}
    delivery_data = delivery_data or {}
    delivery_date = delivery_data.get('date', datetime.datetime.now())
    # Fall back to the debtor's own settings unless overridden via `extra`.
    our_reference = extra.get('our_reference', debtor.our_reference)
    currency = extra.get('currency', debtor.currency)
    layout = extra.get('layout', debtor.layout)
    term_of_payment = extra.get('term_of_payment', debtor.term_of_payment)
    date = extra.get('date', datetime.datetime.now())
    order_input = {
        'debtor': debtor,
        'number': extra.get('number', 1),
        'project': project,
    }
    # Debtor address fields: explicit debtor_data wins over the instance.
    for dd in ['name', 'address', 'postal_code', 'city', 'country', 'ean']:
        order_input['debtor_%s' % dd] = debtor_data.get(dd, getattr(debtor, dd))
    # Delivery address fields default to the debtor's own address.
    for dd in ['address', 'postal_code', 'city', 'country']:
        order_input['delivery_%s' % dd] = delivery_data.get(dd, getattr(debtor, dd))
    order_input.update({
        'delivery_date': delivery_date or datetime.datetime.now(),
        'heading': heading,
        'text_line1': text_line1,
        'text_line2': text_line2,
        'is_archived': extra.get('is_archived', 0),
        'is_sent': extra.get('is_sent', 0),
        'net_amount': extra.get('net_amount', 0),
        'vat_amount': extra.get('vat_amount', 0),
        'gross_amount': extra.get('gross_amount', 0),
        'margin': extra.get('margin', 0),
        'margin_as_percent': extra.get('margin_as_percent', 0),
        'date': date,
        'our_reference': our_reference,
        'other_reference': other_reference,
        'currency': currency,
        'exchange_rate': extra.get('exchange_rate', 1.0),
        'is_vat_included': is_vat_included,
        'layout': layout,
        'due_date': due_date or datetime.datetime.now(),
        'term_of_payment': term_of_payment
    })
    # Any remaining keys in `extra` override the defaults assembled above.
    order_input.update(extra)
    order = self.create(model, **order_input)
    # Attach one orderline per product, if any were supplied.
    if products:
        for product in products:
            self.create_orderline(order, product)
    return order
def __flush(self, async_=True):
    """Flush queued messages through the current HttpRequest and close it.

    Assumes a current requesthandler and requires a lock on self.lock.

    Args:
        async_: when True, hand the flush off to the hub's thread pool;
            otherwise flush synchronously.  (BUGFIX: renamed from
            ``async``, which became a reserved keyword in Python 3.7
            and made this method a SyntaxError.)
    """
    rh = self.rh
    # Snapshot and clear the queues while the caller holds the lock.
    messages = list(self.messages)
    stream_notices = list(self.stream_notices)
    self.stream_notices = []
    self.messages = []
    args = (rh, messages, stream_notices)
    if async_:
        self.hub.threadPool.execute_named(
            self.__inner_flush,
            '%s __inner__flush' % self.hub.l.name, *args)
    else:
        self.__inner_flush(*args)
    self.rh = None
    self._set_timeout(int(time.time() + self.hub.timeout))
def return_opml_response(self, context, **response_kwargs):
    '''Return the outline export as an OPML file attachment.

    Swaps in the OPML template, renders with an XML content type, and
    sets a Content-Disposition header derived from the outline title.
    '''
    self.template_name = 'fiction_outlines/outline.opml'
    response = super().render_to_response(context, content_type='text/xml', **response_kwargs)
    # Force a download with a filesystem-safe name based on the title.
    response['Content-Disposition'] = 'attachment; filename="{}.opml"'.format(slugify(self.object.title))
    return response
def render_to_response(self, context, **response_kwargs):
    '''Route the response to the handler matching the requested format.

    :attribute switcher: A dictionary of format types and their
        respective response methods; unknown formats fall back to
        ``not_implemented``.
    '''
    switcher = {
        'json': self.return_json_response,
        'opml': self.return_opml_response,
        'md': self.return_md_response,
        'textbundle': self.not_implemented,
        'xlsx': self.not_implemented,
    }
    handler = switcher.get(self.format, self.not_implemented)
    return handler(context, **response_kwargs)
def CER(prediction, true_labels):
    """Classification error rate for an N-class problem.

    Parameters:
        prediction (numpy.ndarray): 1D array of predicted labels
        true_labels (numpy.ndarray): 1D array of ground-truth labels,
            organized in the same order.

    Returns:
        float: fraction of positions where prediction != true_labels.
    """
    total = len(prediction)
    n_wrong = (prediction != true_labels).sum()
    return float(n_wrong) / total
def benchmark(store, n=10000):
    """Iterate all Referred items and, for each, all Referrer items that
    reference it.  Fairly item-instantiation heavy.

    NOTE(review): ``n`` is accepted but never used here — presumably the
    benchmark harness passes it uniformly to all benchmarks; confirm.
    """
    R = Referrer
    for referred in store.query(Referred):
        # Inner query instantiates every referrer of this referred item.
        for _reference in store.query(R, R.reference == referred):
            pass
def generate_uuid():
    """Generate a URL-safe, padding-free UUID string (22 chars)."""
    raw = uuid.uuid4().bytes
    encoded = base64.urlsafe_b64encode(raw).decode()
    # base64 '=' padding only ever appears at the end; strip it.
    return encoded.rstrip('=')
def extract_archive(archive_path, destination_path):
    """Extract a tar archive somewhere on the filesystem.

    errorlevel=1 makes fatal extraction errors raise instead of being
    printed as warnings.

    NOTE(review): members are extracted as-is; for untrusted archives
    consider validating member paths to prevent path traversal.
    """
    # BUGFIX: the original never closed the archive; the context
    # manager releases the handle deterministically.
    with tarfile.open(archive_path) as tar:
        tar.errorlevel = 1
        tar.extractall(destination_path)
def remove_file(file_path):
    """Remove a file or directory tree from the filesystem (best effort).

    Failures are reported on stdout rather than raised, matching the
    original best-effort contract.
    """
    import os
    if path.exists(file_path):
        try:
            # BUGFIX: rmtree() raises NotADirectoryError on plain files,
            # so despite its name this function previously only removed
            # directories. Dispatch on the path type to handle both.
            if path.isdir(file_path):
                rmtree(file_path)
            else:
                os.remove(file_path)
        except Exception:
            print('Unable to remove temporary workdir {}'.format(file_path))
def sub_dfs_by_size(df, size):
    """Yield consecutive sub-dataframes of the given size.

    Arguments
    ---------
    df : pandas.DataFrame
        The dataframe for which to get sub-dataframes.
    size : int
        The size of each sub-dataframe (the last may be smaller).

    Returns
    -------
    generator
        A generator yielding consecutive sub-dataframes of the given size.
    """
    start = 0
    while start < len(df):
        yield df.iloc[start:start + size]
        start += size
def sub_dfs_by_num(df, num):
    """Yield ``num`` consecutive sub-dataframes of the given df.

    Arguments
    ---------
    df : pandas.DataFrame
        The dataframe for which to get sub-dataframes.
    num : int
        The number of sub-dataframes to divide the dataframe into.

    Returns
    -------
    generator
        A generator yielding ``num`` consecutive sub-dataframes.
    """
    chunk = len(df) / float(num)
    # Precompute the rounded boundaries, then slice between neighbours.
    bounds = [int(round(chunk * k)) for k in range(num + 1)]
    for lo, hi in zip(bounds, bounds[1:]):
        yield df.iloc[lo:hi]
def predict_encoding(file_path, n_lines=20):
    '''Get file encoding of a text file.

    Feeds the first ``n_lines`` raw lines to chardet and returns the
    detected encoding name (may be None when detection fails).
    '''
    import chardet
    # Open the file as binary data
    with open(file_path, 'rb') as f:
        # Join binary lines for specified number of lines
        rawdata = b''.join([f.readline() for _ in range(n_lines)])
    return chardet.detect(rawdata)['encoding']
def get_n_header(f, header_char='"'):
    '''Count the header rows in a Little Leonardo data file.

    Args
    ----
    f : file stream
        File handle positioned at the start of the file.
    header_char: str
        Character(s) at the beginning of each header line.

    Returns
    -------
    n_header: int
        Number of leading lines starting with ``header_char``.  Note:
        the stream is left positioned just past the first non-header
        line (same as the original behavior).
    '''
    n_header = 0
    while True:
        if f.readline().startswith(header_char):
            n_header += 1
        else:
            # First non-header (or EOF, where readline returns '').
            break
    return n_header
def get_tag_params(tag_model):
    '''Return the parameter (column) names for a tag model.

    Hyphens in the model name are ignored.  Raises KeyError for
    unknown models.
    '''
    key = tag_model.replace('-', '')
    params_by_model = {
        'W190PD3GT': ['Acceleration-X', 'Acceleration-Y', 'Acceleration-Z',
                      'Depth', 'Propeller', 'Temperature'],
    }
    try:
        return params_by_model[key]
    except KeyError:
        raise KeyError('{} not found in tag dictionary'.format(key))
def find_file(path_dir, search_str, file_ext):
    '''Find the path of a file in a directory whose name contains
    ``search_str`` and ends with ``file_ext``.

    Returns the first match (directory order).  Raises SystemError when
    nothing matches (kept for backward compatibility, though
    FileNotFoundError would be more idiomatic).
    '''
    import os
    for file_name in os.listdir(path_dir):
        if (search_str in file_name) and file_name.endswith(file_ext):
            return os.path.join(path_dir, file_name)
    # BUGFIX: replaced the `file_path == None` identity-style check with
    # an early return above; comparisons to None should use `is` anyway.
    raise SystemError('No file found containing string: '
                      '{}.'.format(search_str))
def nearest(items, pivot):
    '''Find the value in ``items`` nearest to ``pivot``.

    Works for any values supporting subtraction and abs(), including
    datetimes.

    Args
    ----
    items: iterable
        Values from which to find the nearest value to `pivot`.
    pivot: int, float or datetime
        Value to find the nearest of in `items`.

    Returns
    -------
    nearest
        The element of `items` with the smallest absolute distance
        to `pivot` (first one on ties).
    '''
    def distance(value):
        return abs(value - pivot)
    return min(items, key=distance)
def parse_experiment_params(name_exp):
    '''Parse experiment parameters from the data directory name.

    Args
    ----
    name_exp: str
        Name of the data directory, with underscore-separated fields:
        ``<date>_<tag-model>_<tag-id>_<animal>_<notes>``.

    Returns
    -------
    tag_params: dict of str
        Parsed experiment parameters; hyphens are stripped from the
        tag model.

    Raises
    ------
    ValueError
        If ``name_exp`` looks like a filesystem path.
    '''
    if ('/' in name_exp) or ('\\' in name_exp):
        raise ValueError("The path {} appears to be a path. Please pass "
                         "only the data directory's name (i.e. the "
                         "experiment name)".format(name_exp))
    fields = name_exp.split('_')
    return {
        'experiment': name_exp,
        'tag_model': fields[1].replace('-', ''),
        'tag_id': fields[2],
        'animal': fields[3],
        'notes': fields[4],
    }
def _get_cassandra_config(self):
    """Retrieve a dict containing Cassandra client config params.

    Reads CASSANDRA_URI from the environment (falling back to
    DEFAULT_URI), validates the scheme, and resolves the hostname to
    every A record so the driver can try each node as a contact point.

    Raises:
        RuntimeError: if the URI scheme is wrong or DNS yields no
            addresses.
    """
    parts = urlsplit(os.environ.get('CASSANDRA_URI', DEFAULT_URI))
    if parts.scheme != 'cassandra':
        raise RuntimeError(
            'CASSANDRA_URI scheme is not "cassandra://"!')
    # gethostbyname_ex returns (hostname, aliases, ip_addresses).
    _, _, ip_addresses = socket.gethostbyname_ex(parts.hostname)
    if not ip_addresses:
        raise RuntimeError('Unable to find Cassandra in DNS!')
    return {
        'contact_points': ip_addresses,
        'port': parts.port or DEFAULT_PORT,
    }
def prepare(self, query, name=None):
    """Create and cache a prepared statement for ``query``.

    The statement is cached class-wide under ``name`` when given,
    otherwise under the query text itself, so repeated calls avoid
    re-preparing the same statement.

    :pram str query: The query to prepare.
    :pram str name: (Optional) name to use as a key in the cache.
    """
    cache = CassandraConnection._prepared_statement_cache
    key = name or query
    if key not in cache:
        cache[key] = self._session.prepare(query)
    return cache[key]
def execute(self, query, *args, **kwargs):
    """Asynchronously execute the specified CQL query.

    Bridges the cassandra driver's future to a tornado ``Future``: the
    driver resolves its future on its own thread, so completion is
    marshalled back onto the IOLoop via ``self._callback``.

    Extra positional/keyword arguments (parameters, trace) are passed
    through to ``execute_async``; see cassandra-python documentation.

    Returns:
        tornado Future resolved with the query result or exception.
    """
    tornado_future = Future()
    cassandra_future = self._session.execute_async(query, *args, **kwargs)
    # Hop onto the IOLoop thread before wiring up the callback.
    self._ioloop.add_callback(
        self._callback, cassandra_future, tornado_future)
    return tornado_future
def read_linear2(fn, header=True):
    """Read a plink 2 glm.linear output file into a pandas DataFrame.

    Parameters
    ----------
    fn : str
        Path to the plink file. The file can be gzipped or not.
    header : bool or None
        True if the file has a header, False if not.  Pass None if it's
        unknown; the first line is then sniffed for a leading '#'.

    Returns
    -------
    res : pandas.DataFrame
        Results indexed by variant ID, with '#' stripped from the
        column names.
    """
    dtypes = {'#CHROM': str, 'POS': int, 'ID': str, 'REF': str,
              'ALT1': str, 'TEST': str, 'OBS_CT': int, 'BETA': float,
              'SE': float, 'T_STAT': float, 'P': float}
    cols = ['#CHROM', 'POS', 'ID', 'REF', 'ALT1', 'TEST', 'OBS_CT',
            'BETA', 'SE', 'T_STAT', 'P']
    if header is None:
        # BUGFIX: the original did `from gzip import open`, shadowing
        # the builtin, and read in binary mode — so `line[0] == '#'`
        # compared an int (a byte) to a str and was always False on
        # Python 3. Open in text mode ('rt') through either opener.
        opener = gzip.open if fn.endswith('.gz') else open
        with opener(fn, 'rt') as f:
            header = f.readline().startswith('#')
    if header:
        res = pd.read_table(fn, index_col=2, dtype=dtypes,
                            low_memory=False)
    else:
        res = pd.read_table(fn, index_col=2, dtype=dtypes, names=cols,
                            low_memory=False)
    res.columns = [x.replace('#', '') for x in res.columns]
    return res
def parse_log2(fn):
    """Parse sample/variant counts from a plink 2 log file.

    Only linear ('--glm standard-beta') and logistic
    ('--glm firth-fallback') regression logs are supported.

    Parameters
    ----------
    fn : str
        Path to the plink log file.

    Returns
    -------
    res : pandas.Series or None
        Counts (samples, females, males, [cases, controls,] variants);
        None when the log is empty, unsupported, or malformed.
    """
    with open(fn) as f:
        lines = f.readlines()
    if len(lines) == 0:
        sys.stderr.write('Empty log file: {}.\n'.format(fn))
        return None
    logtype = None
    # TODO: Eventually, look for other arguments that indicate which
    # plink analysis was run.
    if len([x for x in lines if '--glm standard-beta' in x]):
        logtype = 'linear'
    elif len([x for x in lines if '--glm firth-fallback' in x]):
        logtype = 'logistic'
    if logtype is None:
        # BUGFIX: the original returned *before* writing this warning,
        # leaving the message unreachable.
        sys.stderr.write('Log file not supported: {}.\n'.format(fn))
        return None
    try:
        # Counts live on the "... remaining after ..." lines, e.g.
        # "50 samples (20 females, 30 males) remaining after ..."
        lines = [x for x in lines if 'remaining after' in x]
        i = 0
        x = lines[i].split()
        samples = int(x[0])
        females = int(x[2][1:])  # strip the leading '(' from '(N'
        males = int(x[4])
        i += 1
        cases = np.nan
        controls = np.nan
        if logtype == 'logistic':
            x = lines[i].split()
            cases = int(x[0])
            controls = int(x[3])
            i += 1
        variants = int(lines[i].split()[0])
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt etc.
        # still propagate.
        sys.stderr.write('Error parsing log file: {}.\n'.format(fn))
        return None
    se = pd.Series([samples, females, males, cases, controls, variants],
                   index=['samples', 'females', 'males', 'cases',
                          'controls', 'variants']).dropna()
    return se
def x_y_by_col_lbl(df, y_col_lbl):
    """Return an X dataframe and a y series split by the given column.

    Parameters
    ----------
    df : pandas.DataFrame
        The dataframe to split.
    y_col_lbl : object
        The label of the y column.

    Returns
    -------
    X, y : pandas.DataFrame, pandas.Series
        All columns except the named one, and that column as a series.
    """
    y = df[y_col_lbl]
    keep = [col for col in df.columns if col != y_col_lbl]
    return df[keep], y
def x_y_by_col_lbl_inplace(df, y_col_lbl):
    """Break df into an X frame and a y series by the given column name.

    The original frame is returned — minus the y column — as the X
    frame, so no new dataframes are created.

    Parameters
    ----------
    df : pandas.DataFrame
        The dataframe to split (mutated in place).
    y_col_lbl : object
        The label of the y column.

    Returns
    -------
    X, y : pandas.DataFrame, pandas.Series
        The (mutated) input frame and the extracted column.
    """
    y = df[y_col_lbl]
    # Dropping by columns= is equivalent to labels=..., axis=1.
    df.drop(columns=y_col_lbl, inplace=True)
    return df, y
def or_by_masks(df, masks):
    """Return a sub-dataframe selected by the logical OR of the masks.

    Parameters
    ----------
    df : pandas.DataFrame
        The dataframe to take a subframe of.
    masks : list
        pandas.Series of dtype bool, indexed identically to ``df``.
        An empty list selects the whole frame.

    Returns
    -------
    pandas.DataFrame
        Rows where at least one mask is True.
    """
    if not masks:
        return df
    combined = masks[0]
    for mask in masks[1:]:
        combined = combined | mask
    return df[combined]
def visualize(x, y, xlabel=None, ylabel=None, title=None, ylim=None):
    """A universal function to plot arbitrary time series data.

    Dispatches to a resolution-appropriate plotting helper based on the
    total time span of ``x`` (a sequence of datetimes, oldest first).

    NOTE(review): spans longer than ~1.5 years fall through every
    branch and return None — confirm whether that is intended.
    """
    total_seconds = (x[-1] - x[0]).total_seconds()
    # Thresholds are span * fudge-factor, so e.g. up to ~2 weeks still
    # gets the "one week" rendering.
    if total_seconds <= 86400 * 1 * 3:
        return plot_one_day(x, y, xlabel, ylabel, title, ylim)
    elif total_seconds <= 86400 * 7 * 2:
        return plot_one_week(x, y, xlabel, ylabel, title, ylim)
    elif total_seconds <= 86400 * 30 * 1.5:
        return plot_one_month(x, y, xlabel, ylabel, title, ylim)
    elif total_seconds <= 86400 * 90 * 1.5:
        return plot_one_quarter(x, y, xlabel, ylabel, title, ylim)
    elif total_seconds <= 86400 * 365 * 1.5:
        return plot_one_year(x, y, xlabel, ylabel, title, ylim)
def _get_demand_graph(self):
    """Create the demand graph for the min-cost-flow assignment.

    Builds a directed graph with one node per target (demand -1) and
    one node per origin (demand = the origin's head-count in the
    nearest-neighbour solution), plus a node-id -> label dictionary.

    Returns:
        tuple: (dict_M mapping node id -> label array, demand dict)
    """
    # The number of clusters
    K = self.origins.shape[0]
    # NOTE(review): K is computed but unused here — confirm whether it
    # can be dropped.
    # Set the number of accounts in each cluster to be the same
    # as for the nearest neighbor solution
    demand = self.nearest_targets.groupby('origin_id')['geometry'].count().to_dict()
    # Set up the graph so we can extract and initialize the node labels.
    # For each iteration, we're going to sort all our data by their origin
    # label assignments in order to properly index our nodes.
    self.targets = self.targets.sort_values('labels').reset_index(drop=True)
    # Add target nodes (each consumes one unit of flow)
    g = nx.DiGraph()
    g.add_nodes_from(self.targets['target_id'], demand=-1)
    # Add origin nodes (each supplies its nearest-neighbour head-count)
    for idx in demand:
        g.add_node(int(idx), demand=demand[idx])
    # Dictionary of labels (corresponding to the sales rep) for
    # each med center node; origin nodes fall back to their demand value.
    dict_M = {
        i: (
            self.targets[self.targets['target_id'] == i]['labels'].values
            if i in self.targets.target_id.values
            else np.array([demand[i]])
        )
        for i in g.nodes
    }
    logging.info('Graph and demand dictionary created')
    return dict_M, demand
def results_to_table(self):
    """Process self.results and write them to a CARTO table.

    Builds target-point dataframes carrying the baseline
    (nearest-neighbour) and model (min-cost-flow) origin assignments,
    and writes the model version to a timestamped table.

    Returns:
        str: name of the table written to CARTO.
    """
    # Get labels for both solutions.
    baseline_labels = self.nearest_targets['origin_id'].values
    mcf_labels = self.results['model_labels']['labels'].values
    # Baseline (nearest-neighbour) outcome.
    # NOTE(review): this frame is built but never written anywhere —
    # confirm whether it should also be exported.
    outcome = pd.DataFrame({
        'the_geom': [
            'SRID=4326;Point({lng} {lat})'.format(lng=v[0], lat=v[1])
            for v in zip(self.results['model_labels']['lng'].values,
                         self.results['model_labels']['lat'].values)],
        'target_lng': self.results['model_labels']['lng'].values,
        # BUGFIX: this key was 'target_lng' twice; the duplicate entry
        # (holding latitudes) silently overwrote the longitudes.
        'target_lat': self.results['model_labels']['lat'].values,
        'origin_lng': self.origins.reindex(baseline_labels)['lng'].values,
        'origin_lat': self.origins.reindex(baseline_labels)['lat'].values,
        'target_id': self.results['model_labels'].target_id,
        'sales': self.results['model_labels'][self.demand_col].values,
        'labels': baseline_labels
    })
    # Model (min-cost-flow) outcome — this is what gets written.
    outcomes2 = pd.DataFrame({
        'the_geom': [
            'SRID=4326;Point({lng} {lat})'.format(lng=v[0], lat=v[1])
            for v in zip(
                self.results['model_labels']['lng'].values,
                self.results['model_labels']['lat'].values
            )
        ],
        'target_lng': self.results['model_labels']['lng'].values,
        'target_lat': self.results['model_labels']['lat'].values,
        'origin_lng': self.origins.reindex(mcf_labels)['lng'].values,
        'origin_lat': self.origins.reindex(mcf_labels)['lat'].values,
        'target_id': self.results['model_labels'].target_id,
        'sales': self.results['model_labels'][self.demand_col].values,
        'labels': mcf_labels
    }, index=self.results['model_labels'].target_id)
    now = datetime.datetime.now()
    out_table = 'mincostflow_{}'.format(now.strftime("%Y_%m_%d_%H_%M_%S"))
    logging.info('Writing output to {}'.format(out_table))
    self.context.write(outcomes2.reset_index(drop=True), out_table)
    logging.info('Table {} written to CARTO'.format(out_table))
    return out_table
def _get_model_meta_options(self) -> List[MetaOption]:
    """Define fields allowed in the Meta class on end-user models, and
    the behavior of each.

    Custom ModelMetaOptions classes should override this method to
    customize the options supported on class Meta of end-user models.

    Returns:
        Ordered list of MetaOption instances; order matters where noted.
    """
    # we can't use current_app to determine if we're under test, because it
    # doesn't exist yet
    testing_options = ([] if os.getenv('FLASK_ENV', False) != TEST
                       else [_TestingMetaOption()])
    # when options require another option, its dependent must be listed.
    # options in this list are not order-dependent, except where noted.
    # all ColumnMetaOptions subclasses require PolymorphicMetaOption
    return testing_options + [
        AbstractMetaOption(),  # required; must be first
        LazyMappedMetaOption(),
        RelationshipsMetaOption(),  # requires lazy_mapped
        TableMetaOption(),
        MaterializedViewForMetaOption(),
        PolymorphicMetaOption(),  # must be first of all polymorphic options
        PolymorphicOnColumnMetaOption(),
        PolymorphicIdentityMetaOption(),
        PolymorphicBaseTablenameMetaOption(),
        PolymorphicJoinedPkColumnMetaOption(),  # requires _BaseTablename
        # must be after PolymorphicJoinedPkColumnMetaOption
        PrimaryKeyColumnMetaOption(),
        CreatedAtColumnMetaOption(),
        UpdatedAtColumnMetaOption(),
    ]
def CreateGallery():
    """Create a Gallery on the server.

    POSTs to the min.us CreateGallery endpoint and wraps the returned
    ids in a Gallery object carrying both the editor_id and reader_id.
    """
    url = 'http://min.us/api/CreateGallery'
    response = _dopost(url)
    _editor_id = response["editor_id"]
    _reader_id = response["reader_id"]
    return Gallery(_reader_id, editor_id=_editor_id)
def logged_user(request):
    """Return a command that retrieves the current logged user based on
    the secure cookie.

    If there is no logged user, the result from the returned command
    is None.
    """
    dct = cookie_facade.retrive_cookie_data(request, USER_COOKIE_NAME).execute().result
    if dct is None:
        # No cookie: an empty Command whose result stays None.
        return Command()
    return NodeSearch(dct['id'])
def find_users_by_email_starting_with(email_prefix=None, cursor=None, page_size=30):
    """Return a command that retrieves users by email prefix, ordered by
    email.

    At most ``page_size`` users are returned.  The next page can be
    fetched by passing the command's ``cursor`` attribute to a later
    call.
    """
    # A missing prefix matches every email.
    email_prefix = email_prefix or ''
    return ModelSearchCommand(MainUser.query_email_starts_with(email_prefix),
                              page_size, cursor, cache_begin=None)
def find_users_by_email_and_group(email_prefix=None, group=None, cursor=None, page_size=30):
    """Return a command that retrieves users by email prefix and group,
    ordered by email.

    If ``group`` is None, only users without any group are searched.
    At most ``page_size`` users are returned; the next page can be
    fetched by passing the command's ``cursor`` attribute to a later
    call.
    """
    # A missing prefix matches every email.
    email_prefix = email_prefix or ''
    return ModelSearchCommand(MainUser.query_email_and_group(email_prefix, group),
                              page_size, cursor, cache_begin=None)
def json_error(code, message):
    """Return a JSON-ified error body together with an HTTP status code."""
    # Message can be an unserializable object.
    message = repr(message)
    return jsonify(dict(request=request.path, message=message)), code
def error(code, message, template):
    """A generic error handler.

    Serves JSON when the client asked for it, otherwise renders the
    given HTML template with the message.
    """
    if json_requested():
        return json_error(code, message)
    else:
        return render_template(template, message=message), code
def straight_line_show(title, length=100, linestyle="=", pad=0):
    """Print a formatted straight line with ``title`` embedded.

    Delegates formatting to StrTemplate.straight_line.
    """
    print(StrTemplate.straight_line(
        title=title, length=length, linestyle=linestyle, pad=pad))
def indented_show(text, howmany=1):
    """Print ``text`` indented ``howmany`` levels.

    Delegates formatting to StrTemplate.pad_indent.
    """
    print(StrTemplate.pad_indent(text=text, howmany=howmany))
def box_show(text, width=100, height=3, corner="+", horizontal="-", vertical="|"):
    """Print ``text`` inside a formatted ascii box.

    Delegates formatting to StrTemplate.box; the corner/horizontal/
    vertical characters control the frame drawing.
    """
    print(StrTemplate.box(text=text, width=width, height=height,
                          corner=corner, horizontal=horizontal,
                          vertical=vertical))
def increment(d, key, val=1):
    """Add ``val`` to ``d[key]``, initialising the key to ``val`` when
    absent.

    ``d`` is mutated in place, so nothing is returned.
    """
    try:
        d[key] += val
    except KeyError:
        d[key] = val
def floor_nearest(x, dx=1):
    """Floor ``x`` down to the nearest multiple of ``dx``.

    The result is rounded to the significant-digit precision of ``dx``
    to suppress floating point noise.
    """
    precision = get_sig_digits(dx)
    return round(math.floor(float(x) / dx) * dx, precision)
def ceil_nearest(x, dx=1):
    """Ceil ``x`` up to the nearest multiple of ``dx``.

    The result is rounded to the significant-digit precision of ``dx``
    to suppress floating point noise.
    """
    precision = get_sig_digits(dx)
    return round(math.ceil(float(x) / dx) * dx, precision)
def frange(x, y, jump=1):
    """range() for floats: yield x, x+jump, ... while < y.

    Yielded values are rounded to the precision of ``jump`` to hide
    accumulated floating point error (the running total itself is not
    rounded, so long ranges can still drift slightly).
    """
    precision = get_sig_digits(jump)
    while x < y:
        yield round(x, precision)
        x += jump
def discard(self, key):
    """Remove ``key`` from the set if it is a member; no-op otherwise.

    The set is backed by ``self.map[key] = [key, prev, next]`` nodes of
    a doubly linked list; removal pops the mapping and splices the
    neighbouring nodes together, so iteration order is preserved.
    (Translated from the original Chinese docstring: "remove an element
    from the ordered set while keeping the set ordered".)
    """
    if key in self.map:
        key, prev, next_item = self.map.pop(key)
        # Unlink the node: prev.next (slot 2) and next.prev (slot 1).
        prev[2] = next_item
        next_item[1] = prev
def intersection(*argv):
    """Return the intersection of multiple ordered sets.

    The result preserves the ordering of the first argument.
    """
    res = OrderedSet(argv[0])
    # PERF: start from argv[1] — the original also intersected res with
    # argv[0] itself, a no-op costing one full extra pass.
    for ods in argv[1:]:
        res = ods & res
    return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def do(self):
    """Reassemble the stored URL components into a complete URL string."""
    parts = (self.scheme, self.netloc, self.path,
             self.params, self.query, self.fragment)
    return urlunparse(parts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def do(self):
    """Build a new URL by joining ``self.path``/``self.query`` onto ``self.url``."""
    # Break the base URL into its six components.
    scheme, netloc, path, params, query, fragment = Split(self.url).do()
    # Merge extra query parameters (given as a dict) onto any existing query.
    # NOTE(review): uses Python 2's ``urllib.urlencode``; under Python 3 this
    # lives at ``urllib.parse.urlencode`` -- confirm the target interpreter.
    if isinstance(self.query, dict):
        query = query + "&" + urllib.urlencode(self.query) if query else urllib.urlencode(self.query)
    # Resolve the relative path against the base, normalising backslashes.
    path = urljoin(path, self.path).replace('\\', '/') if self.path else path
    return Splice(scheme=scheme, netloc=netloc, path=path, params=params, query=query, fragment=fragment).geturl
def changelist_view(self, request, extra_context=None):
    """Extend the admin changelist context with tracking-report settings.

    Adds the reversible URL name of this model's tracking-report view plus
    the period and report option lists so the template can render links.
    """
    return super(TrackedLiveAdmin, self).changelist_view(
        request,
        dict(extra_context or {},
             # URL name of the per-model tracking report view.
             url_name='admin:%s_%s_tracking_report' % (self.model._meta.app_label,
                                                       self.model._meta.model_name),
             period_options=self.get_period_options(),
             report_options=self.get_report_options())
    )
def render_report_error(self, request, error, status):
    """Render the tracking-report error template with the given HTTP status.

    Template lookup falls back from most to least specific (model, app,
    global) unless ``self.report_error_template`` overrides the list.
    """
    opts = self.model._meta
    app_label = opts.app_label
    request.current_app = self.admin_site.name
    context = dict(
        self.admin_site.each_context(request),
        module_name=force_text(opts.verbose_name_plural),
        title=(_('Tracking report error for %s') % force_text(opts.verbose_name)),
        opts=opts,
        app_label=app_label,
        error=error
    )
    return TemplateResponse(request, self.report_error_template or [
        "admin/{}/{}/tracking_report_error.html".format(app_label, opts.model_name),
        "admin/{}/tracking_report_error.html".format(app_label),
        "admin/tracking_report_error.html"
    ], context, status=status)
def report_view(self, request, key, period):
    """Validate and dispatch a tracking-report request.

    Checks change permission, resolves the reporter by *key* and the period
    code, then delegates to the reporter; unexpected failures are logged and
    rendered as a 500 error page.
    """
    if not self.has_change_permission(request, None):
        raise PermissionDenied
    reporters = self.get_reporters()
    try:
        reporter = reporters[key]
    except KeyError:
        return self.render_report_error(request, _('Report not found'), 404)
    allowed_periods = [k for (k, v) in self.get_period_options()]
    # 'A' means "all time": no period filtering.
    if period == 'A':
        period = ''
    if period and period not in allowed_periods:
        return self.render_report_error(request, _('Invalid report type'), 400)
    try:
        return reporter.process(request, self.get_period_queryset(request, period), period)
    except Exception:  # was a bare ``except:`` -- don't swallow SystemExit/KeyboardInterrupt
        logger.exception('Tracking Reports could not generate the report due to an internal error')
        return self.render_report_error(request, _('An unexpected error has occurred'), 500)
def find_record(self, model_class, record_id, reload=False):
    """Return record *record_id* of *model_class*, preferring the cache.

    When ``reload`` is set, or the record is not cached, fetch it from the
    API instead.  Returns ``None`` when neither source has it.
    """
    cached = self.peek_record(model_class, record_id)
    if reload is False and cached is not None:
        return cached
    return self._get_record(model_class, record_id)
def peek_record(self, model_class, record_id):
    """Return the cached instance of *model_class* with *record_id*, or ``None``.

    Never touches the API; a missing/disabled cache yields ``None``.
    """
    if not self._cache:
        return None
    return self._cache.get_record(model_class.__name__, record_id)
def find_all(self, model_class, params={}):
    """Fetch all records of *model_class* from the API, caching each one.

    Args:
        model_class: subclass of CinderModel to instantiate per item.
        params (dict, optional): query parameters appended to the URL.
            NOTE(review): mutable default argument -- safe only because it
            is never mutated here.

    Returns:
        list: validated model instances (possibly empty).
    """
    url = '{host}/{namespace}/{model}{params}'.format(
        host=self._host,
        namespace=self._namespace,
        model=self._translate_name(model_class.__name__),
        params=self._build_param_string(params)
    )
    data = self._get_json(url)['data']
    fresh_models = []
    for item in data:
        # Build, tag with the server id, and validate each record.
        fresh_model = model_class(item['attributes'])
        fresh_model.id = item['id']
        fresh_model.validate()
        fresh_models.append(fresh_model)
        if self._cache is not None:
            self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model)
    return fresh_models
def peek_all(self, model_class):
    """Return every cached instance of *model_class* (no API access).

    Returns an empty list when no cache is configured.
    """
    cache = self._cache
    return cache.get_records(model_class.__name__) if cache else []
def _get_record(self, model_class, record_id):
    """Fetch one record from the API, validate it, and cache it.

    Args:
        model_class: subclass of CinderModel to instantiate.
        record_id (int): id of the requested record.

    Returns:
        An instance of *model_class* populated from the API response.
    """
    url = '{host}/{namespace}/{model}/{id}'.format(
        host=self._host,
        namespace=self._namespace,
        model=self._translate_name(model_class.__name__),
        id=record_id
    )
    data = self._get_json(url)['data']
    fresh_model = model_class(data['attributes'])
    fresh_model.id = data['id']
    fresh_model.validate()
    # Cache the validated model when a cache backend is configured.
    if self._cache is not None:
        self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model)
    return fresh_model
def _translate_name(name):
    """Convert a CamelCase model name to a dasherised, pluralised endpoint.

    e.g. ``Car`` -> ``cars``, ``FastCar`` -> ``fast-cars``.
    """
    dashed = inflection.dasherize(inflection.underscore(name))
    *head, tail = dashed.split('-')
    head.append(inflection.pluralize(tail))
    return '-'.join(head)
def _build_param_string(params):
    """Serialise *params* into a URL query string.

    ``None`` values become empty strings.  Returns ``''`` for an empty dict,
    otherwise a string beginning with ``'?'``.
    """
    # ``dict.iteritems()`` is Python-2-only; ``items()`` works on 2 and 3.
    pairs = ['{0}={1}'.format(key, '' if value is None else value)
             for key, value in params.items()]
    return '?{0}'.format('&'.join(pairs)) if pairs else ''
def send(self, message):
    """JSON-encode *message* and send it over the backend websocket.

    :param message: message to send, usually a Python dictionary.
    Raises:
        SelenolWebSocketClosedException: if the underlying socket is closed.
    """
    try:
        self.ws.send(json.dumps(message))
    except websocket._exceptions.WebSocketConnectionClosedException as ex:
        # Chain the original error, matching recv()'s behaviour.
        raise SelenolWebSocketClosedException() from ex
def recv(self):
    """Block until the backend sends a message; return it decoded from JSON.

    Raises:
        SelenolWebSocketClosedException: if the underlying socket is closed.
    """
    try:
        raw = self.ws.recv()
    except websocket._exceptions.WebSocketConnectionClosedException as ex:
        raise SelenolWebSocketClosedException() from ex
    return json.loads(raw)
def handle(self, *args, **options):
    """Create a new app skeleton under ``<BASE_DIR>/apps/<name>``.

    Prints a success hint reminding the user to register ``apps.<name>`` in
    INSTALLED_APPS; raises CommandError when the app directory already exists.
    """
    quickstart = Quickstart()
    try:
        quickstart.create_app(os.path.join(settings.BASE_DIR, 'apps'), options.get('name'))
        self.stdout.write(
            self.style.SUCCESS("Successfully created app ({name}), don't forget to add 'apps.{name}' to INSTALLED_APPS".format(
                name=options.get('name')
            ))
        )
    except FileExistsError as e:
        print(e)
        raise CommandError("App with same name already exists")
def make_client(host, project_name, api_key, create_project):
    """Instantiate a grano client and return the requested project.

    All three connection settings must be present (``click.BadParameter``
    otherwise).  A missing project is created when *create_project* is true;
    otherwise the process exits with status -1.
    """
    if host is None:
        raise click.BadParameter('No grano server host is set', param=host)
    if project_name is None:
        raise click.BadParameter('No grano project slug is set', param=project_name)
    if api_key is None:
        raise click.BadParameter('No grano API key is set', param=api_key)
    client = Grano(api_host=host, api_key=api_key)
    try:
        return client.get(project_name)
    except NotFound:
        if not create_project:
            sys.exit(-1)
        data = {'slug': project_name, 'label': project_name}
        return client.projects.create(data)
def csv(ctx, force, threads, mapping, data):
    """Load CSV data into a grano instance using a mapping specification."""
    # Count the rows first so the progress bar has a total length.
    lines = 0
    for line in DictReader(data):
        lines += 1
    data.seek(0)
    # Set up the mapping loader from the YAML mapping spec.
    mapping = yaml.load(mapping)
    mapping_loader = MappingLoader(ctx.obj['grano'], mapping)

    def process_row(row):
        # Load one row; on error either warn-and-continue (--force) or abort
        # the whole process immediately.
        try:
            mapping_loader.load(row)
        except GranoException as ge:  # was Python-2-only ``except X, e`` syntax
            msg = '\nServer error: %s' % ge.message
            click.secho(msg, fg='red', bold=True)
            if not force:
                os._exit(1)
        except RowException as re:
            if not force:
                msg = '\nRow %s: %s' % (row['__row_id__'], re.message)
                click.secho(msg, fg='red', bold=True)
                os._exit(1)

    def generate():
        # Feed rows through the progress bar, tagging each with its index.
        with click.progressbar(DictReader(data), label=data.name,
                               length=lines) as bar:
            for i, row in enumerate(bar):
                row['__row_id__'] = i
                yield row

    threaded(generate(), process_row, num_threads=threads, max_queue=1)
def schema(ctx, schema):
    """Load schema definitions from a YAML file and upsert them into grano."""
    # NOTE(review): ``yaml.load`` without an explicit Loader can execute
    # arbitrary Python tags; consider ``yaml.safe_load`` if schema files may
    # come from untrusted sources.
    data = yaml.load(schema)
    # A single mapping is treated as a one-element list of schemata.
    if not isinstance(data, (list, tuple)):
        data = [data]
    with click.progressbar(data, label=schema.name) as bar:
        for schema in bar:
            ctx.obj['grano'].schemata.upsert(schema)
def json_using_iso8601(__obj: Dict) -> Dict:
    """Parse ISO-8601 values from JSON databases.

    See :class:`json.JSONDecoder`.  Mutates and returns *__obj*.

    Args:
        __obj: Object to decode
    """
    for key, value in __obj.items():
        # Try datetimes first; failures are ignored and leave the value as-is.
        with suppress(TypeError, ValueError):
            __obj[key] = parse_datetime(value)
        # Then durations; note this re-parses the *original* value, so a
        # string matching both formats ends up as a delta.
        with suppress(TypeError, ValueError):
            __obj[key] = parse_delta(value)
    return __obj
def get_tmpdir():
    """Create (once) and return a temporary directory for this glerbl run.

    The directory is created lazily on the first call, reused by later
    calls, and cleaned up at interpreter exit.

    :returns: path of the temporary directory.
    :rtype: :class:`str`
    """
    global __tmpdir
    if __tmpdir is None:
        __tmpdir = tempfile.mkdtemp(prefix='.tmp.glerbl.', dir=".")
        atexit.register(__clean_tmpdir)
    return __tmpdir
def get_against():
    """Return the git revision staged changes should be diffed against.

    ``HEAD`` when it exists, otherwise git's well-known empty-tree object id
    (initial commit).  The result is memoised in ``__cached_against``.

    :returns: The revision.
    :rtype: :class:`str`
    """
    global __cached_against
    if __cached_against is not None:
        return __cached_against
    # Discard all output. Previously this opened os.devnull without ever
    # closing it, leaking a file handle per call; DEVNULL avoids that.
    status = subprocess.call(["git", "rev-parse", "--verify", "HEAD"],
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.STDOUT)
    if not status:
        against = 'HEAD'
    else:
        # Initial commit: diff against git's empty tree object.
        against = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'
    __cached_against = against
    return against
def entry_point(__func: Callable) -> Callable:
    """Execute *__func* when its module is run directly.

    Modules that merely import the decorated function get it back unchanged;
    only a ``__main__`` definition triggers execution and ``sys.exit``.
    """
    if __func.__module__ != '__main__':
        return __func
    import sys
    sys.exit(__func())
def create_col_nums():
    """Return spreadsheet-style column choices up to two letters.

    Produces ``(number, "number (LETTERS)")`` tuples for A..Z then AA..ZZ
    (26 + 26*26 = 702 entries in total).

    Returns:
        tuple: e.g. ``((1, '1 (A)'), ..., (27, '27 (AA)'), ...)``
    """
    NUM_REPEATS = 2
    single = list(string.ascii_uppercase)
    # ``map`` returns an iterator on Python 3, so it must be materialised
    # before concatenation (the original ``list + map`` raises TypeError).
    double = [''.join(p) for p in itertools.product(string.ascii_uppercase,
                                                    repeat=NUM_REPEATS)]
    column_letters = single + double
    return tuple((num, str(num) + ' (' + letters + ')')
                 for num, letters in enumerate(column_letters, start=1))
def add_comment(self, body, allow_create=False, allow_hashes=False, summary=None):
    """Push a comment onto this thread's Redis list.

    Implements the interface required by comments.CommentThread.

    :param body: comment text.
    :param allow_create: create the thread when it does not exist yet.
    :param allow_hashes: accepted for interface compatibility; unused here.
    :param summary: optional short summary stored with the comment.
    :returns: dict with the redis ``lpush``/``ltrim`` results.
    """
    thread_id = self.lookup_thread_id()
    if not allow_create and not self.redis.exists(thread_id):
        raise ValueError('Tried to add comment to non-exist thread %s' % (
            thread_id))
    # Timestamps are stored timezone-aware in UTC.
    comment = comments.SingleComment(
        self.user, datetime.datetime.now(datetime.timezone.utc), body,
        summary=summary)
    lpush = self.redis.lpush(thread_id, comment.to_json())
    logging.debug('Pushing comment to redis returned %s', str(lpush))
    if self.ltrim:
        # Bound the list length so the oldest comments are discarded.
        ltrim = self.redis.ltrim(thread_id, 0, self.ltrim)
        logging.debug('Redis ltrim returend %s', str(ltrim))
    else:
        ltrim = None
    return {'status': 'OK', 'lpush': lpush, 'ltrim': ltrim}
def submit_task(rel_path, cache_string, buffer):
    """Queue an upload job, (re)starting the uploader thread when needed."""
    global upload_queue
    global upload_thread
    upload_queue.put((rel_path, cache_string, buffer))
    # Lazily start the worker; also restart it if a previous worker died.
    if upload_thread is None or not upload_thread.is_alive():
        upload_thread = UploaderThread()
        upload_thread.start()
def get_verse(self, v=1):
    """Return verse number *v* (1-based), or ``None`` when out of range."""
    index = v - 1
    if index < len(self.verses):
        return self.verses[index]
def get_line(self, line=1):
    """Return the *line*-th line of the poem (1-based).

    Lines are numbered across verses with one implicit blank separator line
    between consecutive verses; separator or out-of-range positions yield ``''``.
    """
    if line <= 1:
        return self.verses[0][0]
    # First verse's length (+1 for the blank separator) fixes the stride.
    stride = len(self.get_verse()) + 1
    verse_idx, offset = divmod(line - 1, stride)
    try:
        return self.verses[verse_idx][offset]
    except IndexError:
        return ''
def print_poem(self):
    """Print every verse, separating consecutive verses with a blank line."""
    last = len(self.verses) - 1
    for idx, verse in enumerate(self.verses):
        for text in verse:
            print(text)
        if idx != last:
            print('')
def get_mods(package):
    """List loadable migration modules in *package*'s directory.

    Scans for files matching ``PYMOD_RE`` (name plus numeric major/minor
    prefix) and returns de-duplicated tuples sorted by version.

    :param package: package object
    :returns: sorted list of ``(filename, major, minor)`` tuples
    """
    pkgdir = package.__path__[0]
    found = set()
    for entry in os.listdir(pkgdir):
        match = PYMOD_RE.match(entry)
        if match:
            groups = match.groups()
            found.add((groups[0], int(groups[1]), int(groups[2])))
    return sorted(found, key=lambda item: (item[1], item[2]))
def get_new(modules, min_major_version, min_minor_version):
    """Yield module tuples whose version is at least the given minimum.

    :param modules: iterable of ``(name, major, minor)`` tuples
    :param min_major_version: minimum major version
    :param min_minor_version: minimum minor version (applies within the
        minimum major version only)
    """
    for entry in modules:
        _, major, minor = entry
        newer_major = major > min_major_version
        same_major_ok = (major == min_major_version
                         and minor >= min_minor_version)
        if newer_major or same_major_ok:
            yield entry
def load_mod(module, package):
    """Return module *module* from *package*, importing it if necessary.

    Already-imported modules are returned from ``sys.modules`` directly.
    Raises ``ImportError`` when the module cannot be found.

    :param module: name of the module to load
    :param package: package object
    :returns: module object
    """
    qualified = '{0}.{1}'.format(package.__name__, module)
    if qualified in sys.modules:
        return sys.modules[qualified]
    return importlib.import_module(qualified, package=package.__name__)
def unpack_version(version):
    """Split a packed version integer into ``(major, minor)``.

    Uses integer floor division: the original ``/`` produced a *float*
    major component under Python 3.
    """
    major_version, minor_version = divmod(version, VERSION_MULTIPLIER)
    return (major_version, minor_version)
def set_version(db, name, major_version, minor_version):
    """Persist the migration version for *name* in the database.

    :param db: connetion object
    :param name: associated name
    :param major_version: integer major version of migration
    :param minor_version: integer minor version of migration
    """
    # Major/minor are packed into a single integer for storage.
    version = pack_version(major_version, minor_version)
    db.execute(SET_VERSION_SQL, dict(name=name, version=version))
def run_migration(name, major_version, minor_version, db, mod, conf={}):
    """Apply one migration module and record its version, atomically.

    :param name: migration set name
    :param major_version: major version number of the migration
    :param minor_version: minor version number of the migration
    :param db: database connection object
    :param mod: migration module exposing an ``up(db, conf)`` callable
    :param conf: application configuration (if any)
    """
    # Running the script and bumping the version share one transaction so a
    # failed migration leaves the recorded version untouched.
    with db.transaction():
        mod.up(db, conf)
        set_version(db, name, major_version, minor_version)
def migrate(db, name, package, conf={}):
    """Run all migrations that have not been run yet, each in a transaction.

    :param db: database connection object
    :param name: name associated with the migrations
    :param package: dotted path of the package containing the migrations
    :param conf: application configuration object
    """
    (current_major_version, current_minor_version) = get_version(db, name)
    package = importlib.import_module(package)
    logging.debug('Migration version for %s is %s.%s', package.__name__,
                  current_major_version, current_minor_version)
    mods = get_mods(package)
    # Resume from the *next* minor version after the one already applied.
    migrations = get_new(mods, current_major_version, current_minor_version + 1)
    for (modname, major_version, minor_version) in migrations:
        mod = load_mod(modname, package)
        run_migration(name, major_version, minor_version, db, mod, conf)
        logging.debug("Finished migrating to %s", modname)
def html2vtml(vtmarkup):
    """ Convert hypertext markup into vt markup.

    The output can be given to `vtmlrender` for converstion to VT100
    sequences. """
    try:
        htmlconv.feed(vtmarkup)
        htmlconv.close()
        return htmlconv.getvalue()
    finally:
        # ``htmlconv`` is a shared module-level parser: always reset it so a
        # failed conversion cannot poison the next call.
        htmlconv.reset()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def render(self, scope=None, local_vars=None, block=None):
    """Render the template in *scope* with the given locals.

    A fresh anonymous scope object is created when none (or a falsy one) is
    supplied.  When a block is given, it is typically reachable inside the
    template via +yield+.
    """
    if not scope:
        scope = type('Scope', (object,), {})()
    return self.evaluate(scope, local_vars or {}, block)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def basename(self, suffix=''):
    """Return the basename of the template file, or ``None`` if in-memory.

    Args:
        suffix: optional suffix (e.g. ``'.erb'``) stripped from the end of
            the name, mirroring Ruby's ``File.basename(file, suffix)``.
    """
    if not self._file:
        return None
    # ``os.path.basename`` takes a single argument; the previous code passed
    # ``suffix`` to it directly, which raises TypeError.
    name = os.path.basename(self._file)
    if suffix and name.endswith(suffix):
        name = name[:-len(suffix)]
    return name
def distinct(self):
    """Return a copy of this query set that only yields distinct rows."""
    clone = self.clone()
    clone.query.distinct = True
    return clone
def _process_json(json_data):
    """Convert raw degree-request JSON items into GradDegree objects.

    :returns: a list of GradDegree objects, one per input item.
    """
    requests = []
    for item in json_data:
        degree = GradDegree()
        degree.degree_title = item["degreeTitle"]
        degree.exam_place = item["examPlace"]
        degree.exam_date = parse_datetime(item.get("examDate"))
        degree.req_type = item["requestType"]
        degree.major_full_name = item["majorFullName"]
        degree.submit_date = parse_datetime(item.get("requestSubmitDate"))
        degree.decision_date = parse_datetime(item.get('decisionDate'))
        degree.status = item["status"]
        degree.target_award_year = item["targetAwardYear"]
        # Quarter is optional; normalise to lower case when present/non-empty.
        if item.get("targetAwardQuarter") and \
                len(item.get("targetAwardQuarter")):
            degree.target_award_quarter = item["targetAwardQuarter"].lower()
        requests.append(degree)
    return requests
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read_meta(path_dir, tag_model, tag_id):
    '''Read meta data from Little Leonardo data header rows

    Args
    ----
    path_dir: str
        Parent directory containing lleo data files
    tag_model: str
        Little Leonardo tag model name
    tag_id: str, int
        Little Leonardo tag ID number
        NOTE(review): accepted but never used in this function -- confirm intent.

    Returns
    -------
    meta: dict
        dictionary with meta data from header lines of lleo data files
    '''
    from collections import OrderedDict
    import os
    import yamlord
    from . import utils

    def _parse_meta_line(line):
        '''Return key, value pair parsed from data header line'''
        # Parse the key and its value from the line
        key, val = line.replace(':', '').replace('"', '').split(',')
        return key.strip(), val.strip()

    def _read_meta_all(f, meta, n_header):
        '''Read all meta data from header rows of data file'''
        # Skip 'File name' line
        f.seek(0)
        _ = f.readline()
        # Create child dictionary for channel / file
        line = f.readline()
        key_ch, val_ch = _parse_meta_line(line)
        val_ch = utils.posix_string(val_ch)
        meta['parameters'][val_ch] = OrderedDict()
        # Write header values to channel dict
        for _ in range(n_header-2):
            line = f.readline()
            key, val = _parse_meta_line(line)
            meta['parameters'][val_ch][key] = val.strip()
        return meta

    def _create_meta(path_dir, tag_model, tag_id):
        '''Create meta data dictionary'''
        import datetime
        from . import utils
        param_strs = utils.get_tag_params(tag_model)
        # Create dictionary of meta data
        meta = OrderedDict()
        # Create fields for the parameters in data directory name
        exp_name = os.path.split(path_dir)[1]
        params_tag = utils.parse_experiment_params(exp_name)
        for key, value in params_tag.items():
            meta[key] = value
        fmt = "%Y-%m-%d %H:%M:%S"
        meta['date_modified'] = datetime.datetime.now().strftime(fmt)
        meta['parameters'] = OrderedDict()
        # One meta entry per parameter data file found in the directory.
        for param_str in param_strs:
            print('Create meta entry for {}'.format(param_str))
            path_file = utils.find_file(path_dir, param_str, '.TXT')
            # Get number of header rows
            enc = utils.predict_encoding(path_file, n_lines=20)
            with open(path_file, 'r', encoding=enc) as f:
                n_header = utils.get_n_header(f)
                f.seek(0)
                meta = _read_meta_all(f, meta, n_header=n_header)
        return meta

    # Load meta data from YAML file if it already exists
    meta_yaml_path = os.path.join(path_dir, 'meta.yml')
    # Load file if exists else create
    if os.path.isfile(meta_yaml_path):
        meta = yamlord.read_yaml(meta_yaml_path)
    # Else create meta dictionary and save to YAML
    else:
        meta = _create_meta(path_dir, tag_model, tag_id)
        yamlord.write_yaml(meta, meta_yaml_path)
    return meta
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _merge_values(self, to_values, from_values): """Merges two dictionaries of values recursively. This is a very naive implementation that expects the two dictionaries to be fairly similar in structure. @param to_values destination dictionary @param from_values dictionary with values to copy """
if from_values is not None: for k, v in from_values.items(): if k in to_values and isinstance(to_values[k], dict): self._merge_values(to_values[k], v) # merge else: to_values[k] = v # replaces instead of merge return to_values
# <SYSTEM_TASK:>
# Solve the following problem using Python, implementing the functions described below, one line at a time
# <END_TASK>
# <USER_TASK:>
# Description:
def _load_config(self, path):
    """Return YAML values from given config file.

    @param path file to load
    """
    try:
        with open(path) as fh:
            parsed = yaml.safe_load(fh)
        # Only a mapping counts as a usable config document.
        if isinstance(parsed, dict):
            return parsed
        raise yaml.YAMLError('Unable to parse/load {}'.format(path))
    except (IOError, yaml.YAMLError) as err:
        if not self.ignore_errors:
            raise err
        # Errors are swallowed when ignore_errors is set.
        return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _normalize_file_paths(self, *args): """Returns all given configuration file paths as one list."""
paths = [] for arg in args: if arg is None: continue elif self._is_valid_file(arg): paths.append(arg) elif isinstance(arg, list) and all(self._is_valid_file(_) for _ in arg): paths = paths + arg elif not self.ignore_errors: raise TypeError('Config file paths must be string path or list of paths!') return paths
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_valid_file(self, path): """Simple check to see if file path exists. Does not check for valid YAML format."""
return isinstance(path, basestring) and os.path.isfile(path)
# <SYSTEM_TASK:>
# Solve the following problem using Python, implementing the functions described below, one line at a time
# <END_TASK>
# <USER_TASK:>
# Description:
def run_apidoc(_):
    """Helper function for running apidoc as part of the build.

    The single (ignored) argument is supplied by the hook that calls this.
    """
    docs_dir = os.path.abspath(os.path.dirname(__file__))
    source_dir = os.path.join(docs_dir, 'source')
    # Old-style virtualenvs set sys.real_prefix; there the console script may
    # not be on PATH, so assemble its absolute path manually.
    in_virtualenv = hasattr(sys, 'real_prefix')
    apidoc_cmd = (os.path.abspath(os.path.join(sys.prefix, 'bin', 'sphinx-apidoc'))
                  if in_virtualenv else 'sphinx-apidoc')
    main([apidoc_cmd, '-e', '-o', source_dir, '../cinder_data', '--force'])
# <SYSTEM_TASK:>
# Solve the following problem using Python, implementing the functions described below, one line at a time
# <END_TASK>
# <USER_TASK:>
# Description:
def columnize(items, width=None, file=sys.stdout):
    """ Smart display width handling when showing a list of stuff. """
    # Nothing to print for an empty sequence.
    if not items:
        return
    # Auto-detect terminal width only when writing to stdout; otherwise 80.
    if width is None:
        width = shutil.get_terminal_size()[0] if file is sys.stdout else 80
    rendered = [rendering.vtmlrender(entry) for entry in items]
    # Cell width is driven by the longest rendered entry plus two padding cols.
    cell_width = len(max(rendered, key=len)) + 2
    ncols = width // cell_width
    if ncols < 2:
        # Too narrow for multiple columns; print one entry per line.
        for entry in rendered:
            print(entry, file=file)
        return
    nrows = math.ceil(len(rendered) / ncols)
    # Column-major layout: row i takes every nrows-th entry starting at i.
    for row_idx in range(nrows):
        cells = rendered[row_idx::nrows]
        print(*(cell.ljust(cell_width) for cell in cells), sep='', file=file)
# <SYSTEM_TASK:>
# Solve the following problem using Python, implementing the functions described below, one line at a time
# <END_TASK>
# <USER_TASK:>
# Description:
def find_call(self, path, method):
    """Find callable for the specified URL path and HTTP method.

    Args:
        path (:obj:`str`): URL path to match
        method (:obj:`str`): HTTP method

    Note:
        A trailing '/' is always assumed in the path.
    """
    normalized = path if path.endswith('/') else path + '/'
    # Drop the empty segment before the leading '/'.
    segments = normalized.split('/')[1:]
    return self._recursive_route_match(self._routes, segments, method, [])
# <SYSTEM_TASK:>
# Solve the following problem using Python, implementing the functions described below, one line at a time
# <END_TASK>
# <USER_TASK:>
# Description:
def get_variables(self, missing_in_geno=None):
    """Extract the complete set of data based on missingness over all for the current locus.

    :param missing_in_geno: mask associated with missingness in genotype
    :return: (phenotypes, covariates, nonmissing used for this set of vars)
    :raises InvariantVar: when a covariate or the phenotype is constant over
        the usable rows
    """
    # NOTE(review): these two locals are never used below — candidates for
    # removal; confirm they are not kept for a reason elsewhere.
    count = 0
    mismatch = 0
    # Usable-row mask: invert the missing mask for the current index,
    # optionally also excluding rows flagged missing in the genotype data.
    if missing_in_geno is None:
        nonmissing = numpy.invert(self.missing[self.idx])
    else:
        nonmissing = numpy.invert(self.missing[self.idx] | missing_in_geno)
    # Number of usable (non-missing) samples.
    nmcount = sum(nonmissing)
    covars = numpy.zeros((self.covar_count, nmcount))
    for idx in range(0, self.covar_count):
        covars[idx] = self.covariates[idx][nonmissing]
        # NOTE: `min`/`max` shadow the builtins within this function.
        # Extremes are taken after excluding the missing-data sentinel.
        min = covars[idx][covars[idx] != pheno_covar.PhenoCovar.missing_encoding].min()
        max = covars[idx][covars[idx] != pheno_covar.PhenoCovar.missing_encoding].max()
        # A constant covariate carries no information — abort early.
        if min == max:
            raise InvariantVar("Covar %s doesn't have enough variation to continue" % (self.datasource.covariate_labels[idx]))
    # Same variation check for the phenotype itself (no sentinel filtering
    # here; presumably missing phenotypes are already excluded by the mask —
    # TODO confirm).
    min = self.phenotypes[self.idx][nonmissing].min()
    max = self.phenotypes[self.idx][nonmissing].max()
    if min == max:
        raise InvariantVar("Phenotype %s doesn't have enough variation to continue" % (self.datasource.phenotype_names[self.idx]))
    return (self.phenotypes[self.idx][nonmissing], covars, nonmissing)
# <SYSTEM_TASK:>
# Solve the following problem using Python, implementing the functions described below, one line at a time
# <END_TASK>
# <USER_TASK:>
# Description:
def member(self, user, objects=False):
    """ Returns a user as a dict of attributes """
    try:
        entry = self.search(uid=user, objects=objects)[0]
    except IndexError:
        # No matching user was found.
        return None
    # With objects=True the search result is returned as-is; otherwise the
    # attribute dict (second element of the result tuple) is returned.
    return entry if objects else entry[1]
# <SYSTEM_TASK:>
# Solve the following problem using Python, implementing the functions described below, one line at a time
# <END_TASK>
# <USER_TASK:>
# Description:
def drinkAdmins(self, objects=False):
    """ Returns a list of drink admins uids """
    # Thin wrapper around the generic group lookup for the 'drink' group.
    return self.group('drink', objects=objects)