text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def fill_in_table(self, table, worksheet, flags):
    '''
    Fills in any rows with missing right hand side data with empty cells.

    Pads every short row of ``table`` in place with ``None`` entries so that
    all rows end up with the length of the longest row.  ``worksheet`` and
    ``flags`` are accepted for interface compatibility but unused here.

    Replaces the previous ``sys.maxint`` min/max bookkeeping (Python-2 only)
    with a single max() pass; padding a row that is already full-length is a
    no-op, so the old "all rows equal" short-circuit is unnecessary.
    '''
    if not table:
        # Nothing to normalize; also avoids max() over an empty sequence.
        return
    # The longest row defines the target width for every other row.
    max_row = max(len(row) for row in table)
    for row in table:
        missing = max_row - len(row)
        if missing > 0:
            row.extend([None] * missing)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _find_valid_block(self, table, worksheet, flags, units, used_cells, start_pos, end_pos):
    '''
    Searches for the next location where a valid block could reside and
    constructs the block object representing that location.

    Scans the window bounded by ``start_pos``/``end_pos`` (inclusive) for the
    first unused, non-empty cell, asks ``_find_block_bounds`` for the extent
    of the block anchored there, and returns a ``TableBlock`` built on it.
    Falls through (returning ``None`` implicitly) when no valid block exists.
    '''
    for row_index in range(len(table)):
        # Skip rows outside the requested search window.
        if row_index < start_pos[0] or row_index > end_pos[0]:
            continue
        convRow = table[row_index]
        used_row = used_cells[row_index]
        for column_index, conv in enumerate(convRow):
            # Skip columns outside the window and cells already consumed
            # by a previously extracted block.
            if (column_index < start_pos[1] or column_index > end_pos[1] or
                    used_row[column_index]):
                continue
            # Is non empty cell?
            if not is_empty_cell(conv):
                block_start, block_end = self._find_block_bounds(
                    table, used_cells, (row_index, column_index),
                    start_pos, end_pos)
                # Only regions spanning more than one row AND more than one
                # column qualify as candidate blocks.
                if (block_end[0] > block_start[0] and
                        block_end[1] > block_start[1]):
                    try:
                        return TableBlock(table, used_cells, block_start,
                                          block_end, worksheet, flags, units,
                                          self.assume_complete_blocks,
                                          self.max_title_rows)
                    except InvalidBlockError:
                        # Bounds looked plausible but block construction
                        # rejected them; fall through and mark anchor used.
                        pass
                # Prevent infinite loops if something goes wrong
                used_cells[row_index][column_index] = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _find_block_bounds(self, table, used_cells, possible_block_start, start_pos, end_pos): ''' First walk the rows, checking for the farthest left column belonging to the block and the bottom most row belonging to the block. If a blank cell is hit and the column started with a blank cell or has length < self.blank_repeat_threshold, then restart one row to the right. Alternatively, if assume_complete_blocks has been set to true, any blank cell stops block detection. Then walk the columns until a column is reached which has blank cells down to the row which marked the as the row end from prior iteration. ''' # If we're only looking for complete blocks, then just walk # until we hit a blank cell if self.assume_complete_blocks: block_start, block_end = self._find_complete_block_bounds( table, used_cells, possible_block_start, start_pos, end_pos) # Otherwise do a smart, multi-pass approach to finding blocks # with potential missing fields else: block_start, block_end = self._find_block_start( table, used_cells, possible_block_start, start_pos, end_pos) block_start, block_end = self._find_block_end( table, used_cells, block_start, block_end, start_pos, end_pos) return block_start, block_end
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _single_length_title(self, table, row_index, current_col):
    '''
    Return True when row ``row_index`` holds a text cell at ``current_col``
    and nothing textual to its right.

    Useful for tracking pre-data titles that belong in their own block.
    '''
    row = table[row_index]
    if current_col >= len(row):
        # The row does not even reach this column.
        return False
    if not is_text_cell(row[current_col]):
        return False
    # Any additional text cell further right disqualifies the lone title.
    for later_col in xrange(current_col + 1, len(row)):
        if is_text_cell(row[later_col]):
            return False
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _find_block_start(self, table, used_cells, possible_block_start, start_pos, end_pos):
    '''
    Finds the start of a block from a suggested start location. This location
    can be at a lower column but not a lower row. The function traverses
    columns until it finds a stopping condition or a repeat condition that
    restarts on the next column. Note this also finds the lowest row of
    block_end.

    Returns the (block_start, block_end) corner positions as two-element
    lists; block_end[1] here only reflects the columns walked so far.
    '''
    current_col = possible_block_start[1]
    block_start = list(possible_block_start)
    block_end = list(possible_block_start)
    repeat = True
    checked_all = False
    # Repeat until we've met satisfactory conditions for catching all edge
    # cases or we've checked all valid block locations
    while not checked_all and repeat:
        block_end[0] = max(block_end[0], possible_block_start[0])
        block_end[1] = max(block_end[1], current_col)
        single_titled_block = True
        table_column = TableTranspose(table)[current_col]
        used_column = TableTranspose(used_cells)[current_col]
        # We need to find a non empty cell before we can stop
        blank_start = is_empty_cell(table_column[possible_block_start[0]])
        blank_exited = not blank_start
        # Unless we have assume_complete_blocks set to True
        if blank_start and self.assume_complete_blocks:
            # Found a blank? We're done
            repeat = False
            break
        #TODO refactor code below into new function for easier reading
        # Analyze the beginning columns
        for row_index in xrange(possible_block_start[0], end_pos[0] + 1):
            # Ensure we catch the edge case of the data reaching the edge of
            # the table -- block_end should then equal end_pos
            if blank_exited:
                block_end[0] = max(block_end[0], row_index)
            if row_index == end_pos[0] or used_column[row_index]:
                # We've gone through the whole range
                checked_all = True
                repeat = False
                break
            if not blank_exited:
                blank_exited = not is_empty_cell(table_column[row_index])
            if single_titled_block and not self._single_length_title(table, row_index, current_col):
                single_titled_block = False
                # If we saw single length titles for several more than
                # threshold rows, then we have a unique block before an
                # actual content block
                if self._above_blank_repeat_threshold(possible_block_start[0], row_index):
                    repeat = False
                    break
            # A blank in this column with data further right: restart the
            # walk one column to the right.
            if is_empty_cell(table_column[row_index]) and len(table[row_index]) > current_col + 1:
                current_col += 1
                break
            # Go find the left most column that's still valid
            table_row = table[row_index]
            used_row = used_cells[row_index]
            for column_index in range(current_col, start_pos[1] - 1, -1):
                if is_empty_cell(table_row[column_index]) or used_row[column_index]:
                    break
                else:
                    block_start[1] = min(block_start[1], column_index)
            # Check if we've seen few enough cells to guess that we have a
            # repeating title
            repeat = blank_start or self._below_blank_repeat_threshold(possible_block_start[0], row_index)
    return block_start, block_end
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def as_dict(self, join='.'): """ Returns the error as a path to message dictionary. Paths are joined with the ``join`` string. """
def as_dict(self, join='.'):
    """
    Returns the error as a single-entry ``{path: message}`` dictionary.

    Path components are stringified and glued together with ``join``; an
    error without a path maps the empty string to its message.
    """
    nodes = self.path
    if nodes:
        key = join.join(str(node) for node in nodes)
    else:
        key = ''
    return {key: self.message}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def as_dict(self, join='.'): """ Returns all the errors in this collection as a path to message dictionary. Paths are joined with the ``join`` string. """
def as_dict(self, join='.'):
    """
    Returns all the errors in this collection as one path-to-message
    dictionary, with paths joined by ``join``.

    When several errors share a path, the last one in ``self.errors`` wins,
    exactly as with repeated dict.update calls.
    """
    merged = {}
    for error in self.errors:
        for key, message in error.as_dict(join).items():
            merged[key] = message
    return merged
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_gffutils_db(f): """ Load database for gffutils. Parameters f : str Path to database. Returns ------- db : gffutils.FeatureDB gffutils feature database. """
def load_gffutils_db(f):
    """
    Open an existing gffutils database.

    Parameters
    ----------
    f : str
        Path to the database file.

    Returns
    -------
    gffutils.FeatureDB
        Handle onto the feature database.
    """
    import gffutils

    # keep_order preserves the attribute ordering from the source GTF.
    return gffutils.FeatureDB(f, keep_order=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_gffutils_db(gtf, db): """ Make database for gffutils. Parameters gtf : str Path to Gencode gtf file. db : str Path to save database to. Returns ------- out_db : gffutils.FeatureDB gffutils feature database. """
def make_gffutils_db(gtf, db):
    """
    Build a gffutils database from a Gencode GTF file.

    Parameters
    ----------
    gtf : str
        Path to the Gencode gtf file.
    db : str
        Path to save the database to.

    Returns
    -------
    gffutils.FeatureDB
        The freshly created feature database.
    """
    import gffutils

    # infer_gene_extent=False: Gencode already provides gene records.
    return gffutils.create_db(gtf, db, keep_order=True,
                              infer_gene_extent=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_bed_by_name(bt): """ Merge intervals in a bed file when the intervals have the same name. Intervals with the same name must be adjacent in the bed file. """
def merge_bed_by_name(bt):
    """
    Merge intervals in a bed file when the intervals have the same name.

    Intervals with the same name must be adjacent in the bed file.
    """
    # Group raw interval fields by name; insertion order mirrors the input.
    by_name = dict()
    for interval in bt:
        by_name.setdefault(interval.name, []).append(
            [interval.chrom, interval.start, interval.end,
             interval.name, interval.strand])
    merged_rows = []
    for name in by_name.keys():
        merged_rows.extend(_merge_interval_list(by_name[name]))
    text = '\n'.join('\t'.join(map(str, row)) for row in merged_rows)
    return pbt.BedTool(text + '\n', from_string=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_feature_bed(gtf, feature, out=None): """ Make a bed file with the start and stop coordinates for all of a particular feature in Gencode. Valid features are the features present in the third column of the Gencode GTF file. Parameters gtf : str Filename of the Gencode gtf file. feature : str Feature from third column of Gencode GTF file. As of v19, these include CDS, exon, gene, Selenocysteine, start_codon, stop_codon, transcript, and UTR. out : str If provided, the bed file will be written to a file with this name. Returns ------- bed : pybedtools.BedTool A sorted pybedtools BedTool object. """
    bed_lines = []
    with open(gtf) as f:
        line = f.readline().strip()
        while line != '':
            # Skip header/comment lines.
            if line[0] != '#':
                line = line.split('\t')
                if line[2] == feature:
                    chrom = line[0]
                    # GTF is 1-based; BED starts are 0-based.
                    start = str(int(line[3]) - 1)
                    end = line[4]
                    # The attribute column (9th field) is parsed positionally:
                    # gene_id is the first attribute for genes, the second
                    # attribute otherwise (e.g. transcript_id).
                    if feature == 'gene':
                        name = line[8].split(';')[0].split(' ')[1].strip('"')
                    else:
                        # TODO: I may want to have some smarter naming for
                        # things that aren't genes or transcripts.
                        name = line[8].split(';')[1].split(' ')[2].strip('"')
                    strand = line[6]
                    bed_lines.append('\t'.join([chrom, start, end, name, '.',
                                                strand]) + '\n')
            line = f.readline().strip()
    bt = pbt.BedTool(''.join(bed_lines), from_string=True)
    # We'll sort so bedtools operations can be done faster.
    bt = bt.sort()
    if out:
        bt.saveas(out)
    return bt
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_transcript_gene_se(fn): """ Make a Pandas Series with transcript ID's as the index and values as the gene ID containing that transcript. Parameters fn : str Filename of the Gencode gtf file. Returns ------- se : pandas.Series Make a Pandas Series with transcript ID's as the index and values as the gene ID containing that transcript. """
    import itertools as it
    import HTSeq
    # islice(..., None) simply wraps the reader in a plain iterator.
    gtf = it.islice(HTSeq.GFF_Reader(fn), None)
    transcripts = []
    genes = []
    # NOTE(review): an empty GTF would raise StopIteration on this first
    # .next() call (Python 2 iterator protocol) -- confirm inputs are
    # never empty.
    line = gtf.next()
    while line != '':
        if line.type == 'transcript':
            transcripts.append(line.attr['transcript_id'])
            genes.append(line.attr['gene_id'])
        try:
            line = gtf.next()
        except StopIteration:
            # '' is the end-of-file sentinel tested by the loop condition.
            line = ''
    return pd.Series(genes, index=transcripts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_gene_info_df(fn): """ Make a Pandas dataframe with gene information Parameters fn : str of filename Filename of the Gencode gtf file Returns ------- df : pandas.DataFrame Pandas dataframe indexed by gene id with the following columns: gene_type, gene_status, gene_name. """
    import itertools as it
    import HTSeq
    # islice(..., None) simply wraps the reader in a plain iterator.
    gff_iter = it.islice(HTSeq.GFF_Reader(fn), None)
    # gene_id -> [name, type, chrom, start, end, strand, status, source, level]
    convD = dict()
    eof = False
    while not eof:
        try:
            entry = gff_iter.next()  # Python 2 iterator protocol
            if entry.type == 'gene':
                convD[entry.attr['gene_id']] = [entry.attr['gene_name'],
                                                entry.attr['gene_type'],
                                                entry.iv.chrom,
                                                entry.iv.start,
                                                entry.iv.end,
                                                entry.iv.strand,
                                                entry.attr['gene_status'],
                                                entry.source,
                                                entry.attr['level']]
        except StopIteration:
            eof = True
    ind = ['gene_name', 'gene_type', 'chrom', 'start', 'end', 'strand',
           'gene_status', 'source', 'level']
    # Columns are gene ids while building; transpose so genes become rows.
    df = pd.DataFrame(convD, index=ind).T
    df.index.name = 'gene_id'
    return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_splice_junction_df(fn, type='gene'): """Read the Gencode gtf file and make a pandas dataframe describing the splice junctions Parameters filename : str of filename Filename of the Gencode gtf file Returns ------- df : pandas.DataFrame Dataframe of splice junctions with the following columns 'gene', 'chrom', 'start', 'end', 'strand', 'chrom:start', 'chrom:end', 'donor', 'acceptor', 'intron' """
    import itertools as it
    import HTSeq
    import numpy as np
    # GFF_Reader has an option for end_included. However, I think it is
    # backwards. So if your gtf is end-inclusive, you want the default
    # (end_included=False). With this, one will NOT be subtracted from the end
    # coordinate.
    gffI = it.islice(HTSeq.GFF_Reader(fn), None)
    juncL = []
    eof = False
    entry = gffI.next()  # Python 2 iterator protocol
    count = 1
    last_count = 1
    while not eof:
        if entry.type == 'transcript':
            exonL = []
            # NOTE(review): gene_id/strand are read from the record AFTER
            # the transcript line (normally its first exon, same gene) --
            # confirm this is intentional.
            entry = gffI.next()
            count += 1
            gene = entry.attr['gene_id']
            strand = entry.iv.strand
            # Collect this transcript's exons up to the next transcript.
            while not eof and entry.type != 'transcript':
                if entry.type == 'exon':
                    exonL.append(entry)
                try:
                    entry = gffI.next()
                    count += 1
                except StopIteration:
                    eof = True
            # The gencode gtf file has one based, end inclusive coordinates for
            # exons. HTSeq represents intervals as zero based, end exclusive.
            # We need one-based, end inclusive to compare with STAR output.
            if len(exonL) > 1:
                chrom = exonL[0].iv.chrom
                # On the minus strand, order of exons in gtf file is reversed.
                if strand == '-':
                    exonL.reverse()
                # We take the exclusive end of the exon intervals and add one to
                # make the one-based start of the intron.
                startL = [x.iv.end + 1 for x in exonL[:-1]]
                # The zero-based inclusive start of the exon is the one-based
                # inclusive end of the intron.
                endL = [x.iv.start for x in exonL[1:]]
                for i in range(len(startL)):
                    start = startL[i]
                    end = endL[i]
                    jxn = '{0}:{1}-{2}:{3}'.format(chrom, start, end, strand)
                    chrstart = '{}:{}'.format(chrom, start)
                    chrend = '{}:{}'.format(chrom, end)
                    donor = _gencode_donor(chrom, start, end, strand)
                    acceptor = _gencode_acceptor(chrom, start, end, strand)
                    intron = '{}:{}-{}'.format(chrom, start, end)
                    juncL.append([jxn, gene, chrom, str(start), str(end),
                                  strand, chrstart, chrend, donor, acceptor,
                                  intron])
        else:
            try:
                entry = gffI.next()
                count += 1
            except StopIteration:
                eof = True
            last_count += 1
    header = ['gene', 'chrom', 'start', 'end', 'strand', 'chrom:start',
              'chrom:end', 'donor', 'acceptor', 'intron']
    juncA = np.array(juncL)
    # First column (the junction string) becomes the index.
    df = pd.DataFrame(juncA[:, 1:], index=juncA[:, 0],
                      columns=header).drop_duplicates()
    df['start'] = df.start.astype(int)
    df['end'] = df.end.astype(int)
    return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def login_required(function=None, required=False, redirect_field_name=REDIRECT_FIELD_NAME): """ Decorator for views that, if required, checks that the user is logged in and redirect to the log-in page if necessary. """
    if required:
        # Django 1.11 turned User.is_authenticated from a method into a
        # property; pick the matching lambda.
        if django.VERSION < (1, 11):
            actual_decorator = user_passes_test(
                lambda u: u.is_authenticated(),
                redirect_field_name=redirect_field_name
            )
        else:
            actual_decorator = user_passes_test(
                lambda u: u.is_authenticated,
                redirect_field_name=redirect_field_name
            )
        if function:
            return actual_decorator(function)
        return actual_decorator
    # login not required
    def decorator(view_func):
        def _wrapper(request, *args, **kwargs):
            # NOTE(review): this calls ``function`` rather than ``view_func``
            # and is also returned when ``function`` was passed directly --
            # if ``function`` is None this crashes at call time; confirm the
            # intended no-op-decorator semantics.
            return function(request, *args, **kwargs)
        return wraps(function)(_wrapper)
    return method_decorator(decorator)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_table_existed(self, tablename): """ Check whether the given table name exists in this database. Return boolean. """
def is_table_existed(self, tablename):
    """
    Check whether the given table name exists in this database.

    The comparison uses the lower-cased name, matching how
    ``list_tables`` reports table names.

    Return boolean.
    """
    # Membership already yields the boolean; the previous
    # ``if ...: return True else: return False`` was redundant.
    return tablename.lower() in self.list_tables()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def drop_table(self, tablename, silent=False): """ Drop a table :Parameters: - tablename: string - silent: boolean. If false and the table doesn't exist an exception will be raised; otherwise it will be ignored :Return: Nothing """
def drop_table(self, tablename, silent=False):
    """
    Drop a table.

    :Parameters:

    - tablename: string
    - silent: boolean. When False a missing table raises MonSQLException;
      when True the existence check is skipped entirely.

    :Return: Nothing
    """
    # Short-circuit: the existence check only runs when not silent.
    if not (silent or self.is_table_existed(tablename)):
        raise MonSQLException('TABLE %s DOES NOT EXIST' %tablename)
    self.__cursor.execute('DROP TABLE IF EXISTS %s' %(tablename))
    self.__db.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calendar(ctx, date, agenda, year): """Show a 3-month calendar of meetups. \b date: The date around which the calendar is centered. May be: - YYYY-MM-DD, YY-MM-DD, YYYY-MM or YY-MM (e.g. 2015-08) - MM (e.g. 08): the given month in the current year - pN (e.g. p1): N-th last month - +N (e.g. +2): N-th next month - Omitted: today - YYYY: Show the entire year, as with -y """
    # -y flag forces a whole-year view; certain date formats also imply it.
    do_full_year = year
    today = ctx.obj['now'].date()
    db = ctx.obj['db']
    term = ctx.obj['term']
    date_info = cliutil.parse_date(date)
    if 'relative' in date_info:
        # pN / +N formats: offset from the current month.
        # NOTE(review): month may leave the 1..12 range here; presumably
        # get_calendar normalizes it -- confirm.
        year = today.year
        month = today.month + date_info['relative']
    elif 'date_based' in date_info:
        year = date_info.get('year', today.year)
        month = date_info.get('month', today.month)
        if 'month' not in date_info and 'day' not in date_info:
            # A bare YYYY means "show the entire year".
            do_full_year = True
    else:
        raise click.UsageError('Unknown date format')
    if agenda is None:
        # Default: agendas only for the 3-month view.
        agenda = not do_full_year
    if do_full_year:
        first_month = 1
        num_months = 12
    else:
        # Center the 3-month window on the requested month.
        first_month = month - 1
        num_months = 3
    calendar = get_calendar(db, year, first_month, num_months)
    cliutil.handle_raw_output(ctx, list(calendar.values()))
    render_calendar(term, calendar, today, agenda)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def register(self, obj):
    '''
    Register every public method of an object as a JSON-RPC method.

    obj - object whose non-underscore-prefixed callables become RPC methods
    '''
    for name in dir(obj):
        # Skip private and dunder attributes.
        if name.startswith('_'):
            continue
        member = getattr(obj, name)
        # Only callables are registered; plain attributes are ignored.
        if not hasattr(member, '__call__'):
            continue
        logging.debug('JSONRPC: Found Method: "%s"' % name)
        self._methods[name] = {
            'argspec': inspect.getargspec(member),
            'fct': member,
        }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _validate_format(req):
    '''
    Validate jsonrpc compliance of a jsonrpc-dict.

    req - the request as a jsonrpc-dict

    raises SLOJSONRPCError(-32600) on any structural violation
    '''
    # Every mandatory key must be present...
    for required_key in SLOJSONRPC._min_keys:
        if required_key not in req:
            logging.debug('JSONRPC: Fmt Error: Need key "%s"' % required_key)
            raise SLOJSONRPCError(-32600)
    # ...and nothing outside the allowed key set may appear.
    for present_key in req:
        if present_key not in SLOJSONRPC._allowed_keys:
            logging.debug('JSONRPC: Fmt Error: Not allowed key "%s"' % present_key)
            raise SLOJSONRPCError(-32600)
    # Only protocol version 2.0 is accepted.
    if req['jsonrpc'] != '2.0':
        logging.debug('JSONRPC: Fmt Error: "jsonrpc" needs to be "2.0"')
        raise SLOJSONRPCError(-32600)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _validate_params(self, req): ''' Validate parameters of a jsonrpc-request. req - request as a jsonrpc-dict raises SLOJSONRPCError on validation error ''' #does the method exist? method = req['method'] if not method in self._methods: raise SLOJSONRPCError(-32601) fct = self._methods[method]['fct'] #'id' is only needed for none SLOJSONRPCNotification's try: getattr(fct, '__SLOJSONRPCNotification__') if 'id' in req: logging.debug('JSONRPC: Fmt Error: no id for SLOJSONRPCNotifications') raise SLOJSONRPCError(-32602) except AttributeError: if not 'id' in req: logging.debug('JSONRPC: Fmt Error: Need an id for non SLOJSONRPCNotifications') raise SLOJSONRPCError(-32602) #get arguments and defaults for the python-function representing # the method argspec = self._methods[method]['argspec'] args, defaults = list(argspec.args), \ list(argspec.defaults if argspec.defaults else []) #ignore self and session if 'self' in args: args.remove('self') args.remove('session') #create required arguments. delete the ones with defaults required = list(args) if defaults: for default in defaults: required.pop() #check if we need paremeters and there are none, then error if len(required) > 0 and 'params' not in req: logging.debug('JSONRPC: Parameter Error: More than zero params required') raise SLOJSONRPCError(-32602) if 'params' in req: #parameters must be a dict if there is more then one if not isinstance(req['params'], dict) and len(required) > 1: logging.debug('JSONRPC: Parameter Error: "params" must be a dictionary') raise SLOJSONRPCError(-32602) if isinstance(req['params'], dict): #check if required parameters are there for key in required: if not key in req['params']: logging.debug('JSONRPC: Parameter Error: Required key "%s" is missing' % key) raise SLOJSONRPCError(-32602) #check if parameters are given that do not exist in the method for key in req['params']: if not key in required: logging.debug('JSONRPC: Parameter Error: Key is not allowed "%s"' % key) raise 
SLOJSONRPCError(-32602)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def handle_request(self, req, validate=True): ''' handle a jsonrpc request req - request as jsonrpc-dict validate - validate the request? (default: True) returns jsonrpc-dict with result or error ''' #result that will be filled and returned res = {'jsonrpc': '2.0', 'id': -1, 'result': None} logging.debug('') logging.debug('--------------------REQUEST' + '--------------------\n' + json.dumps(req, sort_keys=True, indent=4, separators=(',', ': '))) logging.debug('-----------------------------------------------') notification = False if self._sessionmaker: session = self._sessionmaker() try: #validate request if validate: self._validate_format(req) self._validate_params(req) method = req['method'] #check if request is a notification try: getattr(self._methods[method]['fct'], '__SLOJSONRPCNotification__') notification = True except AttributeError: notification = False #call the python function if 'params' in req: fct = self._methods[method]['fct'] if isinstance(req['params'], dict): req['params']['session'] = session res['result'] = fct(**req['params']) else: res['result'] = fct(session, req['params']) else: res['result'] = self._methods[method]['fct'](session) except SLOJSONRPCError as e: res = e.to_json(req.get('id', None)) except: logging.debug('Uncaught Exception:') logging.debug('-------------------\n' + traceback.format_exc()) res = SLOJSONRPCError(-32603).to_json(req.get('id', None)) session.close() logging.debug('--------------------RESULT' + '--------------------\n' + json.dumps(res, sort_keys=True, indent=4, separators=(',', ': '))) logging.debug('----------------------------------------------') #return None if a notification if notification: return None elif not 'error' in res: res['id'] = req['id'] return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def handle_string(self, strreq): ''' Handle a string representing a jsonrpc-request strreq - jsonrpc-request as a string returns jsonrpc-response as a string ''' #convert to jsonrpc-dict req = None try: req = json.loads(strreq) except: logging.debug('JSONRPC: Format Exception:') logging.debug('-----------------\n' + traceback.format_exc()) return json.dumps(SLOJSONRPCError(-32700).to_json()) #handle single request if isinstance(req, dict): return json.dumps(self.handle_request(req)) #handle multiple requests elif isinstance(req, list): for r in req: if not isinstance(r, dict): logging.debug('JSONRPC: Fmt Error: Item ' + '"%s" in request is no dictionary.' % str(r)) return json.dumps(SLOJSONRPCError(-32700).to_json()) try: self._validate_format(r) self._validate_params(r) except SLOJSONRPCError as e: return json.dumps(e.to_json(r.get('id', None))) res = [] for r in req: res.append(self.handle_request(r, validate=False)) return json.dumps(res) #invalid request else: return json.dumps(SLOJSONRPCError(-32700).to_json())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def report_calls(request):
    '''
    POST endpoint for APIs to report their statistics

    requires parameters: api, key, calls, date, endpoint & signature

    if 'api' or 'key' parameter is invalid returns a 404
    if signature is bad returns a 400
    returns a 200 with content 'OK' if call succeeds

    The previous ``try/except Exception: raise`` around get_or_create was a
    no-op (it only re-raised) and has been removed; behavior is unchanged.
    '''
    api_obj = get_object_or_404(Api, name=request.POST['api'])
    # check the signature
    if get_signature(request.POST, api_obj.signing_key) != request.POST['signature']:
        return HttpResponseBadRequest('bad signature')
    key_obj = get_object_or_404(Key, key=request.POST['key'])
    calls = int(request.POST['calls'])
    # use get_or_create to keep #calls unique per (date, api, key, endpoint);
    # a repeated report simply overwrites the stored count.
    report, created = Report.objects.get_or_create(
        date=request.POST['date'], api=api_obj, key=key_obj,
        endpoint=request.POST['endpoint'], defaults={'calls': calls})
    if not created:
        report.calls = calls
        report.save()
    return HttpResponse('OK')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def check_key(request):
    '''
    POST endpoint determining whether or not a key exists and is valid
    '''
    matches = list(Api.objects.filter(name=request.POST['api']))
    if not matches:
        return HttpResponseBadRequest('Must specify valid API')
    api = matches[0]
    # check the signature
    expected = get_signature(request.POST, api.signing_key)
    if expected != request.POST['signature']:
        return HttpResponseBadRequest('bad signature')
    # 404s unless the key exists and is active ('A').
    get_object_or_404(Key, key=request.POST['key'], status='A')
    return HttpResponse('OK')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def register(request,
             email_template='locksmith/registration_email.txt',
             registration_template=getattr(settings, 'LOCKSMITH_REGISTER_TEMPLATE',
                                           'locksmith/register.html'),
             registered_template=getattr(settings, 'LOCKSMITH_REGISTERED_TEMPLATE',
                                         'locksmith/registered.html'),
             ):
    '''
    API registration view

    displays/validates form and sends email on successful submission
    '''
    if request.method == 'POST':
        form = KeyForm(request.POST)
        if form.is_valid():
            # New keys start Unactivated ('U') with a random hex token;
            # the confirmation email flips them to Active later.
            newkey = form.save(commit=False)
            newkey.key = uuid.uuid4().hex
            newkey.status = 'U'
            newkey.save()
            send_key_email(newkey, email_template)
            return render_to_response(registered_template,
                                      {'key': newkey,
                                       'LOCKSMITH_BASE_TEMPLATE': settings.LOCKSMITH_BASE_TEMPLATE},
                                      context_instance=RequestContext(request))
        # Invalid POST falls through and re-renders with the bound form.
    else:
        form = KeyForm()
    return render_to_response(registration_template,
                              {'form': form,
                               'LOCKSMITH_BASE_TEMPLATE': settings.LOCKSMITH_BASE_TEMPLATE},
                              context_instance=RequestContext(request))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def confirm_registration(request, key, template="locksmith/confirmed.html"):
    '''
    API key confirmation

    visiting this URL marks a Key as ready for use
    '''
    context = {'LOCKSMITH_BASE_TEMPLATE': settings.LOCKSMITH_BASE_TEMPLATE}
    try:
        key_obj = Key.objects.get(key=key)
    except Key.DoesNotExist:
        context['error'] = 'Invalid Key'
    else:
        context['key'] = key_obj
        if key_obj.status != 'U':
            # Only Unactivated keys may be confirmed.
            context['error'] = 'Key Already Activated'
        else:
            key_obj.status = 'A'
            key_obj.save()
            key_obj.mark_for_update()
    return render_to_response(template, context,
                              context_instance=RequestContext(request))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def profile(request):
    '''
    Viewing of signup details and editing of password
    '''
    context = {}
    if request.method == 'POST':
        form = PasswordChangeForm(request.user, request.POST)
        if form.is_valid():
            form.save()
            messages.info(request, 'Password Changed.')
    else:
        form = PasswordChangeForm(request.user)
    # The API key is looked up by the logged-in user's email address.
    key = Key.objects.get(email=request.user.email)
    #analytics
    # Total calls per "api.endpoint", busiest first.
    endpoint_q = key.reports.values('api__name', 'endpoint').annotate(calls=Sum('calls')).order_by('-calls')
    endpoints = [{'endpoint': '.'.join((d['api__name'], d['endpoint'])),
                  'calls': d['calls']} for d in endpoint_q]
    # Total calls per day, chronological, for the timeline chart.
    date_q = key.reports.values('date').annotate(calls=Sum('calls')).order_by('date')
    context['endpoints'], context['endpoint_calls'] = _dictlist_to_lists(endpoints, 'endpoint', 'calls')
    context['timeline'] = date_q
    context['form'] = form
    context['key'] = key
    # True while the account still uses the auto-generated key as password.
    context['password_is_key'] = request.user.check_password(key.key)
    return render_to_response('locksmith/profile.html', context,
                              context_instance=RequestContext(request))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _dictlist_to_lists(dl, *keys):
    '''
    convert a list of dictionaries to a list of per-key value lists

    >>> dl = [{'a': 'test', 'b': 3}, {'a': 'zaz', 'b': 444}, {'a': 'wow', 'b': 300}]
    >>> _dictlist_to_lists(dl, 'a', 'b')
    [['test', 'zaz', 'wow'], [3, 444, 300]]
    '''
    # One output list per requested key.
    lists = []
    for k in keys:
        lists.append([])
    for item in dl:
        for i, key in enumerate(keys):
            x = item[key]
            # Normalize Python 2 unicode values to plain str.
            if isinstance(x, unicode):
                x = str(x)
            lists[i].append(x)
    return lists
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _cumulative_by_date(model, datefield):
    ''' Given a model and date field, generate monthly cumulative totals. '''
    # Tally how many objects fall in each (year, month) bucket.
    per_month = defaultdict(int)
    for instance in model.objects.all().order_by(datefield):
        when = getattr(instance, datefield)
        per_month[(when.year, when.month)] += 1
    if not per_month:
        return []
    # Walk every month between the first and last bucket (inclusive),
    # accumulating a running total; months with no objects contribute 0.
    running = 0
    totals = []
    for (year, month) in cycle_generator(cycle=(1, 12),
                                         begin=min(per_month),
                                         end=max(per_month)):
        running += per_month.get((year, month), 0)
        totals.append([datetime.date(year, month, 1), running])
    return totals
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def factory(codes, base=_Exception): """ Creates a custom exception class with arbitrary error codes and arguments. """
def factory(codes, base=_Exception):
    """Create a custom exception class carrying arbitrary error codes.

    :param codes: iterable of code names, or mapping of code name -> value
    :param base: parent class; must derive from this module's _Exception
    :returns: a new exception subclass with one class attribute per code
    :raises FactoryException: on an invalid base, codes container, or
        an attribute that cannot be set on a class
    """
    if not issubclass(base, _Exception):
        raise FactoryException(
            "Invalid class passed as parent: Must be a subclass of an Exception class created with this function",
            FactoryException.INVALID_EXCEPTION_CLASS,
            intended_parent=base)

    class Error(base):
        pass

    # A plain collection of names means each code's value is its own name.
    if isinstance(codes, (list, set, tuple, frozenset)):
        codes = dict((name, name) for name in codes)
    if not isinstance(codes, dict):
        raise FactoryException("Factory codes must be a dict str -> object",
                               FactoryException.INVALID_CODES_LIST,
                               intended_codes=codes)
    for name, value in codes.items():
        try:
            setattr(Error, name, value)
        except TypeError:
            raise FactoryException("Cannot set class attribute: (%r) -> (%r)" % (name, value),
                                   FactoryException.INVALID_CODE_VALUE,
                                   attribute=name, value=value)
    return Error
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self): """ Save the current instance to the DB """
def save(self):
    """ Save the current instance to the DB.

    Validates first; on success inserts (or replaces on id conflict) the
    document and returns self. A freshly generated id is rolled back to
    None if the insert fails, so a retry regenerates it.
    """
    with rconnect() as conn:
        try:
            self.validate()
        # Each expected failure type is logged and re-raised unchanged so
        # callers keep the original exception class.
        except ValidationError as e:
            log.warn(e.messages)
            raise
        except ModelValidationError as e:
            log.warn(e.messages)
            raise
        except ModelConversionError as e:
            log.warn(e.messages)
            raise
        except ValueError as e:
            log.warn(e)
            raise
        except FrinkError as e:
            log.warn(e.messages)
            raise
        except Exception as e:
            log.warn(e)
            raise
        else:
            # If this is a new unsaved object, it'll likely have an
            # id of None, which RethinkDB won't like. So if it's None,
            # generate a UUID for it. If the save fails, we should re-set
            # it to None.
            if self.id is None:
                self.id = str(uuid.uuid4())
                log.debug(self.id)
            try:
                query = r.db(self._db).table(self._table).insert(
                    self.to_primitive(),
                    conflict="replace"
                )
                log.debug(query)
                rv = query.run(conn)
                # Returns something like this:
                # {
                #     u'errors': 0,
                #     u'deleted': 0,
                #     u'generated_keys': [u'dd8ad1bc-8609-4484-b6c4-ed96c72c03f2'],
                #     u'unchanged': 0,
                #     u'skipped': 0,
                #     u'replaced': 0,
                #     u'inserted': 1
                # }
                log.debug(rv)
            except Exception as e:
                log.warn(e)
                # Roll back the generated id so a retry starts clean.
                self.id = None
                raise
            else:
                return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self): """ Delete the current instance from the DB. """
def delete(self):
    """Delete the current instance from the DB.

    :returns: True on success
    :raises FrinkError: if the instance has no id (never saved)
    """
    with rconnect() as conn:
        if self.id is None:
            # Can't delete an object without an ID.
            raise FrinkError("You can't delete an object with no ID")
        if isinstance(self.id, uuid.UUID):
            # RethinkDB primary keys are plain strings.
            self.id = str(self.id)
        try:
            query = r.db(self._db).table(self._table).get(self.id).delete()
            log.debug(query)
            query.run(conn)
        except Exception as e:
            log.warn(e)
            raise
        else:
            return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, id): """ Get a single instance by pk id. :param id: The UUID of the instance you want to retrieve. """
def get(self, id):
    """Fetch a single instance by primary key.

    :param id: UUID (or its string form) of the instance to retrieve
    :returns: a model instance, or None when no row matched
    :raises ValueError: if id is None or not a string/UUID
    """
    with rconnect() as conn:
        if id is None:
            raise ValueError
        if isinstance(id, uuid.UUID):
            id = str(id)
        # NOTE: the `unicode` check makes this Python-2 specific.
        if type(id) != str and type(id) != unicode:
            raise ValueError
        try:
            query = self._base().get(id)
            log.debug(query)
            row = query.run(conn)
        except ReqlOpFailedError as e:
            log.warn(e)
            raise
        except Exception as e:
            log.warn(e)
            raise
        return self._model(row) if row is not None else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter(self, order_by=None, limit=0, **kwargs): """ Fetch a list of instances. :param order_by: column on which to order the results. \ To change the sort, prepend with < or >. :param limit: How many rows to fetch. :param kwargs: keyword args on which to filter, column=value """
def filter(self, order_by=None, limit=0, **kwargs):
    """Fetch a list of instances matching column=value keyword filters.

    :param order_by: column to sort on; prepend < or > to change direction
    :param limit: maximum number of rows to fetch (0 means no limit)
    :param kwargs: column=value pairs to filter on (at least one required)
    :raises ValueError: when no filter kwargs are supplied
    """
    with rconnect() as conn:
        if len(kwargs) == 0:
            raise ValueError
        try:
            query = self._base().filter(kwargs)
            if order_by is not None:
                query = self._order_by(query, order_by)
            if limit > 0:
                query = self._limit(query, limit)
            log.debug(query)
            rows = query.run(conn)
        except ReqlOpFailedError as e:
            log.warn(e)
            raise
        except Exception as e:
            log.warn(e)
            raise
        else:
            return [self._model(row) for row in rows]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def all(self, order_by=None, limit=0): """ Fetch all items. :param limit: How many rows to fetch. :param order_by: column on which to order the results. \ To change the sort, prepend with < or >. """
def all(self, order_by=None, limit=0):
    """Fetch all items of this model's table.

    :param order_by: column to sort on; prepend < or > to change direction
    :param limit: maximum number of rows to fetch (0 means no limit)
    """
    with rconnect() as conn:
        try:
            query = self._base()
            if order_by is not None:
                query = self._order_by(query, order_by)
            if limit > 0:
                query = self._limit(query, limit)
            log.debug(query)
            rows = query.run(conn)
        except Exception as e:
            log.warn(e)
            raise
        else:
            return [self._model(row) for row in rows]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def opterate(func):
    '''A decorator for a main function entry point to a script.

    It automatically generates the options for the main entry point based
    on the arguments, keyword arguments, and docstring.

    All keyword arguments in the function definition are options.
    Positional arguments are mandatory arguments that store a string
    value. Varargs become a variable length (zero allowed) list of
    positional arguments. Varkwargs are currently ignored.

    The default value assigned to a keyword argument helps determine the
    type of option and action. The default value is assigned directly to
    the parser's default for that option. In addition, it determines the
    ArgumentParser action -- a default value of False implies store_true,
    while True implies store_false. If the default value is a list, the
    action is append (multiple instances of that option are permitted).
    Strings or None imply a store action.

    Options are further defined in the docstring. The top part of the
    docstring becomes the usage message for the app. Below that,
    ReST-style ":param:" lines in the following format describe each
    option::

        :param variable_name: -v --verbose the help_text for the variable
        :param variable_name: -v the help_text no long option
        :param variable_name: --verbose the help_text no short option

    Variable_name is the name of the variable in the function
    specification and must refer to a keyword argument. All options must
    have a :param: line like this. If you can have an arbitrary length of
    positional arguments, add a *arglist variable; it can be named with
    any valid python identifier.

    See opterator_test.py and examples/ for some examples.'''
    (
        positional_params,
        kw_params,
        varargs,
        defaults,
        annotations
    ) = portable_argspec(func)
    description = ''
    param_docs = {}
    if func.__doc__:
        # Everything before the first ":param" is the program description;
        # each remaining piece documents one parameter.
        param_doc = func.__doc__.split(':param')
        description = param_doc.pop(0).strip()
        for param in param_doc:
            param_args = param.split()
            # First token is "name:"; strip the trailing colon.
            variable_name = param_args.pop(0)[:-1]
            param_docs[variable_name] = param_args
    parser = ArgumentParser(description=description)
    # generate_options is a coroutine that invents option strings for
    # params that don't declare their own; prime it before first send().
    option_generator = generate_options()
    next(option_generator)
    for param in positional_params:
        parser.add_argument(param, help=" ".join(param_docs.get(param, [])))
    for param in kw_params:
        default = defaults[kw_params.index(param)]
        names = []
        param_doc = []
        if param in annotations:
            # Annotations may pre-declare the option strings.
            names = annotations[param]
        if param in param_docs:
            param_doc = param_docs.get(param, [])
            # Leading "-x"/"--xxx" tokens in the doc are option names;
            # whatever follows is the help text.
            while param_doc and param_doc[0].startswith('-'):
                names.append(param_doc.pop(0))
        names = names if names else option_generator.send(param)
        option_kwargs = {
            'action': 'store',
            'help': ' '.join(param_doc),
            'dest': param,
            'default': default
        }
        # The default's type/value selects the argparse action (see the
        # docstring above for the mapping).
        if default is False:
            option_kwargs['action'] = 'store_true'
        elif default is True:
            option_kwargs['action'] = 'store_false'
        elif type(default) in (list, tuple):
            if default:
                option_kwargs['choices'] = default
            else:
                option_kwargs['action'] = 'append'
        parser.add_argument(*names, **option_kwargs)
    if varargs:
        parser.add_argument(varargs, nargs='*')

    def wrapper(argv=None):
        # Parse argv (or sys.argv) and call the wrapped function with the
        # values laid out in its original parameter order.
        args = vars(parser.parse_args(argv))
        processed_args = [args[p] for p in positional_params + kw_params]
        if varargs:
            processed_args.extend(args[varargs])
        func(*processed_args)
    return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def show_setup(self): """ Provide a helper script for the user to setup completion. """
def show_setup(self):
    """Print a shell-completion setup script for the user's current shell.

    :raises SystemError: if $SHELL is unset or the shell is unsupported
    """
    shell_path = os.getenv('SHELL')
    if not shell_path:
        raise SystemError("No $SHELL env var found")
    shell = os.path.basename(shell_path)
    if shell not in self.script_body:
        raise SystemError("Unsupported shell: %s" % shell)
    tplvars = {
        # program name without its final sub-command token
        "prog": '-'.join(self.prog.split()[:-1]),
        "shell": shell,
        "name": self.name,
    }
    # Emit header, shell-specific body, then footer, each dedented.
    for template in (self.script_header,
                     self.script_body[shell],
                     self.script_footer):
        print(self.trim(template % tplvars))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def trim(self, text): """ Trim whitespace indentation from text. """
def trim(self, text):
    """Strip a uniform leading indent from `text` and drop blank lines.

    The indent width is measured from the first line, or the second line
    when the first is empty.
    """
    lines = text.splitlines()
    # If the text starts with a blank line, measure indent from line two.
    reference = lines[0] or lines[1]
    indent = len(reference) - len(reference.lstrip())
    kept = [line[indent:] for line in lines if line.strip()]
    return '\n'.join(kept)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sync_client(self): """Synchronous OAuth 2.0 Bearer client"""
def sync_client(self):
    """Synchronous OAuth 2.0 Bearer client (created lazily, then cached)."""
    client = self._sync_client
    if not client:
        cfg = self.config.get
        client = AlfSyncClient(token_endpoint=cfg('OAUTH_TOKEN_ENDPOINT'),
                               client_id=cfg('OAUTH_CLIENT_ID'),
                               client_secret=cfg('OAUTH_CLIENT_SECRET'))
        self._sync_client = client
    return client
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def async_client(self): """Asynchronous OAuth 2.0 Bearer client"""
def async_client(self):
    """Asynchronous OAuth 2.0 Bearer client (created lazily, then cached)."""
    client = self._async_client
    if not client:
        cfg = self.config.get
        client = AlfAsyncClient(token_endpoint=cfg('OAUTH_TOKEN_ENDPOINT'),
                                client_id=cfg('OAUTH_CLIENT_ID'),
                                client_secret=cfg('OAUTH_CLIENT_SECRET'))
        self._async_client = client
    return client
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_package_version(): """returns package version without importing it"""
def get_package_version():
    """Return the package version string without importing the package.

    Scans firepit/__init__.py for the first line matching the
    module-level ``version`` regex and reassembles the version tuple
    text "1, 2, 3" as "1.2.3".
    """
    here = os.path.abspath(os.path.dirname(__file__))
    init_path = os.path.join(here, "firepit/__init__.py")
    with open(init_path) as init_file:
        for raw_line in init_file:
            match = version.match(raw_line.strip())
            if match:
                parts = match.groups()[0].split(", ")
                return ".".join(parts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def recognise(self, string, line_num): """ Splits the string into chars and distributes these into the buckets of IPA and non-IPA symbols. Expects that there are no precomposed chars in the string. """
def recognise(self, string, line_num):
    """Split `string` into chars, bucketing each into IPA or non-IPA
    symbols and recording `line_num` against every occurrence.

    Expects no precomposed chars in the string; spaces are skipped.
    :returns: (tuple of Symbol, tuple of UnknownSymbol)
    """
    known = []
    unknown = []
    for char in string:
        if char == SPACE:
            continue
        try:
            name = unicodedata.name(char)
        except ValueError:
            # Some code points (e.g. control chars) carry no Unicode name.
            name = 'UNNAMED CHARACTER {}'.format(ord(char))
        if char in self.ipa:
            sym = Symbol(char, name, self.ipa[char])
            known.append(sym)
            self.ipa_symbols[sym].append(line_num)
        else:
            sym = UnknownSymbol(char, name)
            unknown.append(sym)
            self.unk_symbols[sym].append(line_num)
    return tuple(known), tuple(unknown)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def report(self, reporter): """ Adds the problems that have been found so far to the given Reporter instance. """
def report(self, reporter):
    """Add all unknown-symbol problems found so far to `reporter`."""
    for symbol in sorted(self.unk_symbols.keys()):
        message = '{} ({}) is not part of IPA'.format(symbol.char, symbol.name)
        if symbol.char in self.common_err:
            # Known common mistake: suggest the canonical replacement.
            replacement = self.common_err[symbol.char]
            message += ', suggested replacement is {}'.format(replacement)
            if len(replacement) == 1:
                message += ' ({})'.format(unicodedata.name(replacement))
        reporter.add(self.unk_symbols[symbol], message)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parsing(self): """Parameters for parsing directory trees"""
def parsing(self):
    """Parameters for parsing directory trees.

    Shows a form, then maps the positional `values` list into
    self.params['parse']; optionally chains into the save-file dialog.
    """
    with gui.FlexForm(self.title, auto_size_text=True, default_element_size=(40, 1)) as form:
        layout = [
            [gui.Text('Directory Paths utility', size=(30, 1), font=("Helvetica", 25), text_color='blue')],
            # Source
            [gui.Text('Source Folder', size=(15, 1), auto_size_text=False), gui.InputText('Source'),
             gui.FolderBrowse()],
            # Parallel / Sequential
            [gui.Text('Parallel or Sequential Processing. Larger directory trees are typically parsed faster '
                      'using parallel processing.')],
            [gui.Radio('Parallel Processing', "RADIO1"), gui.Radio('Sequential Processing', "RADIO1", default=True)],
            [_line()],
            # Files and non-empty-folders
            [gui.Text('Return files or folders, returning folders is useful for creating inventories.')],
            [gui.Radio('Return Files', "RADIO2", default=True), gui.Radio('Return Non-Empty Directories', "RADIO2")],
            [_line()],
            # max_level
            [gui.Text('Max Depth.... Max number of sub directory depths to traverse (starting directory is 0)')],
            [gui.InputCombo(list(reversed(range(0, 13))), size=(20, 3))],
            [_line()],
            # Relative and absolute
            [gui.Text('Relative or Absolute Paths. Relative paths are saved relative to the starting directory. '
                      'Absolute paths are saved as full paths.')],
            [gui.Radio('Relative Paths', "RADIO3", default=True), gui.Radio('Absolute Paths', "RADIO3")],
            [_line()],
            # Topdown and output
            [gui.Checkbox('Topdown Parse', default=True), gui.Checkbox('Live output results')],
            [_line()],
            # Save results to file
            [gui.Checkbox('Save Results to File', default=False)],
            [gui.Submit(), gui.Cancel()]]
        (button, (values)) = form.LayoutAndShow(layout)
        # gui.MsgBox(self.title, 'Parameters set', 'The results of the form are... \n',
        #            'The button clicked was "{}"'.format(button), 'The values are', values, auto_close=True)
        # NOTE: `values` is positional - every index below must stay in
        # sync with the widget order in `layout` above.
        self.params['parse'] = {
            'directory': values[0],
            'parallelize': values[1],
            'sequential': values[2],
            'yield_files': values[3],
            'non_empty_folders': values[4],
            'max_level': int(values[5]),
            '_relative': values[6],
            'full_paths': values[7],
            'topdown': values[8],
            'console_stream': values[9],
            'save_file': values[10],
        }
        # Chain into the save-file dialog when the user asked for a file.
        if self.params['parse']['save_file']:
            self._saving()
        return self.params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def source(self): """Parameters for saving zip backups"""
def source(self):
    """Prompt for the source folder of a zip backup.

    Returns the chosen folder path on Submit; exits the program on
    Cancel (or any other button).
    """
    with gui.FlexForm(self.title, auto_size_text=True, default_element_size=(40, 1)) as form:
        rows = [
            [gui.Text('Zip Backup utility', size=(30, 1), font=("Helvetica", 30), text_color='blue')],
            [gui.Text('Create a zip backup of a file or directory.', size=(50, 1), font=("Helvetica", 18),
                      text_color='black')],
            [gui.Text('-' * 200)],
            # Source folder picker row
            [gui.Text('Select source folder', size=(20, 1), font=("Helvetica", 25), auto_size_text=False),
             gui.InputText('', key='source', font=("Helvetica", 20)), gui.FolderBrowse()],
            [gui.Submit(), gui.Cancel()]]
        button, values = form.LayoutAndRead(rows)
        if button != 'Submit':
            exit()
        return values['source']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def contains(self, desired):
    '''Build and return a filter closure that tests whether `desired`
    occurs in a dictionary's value for this filter's field.'''
    field_name = self.__field

    def membership_filter(candidate):
        # True when the desired value appears in the candidate's field.
        return desired in candidate[field_name]

    return membership_filter
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def is_valid_file(path):
    '''Return True if `path` exists and is a regular file, else False.'''
    if not os.path.exists(path):
        return False
    return os.path.isfile(path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def is_valid_dir(path):
    '''Return True if `path` exists and is a directory, else False.'''
    if not os.path.exists(path):
        return False
    return os.path.isdir(path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def is_readable(path):
    '''Return True if `path` exists and is readable by the current user,
    else False.'''
    absolute = os.path.abspath(path)
    return os.access(absolute, os.R_OK)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(command, get_output=False, cwd=None): """By default, run all commands at GITPATH directory. If command fails, stop program execution. """
def run(command, get_output=False, cwd=None):
    """Run `command`, defaulting to the GITPATH directory, echoing a
    banner first. Stops program execution (via check_exit_code) when the
    command fails.

    :param get_output: when True, capture, print, and return stdout
    """
    if cwd is None:
        cwd = GITPATH
    # Banner so the log shows exactly what ran and where.
    cprint('===')
    cprint('=== Command: ', command)
    cprint('=== CWD: ', cwd)
    cprint('===')
    if not get_output:
        proc = sarge_run(command, cwd=cwd)
        check_exit_code(proc.returncode)
        return
    proc = capture_stdout(command, cwd=cwd)
    out = proc.stdout.read().decode()
    print(out, end='')
    check_exit_code(proc.returncode)
    return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_argument_parser(executable): """creates an argument parser from the given `executable` model. An argument '__xml__' for "--xml" is added independently. :param executable: CLI Model :type executable: clictk.model.Executable :return: """
def build_argument_parser(executable):
    """Create an ArgumentParser from the given `executable` CLI model.

    A "--xml" flag (dest ``__xml__``) is always added; every parameter of
    the model contributes one option built from its short/long flags.

    :param executable: CLI Model (iterable of parameters)
    :type executable: clictk.model.Executable
    :returns: configured argparse.ArgumentParser
    """
    parser = ArgumentParser()
    parser.add_argument("--xml", action="store_true", dest="__xml__",
                        help="show cli xml")
    for param in executable:
        flags = []
        if param.flag:
            flags.append("-%s" % param.flag)
        if param.longflag:
            flags.append("--%s" % param.longflag)
        # Fall back to a generic help string when no description is given.
        help_text = param.description.strip() or "parameter %s" % param.name
        parser.add_argument(*flags,
                            metavar=param.type.upper(),
                            dest=param.name,
                            action="store",
                            help=help_text)
    return parser
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_dict(self, d): """ Set this person from dict :param d: Dictionary representing a person ('sitting'[, 'id']) :type d: dict :rtype: Person :raises KeyError: 'sitting' not set """
def from_dict(self, d):
    """Populate this person from a dict with 'sitting' (required) and an
    optional 'id'; returns self for chaining.

    :raises KeyError: when 'sitting' is missing
    """
    self.sitting = d['sitting']  # mandatory - KeyError if absent
    self.id = d.get('id')        # optional, defaults to None
    return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_tuple(self, t): """ Set this person from tuple :param t: Tuple representing a person (sitting[, id]) :type t: (bool) | (bool, None | str | unicode | int) :rtype: Person """
def from_tuple(self, t):
    """Populate this person from a tuple; returns self for chaining.

    NOTE(review): with two elements this reads t[0] as id and t[1] as
    sitting, while the accompanying docstring documented (sitting[, id]);
    confirm the intended element order with callers.
    """
    if len(t) > 1:
        self.id, self.sitting = t[0], t[1]
    else:
        self.sitting, self.id = t[0], None
    return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def meta_bar_chart(series=None, N=20):
    "Each column in the series is a dict of dicts"
    # Plot the N most-returned models as paired log-scale bars:
    # units received vs. quantity in RA. Python 2 only (basestring,
    # series.values()[0]).
    if not series or isinstance(series, basestring):
        # No data supplied: load the bundled app metadata instead.
        series = json.load(load_app_meta)
    if isinstance(series, Mapping) and isinstance(series.values()[0], Mapping):
        # dict-of-dicts: pull the '# Received' column.
        rows_received = series['# Received'].items()
    elif isinstance(series, Mapping):
        rows_received = series.items()
    else:
        rows_received = list(series)
    #rows = sorted(rows, key=operator.itemgetter(1), reverse=True)
    # Sort by quantity, largest first.
    rows = sorted(rows_received, key=lambda x: x[1], reverse=True)
    received_names, received_qty = zip(*rows)
    # Quantity currently in RA for each model; missing/None counts as 0.
    ra_qty = [(series['Qty in RA'].get(name, 0.) or 0.) for name in received_names]
    # percent = [100. - 100. * (num or 0.) / (den or 1.) for num, den in zip(received_qty, ra_qty)]
    # only care about the top 30 model numbers in terms of quantity
    #ind = range(N)
    figs = []
    figs += [plt.figure()]
    ax = figs[-1].add_subplot(111)
    ax.set_ylabel('# Units Returned')
    ax.set_title('Most-Returned LCDTV Models 2013-present')
    x = np.arange(N)
    # Paired bars, log-scaled y axis; second set offset by the bar width.
    bars1 = ax.bar(x, received_qty[:N], color='b', width=.4, log=1)
    bars2 = ax.bar(x+.4, ra_qty[:N], color='g', width=.4, log=1)
    ax.set_xticks(range(N))
    ax.set_xticklabels(received_names[:N], rotation=35)
    ax.grid(True)
    # NOTE(review): legend labels appear swapped relative to the data
    # (bars1 plots received_qty but is labelled '# in RA') - confirm.
    ax.legend((bars1[0], bars2[0]), ('# in RA', '# Received'), 'center right')
    figs[-1].show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def index_with_dupes(values_list, unique_together=2, model_number_i=0, serial_number_i=1, verbosity=1): '''Create dict from values_list with first N values as a compound key. Default N (number of columns assumbed to be "unique_together") is 2. >>> index_with_dupes([(1,2,3), (5,6,7), (5,6,8), (2,1,3)]) == ({(1, 2): (1, 2, 3), (2, 1): (2, 1, 3), (5, 6): (5, 6, 7)}, {(5, 6): [(5, 6, 7), (5, 6, 8)]}) True ''' try: N = values_list.count() except: N = len(values_list) if verbosity > 0: print 'Indexing %d values_lists in a queryset or a sequence of Django model instances (database table rows).' % N index, dupes = {}, {} pbar = None if verbosity and N > min(1000000, max(0, 100000**(1./verbosity))): widgets = [pb.Counter(), '%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()] pbar = pb.ProgressBar(widgets=widgets, maxval=N).start() rownum = 0 for row in values_list: normalized_key = [str(row[model_number_i]).strip(), str(row[serial_number_i]).strip()] normalized_key += [i for i in range(unique_together) if i not in (serial_number_i, model_number_i)] normalized_key = tuple(normalized_key) if normalized_key in index: # need to add the first nondupe before we add the dupes to the list if normalized_key not in dupes: dupes[normalized_key] = [index[normalized_key]] dupes[normalized_key] = dupes[normalized_key] + [row] if verbosity > 2: print 'Duplicate "unique_together" tuple found. Here are all the rows that match this key:' print dupes[normalized_key] else: index[normalized_key] = row if pbar: pbar.update(rownum) rownum += 1 if pbar: pbar.finish() if verbosity > 0: print 'Found %d duplicate model-serial pairs in the %d records or %g%%' % (len(dupes), len(index), len(dupes)*100./(len(index) or 1.)) return index, dupes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def index_model_field_batches(model_or_queryset, key_fields=['model_number', 'serial_number'], value_fields=['pk'],
                              key_formatter=lambda x: str.lstrip(str.strip(str(x or '')), '0'),
                              value_formatter=lambda x: str.strip(str(x)),
                              batch_len=10000, limit=100000000, verbosity=1):
    '''Like index_model_field except uses 50x less memory and 10x more processing cycles

    Returns 2 dicts where both the keys and values are tuples:
        target_index = {(<key_fields[0]>, <key_fields[1]>, ...): (<value_fields[0]>,)}
            for all distinct model-serial pairs in the Sales queryset
        target_dupes = {(<key_fields[0]>, <key_fields[1]>, ...): [(<value_fields[1]>,), (<value_fields[2]>,), ...]}
            with all the duplicates except the first pk already listed above
    '''
    qs = djdb.get_queryset(model_or_queryset)
    N = qs.count()
    if verbosity > 0:
        print 'Indexing %d rows (database records) to aid in finding record %r values using the field %r.' % (N, value_fields, key_fields)
    index, dupes, rownum = {}, {}, 0
    pbar, rownum = None, 0
    # Show a progress bar only when the row count warrants it (threshold
    # shrinks as verbosity grows).
    if verbosity and N > min(1000000, max(0, 100000**(1./verbosity))):
        widgets = [pb.Counter(), '/%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(), ' ', pb.ETA()]
        pbar = pb.ProgressBar(widgets=widgets, maxval=N).start()
    # to determine the type of the field value and decide whether to strip() or normalize in any way
    #obj0 = qs.filter(**{field + '__isnull': False}).all()[0]
    value_fields = util.listify(value_fields)
    key_fields = util.listify(key_fields)
    # Stream the queryset in batches to cap memory usage.
    for batch in djdb.generate_queryset_batches(qs, batch_len=batch_len, verbosity=verbosity):
        for obj in batch:
            # print obj
            # normalize the key
            keys = []
            for kf in key_fields:
                k = getattr(obj, kf)
                keys += [key_formatter(k or '')]
            values = []
            keys = tuple(keys)
            for vf in value_fields:
                v = getattr(obj, vf)
                values += [value_formatter(v or '')]
            values = tuple(values)
            if keys in index:
                # Duplicate key: only the extra rows go into dupes; the
                # first occurrence stays in index (see docstring).
                dupes[keys] = dupes.get(keys, []) + [values]
            else:
                index[keys] = values
            # print rownum / float(N)
            if pbar:
                pbar.update(rownum)
            rownum += 1
            # NOTE(review): this break only exits the inner (per-batch)
            # loop; the outer batch loop keeps fetching - confirm whether
            # `limit` should abort the whole scan.
            if rownum >= limit:
                break
    if pbar:
        pbar.finish()
    if verbosity > 0:
        print 'Found %d duplicate %s values among the %d records or %g%%' % (len(dupes), key_fields, len(index), len(dupes)*100./(len(index) or 1.))
    return index, dupes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_index(model_meta, weights=None, verbosity=0): """Return a tuple of index metadata for the model metadata dict provided return value format is: ( field_name, { 'primary_key': boolean representing whether it's the primary key, 'unique': boolean representing whether it's a unique index }, score, ) """
def find_index(model_meta, weights=None, verbosity=0):
    """Return a tuple of index metadata for the model metadata dict provided

    return value format is:
        (
        field_name,
        {
            'primary_key': boolean representing whether it's the primary key,
            'unique': boolean representing whether it's a unique index
        },
        score,
        )

    NOTE(review): when an explicit primary key field exists this returns a
    one-entry dict instead of the documented 3-tuple - callers must handle
    both shapes; confirm which is intended.
    """
    weights = weights or find_index.default_weights
    N = model_meta['Meta'].get('count', 0)
    # First pass: an explicitly flagged primary key wins outright.
    for field_name, field_meta in model_meta.iteritems():
        if field_name == 'Meta':
            continue
        pkfield = field_meta.get('primary_key')
        if pkfield:
            if verbosity > 1:
                print pkfield
            # TODO: Allow more than one index per model/table
            return {field_name: {
                'primary_key': True,
                # Heuristic uniqueness: explicitly unique, or (with at
                # least 3 rows) at most one null and all values distinct.
                'unique': field_meta.get('unique') or (
                    N >= 3 and field_meta.get('num_null') <= 1 and field_meta.get('num_distinct') == N),
            }}
    # Second pass: score every field with the weights table and pick the
    # best candidate.
    # NOTE(review): this pass does not skip the 'Meta' entry - confirm
    # whether it should.
    score_names = []
    for field_name, field_meta in model_meta.iteritems():
        score = 0
        for feature, weight in weights:
            # for categorical features (strings), need to look for a particular value
            value = field_meta.get(feature)
            if isinstance(weight, tuple):
                # NOTE(review): `value in (float, int)` tests whether the
                # value IS the type object float/int, and if true,
                # `weight * value` multiplies a tuple - this branch looks
                # broken; confirm intended semantics.
                if value is not None and value in (float, int):
                    score += weight * value
                if callable(weight[1]):
                    score += weight[0] * weight[1](field_meta.get(feature))
                else:
                    score += weight[0] * (field_meta.get(feature) == weight[1])
            else:
                feature_value = field_meta.get(feature)
                if feature_value is not None:
                    score += weight * field_meta.get(feature)
        score_names += [(score, field_name)]
    # Highest (score, name) pair; ties break on the later field name.
    max_name = max(score_names)
    field_meta = model_meta[max_name[1]]
    return (
        max_name[1],
        {
            'primary_key': True,
            'unique': field_meta.get('unique') or (
                N >= 3 and field_meta.get('num_null') <= 1 and field_meta.get('num_distinct') == N),
        },
        max_name[0],
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count_unique(table, field=-1): """Use the Django ORM or collections.Counter to count unique values of a field in a table `table` is one of: 1. An iterable of Django model instances for a database table (e.g. a Django queryset) 2. An iterable of dicts or lists with elements accessed by row[field] where field can be an integer or string 3. An iterable of objects or namedtuples with elements accessed by `row.field` `field` can be any immutable object (the key or index in a row of the table that access the value to be counted) """
from collections import Counter # try/except only happens once, and fastest route (straight to db) tried first try: ans = {} for row in table.distinct().values(field).annotate(field_value_count=models.Count(field)): ans[row[field]] = row['field_value_count'] return ans except: try: return Counter(row[field] for row in table) except: try: return Counter(row.get(field, None) for row in table) except: try: return Counter(row.getattr(field, None) for row in table) except: pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def startGraph(self): """Starts RDF graph and binds namespaces"""
# Prefix -> namespace pairs, bound in this fixed order.
bindings = (
    ("rdf", r.namespace.RDF),
    ("foaf", r.namespace.FOAF),
    ("xsd", r.namespace.XSD),
    ("opa", "http://purl.org/socialparticipation/opa/"),
    ("ops", "http://purl.org/socialparticipation/ops/"),
    ("wsg", "http://www.w3.org/2003/01/geo/wgs84_pos#"),
    ("dc2", "http://purl.org/dc/elements/1.1/"),
    ("dc", "http://purl.org/dc/terms/"),
    ("sioc", "http://rdfs.org/sioc/ns#"),
    ("tsioc", "http://rdfs.org/sioc/types#"),
    ("schema", "http://schema.org/"),
    ("part", "http://participa.br/"),
)
graph = r.Graph()
for prefix, namespace in bindings:
    graph.namespace_manager.bind(prefix, namespace)
self.g = graph
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def triplifyPortalInfo(self): """Make triples with information about the portal. """
# Short local aliases for the namespace holder and the triple/literal helpers.
ns = self.P
triple = self.X.G
literal = self.X.L

# URI identifying the Participa.br portal itself.
portal_uri = ns.opa.ParticipationPortal + self.separator + "participabr"
triple(portal_uri, ns.rdf.type, ns.opa.ParticipationPortal)
triple(portal_uri, ns.opa.description, literal(DATA.portal_description, ns.xsd.string))
triple(portal_uri, ns.opa.url, literal("http://participa.br/", ns.xsd.string))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def triplifyOverallStructures(self): """Insert into RDF graph the textual and network structures. Ideally, one should be able to make bags of words related to each item (communities, users, posts, comments, tags, etc). Interaction and friendship networks should be made. Human networks mediated by co-occurrence (times of posts, geographical locations, vocabulary, etc) should be addressed as well. """
# Run each optional analysis stage when its flag is set; network
# extraction runs before the bag-of-words pass, as before.
stages = (
    (self.compute_networks, self.computeNetworks),
    (self.compute_bows, self.computeBows),
)
for enabled, stage in stages:
    if enabled:
        stage()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def db_create(): """Create the database"""
# Put the database under sqlalchemy-migrate version control, then apply
# all pending migrations.  db_upgrade() stays inside the try block so an
# already-controlled error is reported identically wherever it surfaces.
try:
    migrate_api.version_control(url=db_url, repository=db_repo)
    db_upgrade()
except DatabaseAlreadyControlledError:
    # The migrate repository was initialised previously; nothing to create.
    print 'ERROR: Database is already version controlled.'
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def db_downgrade(version): """Downgrade the database"""
v1 = get_db_version() migrate_api.downgrade(url=db_url, repository=db_repo, version=version) v2 = get_db_version() if v1 == v2: print 'No changes made.' else: print 'Downgraded: %s ... %s' % (v1, v2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def db_upgrade(version=None): """Upgrade the database"""
v1 = get_db_version() migrate_api.upgrade(url=db_url, repository=db_repo, version=version) v2 = get_db_version() if v1 == v2: print 'Database already up-to-date.' else: print 'Upgraded: %s ... %s' % (v1, v2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install_npm_modules(): """Uses npm to install the dependencies listed in node.json"""
# This is a little weird, but we do it this way because if you have
# package.json, then heroku thinks this might be a node.js app.  So
# package.json only exists for the duration of the npm run.
for command in ('cp node.json package.json',
                'npm install',
                'rm package.json'):
    call_command(command, verbose=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def defer( self, func: typing.Callable[[], typing.Any], until: typing.Union[int, float]=-1, ) -> typing.Any: """Defer the execution of a function until some clock value. Args: func (typing.Callable[[], typing.Any]): A callable that accepts no arguments. All return values are ignored. until (typing.Union[int, float]): A numeric value that represents the clock time when the callback becomes available for execution. Values that are less than the current time result in the function being called at the next opportunity. Returns: typing.Any: An opaque identifier that represents the callback uniquely within the processor. This identifier is used to modify the callback scheduling. Note: The time given should not be considered absolute. It represents the time when the callback becomes available to execute. It may be much later than the given time value when the function actually executes depending on the implementation. """
# Abstract hook: concrete processor implementations decide how deferred
# callbacks are stored and scheduled.
raise NotImplementedError()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delay( self, identifier: typing.Any, until: typing.Union[int, float]=-1, ) -> bool: """Delay a deferred function until the given time. Args: identifier (typing.Any): The identifier returned from a call to defer or defer_for. until (typing.Union[int, float]): A numeric value that represents the clock time when the callback becomes available for execution. Values that are less than the current time result in the function being called at the next opportunity. Returns: bool: True if the call is delayed. False if the identifier is invalid or if the deferred call is already executed. """
# Abstract hook: concrete processor implementations decide how a queued
# callback is rescheduled.
raise NotImplementedError()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_email(self, to): """ Do work. """
# Render the message parts from the object this mixin is attached to.
body = self.body()
subject = self.subject()

import letter

# `letter` builds and sends a message from a declarative class body,
# so the class attributes below *are* the message definition.
class Message(letter.Letter):
    Postie = letter.DjangoPostman()

    From = getattr(settings, 'DEFAULT_FROM_EMAIL', 'contact@example.com')
    To = to
    Subject = subject

    Body = body

# Optional Reply-To header, only when the host object provides one.
if hasattr(self, 'reply_to'):
    Message.ReplyTo = self.reply_to()

Message.send()
return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def form_valid(self, form): """ Praise be, someone has spammed us. """
# Deliver the submitted form by email, then defer to the default
# FormView success handling (redirect to success_url).
form.send_email(to=self.to_addr)
return super(EmailView, self).form_valid(form)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_model(self, model_cls): """Decorator for registering model."""
if not getattr(model_cls, '_database_'): raise ModelAttributeError('_database_ missing ' 'on %s!' % model_cls.__name__) if not getattr(model_cls, '_collection_'): raise ModelAttributeError('_collection_ missing ' 'on %s!' % model_cls.__name__) model_cls._mongo_client_ = self.client logging.info('Registering Model ' + model_cls.__name__) return model_cls
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enable_counter(self, base=None, database='counter', collection='counters'): """Register the builtin counter model, return the registered Counter class and the corresponding ``CounterMixin`` class. The ``CounterMixin`` automatically increases and decreases the counter after model creation(save without ``_id``) and deletion. It contains a classmethod ``count()`` which returns the current count of the model collection."""
# Point the builtin Counter model at the requested database/collection,
# then register a *fresh subclass* (via type()) so repeated calls with
# different bases do not mutate one shared class.
Counter._database_ = database
Counter._collection_ = collection
bases = (base, Counter) if base else (Counter,)
counter = self.register_model(type('Counter', bases, {}))

class CounterMixin(object):
    """Mixin class for model"""

    @classmethod
    def inc_counter(cls):
        """Wrapper for ``Counter.increase()``."""
        return counter.increase(cls._collection_)

    @classmethod
    def dec_counter(cls):
        """Wrapper for ``Counter.decrease()``."""
        return counter.decrease(cls._collection_)

    @classmethod
    def chg_counter(cls, *args, **kwargs):
        """Wrapper for ``Counter.change_by()``."""
        return counter.change_by(cls._collection_, *args, **kwargs)

    @classmethod
    def set_counter(cls, *args, **kwargs):
        """Wrapper for ``Counter.set_to()``."""
        return counter.set_to(cls._collection_, *args, **kwargs)

    def on_save(self, old_dict):
        # Only a save without a pre-existing _id is a *creation*, so
        # only that case bumps the counter.
        super(CounterMixin, self).on_save(old_dict)
        if not old_dict.get('_id'):
            counter.increase(self._collection_)

    def on_delete(self, *args, **kwargs):
        super(CounterMixin, self).on_delete(*args, **kwargs)
        counter.decrease(self._collection_)

    @classmethod
    def count(cls):
        """Return the current count of this collection."""
        return counter.count(cls._collection_)

logging.info('Counter enabled on: %s' % counter.collection)
return counter, CounterMixin
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def by_id(cls, oid): """Find a model object by its ``ObjectId``, ``oid`` can be string or ObjectId"""
if oid: d = cls.collection.find_one(ObjectId(oid)) if d: return cls(**d)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_dict(cls, d): """Build model object from a dict. Will be removed in v1.0"""
warnings.warn( 'from_dict is deprecated and will be removed in v1.0!', stacklevel=2) d = d or {} return cls(**d)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(cls, *args, **kwargs): """Same as ``collection.find``, returns model object instead of dict."""
return cls.from_cursor(cls.collection.find(*args, **kwargs))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_one(cls, *args, **kwargs): """Same as ``collection.find_one``, returns model object instead of dict."""
d = cls.collection.find_one(*args, **kwargs) if d: return cls(**d)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reload(self, d=None): """Reload model from given dict or database."""
if d: self.clear() self.update(d) elif self.id: new_dict = self.by_id(self._id) self.clear() self.update(new_dict) else: # should I raise an exception here? # Like "Model must be saved first." pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self): """Save model object to database."""
d = dict(self) old_dict = d.copy() _id = self.collection.save(d) self._id = _id self.on_save(old_dict) return self._id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self): """Remove from database."""
# Unsaved objects have nothing to remove.
if self.id:
    self.collection.remove({'_id': self._id})
    self.on_delete(self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_to(cls, name, num): """Set counter of ``name`` to ``num``."""
if num < 0: raise CounterValueError('Counter[%s] can not be set to %s' % ( name, num)) else: counter = cls.collection.find_and_modify( {'name': name}, {'$set': {'seq': num}}, new=True, upsert=True ) return counter['seq']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count(cls, name): """Return the count of ``name``"""
counter = cls.collection.find_one({'name': name}) or {} return counter.get('seq', 0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def geo_filter(d): """Inspects the given Wikipedia article dict for geo-coordinates. If no coordinates are found, returns None. Otherwise, returns a new dict with the title and URL of the original article, along with coordinates."""
page = d["page"] if not "revision" in page: return None title = page["title"] if skip_article(title): LOG.info("Skipping low-value article %s", title) return None text = page["revision"]["text"] if not utils.is_str_type(text): if "#text" in text: text = text["#text"] else: return None LOG.debug("--------------------------------------------------------------") LOG.debug(title) LOG.debug("--------------------------------------------------------------") LOG.debug(text) c = find_geo_coords(text) u = wikip_url(title) """ m = hashlib.md5() m.update(u.encode("UTF-8") if hasattr(u, 'encode') else u) i = base64.urlsafe_b64encode(m.digest()).replace('=', '') """ return { #"id": i, "title": title, "url": u, "coords": c, "updated": page["revision"].get("timestamp") } if c else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def depipe(s): """Convert a string of the form DD or DD|MM or DD|MM|SS to decimal degrees"""
n = 0 for i in reversed(s.split('|')): n = n / 60.0 + float(i) return n
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def skip_coords(c): """Skip coordinate strings that are not valid"""
if c == "{{coord|LAT|LONG|display=inline,title}}": # Unpopulated coord template return True if c.find("globe:") >= 0 and c.find("globe:earth") == -1: # Moon, venus, etc. return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iter_output(self, pause=0.05): """ Returns iterator of chunked output. :param cmd: command that would be passed to ``subprocess.Popen`` :param shell: Tells if process should be run within a shell. Default: False :param timeout: If command exceeds given ``timeout`` in seconds, ``TimeoutExceeded`` exception would be raised. Default: None :param pause: How long (in seconds) we should wait for the output. Default: 0.05 Example:: """
with self.stream as temp: for chunk in self.iter_output_for_stream(temp, pause=pause): yield chunk
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def terminate(self): """ Tells the logger process to exit immediately. If you do not call 'flush' method before, you may lose some messages of progresses that have not been displayed yet. This method blocks until logger process has stopped. """
# Signal the worker loop to exit, then block until it has stopped.
payload = dill.dumps(ExitCommand())
self.queue.put(payload)
if self.process:
    self.process.join()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_mail(self): """Generate the email as MIMEText """
# Script info msg = "Script info : \r\n" msg = msg + "%-9s: %s" % ('Script', SOURCEDIR) + "\r\n" msg = msg + "%-9s: %s" % ('User', USER) + "\r\n" msg = msg + "%-9s: %s" % ('Host', HOST) + "\r\n" msg = msg + "%-9s: %s" % ('PID', PID) + "\r\n" # Current trace msg = msg + "\r\nCurrent trace : \r\n" for record in self.current_buffer: msg = msg + record + "\r\n" # Now add stack trace msg = msg + "\r\nFull trace : \r\n" for record in self.complete_buffer: msg = msg + record + "\r\n" # Dump ENV msg = msg + "\r\nEnvironment:" + "\r\n" environ = OrderedDict(sorted(os.environ.items())) for name, value in environ.items(): msg = msg + "%-10s = %s\r\n" % (name, value) if USE_MIME: real_msg = MIMEText(msg, _charset='utf-8') real_msg['Subject'] = self.get_subject() real_msg['To'] = ','.join(self.toaddrs) real_msg['From'] = self.fromaddr else: real_msg = EmailMessage() real_msg['Subject'] = self.get_subject() real_msg['To'] = ','.join(self.toaddrs) real_msg['From'] = self.fromaddr real_msg.set_content(msg) return real_msg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_subject(self): """Generate the subject."""
level = logging.getLevelName(self.flush_level) message = self.current_buffer[0].split("\n")[0] message = message.split(']')[-1] return '{0} : {1}{2}'.format(level, SOURCE, message)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_query_result(self, query_result, query_path, return_type=list, preceding_depth=None): """ Formats the query result based on the return type requested. :param query_result: (dict or str or list), yaml query result :param query_path: (str, list(str)), representing query path :param return_type: type, return type of object user desires :param preceding_depth: int, the depth to which we want to encapsulate back up config tree -1 : defaults to entire tree :return: (dict, OrderedDict, str, list), specified return type """
if type(query_result) != return_type: converted_result = self.format_with_handler(query_result, return_type) else: converted_result = query_result converted_result = self.add_preceding_dict(converted_result, query_path, preceding_depth) return converted_result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_with_handler(self, query_result, return_type): """ Uses the callable handler to format the query result to the desired return type :param query_result: the result value of our query :param return_type: desired return type :return: type, the query value as the return type requested """
handler = self.get_handler(type(query_result), return_type) return handler.format_result(query_result)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_handler(query_result_type, return_type): """ Find the appropriate return type handler to convert the query result to the desired return type :param query_result_type: type, desired return type :param return_type: type, actual return type :return: callable, function that will handle the conversion """
try: return FormatterRegistry.get_by_take_and_return_type(query_result_type, return_type) except (IndexError, AttributeError, KeyError): raise IndexError( 'Could not find function in conversion list for input type %s and return type %s' % ( query_result_type, return_type))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_preceding_dict(config_entry, query_path, preceding_depth): """ Adds the preceding config keys to the config_entry to simulate the original full path to the config entry :param config_entry: object, the entry that was requested and returned from the config :param query_path: (str, list(str)), the original path to the config_entry :param preceding_depth: int, the depth to which we are recreating the preceding config keys :return: dict, simulated config to n * preceding_depth """
if preceding_depth is None: return config_entry preceding_dict = {query_path[-1]: config_entry} path_length_minus_query_pos = len(query_path) - 1 preceding_depth = path_length_minus_query_pos - preceding_depth if preceding_depth != -1 else 0 for index in reversed(range(preceding_depth, path_length_minus_query_pos)): preceding_dict = {query_path[index]: preceding_dict} return preceding_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rebuild_config_cache(self, config_filepath): """ Loads from file and caches all data from the config file in the form of an OrderedDict to self.data :param config_filepath: str, the full filepath to the config file :return: bool, success status """
# Raises IOError early when the path is not a usable YAML config.
self.validate_config_file(config_filepath)
config_data = None
try:
    with open(config_filepath, 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags on untrusted input; consider yaml.safe_load --
        # TODO confirm no custom tags are relied upon before changing.
        config_data = yaml.load(f)
        # Mapping case: collect (key, value) pairs.
        items = list(iteritems(config_data))
except AttributeError:
    # Non-mapping top level (e.g. a list): iteritems() raised, so fall
    # back to treating the parsed document itself as the item sequence.
    items = list(config_data)
self.config_file_contents = OrderedDict(sorted(items, key=lambda x: x[0], reverse=True))
self.config_filepath = config_filepath
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, query_path=None, return_type=list, preceding_depth=None, throw_null_return_error=False): """ Traverses the list of query paths to find the data requested :param query_path: (list(str), str), list of query path branches or query string Default behavior: returns list(str) of possible config headers :param return_type: (list, str, dict, OrderedDict), desired return type for the data :param preceding_depth: int, returns a dictionary containing the data that traces back up the path for x depth -1: for the full traversal back up the path None: is default for no traversal :param throw_null_return_error: bool, whether or not to throw an error if we get an empty result but no error :return: (list, str, dict, OrderedDict), the type specified from return_type :raises: exceptions.ResourceNotFoundError: if the query path is invalid """
function_type_lookup = {str: self._get_path_entry_from_string, list: self._get_path_entry_from_list} if query_path is None: return self._default_config(return_type) try: config_entry = function_type_lookup.get(type(query_path), str)(query_path) query_result = self.config_entry_handler.format_query_result(config_entry, query_path, return_type=return_type, preceding_depth=preceding_depth) return query_result except IndexError: return return_type()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_path_entry_from_string(self, query_string, first_found=True, full_path=False): """ Parses a string to form a list of strings that represents a possible config entry header :param query_string: str, query string we are looking for :param first_found: bool, return first found entry or entire list :param full_path: bool, whether to return each entry with their corresponding config entry path :return: (Generator((list, str, dict, OrderedDict)), config entries that match the query string :raises: exceptions.ResourceNotFoundError """
iter_matches = gen_dict_key_matches(query_string, self.config_file_contents, full_path=full_path) try: return next(iter_matches) if first_found else iter_matches except (StopIteration, TypeError): raise errors.ResourceNotFoundError('Could not find search string %s in the config file contents %s' % (query_string, self.config_file_contents))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_path_entry_from_list(self, query_path): """ Returns the config entry at query path :param query_path: list(str), config header path to follow for entry :return: (list, str, dict, OrderedDict), config entry requested :raises: exceptions.ResourceNotFoundError """
cur_data = self.config_file_contents try: for child in query_path: cur_data = cur_data[child] return cur_data except (AttributeError, KeyError): raise errors.ResourceNotFoundError('Could not find query path %s in the config file contents' % query_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_config_file(cls, config_filepath): """ Validates the filepath to the config. Detects whether it is a true YAML file + existance :param config_filepath: str, file path to the config file to query :return: None :raises: IOError """
is_file = os.path.isfile(config_filepath)
# NOTE(review): a *relative* path that does not exist falls through both
# branches and passes validation silently -- presumably unintended;
# confirm against callers before tightening.
if not is_file and os.path.isabs(config_filepath):
    raise IOError('File path %s is not a valid yml, ini or cfg file or does not exist' % config_filepath)
elif is_file:
    if os.path.getsize(config_filepath) == 0:
        raise IOError('File %s is empty' % config_filepath)
    with open(config_filepath, 'r') as f:
        # yaml.load returns None for an all-comment/whitespace document.
        # NOTE(review): yaml.load without a Loader runs arbitrary tags on
        # untrusted input; consider yaml.safe_load -- TODO confirm.
        if yaml.load(f) is None:
            raise IOError('No YAML config was found in file %s' % config_filepath)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_it(cls): """ Performs the import only once. """
if not cls in cls._FEATURES: try: cls._FEATURES[cls] = cls._import_it() except ImportError: raise cls.Error(cls._import_error_message(), cls.Error.UNSATISFIED_IMPORT_REQ) return cls._FEATURES[cls]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(author, kind): """ Attempts to read the cache to fetch missing arguments. This method will attempt to find a '.license' file in the 'CACHE_DIRECTORY', to read any arguments that were not passed to the license utility. Arguments: author (str): The author passed, if any. kind (str): The kind of license passed, if any. Throws: LicenseError, if there was a cache miss or I/O error. """
if not os.path.exists(CACHE_PATH): raise LicenseError('No cache found. You must ' 'supply at least -a and -k.') cache = read_cache() if author is None: author = read_author(cache) if kind is None: kind = read_kind(cache) return author, kind
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def verify_ticket_signature(self, data, sig): """Verify ticket signature. """
# Decode the base64 signature; malformed input fails verification.
# NOTE(review): hasattr(self, "debug") tests attribute *presence*, not
# truthiness -- a debug=False attribute still enables the prints; confirm.
try:
    signature = base64.b64decode(sig)
except TypeError as e:
    if hasattr(self, "debug"):
        print("Exception in function base64.b64decode. File %s" % (__file__))
        print("%s" % e)
    return False
if six.PY3:
    # Avoid "TypeError: Unicode-objects must be encoded before hashing".
    data = data.encode('utf-8')
# Both key types verify against the SHA-1 digest of the ticket data.
digest = hashlib.sha1(data).digest()
if isinstance(self.pub_key, RSA.RSA_pub):
    # M2Crypto RSA: verify() raises RSAError on a bad signature.
    try:
        self.pub_key.verify(digest, signature, 'sha1')
    except RSA.RSAError:
        return False
    return True
if isinstance(self.pub_key, DSA.DSA_pub):
    # M2Crypto DSA: verify_asn1() returns the verification result itself.
    try:
        return self.pub_key.verify_asn1(digest, signature)
    except DSA.DSAError as e:
        if hasattr(self, "debug"):
            print("Exception in function self.pub_key.verify_asn1(digest, signature). File %s" % (__file__))
            print("%s" % e)
        return False
# Unknown key type
return False